file_name (string, length 4–140) | prefix (string, length 0–39k) | suffix (string, length 0–36.1k) | middle (string, length 0–29.4k) | fim_type (string, 4 classes)
---|---|---|---|---|
extension.ts | // The module 'vscode' contains the VS Code extensibility API
// Import the module and reference it with the alias vscode in your code below
import * as vscode from 'vscode';
import { StructCommandManager } from './struct_command_manager';
import { EditCommandManager } from './edit_command_manager';
import { runTestCasesForC, runTestCasesForPy, test_function } from './tester';
import { runEditTests } from './edit_tester';
import { getUserSpecs } from './user_specs';
const {spawn} = require('child_process');
var code_segments = [""];
var cursor_pos = 0;
var count_lines = [0];
var count_speech = [0];
var manager: StructCommandManager;
var editManager: EditCommandManager;
var microphone = true;
var codeBuffer = "";
var errorFlag = false;
var language = "";
var cwd = "";
var ast_cwd = "";
var cred = "";
var datatypes = ["int", "float", "long", "double", "char"]; |
// this method is called when your extension is activated
// your extension is activated the very first time the command is executed
export function activate(context: vscode.ExtensionContext) {
// Use the console to output diagnostic information (console.log) and errors (console.error)
// This line of code will only be executed once when your extension is activated
console.log('Congratulations, your extension "talk-to-code" is now active!');
// The command has been defined in the package.json file
// Now provide the implementation of the command with registerCommand
// The commandId parameter must match the command field in package.json
let disposable = vscode.commands.registerCommand('extension.helloWorld', () => {
// The code you place here will be executed every time your command is executed
// Display a message box to the user
vscode.window.showInformationMessage('coding by dictation!');
initUser("lawrence"); /* change here to set new user */
initManager();
listen();
// runEditTests();
// test_function();
// runTestCasesForC();
// runTestCasesForPy();
});
context.subscriptions.push(disposable);
}
function initUser(user: string) {
var userSpecs = getUserSpecs(user);
cwd = userSpecs[0];
cred = userSpecs[1];
ast_cwd = userSpecs[2];
}
function initManager() {
language = "c";
manager = new StructCommandManager(language, true);
editManager = new EditCommandManager(manager,count_lines,count_speech);
}
function listen() {
displayCode([""]);
// env: {GOOGLE_APPLICATION_CREDENTIALS: cred}
const child = spawn('node', ['speech_recognizer.js'], {shell:true, cwd: cwd});
child.stdout.on('data', (data: string)=>{
let transcribed_word = data.toString().trim();
console.log("TRANSCRIBED WORD: "+transcribed_word);
if (transcribed_word == 'Listening') vscode.window.showInformationMessage('Begin Speaking!');
else if (transcribed_word == "microphone off" || transcribed_word == "sleep" || transcribed_word == "go to sleep") {
microphone = false;
vscode.window.showInformationMessage("microphone asleep");
}
else if (transcribed_word == "microphone on" || transcribed_word == "wake up") {
microphone = true;
vscode.window.showInformationMessage("microphone active");
}
else if (microphone && editManager.check_if_edit_command(transcribed_word)) {
vscode.window.showInformationMessage("You just said the following edit command: " + transcribed_word);
console.log(transcribed_word)
editManager.checkAll(transcribed_word,count_lines);
displayCode(manager.struct_command_list);
console.log(manager.managerStatus());
}
else if (microphone) {
vscode.window.showInformationMessage("You just said: " + transcribed_word);
errorFlag = false;
codeBuffer = "";
manager.parse_speech(transcribed_word, count_lines);
displayCode(manager.struct_command_list);
}
});
}
function displayCode(struct_command_list: string[]) {
/* Set up commands to insert */
let commands = '#c_program SampleProgram #include "stdio.h";; ';
if (language == "py") commands = '#p_program SampleProgram #include "sys";; ';
for (var i = 0; i < struct_command_list.length; i++) commands += struct_command_list[i] + "\n";
commands += ' #program_end';
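/* Illustrative example (hypothetical input, not from the original source): for
struct_command_list = ['#string "";;'] with language "c", commands becomes
'#c_program SampleProgram #include "stdio.h";; #string "";;\n #program_end'. */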
const other_child = spawn('java', ['ast/ASTParser 1'], {shell:true, cwd: ast_cwd});
other_child.stdin.setEncoding('utf8');
other_child.stdin.write(commands);
other_child.stdin.end();
other_child.stdout.setEncoding('utf8');
other_child.stdout.on('data', (data: string)=>{
codeBuffer += data;
if (data.includes("AST construction complete") && !errorFlag) {
var code = codeBuffer.split("ASTNode")[0].trimLeft();
codeBuffer = ""; // clear code stream
writeToEditor(code, struct_command_list);
}
else if (data.includes("Not Supported Syntax Format")) {
console.log("error");
codeBuffer = ""
errorFlag = true;
}
});
}
/* text2 - function prototype, text1 - actual function definition.
Conditions for a prototype/definition pair:
- one ends with ";", the other ends with "{"
- both start with the same datatype value
- the function name must be the same
Only function definitions end with "{" and begin with a datatype value.
Statements that end with ";" and begin with a datatype are declaration statements;
however, declarations do not include "(" in the second word, which rules them out.
*/
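/* Illustrative example (hypothetical inputs):
text1 = "int add(int a, int b) {"  (function definition)
text2 = "int add(int a, int b);"   (prototype)
Both begin with the datatype "int" and share the second word "add(int",
so checkIfFunctionPrototype(text1, text2) returns true. */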
function checkIfFunctionPrototype(text1: string, text2: string){
if (!text2.endsWith(";")) return false;
if (!text1.endsWith("{")) return false;
/* Not needed because blank lines should already be caught before entering this function call.
Just as a precaution. */
if (text1.length < 2 || text2.length < 2) return false;
text2 = text2.substring(0,text2.length-1);
text1 = text1.substring(0,text1.length-1);
text2 = text2.replace(/ +/g, ' ');
text1 = text1.replace(/ +/g, ' ');
/* Convert text1 to function prototype form for comparison */
var splitted_text1 = text1.split(" ");
var splitted_text2 = text2.split(" ");
if (splitted_text1.length < 2 || splitted_text2.length < 2) return false;
if (!datatypes.includes(splitted_text1[0]) || !datatypes.includes(splitted_text2[0])) return false;
if (!splitted_text1[1].includes("(") || !splitted_text2[1].includes("(")) return false;
if (splitted_text1[0] != splitted_text2[0]) return false;
if (splitted_text1[1] != splitted_text2[1]) return false;
else return true;
}
function map_lines_to_code(struct_command_list: string[]){
console.log(JSON.stringify(code_segments));
cursor_pos = 0;
count_lines = [];
var count =0;
var j =0;
var includeStatement = false;
for (var i=0;i<code_segments.length;i++) {
console.log(JSON.stringify(code_segments[i]) + " " + i + " " + count);
includeStatement = false;
code_segments[i] = code_segments[i].trim();
if (code_segments[i].startsWith("#include") || code_segments[i].startsWith("import")) includeStatement = true;
if (includeStatement || code_segments[i] == "\r" || code_segments[i] == "" || code_segments[i] == "\t" || code_segments[i]=="*/"|| code_segments[i]=="/*") {
count++;
/* The cursor position is rendered as a blank line in the code, so this if-block
detects blank lines. A blank line is represented by the struct command "#string \"\";;",
so the blank line is mapped to that struct command as well. */
if (!includeStatement && j < struct_command_list.length && struct_command_list[j] == "#string \"\";;") {
count_lines[j] = count;
cursor_pos = i;
j++;
}
}
else if (i< code_segments.length-1 && checkIfFunctionPrototype(code_segments[i+1], code_segments[i])){
count++;
}
else {
if (struct_command_list[j].startsWith("#string")) cursor_pos = count;
count++;
count_lines[j] = count;
j++;
}
}
}
function map_speech_to_struct_command(){
count_speech = [];
var count =0;
var j =0;
for (var i=0;i<manager.struct_command_list.length;i++){
var line = manager.struct_command_list[i];
if (line.startsWith("#comment" || line.indexOf("cursor here")!=-1)|| line.startsWith("#if_branch_end;;")|| line.startsWith("#else_branch_end") || line.startsWith("#function_end;;")|| line.startsWith("#while_end;;")|| line.startsWith("#for_end;;")){
count++;
}
else{
count_speech[j] = count++;
j++;
}
}
}
function writeToEditor(code: string, struct_command_list: string[]) {
code_segments = code.split("\n");
map_lines_to_code(struct_command_list);
console.log("cursor pos: " + cursor_pos)
map_speech_to_struct_command();
console.log("LINE_COUNT: "+JSON.stringify(count_lines));
console.log("SPEECH_COUNT: "+JSON.stringify(count_speech));
let editor = vscode.window.activeTextEditor;
if (manager.holding) {
var line = code_segments[manager.heldline];
var numTabs = "";
for (var i = 0; i < line.length; i++) {
if (line[i] == "\t") numTabs += "\t";
}
var speech = manager.curr_speech.join(" ");
var temp = speech.split(" ");
if (speech.includes("spell") && speech.includes("end_spell")) {
var spellIdx = temp.indexOf("spell");
var spellEndIdx = temp.indexOf("end_spell");
speech = temp.slice(0, spellIdx).join(" ").trim() + " " +
temp.slice(spellIdx + 1, spellEndIdx).join("").trim() + " " +
temp.slice(spellEndIdx + 1).join(" ").trim();
}
code_segments.splice(manager.heldline - 1, 1, numTabs + speech + " *stay");
code = code_segments.join("\n");
cursor_pos = manager.heldline - 1;
}
if (editor) {
/* Get range to delete */
var lineCount = editor.document.lineCount;
var start_pos = new vscode.Position(0, 0);
var end_pos = new vscode.Position(lineCount, 0);
var range = new vscode.Range(start_pos, end_pos);
editor.edit(editBuilder => {
editBuilder.delete(range);
editBuilder.insert(start_pos, code);
}).then(() => {
/* Because editBuilder applies the edit asynchronously, the cursor position cannot be
set directly (it would be outdated); then() runs once the edit has been applied. */
if (editor) {
var lineAt = editor.document.lineAt(cursor_pos).text;
if (manager.isLeftRightCalled){
editor.selection = new vscode.Selection(new vscode.Position(cursor_pos, manager.len_cursor), new vscode.Position(cursor_pos, manager.len_cursor));
}
else editor.selection = new vscode.Selection(new vscode.Position(cursor_pos, lineAt.length), new vscode.Position(cursor_pos, lineAt.length));
}
})
}
}
// this method is called when your extension is deactivated
export function deactivate() {} | random_line_split |
|
app.js | var MyApp = (function () {
var socket = null;
var socker_url = 'http://localhost:3000';
var meeting_id = '';
var user_id = '';
var tag = document.createElement('script');
tag.src = "https://www.youtube.com/iframe_api";
var firstScriptTag = document.getElementsByTagName('script')[0];
firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);
// 3. This function creates an <iframe> (and YouTube player)
// after the API code downloads.
var player;
function init(uid, mid) {
user_id = uid;
meeting_id = mid;
$('#meetingname').text(meeting_id);
$('#me h4').text(user_id + '(Me)');
document.title = user_id;
SignalServerEventBinding();
onYouTubeIframeAPIReady();
EventBinding();
}
function SignalServerEventBinding() |
function EventBinding() {
$('#btnResetMeeting').on('click', function () {
socket.emit('reset');
});
$('#btnsend').on('click', function () {
socket.emit('sendMessage', $('#msgbox').val());
$('#msgbox').val('');
});
$('#invite').on('click', function () {
var str1 = "https://127.0.0.1:5501/?mid=";
var str2 = meeting_id;
var res = str1.concat(str2);
navigator.clipboard.writeText(res);
alert("Meeting id copied to clipboard. ");
});
$('#msgbox').keypress(function (e) {
var key = e.which;
if (key == 13) // the enter key code
{
$('#btnsend').click();
return false;
}
});
$('#me').on('dblclick', 'video', function () {
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
var minVideo = document.getElementById('localVideoCtr');
// var maxVideo = document.getElementById('mVideoPlayer');
// var stream = minVideo.captureStream();
// maxVideo.srcObject = stream;
minVideo.requestFullscreen();
// $('#player').hide();
player.pauseVideo();
});
$('#mVideoPlayer').on('dblclick', function () {
this.requestFullscreen();
});
$('#vidURL').keypress(function (e) {
if (e.which == 13) {
var vidId = getId($('#vidURL').val());
// player.loadVideoById(vidId, 0);
socket.emit('newVideoId',
{
connId: socket.id,
videoId: vidId,
});
return false;
}
});
}
function AddNewUser(other_user_id, connId) {
var $newDiv = $('#otherTemplate').clone();
$newDiv = $newDiv.attr('id', connId).addClass('other');
$newDiv.dblclick(function () {
var minVideo = document.getElementById("v_" + connId);
minVideo.requestFullscreen();
player.pauseVideo();
});
$newDiv.find('h4').text(other_user_id);
$newDiv.find('video').attr('id', 'v_' + connId);
$newDiv.find('audio').attr('id', 'a_' + connId);
$newDiv.show();
$('#divUsers').append($newDiv);
}
function getId(url) {
const regExp = /^.*(youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=|&v=)([^#&?]*).*/;
const match = url.match(regExp);
return (match && match[2].length === 11)
? match[2]
: null;
}
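// Illustrative example (hypothetical input):
// getId("https://www.youtube.com/watch?v=dQw4w9WgXcQ") returns the
// 11-character video id "dQw4w9WgXcQ"; non-video URLs return null.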
function convertHMS(value) {
const sec = parseInt(value, 10); // convert value to number if it's string
let hours = Math.floor(sec / 3600); // get hours
let minutes = Math.floor((sec - (hours * 3600)) / 60); // get minutes
let seconds = sec - (hours * 3600) - (minutes * 60); // get seconds
// add 0 if value < 10; Example: 2 => 02
if (hours < 10) { hours = "0" + hours; }
if (minutes < 10) { minutes = "0" + minutes; }
if (seconds < 10) { seconds = "0" + seconds; }
return hours + ':' + minutes + ':' + seconds; // Return is HH : MM : SS
}
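// Illustrative example: convertHMS(3725) returns "01:02:05".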
// 2. This code loads the IFrame Player API code asynchronously.
function onYouTubeIframeAPIReady() {
player = new YT.Player('player', {
height: 560,
width: 700,
videoId: 'G5RpJwCJDqc',
playerVars: {
'playsinline': 1
},
events: {
'onReady': onPlayerReady,
'onStateChange': onPlayerStateChange
}
});
}
// 4. The API will call this function when the video player is ready.
function onPlayerReady(event) {
event.target.pauseVideo();
}
// 5. The API calls this function when the player's state changes.
// Here it broadcasts the local player's play, pause, and seek events over
// the socket so that every participant's player stays in sync.
function onPlayerStateChange(event) {
var playerStatus = event.data;
console.log(playerStatus);
var currTime = 0;
switch (playerStatus) {
case 0:
//video ended
break;
case 1:
//onplay
currTime = player.getCurrentTime();
socket.emit('play others',
{
connId: socket.id,
currentTime: currTime
});
break;
case 2:
//onpause
currTime = player.getCurrentTime();
socket.emit('pause others', {
connId: socket.id,
currentTime: currTime
});
break;
case 3:
currTime = player.getCurrentTime();
socket.emit('seek', {
connId: socket.id,
currentTime: currTime
});
break;
}
}
function stopVideo() {
player.stopVideo();
}
return {
_init: function (uid, mid) {
init(uid, mid);
}
};
}()); | {
socket = io.connect(socker_url);
var serverFn = function (data, to_connid) {
socket.emit('exchangeSDP', { message: data, to_connid: to_connid });
};
socket.on('reset', function () {
location.reload();
});
socket.on('exchangeSDP', async function (data) {
await WrtcHelper.ExecuteClientFn(data.message, data.from_connid);
});
socket.on('informAboutNewConnection', function (data) {
AddNewUser(data.other_user_id, data.connId);
WrtcHelper.createNewConnection(data.connId);
});
socket.on('informAboutConnectionEnd', function (connId) {
$('#' + connId).remove();
WrtcHelper.closeExistingConnection(connId);
});
socket.on('showChatMessage', function (data) {
var name = document.createElement("P");
name.innerHTML = data.from;
name.style.fontWeight = "bold";
name.style.marginBottom = "0px";
document.getElementById("messages").appendChild(name);
var dateandtime = document.createElement("P");
dateandtime.innerHTML = data.time;
dateandtime.style.marginBottom = "0px";
dateandtime.style.fontWeight = "bold";
dateandtime.style.fontSize = "12px";
dateandtime.style.color = "#000";
document.getElementById("messages").appendChild(dateandtime);
var messagetext = document.createElement("P");
messagetext.innerHTML = data.message;
document.getElementById("messages").appendChild(messagetext);
});
socket.on('connect', () => {
if (socket.connected) {
WrtcHelper.init(serverFn, socket.id);
if (user_id != "" && meeting_id != "") {
socket.emit('userconnect', { dsiplayName: user_id, meetingid: meeting_id });
}
}
});
socket.on('userconnected', function (other_users) {
$('#divUsers .other').remove();
if (other_users) {
for (var i = 0; i < other_users.length; i++) {
AddNewUser(other_users[i].user_id, other_users[i].connectionId);
WrtcHelper.createNewConnection(other_users[i].connectionId);
}
}
$(".toolbox").show();
$('#messages').show();
$('#divUsers').show();
});
socket.on('seekAll', function (time) {
console.log("justseek");
var clientTime = player.getCurrentTime();
if (clientTime < time - .2 || clientTime > time + .2) {
// if (alert('Do you want to sync with admin at ' + convertHMS(time))) {
player.seekTo(time);
// Forces video to play right after seek
player.playVideo();
// }
}
});
socket.on('playAll', function (data) {
console.log("playAll");
// player.seekTo(time);
// Forces video to play right after seek
// console.log("PlayAll" + data.meetingId +" : "+meeting_id);
// if (data.meetingId == meeting_id) {
player.playVideo();
// }
// player.playVideo();
});
socket.on('pauseAll', function (Time) {
console.log("pauseAll");
// player.seekTo(time);
// Forces video to stop right after seek
// if (data.meetingId == meeting_id) {
player.pauseVideo();
// }
// player.playVideo();
});
socket.on('playNewVid', function (vidId) {
player.loadVideoById(vidId, 0);
});
socket.on('Not Allowed', function () {
// $('divVidUrl').after($("<p></p>").text("Only admin can add new video"));
console.log('Not Allowed');
});
} | identifier_body |
app.js | var MyApp = (function () {
var socket = null;
var socker_url = 'http://localhost:3000';
var meeting_id = '';
var user_id = '';
var tag = document.createElement('script');
tag.src = "https://www.youtube.com/iframe_api";
var firstScriptTag = document.getElementsByTagName('script')[0];
firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);
// 3. This function creates an <iframe> (and YouTube player)
// after the API code downloads.
var player;
function init(uid, mid) {
user_id = uid;
meeting_id = mid;
$('#meetingname').text(meeting_id);
$('#me h4').text(user_id + '(Me)');
document.title = user_id;
SignalServerEventBinding();
onYouTubeIframeAPIReady();
EventBinding();
}
function SignalServerEventBinding() {
socket = io.connect(socker_url);
var serverFn = function (data, to_connid) {
socket.emit('exchangeSDP', { message: data, to_connid: to_connid });
};
socket.on('reset', function () {
location.reload();
});
socket.on('exchangeSDP', async function (data) {
await WrtcHelper.ExecuteClientFn(data.message, data.from_connid);
});
socket.on('informAboutNewConnection', function (data) {
AddNewUser(data.other_user_id, data.connId);
WrtcHelper.createNewConnection(data.connId);
});
socket.on('informAboutConnectionEnd', function (connId) {
$('#' + connId).remove();
WrtcHelper.closeExistingConnection(connId);
});
socket.on('showChatMessage', function (data) {
var name = document.createElement("P");
name.innerHTML = data.from;
name.style.fontWeight = "bold";
name.style.marginBottom = "0px";
document.getElementById("messages").appendChild(name);
var dateandtime = document.createElement("P");
dateandtime.innerHTML = data.time;
dateandtime.style.marginBottom = "0px";
dateandtime.style.fontWeight = "bold";
dateandtime.style.fontSize = "12px";
dateandtime.style.color = "#000";
document.getElementById("messages").appendChild(dateandtime);
var messagetext = document.createElement("P");
messagetext.innerHTML = data.message;
document.getElementById("messages").appendChild(messagetext);
});
socket.on('connect', () => {
if (socket.connected) |
});
socket.on('userconnected', function (other_users) {
$('#divUsers .other').remove();
if (other_users) {
for (var i = 0; i < other_users.length; i++) {
AddNewUser(other_users[i].user_id, other_users[i].connectionId);
WrtcHelper.createNewConnection(other_users[i].connectionId);
}
}
$(".toolbox").show();
$('#messages').show();
$('#divUsers').show();
});
socket.on('seekAll', function (time) {
console.log("justseek");
var clientTime = player.getCurrentTime();
if (clientTime < time - .2 || clientTime > time + .2) {
// if (alert('Do you want to sync with admin at ' + convertHMS(time))) {
player.seekTo(time);
// Forces video to play right after seek
player.playVideo();
// }
}
});
socket.on('playAll', function (data) {
console.log("playAll");
// player.seekTo(time);
// Forces video to play right after seek
// console.log("PlayAll" + data.meetingId +" : "+meeting_id);
// if (data.meetingId == meeting_id) {
player.playVideo();
// }
// player.playVideo();
});
socket.on('pauseAll', function (Time) {
console.log("pauseAll");
// player.seekTo(time);
// Forces video to stop right after seek
// if (data.meetingId == meeting_id) {
player.pauseVideo();
// }
// player.playVideo();
});
socket.on('playNewVid', function (vidId) {
player.loadVideoById(vidId, 0);
});
socket.on('Not Allowed', function () {
// $('divVidUrl').after($("<p></p>").text("Only admin can add new video"));
console.log('Not Allowed');
});
}
function EventBinding() {
$('#btnResetMeeting').on('click', function () {
socket.emit('reset');
});
$('#btnsend').on('click', function () {
socket.emit('sendMessage', $('#msgbox').val());
$('#msgbox').val('');
});
$('#invite').on('click', function () {
var str1 = "https://127.0.0.1:5501/?mid=";
var str2 = meeting_id;
var res = str1.concat(str2);
navigator.clipboard.writeText(res);
alert("Meeting id copied to clipboard. ");
});
$('#msgbox').keypress(function (e) {
var key = e.which;
if (key == 13) // the enter key code
{
$('#btnsend').click();
return false;
}
});
$('#me').on('dblclick', 'video', function () {
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
var minVideo = document.getElementById('localVideoCtr');
// var maxVideo = document.getElementById('mVideoPlayer');
// var stream = minVideo.captureStream();
// maxVideo.srcObject = stream;
minVideo.requestFullscreen();
// $('#player').hide();
player.pauseVideo();
});
$('#mVideoPlayer').on('dblclick', function () {
this.requestFullscreen();
});
$('#vidURL').keypress(function (e) {
if (e.which == 13) {
var vidId = getId($('#vidURL').val());
// player.loadVideoById(vidId, 0);
socket.emit('newVideoId',
{
connId: socket.id,
videoId: vidId,
});
return false;
}
});
}
function AddNewUser(other_user_id, connId) {
var $newDiv = $('#otherTemplate').clone();
$newDiv = $newDiv.attr('id', connId).addClass('other');
$newDiv.dblclick(function () {
var minVideo = document.getElementById("v_" + connId);
minVideo.requestFullscreen();
player.pauseVideo();
});
$newDiv.find('h4').text(other_user_id);
$newDiv.find('video').attr('id', 'v_' + connId);
$newDiv.find('audio').attr('id', 'a_' + connId);
$newDiv.show();
$('#divUsers').append($newDiv);
}
function getId(url) {
const regExp = /^.*(youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=|&v=)([^#&?]*).*/;
const match = url.match(regExp);
return (match && match[2].length === 11)
? match[2]
: null;
}
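// Illustrative example (hypothetical input):
// getId("https://www.youtube.com/watch?v=dQw4w9WgXcQ") returns the
// 11-character video id "dQw4w9WgXcQ"; non-video URLs return null.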
function convertHMS(value) {
const sec = parseInt(value, 10); // convert value to number if it's string
let hours = Math.floor(sec / 3600); // get hours
let minutes = Math.floor((sec - (hours * 3600)) / 60); // get minutes
let seconds = sec - (hours * 3600) - (minutes * 60); // get seconds
// add 0 if value < 10; Example: 2 => 02
if (hours < 10) { hours = "0" + hours; }
if (minutes < 10) { minutes = "0" + minutes; }
if (seconds < 10) { seconds = "0" + seconds; }
return hours + ':' + minutes + ':' + seconds; // Return is HH : MM : SS
}
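// Illustrative example: convertHMS(3725) returns "01:02:05".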
// 2. This code loads the IFrame Player API code asynchronously.
function onYouTubeIframeAPIReady() {
player = new YT.Player('player', {
height: 560,
width: 700,
videoId: 'G5RpJwCJDqc',
playerVars: {
'playsinline': 1
},
events: {
'onReady': onPlayerReady,
'onStateChange': onPlayerStateChange
}
});
}
// 4. The API will call this function when the video player is ready.
function onPlayerReady(event) {
event.target.pauseVideo();
}
// 5. The API calls this function when the player's state changes.
// Here it broadcasts the local player's play, pause, and seek events over
// the socket so that every participant's player stays in sync.
function onPlayerStateChange(event) {
var playerStatus = event.data;
console.log(playerStatus);
var currTime = 0;
switch (playerStatus) {
case 0:
//video ended
break;
case 1:
//onplay
currTime = player.getCurrentTime();
socket.emit('play others',
{
connId: socket.id,
currentTime: currTime
});
break;
case 2:
//onpause
currTime = player.getCurrentTime();
socket.emit('pause others', {
connId: socket.id,
currentTime: currTime
});
break;
case 3:
currTime = player.getCurrentTime();
socket.emit('seek', {
connId: socket.id,
currentTime: currTime
});
break;
}
}
function stopVideo() {
player.stopVideo();
}
return {
_init: function (uid, mid) {
init(uid, mid);
}
};
}()); | {
WrtcHelper.init(serverFn, socket.id);
if (user_id != "" && meeting_id != "") {
socket.emit('userconnect', { dsiplayName: user_id, meetingid: meeting_id });
}
} | conditional_block |
app.js | var MyApp = (function () {
var socket = null;
var socker_url = 'http://localhost:3000';
var meeting_id = '';
var user_id = '';
var tag = document.createElement('script');
tag.src = "https://www.youtube.com/iframe_api";
var firstScriptTag = document.getElementsByTagName('script')[0];
firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);
// 3. This function creates an <iframe> (and YouTube player)
// after the API code downloads.
var player;
function init(uid, mid) {
user_id = uid;
meeting_id = mid;
$('#meetingname').text(meeting_id);
$('#me h4').text(user_id + '(Me)');
document.title = user_id;
SignalServerEventBinding();
onYouTubeIframeAPIReady();
EventBinding();
}
function SignalServerEventBinding() {
socket = io.connect(socker_url);
var serverFn = function (data, to_connid) {
socket.emit('exchangeSDP', { message: data, to_connid: to_connid });
};
socket.on('reset', function () {
location.reload();
});
socket.on('exchangeSDP', async function (data) {
await WrtcHelper.ExecuteClientFn(data.message, data.from_connid);
});
socket.on('informAboutNewConnection', function (data) {
AddNewUser(data.other_user_id, data.connId);
WrtcHelper.createNewConnection(data.connId);
});
socket.on('informAboutConnectionEnd', function (connId) {
$('#' + connId).remove();
WrtcHelper.closeExistingConnection(connId);
});
socket.on('showChatMessage', function (data) {
var name = document.createElement("P");
name.innerHTML = data.from;
name.style.fontWeight = "bold";
name.style.marginBottom = "0px";
document.getElementById("messages").appendChild(name);
var dateandtime = document.createElement("P");
dateandtime.innerHTML = data.time;
dateandtime.style.marginBottom = "0px";
dateandtime.style.fontWeight = "bold";
dateandtime.style.fontSize = "12px";
dateandtime.style.color = "#000";
document.getElementById("messages").appendChild(dateandtime);
var messagetext = document.createElement("P");
messagetext.innerHTML = data.message;
document.getElementById("messages").appendChild(messagetext);
});
socket.on('connect', () => {
if (socket.connected) {
WrtcHelper.init(serverFn, socket.id);
if (user_id != "" && meeting_id != "") {
socket.emit('userconnect', { dsiplayName: user_id, meetingid: meeting_id });
}
}
});
socket.on('userconnected', function (other_users) {
$('#divUsers .other').remove();
if (other_users) {
for (var i = 0; i < other_users.length; i++) {
AddNewUser(other_users[i].user_id, other_users[i].connectionId);
WrtcHelper.createNewConnection(other_users[i].connectionId);
}
}
$(".toolbox").show();
$('#messages').show();
$('#divUsers').show();
});
socket.on('seekAll', function (time) {
console.log("justseek");
var clientTime = player.getCurrentTime();
if (clientTime < time - .2 || clientTime > time + .2) {
// if (alert('Do you want to sync with admin at ' + convertHMS(time))) {
player.seekTo(time);
// Forces video to play right after seek
player.playVideo();
// }
}
});
socket.on('playAll', function (data) {
console.log("playAll");
// player.seekTo(time);
// Forces video to play right after seek
// console.log("PlayAll" + data.meetingId +" : "+meeting_id);
// if (data.meetingId == meeting_id) {
player.playVideo();
// }
// player.playVideo();
});
socket.on('pauseAll', function (Time) {
console.log("pauseAll");
// player.seekTo(time);
// Forces video to stop right after seek
// if (data.meetingId == meeting_id) {
player.pauseVideo();
// }
// player.playVideo();
});
socket.on('playNewVid', function (vidId) {
player.loadVideoById(vidId, 0);
});
socket.on('Not Allowed', function () {
// $('divVidUrl').after($("<p></p>").text("Only admin can add new video"));
console.log('Not Allowed');
});
}
function EventBinding() { | socket.emit('sendMessage', $('#msgbox').val());
$('#msgbox').val('');
});
$('#invite').on('click', function () {
var str1 = "https://127.0.0.1:5501/?mid=";
var str2 = meeting_id;
var res = str1.concat(str2);
navigator.clipboard.writeText(res);
alert("Meeting id copied to clipboard. ");
});
$('#msgbox').keypress(function (e) {
var key = e.which;
if (key == 13) // the enter key code
{
$('#btnsend').click();
return false;
}
});
$('#me').on('dblclick', 'video', function () {
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
var minVideo = document.getElementById('localVideoCtr');
// var maxVideo = document.getElementById('mVideoPlayer');
// var stream = minVideo.captureStream();
// maxVideo.srcObject = stream;
minVideo.requestFullscreen();
// $('#player').hide();
player.pauseVideo();
});
$('#mVideoPlayer').on('dblclick', function () {
this.requestFullscreen();
});
$('#vidURL').keypress(function (e) {
if (e.which == 13) {
var vidId = getId($('#vidURL').val());
// player.loadVideoById(vidId, 0);
socket.emit('newVideoId',
{
connId: socket.id,
videoId: vidId,
});
return false;
}
});
}
function AddNewUser(other_user_id, connId) {
var $newDiv = $('#otherTemplate').clone();
$newDiv = $newDiv.attr('id', connId).addClass('other');
$newDiv.dblclick(function () {
var minVideo = document.getElementById("v_" + connId);
minVideo.requestFullscreen();
player.pauseVideo();
});
$newDiv.find('h4').text(other_user_id);
$newDiv.find('video').attr('id', 'v_' + connId);
$newDiv.find('audio').attr('id', 'a_' + connId);
$newDiv.show();
$('#divUsers').append($newDiv);
}
function getId(url) {
const regExp = /^.*(youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=|&v=)([^#&?]*).*/;
const match = url.match(regExp);
return (match && match[2].length === 11)
? match[2]
: null;
}
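// Illustrative example (hypothetical input):
// getId("https://www.youtube.com/watch?v=dQw4w9WgXcQ") returns the
// 11-character video id "dQw4w9WgXcQ"; non-video URLs return null.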
function convertHMS(value) {
const sec = parseInt(value, 10); // convert value to number if it's string
let hours = Math.floor(sec / 3600); // get hours
let minutes = Math.floor((sec - (hours * 3600)) / 60); // get minutes
let seconds = sec - (hours * 3600) - (minutes * 60); // get seconds
// add 0 if value < 10; Example: 2 => 02
if (hours < 10) { hours = "0" + hours; }
if (minutes < 10) { minutes = "0" + minutes; }
if (seconds < 10) { seconds = "0" + seconds; }
return hours + ':' + minutes + ':' + seconds; // Return is HH : MM : SS
}
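// Illustrative example: convertHMS(3725) returns "01:02:05".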
// 2. This code loads the IFrame Player API code asynchronously.
function onYouTubeIframeAPIReady() {
player = new YT.Player('player', {
height: 560,
width: 700,
videoId: 'G5RpJwCJDqc',
playerVars: {
'playsinline': 1
},
events: {
'onReady': onPlayerReady,
'onStateChange': onPlayerStateChange
}
});
}
// 4. The API will call this function when the video player is ready.
function onPlayerReady(event) {
event.target.pauseVideo();
}
// 5. The API calls this function when the player's state changes.
// Here it broadcasts the local player's play, pause, and seek events over
// the socket so that every participant's player stays in sync.
function onPlayerStateChange(event) {
var playerStatus = event.data;
console.log(playerStatus);
var currTime = 0;
switch (playerStatus) {
case 0:
//video ended
break;
case 1:
//onplay
currTime = player.getCurrentTime();
socket.emit('play others',
{
connId: socket.id,
currentTime: currTime
});
break;
case 2:
//onpause
currTime = player.getCurrentTime();
socket.emit('pause others', {
connId: socket.id,
currentTime: currTime
});
break;
case 3:
currTime = player.getCurrentTime();
socket.emit('seek', {
connId: socket.id,
currentTime: currTime
});
break;
}
}
function stopVideo() {
player.stopVideo();
}
return {
_init: function (uid, mid) {
init(uid, mid);
}
};
}()); | $('#btnResetMeeting').on('click', function () {
socket.emit('reset');
});
$('#btnsend').on('click', function () { | random_line_split |
app.js | var MyApp = (function () {
var socket = null;
var socker_url = 'http://localhost:3000';
var meeting_id = '';
var user_id = '';
var tag = document.createElement('script');
tag.src = "https://www.youtube.com/iframe_api";
var firstScriptTag = document.getElementsByTagName('script')[0];
firstScriptTag.parentNode.insertBefore(tag, firstScriptTag);
// 3. This function creates an <iframe> (and YouTube player)
// after the API code downloads.
var player;
function init(uid, mid) {
user_id = uid;
meeting_id = mid;
$('#meetingname').text(meeting_id);
$('#me h4').text(user_id + '(Me)');
document.title = user_id;
SignalServerEventBinding();
onYouTubeIframeAPIReady();
EventBinding();
}
function SignalServerEventBinding() {
socket = io.connect(socker_url);
var serverFn = function (data, to_connid) {
socket.emit('exchangeSDP', { message: data, to_connid: to_connid });
};
socket.on('reset', function () {
location.reload();
});
socket.on('exchangeSDP', async function (data) {
await WrtcHelper.ExecuteClientFn(data.message, data.from_connid);
});
socket.on('informAboutNewConnection', function (data) {
AddNewUser(data.other_user_id, data.connId);
WrtcHelper.createNewConnection(data.connId);
});
socket.on('informAboutConnectionEnd', function (connId) {
$('#' + connId).remove();
WrtcHelper.closeExistingConnection(connId);
});
socket.on('showChatMessage', function (data) {
var name = document.createElement("P");
name.innerHTML = data.from;
name.style.fontWeight = "bold";
name.style.marginBottom = "0px";
document.getElementById("messages").appendChild(name);
var dateandtime = document.createElement("P");
dateandtime.innerHTML = data.time;
dateandtime.style.marginBottom = "0px";
dateandtime.style.fontWeight = "bold";
dateandtime.style.fontSize = "12px";
dateandtime.style.color = "#000";
document.getElementById("messages").appendChild(dateandtime);
var messagetext = document.createElement("P");
messagetext.innerHTML = data.message;
document.getElementById("messages").appendChild(messagetext);
});
socket.on('connect', () => {
if (socket.connected) {
WrtcHelper.init(serverFn, socket.id);
if (user_id != "" && meeting_id != "") {
socket.emit('userconnect', { dsiplayName: user_id, meetingid: meeting_id });
}
}
});
socket.on('userconnected', function (other_users) {
$('#divUsers .other').remove();
if (other_users) {
for (var i = 0; i < other_users.length; i++) {
AddNewUser(other_users[i].user_id, other_users[i].connectionId);
WrtcHelper.createNewConnection(other_users[i].connectionId);
}
}
$(".toolbox").show();
$('#messages').show();
$('#divUsers').show();
});
socket.on('seekAll', function (time) {
console.log("justseek");
var clientTime = player.getCurrentTime();
if (clientTime < time - .2 || clientTime > time + .2) {
// if (alert('Do you want to sync with admin at ' + convertHMS(time))) {
player.seekTo(time);
// Forces video to play right after seek
player.playVideo();
// }
}
});
socket.on('playAll', function (data) {
console.log("playAll");
// player.seekTo(time);
// Forces video to play right after seek
// console.log("PlayAll" + data.meetingId +" : "+meeting_id);
// if (data.meetingId == meeting_id) {
player.playVideo();
// }
// player.playVideo();
});
socket.on('pauseAll', function (Time) {
console.log("pauseAll");
// player.seekTo(time);
// Forces video to stop right after seek
// if (data.meetingId == meeting_id) {
player.pauseVideo();
// }
// player.playVideo();
});
socket.on('playNewVid', function (vidId) {
player.loadVideoById(vidId, 0);
});
socket.on('Not Allowed', function () {
// $('divVidUrl').after($("<p></p>").text("Only admin can add new video"));
console.log('Not Allowed');
});
}
function EventBinding() {
$('#btnResetMeeting').on('click', function () {
socket.emit('reset');
});
$('#btnsend').on('click', function () {
socket.emit('sendMessage', $('#msgbox').val());
$('#msgbox').val('');
});
$('#invite').on('click', function () {
var str1 = "https://127.0.0.1:5501/?mid=";
var str2 = meeting_id;
var res = str1.concat(str2);
navigator.clipboard.writeText(res);
alert("Meeting id copied to clipboard. ");
});
$('#msgbox').keypress(function (e) {
var key = e.which;
if (key == 13) // the enter key code
{
$('#btnsend').click();
return false;
}
});
$('#me').on('dblclick', 'video', function () {
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
var minVideo = document.getElementById('localVideoCtr');
// var maxVideo = document.getElementById('mVideoPlayer');
// var stream = minVideo.captureStream();
// maxVideo.srcObject = stream;
minVideo.requestFullscreen();
// $('#player').hide();
player.pauseVideo();
});
$('#mVideoPlayer').on('dblclick', function () {
this.requestFullscreen();
});
$('#vidURL').keypress(function (e) {
if (e.which == 13) {
var vidId = getId($('#vidURL').val());
// player.loadVideoById(vidId, 0);
socket.emit('newVideoId',
{
connId: socket.id,
videoId: vidId,
});
return false;
}
});
}
function AddNewUser(other_user_id, connId) {
var $newDiv = $('#otherTemplate').clone();
$newDiv = $newDiv.attr('id', connId).addClass('other');
$newDiv.dblclick(function () {
var minVideo = document.getElementById("v_" + connId);
minVideo.requestFullscreen();
player.pauseVideo();
});
$newDiv.find('h4').text(other_user_id);
$newDiv.find('video').attr('id', 'v_' + connId);
$newDiv.find('audio').attr('id', 'a_' + connId);
$newDiv.show();
$('#divUsers').append($newDiv);
}
function | (url) {
const regExp = /^.*(youtu.be\/|v\/|u\/\w\/|embed\/|watch\?v=|&v=)([^#&?]*).*/;
const match = url.match(regExp);
return (match && match[2].length === 11)
? match[2]
: null;
}
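// Illustrative example (hypothetical input): passing
// "https://www.youtube.com/watch?v=dQw4w9WgXcQ" returns the 11-character
// video id "dQw4w9WgXcQ"; non-video URLs yield null.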
function convertHMS(value) {
const sec = parseInt(value, 10); // convert value to number if it's string
let hours = Math.floor(sec / 3600); // get hours
let minutes = Math.floor((sec - (hours * 3600)) / 60); // get minutes
let seconds = sec - (hours * 3600) - (minutes * 60); // get seconds
// add 0 if value < 10; Example: 2 => 02
if (hours < 10) { hours = "0" + hours; }
if (minutes < 10) { minutes = "0" + minutes; }
if (seconds < 10) { seconds = "0" + seconds; }
return hours + ':' + minutes + ':' + seconds; // Return is HH : MM : SS
}
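// Illustrative example: convertHMS(3725) returns "01:02:05".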
// 2. This code loads the IFrame Player API code asynchronously.
function onYouTubeIframeAPIReady() {
player = new YT.Player('player', {
height: 560,
width: 700,
videoId: 'G5RpJwCJDqc',
playerVars: {
'playsinline': 1
},
events: {
'onReady': onPlayerReady,
'onStateChange': onPlayerStateChange
}
});
}
// 4. The API will call this function when the video player is ready.
function onPlayerReady(event) {
event.target.pauseVideo();
}
// 5. The API calls this function when the player's state changes.
// Here it broadcasts the local player's play, pause, and seek events over
// the socket so that every participant's player stays in sync.
function onPlayerStateChange(event) {
var playerStatus = event.data;
console.log(playerStatus);
var currTime = 0;
switch (playerStatus) {
case 0:
//video ended
break;
case 1:
//onplay
currTime = player.getCurrentTime();
socket.emit('play others',
{
connId: socket.id,
currentTime: currTime
});
break;
case 2:
//onpause
currTime = player.getCurrentTime();
socket.emit('pause others', {
connId: socket.id,
currentTime: currTime
});
break;
case 3:
currTime = player.getCurrentTime();
socket.emit('seek', {
connId: socket.id,
currentTime: currTime
});
break;
}
}
function stopVideo() {
player.stopVideo();
}
return {
_init: function (uid, mid) {
init(uid, mid);
}
};
}()); | getId | identifier_name |
proxy.go | package proxy
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/cookiejar"
"net/url"
"regexp"
"strings"
"time"
"github.com/DataHenHQ/datahen/pages"
"github.com/DataHenHQ/till/internal/tillclient"
"github.com/DataHenHQ/tillup/cache"
"github.com/DataHenHQ/tillup/features"
"github.com/DataHenHQ/tillup/logger"
"github.com/DataHenHQ/tillup/sessions"
"github.com/DataHenHQ/useragent"
"golang.org/x/net/publicsuffix"
)
var (
// Token is the Till auth token
Token string
// InstanceName is the name of this till instance
InstanceName string
ca tls.Certificate
okHeader = []byte("HTTP/1.1 200 OK\r\n\r\n")
// ForceUA indicates whether to overwrite all incoming user-agent with a random one
ForceUA = true
// UAType specifies what kind of user-agent to generate
UAType = "desktop"
dhHeadersRe = regexp.MustCompile(`(?i)^X-DH`)
// ProxyFile points to the path of the txt file that contains a list of proxies
ProxyFile = ""
// ProxyURLs are external proxies that will be randomized
ProxyURLs = []string{}
// ProxyCount is the total count of proxies used.
ProxyCount int
// ReleaseVersion is the version of Till release
ReleaseVersion = "dev"
StatMu *tillclient.InstanceStatMutex
// CacheConfig is the cache specific config
CacheConfig cache.Config
// LoggerConfig is the logger specific config
LoggerConfig logger.Config
// SessionsConfig is the sessions specific config
SessionsConfig sessions.Config
)
func NewPageFromRequest(r *http.Request, scheme string, pconf *PageConfig) (p *pages.Page, err error) {
p = new(pages.Page)
u := r.URL
u.Host = r.Host
u.Scheme = scheme
p.SetURL(u.String())
p.SetMethod(r.Method)
// build the page headers
nh := map[string]interface{}{}
for name, values := range r.Header {
nh[name] = strings.Join(values, ",")
}
// remove the User-Agent header if we force a random user agent
if pconf.ForceUA {
delete(nh, "User-Agent")
}
// delete any other proxy related header
delete(nh, "Proxy-Connection")
// finally set the header
p.SetHeaders(nh)
// fetch type will always be "standard" for Till
p.FetchType = "standard"
p.UaType = pconf.UaType
// read the request body, save it and set it back to the request body
rBody, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewReader(rBody))
p.SetBody(string(rBody))
// set defaults
p.SetUaType(pconf.UaType)
p.SetFetchType("standard")
p.SetPageType("default")
// set the GID
gid, err := pages.GenerateGID(p)
if err != nil {
return nil, err
}
p.SetGID(gid)
return p, nil
}
func logReqSummary(gid, method, url string, respStatus int, cachehit bool) {
cacheType := "MISS"
if cachehit {
cacheType = "HIT "
}
fmt.Println(cacheType, gid, method, url, respStatus)
}
func sendToTarget(ctx context.Context, sconn net.Conn, sreq *http.Request, scheme string, p *pages.Page, pconf *PageConfig) (tresp *http.Response, err error) |
// buildTargetRequest builds the target client and request from the source request, page config, and session.
func buildTargetRequest(scheme string, sreq *http.Request, pconf *PageConfig, sess *sessions.Session, p *pages.Page) (*http.Client, *http.Request, error) {
// create transport for client
t := &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
DisableCompression: false,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConns: 1,
MaxIdleConnsPerHost: 1,
IdleConnTimeout: 1 * time.Millisecond,
MaxConnsPerHost: 1,
}
defer t.CloseIdleConnections()
// set proxy if specified
if pconf.UseProxy {
// using till session's proxy URL, or generate random proxy
var u string
if sess != nil {
u = sess.ProxyURL
}
if u == "" {
u = getRandom(ProxyURLs)
}
// set the proxy
p, err := url.Parse(u)
if err != nil {
return nil, nil, err
}
t.Proxy = http.ProxyURL(p)
}
// create cookiejar
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
if err != nil {
return nil, nil, err
}
// create target client
tclient := &http.Client{
Timeout: 120 * time.Second,
Transport: t,
Jar: jar,
}
// copy the body as *bytes.Reader to properly set the treq's body and content-length
srBody, _ := ioutil.ReadAll(sreq.Body)
sreq.Body = ioutil.NopCloser(bytes.NewReader(srBody))
p.SetBody(string(srBody))
// create target request
treq, err := http.NewRequestWithContext(sreq.Context(), sreq.Method, sreq.RequestURI, bytes.NewReader(srBody))
if err != nil {
return nil, nil, err
}
// build the target request
u := sreq.URL
u.Host = sreq.Host
u.Scheme = scheme
treq.URL = u
treq.Host = u.Host
// if there are cookies on the session, set it in the cookiejar
if sess != nil && len(sess.Cookies) > 0 {
if pconf.StickyCookies {
tclient.Jar.SetCookies(treq.URL, sess.Cookies.Get(u))
}
}
// copy source headers into target headers
th := copySourceHeaders(sreq.Header)
if th != nil {
treq.Header = th
}
// Delete headers related to proxy usage
treq.Header.Del("Proxy-Connection")
// if ForceUA is true, then override User-Agent header with a random UA
if ForceUA {
// using till session's user agent, or generate random one
var ua string
if sess != nil {
ua = sess.UserAgent
}
if ua == "" {
ua, err = generateRandomUA(UAType)
if err != nil {
return nil, nil, err
}
}
// Set the ua on the target header
th.Set("User-Agent", ua)
}
return tclient, treq, nil
}
// copy source headers, other than those that start with X-DH*, into target headers
func copySourceHeaders(sh http.Header) (th http.Header) {
th = make(http.Header)
if sh == nil {
return nil
}
for key, values := range sh {
if dhHeadersRe.MatchString(key) {
continue
}
for _, val := range values {
th.Add(key, val)
}
}
return th
}
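// Illustrative example: given source headers {"X-DH-GID", "User-Agent"},
// only "User-Agent" is copied; any header matching the case-insensitive
// X-DH prefix is dropped.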
// generateRandomUA generates a random User-Agent string for the given user-agent type
func generateRandomUA(uaType string) (ua string, err error) {
switch uaType {
case "desktop":
ua, err = useragent.Desktop()
if err != nil {
return "", err
}
case "mobile":
ua = useragent.Mobile()
}
if ua == "" {
return "", errors.New(fmt.Sprint("generated empty user agent string for", uaType))
}
return ua, nil
}
func writeToSource(sconn net.Conn, tresp *http.Response, p *pages.Page) (err error) {
// add X-DH-GID to the response
if p != nil {
tresp.Header.Set("X-DH-GID", p.GetGID())
}
tresp.Write(sconn)
return nil
}
// Atomically increments request delta in the instance stat
func incrRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the requests counter
*(StatMu.InstanceStat.Requests) = *(StatMu.InstanceStat.Requests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments intercepted request delta in the instance stat
func incrInterceptedRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the requests counter
*(StatMu.InstanceStat.InterceptedRequests) = *(StatMu.InstanceStat.InterceptedRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments failed request delta in the instance stat
func incrFailedRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the requests counter
*(StatMu.InstanceStat.FailedRequests) = *(StatMu.InstanceStat.FailedRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments successful request delta in the instance stat
func incrSuccessfulRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the requests counter
*(StatMu.InstanceStat.SuccessfulRequests) = *(StatMu.InstanceStat.SuccessfulRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments request delta in the instance stat
func incrCacheHitStatDelta() {
StatMu.Mutex.Lock()
// increment the CacheHits counter
*(StatMu.InstanceStat.CacheHits) = *(StatMu.InstanceStat.CacheHits) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments request delta in the instance stat
func incrCacheSetStatDelta() {
StatMu.Mutex.Lock()
// increment the CacheSets counter
*(StatMu.InstanceStat.CacheSets) = *(StatMu.InstanceStat.CacheSets) + uint64(1)
StatMu.Mutex.Unlock()
}
| {
var sess *sessions.Session
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// check if past response exist in the cache. if so, then return it.
cresp, err := cache.GetResponse(ctx, p.GID, pconf.CacheFreshness, pconf.CacheServeFailures)
if err != nil {
return nil, err
}
// if cachehit then return the cached response
if cresp != nil {
// Increment the CacheHits stats
incrCacheHitStatDelta()
// Increment the successful or failed requests, and total requests
if sessions.IsSuccess(cresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), cresp.StatusCode, true)
// Build the target req and resp specifically for logging.
_, treq, terr := buildTargetRequest(scheme, sreq, pconf, sess, p)
// defer treq.Body.Close()
if terr == nil && treq != nil {
// record request and response to the logger
_, tlerr := logger.StoreItem(ctx, p.GID, treq, cresp, time.Now(), true, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
}
return cresp, nil
}
}
// If StickySession is allowed, then set the sticky session
if features.Allow(features.StickySessions) && pconf.SessionID != "" {
// get a session, or a create a new one if it doesn't exist yet.
sess, err = sessions.GetOrCreateStickySession(ctx, pconf.SessionID, (sessions.PageConfig)(*pconf))
if err != nil {
return nil, err
}
}
// build the target request from the source request
tclient, treq, err := buildTargetRequest(scheme, sreq, pconf, sess, p)
if err != nil {
return nil, err
}
// record request now, and the logger.Response will be set later once the response comes back.
rid, tlerr := logger.StoreItem(ctx, p.GID, treq, nil, time.Now(), false, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
// send the actual request to target server
tresp, err = tclient.Do(treq)
if err != nil {
return nil, err
}
if sessions.IsSuccess(tresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
// save the cookies from cookiejar to the session
if sess != nil && !sess.IsZero() {
if pconf.StickyCookies {
if sess.Cookies == nil {
sess.Cookies = sessions.CookieMap{}
}
sess.Cookies.Set(treq.URL, tclient.Jar.Cookies(treq.URL))
}
sessions.SaveSession(ctx, sess)
}
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// Store the response to cache
err := cache.StoreResponse(ctx, p.GID, tresp, nil)
if err != nil {
return nil, err
}
// Increment the CacheSets stats
incrCacheSetStatDelta()
}
// log the request summary
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), tresp.StatusCode, false)
// update response on the logger
tlerr = logger.UpdateItemResponse(ctx, rid, tresp, sess)
if tlerr != nil {
return nil, tlerr
}
return tresp, err
} | identifier_body |
proxy.go | package proxy
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/cookiejar"
"net/url"
"regexp"
"strings"
"time"
"github.com/DataHenHQ/datahen/pages"
"github.com/DataHenHQ/till/internal/tillclient"
"github.com/DataHenHQ/tillup/cache"
"github.com/DataHenHQ/tillup/features"
"github.com/DataHenHQ/tillup/logger"
"github.com/DataHenHQ/tillup/sessions"
"github.com/DataHenHQ/useragent"
"golang.org/x/net/publicsuffix"
)
var (
// Token is the Till auth token
Token string
// InstanceName is the name of this till instance
InstanceName string
ca tls.Certificate
okHeader = []byte("HTTP/1.1 200 OK\r\n\r\n")
// ForceUA indicates whether to overwrite all incoming user-agent with a random one
ForceUA = true
// UAType specifies what kind of user-agent to generate
UAType = "desktop"
dhHeadersRe = regexp.MustCompile(`(?i)^X-DH`)
// ProxyFile points to the path of the txt file that contains a list of proxies
ProxyFile = ""
// ProxyURLs are external proxies that will be randomized
ProxyURLs = []string{}
// ProxyCount is the total count of proxies used.
ProxyCount int
// ReleaseVersion is the version of Till release
ReleaseVersion = "dev"
StatMu *tillclient.InstanceStatMutex
// CacheConfig is the cache specific config
CacheConfig cache.Config
// LoggerConfig is the logger specific config
LoggerConfig logger.Config
// SessionsConfig is the sessions specific config
SessionsConfig sessions.Config
)
func NewPageFromRequest(r *http.Request, scheme string, pconf *PageConfig) (p *pages.Page, err error) {
p = new(pages.Page)
u := r.URL
u.Host = r.Host
u.Scheme = scheme
p.SetURL(u.String())
p.SetMethod(r.Method)
// build the page headers
nh := map[string]interface{}{}
for name, values := range r.Header {
nh[name] = strings.Join(values, ",")
}
// remove the User-Agent header if we force a random user agent
if pconf.ForceUA {
delete(nh, "User-Agent")
}
// delete any other proxy related header
delete(nh, "Proxy-Connection")
// finally set the header
p.SetHeaders(nh)
// fetch type will always be "standard" for Till
p.FetchType = "standard"
p.UaType = pconf.UaType
// read the request body, save it and set it back to the request body
rBody, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewReader(rBody))
p.SetBody(string(rBody))
// set defaults
p.SetUaType(pconf.UaType)
p.SetFetchType("standard")
p.SetPageType("default")
// set the GID
gid, err := pages.GenerateGID(p)
if err != nil {
return nil, err
}
p.SetGID(gid)
return p, nil
}
func logReqSummary(gid, method, url string, respStatus int, cachehit bool) {
cacheType := "MISS"
if cachehit {
cacheType = "HIT "
}
fmt.Println(cacheType, gid, method, url, respStatus)
}
func sendToTarget(ctx context.Context, sconn net.Conn, sreq *http.Request, scheme string, p *pages.Page, pconf *PageConfig) (tresp *http.Response, err error) {
var sess *sessions.Session
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// check if a past response exists in the cache; if so, return it.
cresp, err := cache.GetResponse(ctx, p.GID, pconf.CacheFreshness, pconf.CacheServeFailures)
if err != nil {
return nil, err
}
// if cachehit then return the cached response
if cresp != nil {
// Increment the CacheHits stats
incrCacheHitStatDelta()
// Increment the successful or failed requests, and total requests
if sessions.IsSuccess(cresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), cresp.StatusCode, true)
// Build the target req and resp specifically for logging.
_, treq, terr := buildTargetRequest(scheme, sreq, pconf, sess, p)
// defer treq.Body.Close()
if terr == nil && treq != nil {
// record request and response to the logger
_, tlerr := logger.StoreItem(ctx, p.GID, treq, cresp, time.Now(), true, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
}
return cresp, nil
}
}
// If StickySession is allowed, then set the sticky session
if features.Allow(features.StickySessions) && pconf.SessionID != "" {
// get a session, or a create a new one if it doesn't exist yet.
sess, err = sessions.GetOrCreateStickySession(ctx, pconf.SessionID, (sessions.PageConfig)(*pconf))
if err != nil {
return nil, err
}
}
// build the target request from the source request
tclient, treq, err := buildTargetRequest(scheme, sreq, pconf, sess, p)
if err != nil {
return nil, err
}
// record request now, and the logger.Response will be set later once the response comes back.
rid, tlerr := logger.StoreItem(ctx, p.GID, treq, nil, time.Now(), false, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
// send the actual request to target server
tresp, err = tclient.Do(treq)
if err != nil {
return nil, err
}
if sessions.IsSuccess(tresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
// save the cookies from cookiejar to the session
if sess != nil && !sess.IsZero() {
if pconf.StickyCookies {
if sess.Cookies == nil {
sess.Cookies = sessions.CookieMap{}
}
sess.Cookies.Set(treq.URL, tclient.Jar.Cookies(treq.URL))
}
sessions.SaveSession(ctx, sess)
}
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// Store the response to cache
err := cache.StoreResponse(ctx, p.GID, tresp, nil)
if err != nil {
return nil, err
}
// Increment the CacheSets stats
incrCacheSetStatDelta()
}
// log the request summary
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), tresp.StatusCode, false)
// update response on the logger
tlerr = logger.UpdateItemResponse(ctx, rid, tresp, sess)
if tlerr != nil {
return nil, tlerr
}
return tresp, err
}
// buildTargetRequest builds the target client and request from the source request, page config, and session.
func buildTargetRequest(scheme string, sreq *http.Request, pconf *PageConfig, sess *sessions.Session, p *pages.Page) (*http.Client, *http.Request, error) {
// create transport for client
t := &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
DisableCompression: false,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConns: 1,
MaxIdleConnsPerHost: 1,
IdleConnTimeout: 1 * time.Millisecond,
MaxConnsPerHost: 1,
}
defer t.CloseIdleConnections()
// set proxy if specified
if pconf.UseProxy {
// use the Till session's proxy URL, or pick a random proxy
var u string
if sess != nil {
u = sess.ProxyURL
}
if u == "" {
u = getRandom(ProxyURLs)
}
// set the proxy
p, err := url.Parse(u)
if err != nil {
return nil, nil, err
}
t.Proxy = http.ProxyURL(p)
}
// create cookiejar
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
if err != nil {
return nil, nil, err
}
// create target client
tclient := &http.Client{
Timeout: 120 * time.Second,
Transport: t,
Jar: jar,
}
// copy the body as *bytes.Reader to properly set the treq's body and content-length
srBody, _ := ioutil.ReadAll(sreq.Body)
sreq.Body = ioutil.NopCloser(bytes.NewReader(srBody))
p.SetBody(string(srBody))
// create target request
treq, err := http.NewRequestWithContext(sreq.Context(), sreq.Method, sreq.RequestURI, bytes.NewReader(srBody))
if err != nil {
return nil, nil, err
}
// build the target request
u := sreq.URL
u.Host = sreq.Host
u.Scheme = scheme
treq.URL = u
treq.Host = u.Host
// if there are cookies on the session, set them in the cookiejar
if sess != nil && len(sess.Cookies) > 0 {
if pconf.StickyCookies |
}
// copy source headers into target headers
th := copySourceHeaders(sreq.Header)
if th != nil {
treq.Header = th
}
// Delete headers related to proxy usage
treq.Header.Del("Proxy-Connection")
// if ForceUA is true, then override User-Agent header with a random UA
if ForceUA {
// use the Till session's user agent, or generate a random one
var ua string
if sess != nil {
ua = sess.UserAgent
}
if ua == "" {
ua, err = generateRandomUA(UAType)
if err != nil {
return nil, nil, err
}
}
// Set the ua on the target header
th.Set("User-Agent", ua)
}
return tclient, treq, nil
}
// copySourceHeaders copies source headers, other than those that start with X-DH*, into the target headers
func copySourceHeaders(sh http.Header) (th http.Header) {
th = make(http.Header)
if sh == nil {
return nil
}
for key, values := range sh {
if dhHeadersRe.MatchString(key) {
continue
}
for _, val := range values {
th.Add(key, val)
}
}
return th
}
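// Illustrative behavior of copySourceHeaders (hypothetical header values):
// given a source header set of
//   {"Accept": ["*/*"], "X-DH-GID": ["abc"], "X-Dh-Page-Config": ["{}"]}
// the returned target headers contain only {"Accept": ["*/*"]}, because both
// X-DH* keys match dhHeadersRe case-insensitively and are skipped.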
// generateRandomUA generates a random User-Agent string of the given type.
func generateRandomUA(uaType string) (ua string, err error) {
switch uaType {
case "desktop":
ua, err = useragent.Desktop()
if err != nil {
return "", err
}
case "mobile":
ua = useragent.Mobile()
}
if ua == "" {
return "", errors.New(fmt.Sprint("generated empty user agent string for", uaType))
}
return ua, nil
}
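// Usage sketch for generateRandomUA (hypothetical caller, using the
// package-level UAType):
//   ua, err := generateRandomUA(UAType)
//   if err != nil {
//       // unsupported uaType or an empty UA was produced
//   }
//   treq.Header.Set("User-Agent", ua)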
func writeToSource(sconn net.Conn, tresp *http.Response, p *pages.Page) (err error) {
// add X-DH-GID to the response
if p != nil {
tresp.Header.Set("X-DH-GID", p.GetGID())
}
return tresp.Write(sconn)
}
// Atomically increments request delta in the instance stat
func incrRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the requests counter
*(StatMu.InstanceStat.Requests) = *(StatMu.InstanceStat.Requests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments intercepted request delta in the instance stat
func incrInterceptedRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the intercepted requests counter
*(StatMu.InstanceStat.InterceptedRequests) = *(StatMu.InstanceStat.InterceptedRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments failed request delta in the instance stat
func incrFailedRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the failed requests counter
*(StatMu.InstanceStat.FailedRequests) = *(StatMu.InstanceStat.FailedRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments successful request delta in the instance stat
func incrSuccessfulRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the successful requests counter
*(StatMu.InstanceStat.SuccessfulRequests) = *(StatMu.InstanceStat.SuccessfulRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments cache hit delta in the instance stat
func incrCacheHitStatDelta() {
StatMu.Mutex.Lock()
// increment the CacheHits counter
*(StatMu.InstanceStat.CacheHits) = *(StatMu.InstanceStat.CacheHits) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments cache set delta in the instance stat
func incrCacheSetStatDelta() {
StatMu.Mutex.Lock()
// increment the CacheSets counter
*(StatMu.InstanceStat.CacheSets) = *(StatMu.InstanceStat.CacheSets) + uint64(1)
StatMu.Mutex.Unlock()
}
| {
tclient.Jar.SetCookies(treq.URL, sess.Cookies.Get(u))
} | conditional_block |
proxy.go | package proxy
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/cookiejar"
"net/url"
"regexp"
"strings"
"time"
"github.com/DataHenHQ/datahen/pages"
"github.com/DataHenHQ/till/internal/tillclient"
"github.com/DataHenHQ/tillup/cache"
"github.com/DataHenHQ/tillup/features"
"github.com/DataHenHQ/tillup/logger"
"github.com/DataHenHQ/tillup/sessions"
"github.com/DataHenHQ/useragent"
"golang.org/x/net/publicsuffix"
)
var (
// Token is the Till auth token
Token string
// InstanceName is the name of this till instance
InstanceName string
ca tls.Certificate
okHeader = []byte("HTTP/1.1 200 OK\r\n\r\n")
// ForceUA indicates whether to overwrite all incoming user-agent with a random one
ForceUA = true
// UAType specifies what kind of user-agent to generate
UAType = "desktop"
dhHeadersRe = regexp.MustCompile(`(?i)^X-DH`)
// ProxyFile points to the path of the txt file that contains a list of proxies
ProxyFile = ""
// ProxyURLs are external proxies that will be randomized
ProxyURLs = []string{}
// ProxyCount is the total count of proxies used.
ProxyCount int
// ReleaseVersion is the version of Till release
ReleaseVersion = "dev"
StatMu *tillclient.InstanceStatMutex
// CacheConfig is the cache-specific config
CacheConfig cache.Config
// LoggerConfig is the logger specific config
LoggerConfig logger.Config
// SessionsConfig is the sessions specific config
SessionsConfig sessions.Config
)
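// NewPageFromRequest builds a pages.Page from the incoming source request,
// normalizing its URL, headers and body, then assigns the page its GID.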
func NewPageFromRequest(r *http.Request, scheme string, pconf *PageConfig) (p *pages.Page, err error) {
p = new(pages.Page)
u := r.URL
u.Host = r.Host
u.Scheme = scheme
p.SetURL(u.String())
p.SetMethod(r.Method)
// build the page headers
nh := map[string]interface{}{}
for name, values := range r.Header {
nh[name] = strings.Join(values, ",")
}
// remove the User-Agent header if we force the user agent
if pconf.ForceUA {
delete(nh, "User-Agent")
}
// delete any other proxy related header
delete(nh, "Proxy-Connection")
// finally set the header
p.SetHeaders(nh)
// fetch type will always be "standard" for Till
p.FetchType = "standard"
p.UaType = pconf.UaType
// read the request body, save it and set it back to the request body
rBody, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewReader(rBody))
p.SetBody(string(rBody))
// set defaults
p.SetUaType(pconf.UaType)
p.SetFetchType("standard")
p.SetPageType("default")
// set the GID
gid, err := pages.GenerateGID(p)
if err != nil {
return nil, err
}
p.SetGID(gid)
return p, nil
}
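// Minimal usage sketch for NewPageFromRequest (hypothetical caller; pconf is
// assumed to be a populated *PageConfig for the matched page):
//   p, err := NewPageFromRequest(r, "https", pconf)
//   if err != nil {
//       return err
//   }
//   fmt.Println("computed GID:", p.GetGID())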
func logReqSummary(gid, method, url string, respStatus int, cachehit bool) {
cacheType := "MISS"
if cachehit {
cacheType = "HIT "
}
fmt.Println(cacheType, gid, method, url, respStatus)
}
func sendToTarget(ctx context.Context, sconn net.Conn, sreq *http.Request, scheme string, p *pages.Page, pconf *PageConfig) (tresp *http.Response, err error) {
var sess *sessions.Session
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// check whether a past response exists in the cache; if so, return it.
cresp, err := cache.GetResponse(ctx, p.GID, pconf.CacheFreshness, pconf.CacheServeFailures)
if err != nil {
return nil, err
}
// if it's a cache hit, return the cached response
if cresp != nil {
// Increment the CacheHits stats
incrCacheHitStatDelta()
// Increment the successful or failed requests, and total requests
if sessions.IsSuccess(cresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), cresp.StatusCode, true)
// Build the target req and resp specifically for logging.
_, treq, terr := buildTargetRequest(scheme, sreq, pconf, sess, p)
// defer treq.Body.Close()
if terr == nil && treq != nil {
// record request and response to the logger
_, tlerr := logger.StoreItem(ctx, p.GID, treq, cresp, time.Now(), true, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
}
return cresp, nil
}
}
// If StickySession is allowed, then set the sticky session
if features.Allow(features.StickySessions) && pconf.SessionID != "" {
// get a session, or create a new one if it doesn't exist yet.
sess, err = sessions.GetOrCreateStickySession(ctx, pconf.SessionID, (sessions.PageConfig)(*pconf))
if err != nil {
return nil, err
}
}
// build the target request from the source request
tclient, treq, err := buildTargetRequest(scheme, sreq, pconf, sess, p)
if err != nil {
return nil, err
}
// record request now, and the logger.Response will be set later once the response comes back.
rid, tlerr := logger.StoreItem(ctx, p.GID, treq, nil, time.Now(), false, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
// send the actual request to target server
tresp, err = tclient.Do(treq)
if err != nil {
return nil, err
}
if sessions.IsSuccess(tresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
// save the cookies from cookiejar to the session
if sess != nil && !sess.IsZero() {
if pconf.StickyCookies {
if sess.Cookies == nil {
sess.Cookies = sessions.CookieMap{}
}
sess.Cookies.Set(treq.URL, tclient.Jar.Cookies(treq.URL))
}
sessions.SaveSession(ctx, sess)
}
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// Store the response to cache
err := cache.StoreResponse(ctx, p.GID, tresp, nil)
if err != nil {
return nil, err
}
// Increment the CacheSets stats
incrCacheSetStatDelta()
}
// log the request summary
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), tresp.StatusCode, false)
// update response on the logger
tlerr = logger.UpdateItemResponse(ctx, rid, tresp, sess)
if tlerr != nil {
return nil, tlerr
}
return tresp, err
}
// buildTargetRequest builds the target client and request from the source request.
func buildTargetRequest(scheme string, sreq *http.Request, pconf *PageConfig, sess *sessions.Session, p *pages.Page) (*http.Client, *http.Request, error) {
// create transport for client
t := &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
DisableCompression: false,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConns: 1,
MaxIdleConnsPerHost: 1,
IdleConnTimeout: 1 * time.Millisecond,
MaxConnsPerHost: 1,
}
defer t.CloseIdleConnections()
// set proxy if specified
if pconf.UseProxy {
// use the Till session's proxy URL, or pick a random proxy
var u string
if sess != nil {
u = sess.ProxyURL
}
if u == "" {
u = getRandom(ProxyURLs)
}
// set the proxy
p, err := url.Parse(u)
if err != nil {
return nil, nil, err
}
t.Proxy = http.ProxyURL(p)
}
// create cookiejar
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
if err != nil {
return nil, nil, err
}
// create target client
tclient := &http.Client{
Timeout: 120 * time.Second,
Transport: t,
Jar: jar,
}
// copy the body as *bytes.Reader to properly set the treq's body and content-length
srBody, _ := ioutil.ReadAll(sreq.Body)
sreq.Body = ioutil.NopCloser(bytes.NewReader(srBody))
p.SetBody(string(srBody))
// create target request
treq, err := http.NewRequestWithContext(sreq.Context(), sreq.Method, sreq.RequestURI, bytes.NewReader(srBody))
if err != nil {
return nil, nil, err
}
// build the target request
u := sreq.URL
u.Host = sreq.Host
u.Scheme = scheme
treq.URL = u
treq.Host = u.Host
// if there are cookies on the session, set them in the cookiejar
if sess != nil && len(sess.Cookies) > 0 {
if pconf.StickyCookies {
tclient.Jar.SetCookies(treq.URL, sess.Cookies.Get(u))
}
}
// copy source headers into target headers
th := copySourceHeaders(sreq.Header)
if th != nil {
treq.Header = th
}
// Delete headers related to proxy usage
treq.Header.Del("Proxy-Connection")
// if ForceUA is true, then override User-Agent header with a random UA
if ForceUA {
// use the Till session's user agent, or generate a random one
var ua string
if sess != nil {
ua = sess.UserAgent
}
if ua == "" {
ua, err = generateRandomUA(UAType)
if err != nil {
return nil, nil, err
}
}
// Set the ua on the target header
th.Set("User-Agent", ua)
}
return tclient, treq, nil
}
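// Note on the transport used by buildTargetRequest: MaxIdleConns and
// MaxIdleConnsPerHost of 1, combined with a 1ms IdleConnTimeout, effectively
// disable connection reuse, so every proxied fetch dials a fresh connection
// (presumably so per-request proxy and TLS settings never leak across
// requests).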
// copySourceHeaders copies source headers, other than those that start with X-DH*, into the target headers
func | (sh http.Header) (th http.Header) {
th = make(http.Header)
if sh == nil {
return nil
}
for key, values := range sh {
if dhHeadersRe.MatchString(key) {
continue
}
for _, val := range values {
th.Add(key, val)
}
}
return th
}
// generateRandomUA generates a random User-Agent string of the given type.
func generateRandomUA(uaType string) (ua string, err error) {
switch uaType {
case "desktop":
ua, err = useragent.Desktop()
if err != nil {
return "", err
}
case "mobile":
ua = useragent.Mobile()
}
if ua == "" {
return "", errors.New(fmt.Sprint("generated empty user agent string for", uaType))
}
return ua, nil
}
func writeToSource(sconn net.Conn, tresp *http.Response, p *pages.Page) (err error) {
// add X-DH-GID to the response
if p != nil {
tresp.Header.Set("X-DH-GID", p.GetGID())
}
return tresp.Write(sconn)
}
// Atomically increments request delta in the instance stat
func incrRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the requests counter
*(StatMu.InstanceStat.Requests) = *(StatMu.InstanceStat.Requests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments intercepted request delta in the instance stat
func incrInterceptedRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the intercepted requests counter
*(StatMu.InstanceStat.InterceptedRequests) = *(StatMu.InstanceStat.InterceptedRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments failed request delta in the instance stat
func incrFailedRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the failed requests counter
*(StatMu.InstanceStat.FailedRequests) = *(StatMu.InstanceStat.FailedRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments successful request delta in the instance stat
func incrSuccessfulRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the successful requests counter
*(StatMu.InstanceStat.SuccessfulRequests) = *(StatMu.InstanceStat.SuccessfulRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments cache hit delta in the instance stat
func incrCacheHitStatDelta() {
StatMu.Mutex.Lock()
// increment the CacheHits counter
*(StatMu.InstanceStat.CacheHits) = *(StatMu.InstanceStat.CacheHits) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments cache set delta in the instance stat
func incrCacheSetStatDelta() {
StatMu.Mutex.Lock()
// increment the CacheSets counter
*(StatMu.InstanceStat.CacheSets) = *(StatMu.InstanceStat.CacheSets) + uint64(1)
StatMu.Mutex.Unlock()
}
| copySourceHeaders | identifier_name |
proxy.go | package proxy
import (
"bytes"
"context"
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/cookiejar"
"net/url"
"regexp"
"strings"
"time"
"github.com/DataHenHQ/datahen/pages"
"github.com/DataHenHQ/till/internal/tillclient"
"github.com/DataHenHQ/tillup/cache"
"github.com/DataHenHQ/tillup/features"
"github.com/DataHenHQ/tillup/logger"
"github.com/DataHenHQ/tillup/sessions"
"github.com/DataHenHQ/useragent"
"golang.org/x/net/publicsuffix"
)
var (
// Token is the Till auth token
Token string
// InstanceName is the name of this till instance
InstanceName string
ca tls.Certificate
okHeader = []byte("HTTP/1.1 200 OK\r\n\r\n")
// ForceUA indicates whether to overwrite all incoming user-agent with a random one
ForceUA = true
// UAType specifies what kind of user-agent to generate
UAType = "desktop"
dhHeadersRe = regexp.MustCompile(`(?i)^X-DH`)
// ProxyFile points to the path of the txt file that contains a list of proxies
ProxyFile = ""
// ProxyURLs are external proxies that will be randomized
ProxyURLs = []string{}
// ProxyCount is the total count of proxies used.
ProxyCount int
// ReleaseVersion is the version of Till release
ReleaseVersion = "dev"
StatMu *tillclient.InstanceStatMutex
// CacheConfig is the cache-specific config
CacheConfig cache.Config
// LoggerConfig is the logger specific config
LoggerConfig logger.Config
// SessionsConfig is the sessions specific config
SessionsConfig sessions.Config
)
func NewPageFromRequest(r *http.Request, scheme string, pconf *PageConfig) (p *pages.Page, err error) {
p = new(pages.Page)
u := r.URL
u.Host = r.Host
u.Scheme = scheme
p.SetURL(u.String())
p.SetMethod(r.Method)
// build the page headers
nh := map[string]interface{}{}
for name, values := range r.Header {
nh[name] = strings.Join(values, ",")
}
// remove the User-Agent header if we force the user agent
if pconf.ForceUA {
delete(nh, "User-Agent")
}
// delete any other proxy related header
delete(nh, "Proxy-Connection")
// finally set the header
p.SetHeaders(nh)
// fetch type will always be "standard" for Till
p.FetchType = "standard"
p.UaType = pconf.UaType
// read the request body, save it and set it back to the request body
rBody, _ := ioutil.ReadAll(r.Body)
r.Body = ioutil.NopCloser(bytes.NewReader(rBody))
p.SetBody(string(rBody))
// set defaults
p.SetUaType(pconf.UaType)
p.SetFetchType("standard")
p.SetPageType("default")
// set the GID
gid, err := pages.GenerateGID(p)
if err != nil {
return nil, err
}
p.SetGID(gid)
return p, nil
}
func logReqSummary(gid, method, url string, respStatus int, cachehit bool) {
cacheType := "MISS"
if cachehit {
cacheType = "HIT "
}
fmt.Println(cacheType, gid, method, url, respStatus)
}
func sendToTarget(ctx context.Context, sconn net.Conn, sreq *http.Request, scheme string, p *pages.Page, pconf *PageConfig) (tresp *http.Response, err error) {
var sess *sessions.Session
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// check whether a past response exists in the cache; if so, return it.
cresp, err := cache.GetResponse(ctx, p.GID, pconf.CacheFreshness, pconf.CacheServeFailures)
if err != nil {
return nil, err
}
// if it's a cache hit, return the cached response
if cresp != nil {
// Increment the CacheHits stats
incrCacheHitStatDelta()
// Increment the successful or failed requests, and total requests
if sessions.IsSuccess(cresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), cresp.StatusCode, true)
// Build the target req and resp specifically for logging.
_, treq, terr := buildTargetRequest(scheme, sreq, pconf, sess, p)
// defer treq.Body.Close()
if terr == nil && treq != nil {
// record request and response to the logger
_, tlerr := logger.StoreItem(ctx, p.GID, treq, cresp, time.Now(), true, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
}
return cresp, nil
}
}
// If StickySession is allowed, then set the sticky session
if features.Allow(features.StickySessions) && pconf.SessionID != "" {
// get a session, or create a new one if it doesn't exist yet.
sess, err = sessions.GetOrCreateStickySession(ctx, pconf.SessionID, (sessions.PageConfig)(*pconf))
if err != nil {
return nil, err
}
}
// build the target request from the source request
tclient, treq, err := buildTargetRequest(scheme, sreq, pconf, sess, p)
if err != nil {
return nil, err
}
// record request now, and the logger.Response will be set later once the response comes back.
rid, tlerr := logger.StoreItem(ctx, p.GID, treq, nil, time.Now(), false, (sessions.PageConfig)(*pconf), sess)
if tlerr != nil {
return nil, tlerr
}
// send the actual request to target server
tresp, err = tclient.Do(treq)
if err != nil {
return nil, err
}
if sessions.IsSuccess(tresp.StatusCode) {
incrSuccessfulRequestStatDelta()
} else {
incrFailedRequestStatDelta()
}
incrRequestStatDelta()
// save the cookies from cookiejar to the session
if sess != nil && !sess.IsZero() {
if pconf.StickyCookies {
if sess.Cookies == nil {
sess.Cookies = sessions.CookieMap{}
}
sess.Cookies.Set(treq.URL, tclient.Jar.Cookies(treq.URL))
}
sessions.SaveSession(ctx, sess)
}
if features.Allow(features.Cache) && !CacheConfig.Disabled {
// Store the response to cache
err := cache.StoreResponse(ctx, p.GID, tresp, nil)
if err != nil {
return nil, err
}
// Increment the CacheSets stats
incrCacheSetStatDelta()
}
// log the request summary
logReqSummary(p.GID, sreq.Method, sreq.URL.String(), tresp.StatusCode, false)
// update response on the logger
tlerr = logger.UpdateItemResponse(ctx, rid, tresp, sess)
if tlerr != nil {
return nil, tlerr
}
return tresp, err
}
// buildTargetRequest builds the target client and request from the source request.
func buildTargetRequest(scheme string, sreq *http.Request, pconf *PageConfig, sess *sessions.Session, p *pages.Page) (*http.Client, *http.Request, error) {
// create transport for client
t := &http.Transport{
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
DisableCompression: false,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 60 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
MaxIdleConns: 1,
MaxIdleConnsPerHost: 1,
IdleConnTimeout: 1 * time.Millisecond,
MaxConnsPerHost: 1,
}
defer t.CloseIdleConnections()
// set proxy if specified
if pconf.UseProxy {
// use the Till session's proxy URL, or pick a random proxy
var u string
if sess != nil {
u = sess.ProxyURL
}
if u == "" {
u = getRandom(ProxyURLs)
}
// set the proxy
p, err := url.Parse(u)
if err != nil {
return nil, nil, err
}
t.Proxy = http.ProxyURL(p)
}
// create cookiejar
jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
if err != nil {
return nil, nil, err
}
// create target client
tclient := &http.Client{
Timeout: 120 * time.Second,
Transport: t,
Jar: jar,
}
// copy the body as *bytes.Reader to properly set the treq's body and content-length
srBody, _ := ioutil.ReadAll(sreq.Body)
sreq.Body = ioutil.NopCloser(bytes.NewReader(srBody))
p.SetBody(string(srBody))
// create target request
treq, err := http.NewRequestWithContext(sreq.Context(), sreq.Method, sreq.RequestURI, bytes.NewReader(srBody))
if err != nil {
return nil, nil, err
}
// build the target request
u := sreq.URL
u.Host = sreq.Host
u.Scheme = scheme
treq.URL = u
treq.Host = u.Host
// if there are cookies on the session, set them in the cookiejar
if sess != nil && len(sess.Cookies) > 0 {
if pconf.StickyCookies {
tclient.Jar.SetCookies(treq.URL, sess.Cookies.Get(u))
}
}
// copy source headers into target headers
th := copySourceHeaders(sreq.Header)
if th != nil {
treq.Header = th
}
// Delete headers related to proxy usage
treq.Header.Del("Proxy-Connection")
// if ForceUA is true, then override User-Agent header with a random UA
if ForceUA {
// use the Till session's user agent, or generate a random one
var ua string
if sess != nil {
ua = sess.UserAgent
}
if ua == "" {
ua, err = generateRandomUA(UAType)
if err != nil {
return nil, nil, err
}
}
// Set the ua on the target header
th.Set("User-Agent", ua)
}
return tclient, treq, nil
}
// copySourceHeaders copies source headers, other than those that start with X-DH*, into the target headers
func copySourceHeaders(sh http.Header) (th http.Header) {
th = make(http.Header)
if sh == nil {
return nil
}
for key, values := range sh {
if dhHeadersRe.MatchString(key) {
continue
}
for _, val := range values {
th.Add(key, val)
}
}
return th
}
// generateRandomUA generates a random User-Agent string of the given type.
func generateRandomUA(uaType string) (ua string, err error) {
switch uaType {
case "desktop":
ua, err = useragent.Desktop()
if err != nil {
return "", err
}
case "mobile":
ua = useragent.Mobile()
}
if ua == "" {
return "", errors.New(fmt.Sprint("generated empty user agent string for", uaType))
}
return ua, nil
}
func writeToSource(sconn net.Conn, tresp *http.Response, p *pages.Page) (err error) {
// add X-DH-GID to the response
if p != nil {
tresp.Header.Set("X-DH-GID", p.GetGID())
}
return tresp.Write(sconn)
}
// Atomically increments request delta in the instance stat
func incrRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the requests counter
*(StatMu.InstanceStat.Requests) = *(StatMu.InstanceStat.Requests) + uint64(1)
StatMu.Mutex.Unlock()
}
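// A lock-free alternative sketch (not what this file uses; shown only for
// comparison, and assuming readers of the stat would also switch to atomic
// loads):
//   atomic.AddUint64(StatMu.InstanceStat.Requests, 1)
// The mutex form above lets code that reads several stat fields take one
// consistent snapshot under the same lock.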
// Atomically increments intercepted request delta in the instance stat
func incrInterceptedRequestStatDelta() {
StatMu.Mutex.Lock()
| // increment the requests counter
*(StatMu.InstanceStat.InterceptedRequests) = *(StatMu.InstanceStat.InterceptedRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments failed request delta in the instance stat
func incrFailedRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the failed requests counter
*(StatMu.InstanceStat.FailedRequests) = *(StatMu.InstanceStat.FailedRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments successful request delta in the instance stat
func incrSuccessfulRequestStatDelta() {
StatMu.Mutex.Lock()
// increment the successful requests counter
*(StatMu.InstanceStat.SuccessfulRequests) = *(StatMu.InstanceStat.SuccessfulRequests) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments cache hit delta in the instance stat
func incrCacheHitStatDelta() {
StatMu.Mutex.Lock()
// increment the CacheHits counter
*(StatMu.InstanceStat.CacheHits) = *(StatMu.InstanceStat.CacheHits) + uint64(1)
StatMu.Mutex.Unlock()
}
// Atomically increments cache set delta in the instance stat
func incrCacheSetStatDelta() {
StatMu.Mutex.Lock()
// increment the CacheSets counter
*(StatMu.InstanceStat.CacheSets) = *(StatMu.InstanceStat.CacheSets) + uint64(1)
StatMu.Mutex.Unlock()
} | random_line_split |
|
fmt.rs | //! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters which explicitly name their argument do not affect
//! parameters which do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension which allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named argument:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments which have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in the syntax above). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//!
//! ```
//! // All of these print "Hello x !"
//! println!("Hello {:5}!", "x");
//! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
//! ```
//!
//! The optional fill character and alignment is provided normally in conjunction with the
//! [`width`](#width) parameter. It must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The default
//! for numeric formatters is also a space, but with right-alignment. If
//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! Note that alignment may not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for the
//! `Signed` trait. This flag indicates that the correct sign (`+` or `-`)
//! should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeroes are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeroes are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
//! first input holds the `usize` precision, and the second holds the value to print. Note that
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print two significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, ` 123` has 3 right-aligned characters
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale, and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := <text> [ maybe-format <text> ] *
//! maybe-format := '{' '{' | '}' '}' | <format>
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision][type]
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := identifier | '?' | ''
//! count := parameter | integer
//! parameter := argument '$'
//! ```
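//!
//! As a worked example (ours, not part of the grammar above): in the format
//! spec `{0:!^8.2}` the argument is `0`, the fill is `!`, the alignment is
//! `^` (centered), the width is `8`, and the precision is `2`:
//!
//! ```
//! assert_eq!(format!("{0:!^8.2}", 3.14159), "!!3.14!!");
//! ```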
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`](trait.Octal.html)
//! * `x` ⇒ [`LowerHex`](trait.LowerHex.html)
//! * `X` ⇒ [`UpperHex`](trait.UpperHex.html)
//! * `p` ⇒ [`Pointer`](trait.Pointer.html)
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`](trait.LowerExp.html)
//! * `E` ⇒ [`UpperExp`](trait.UpperExp.html)
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of [`Result`]`<(), `[`std::fmt::Error`]`>`. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! This is a curious macro which is used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with formatting strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! [`usize`]: ../../std/primitive.usize.html
//! [`isize`]: ../../std/primitive.isize.html
//! [`i8`]: ../../std/primitive.i8.html
//! [`Display`]: trait.Display.html
//! [`Binary`]: trait.Binary.html
//! [`fmt::Result`]: type.Result.html
//! [`Result`]: ../../std/result/enum.Result.html
//! [`std::fmt::Error`]: struct.Error.html
//! [`Formatter`]: struct.Formatter.html
//! [`write!`]: ../../std/macro.write.html
//! [`Debug`]: trait.Debug.html | //! [`format!`]: ../../std/macro.format.html
//! [`to_string`]: ../../std/string/trait.ToString.html
//! [`writeln!`]: ../../std/macro.writeln.html
//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]: ../../std/io/trait.Write.html
//! [`print!`]: ../../std/macro.print.html
//! [`println!`]: ../../std/macro.println.html
//! [`eprint!`]: ../../std/macro.eprint.html
//! [`eprintln!`]: ../../std/macro.eprintln.html
//! [`write!`]: ../../std/macro.write.html
//! [`format_args!`]: ../../std/macro.format_args.html
//! [`fmt::Arguments`]: struct.Arguments.html
//! [`write`]: fn.write.html
//! [`format`]: fn.format.html
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`Arguments`]: struct.Arguments.html
/// [`format_args!`]: ../../std/macro.format_args.html
/// [`format!`]: ../../std/macro.format.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments<'_>) -> string::String {
let capacity = args.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
} | random_line_split |
|
fmt.rs | //! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters which explicitly name their argument do not affect
//! parameters which do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension which allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named argument:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments which have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in the syntax above). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//!
//! ```
//! // All of these print "Hello x !"
//! println!("Hello {:5}!", "x");
//! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
//! ```
//!
//! The optional fill character and alignment is provided normally in conjunction with the
//! [`width`](#width) parameter. It must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The default
//! for numeric formatters is also a space, but with right-alignment. If
//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! Note that alignment may not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for the
//! `Signed` trait. This flag indicates that the correct sign (`+` or `-`)
//! should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeroes are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeroes are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
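//!
//! A short illustration of the sign- and prefix-aware zero padding described
//! above:
//!
//! ```
//! assert_eq!(format!("{:+08}", 5), "+0000005"); // sign first, zeros after
//! assert_eq!(format!("{:#06x}", 27), "0x001b"); // prefix first, zeros after
//! ```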
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
//! first input holds the `usize` precision, and the second holds the value to print. Note that
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print three significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, `     123` has 3 right-aligned characters
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale, and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := <text> [ maybe-format <text> ] *
//! maybe-format := '{' '{' | '}' '}' | <format>
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision][type]
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := identifier | '?' | ''
//! count := parameter | integer
//! parameter := argument '$'
//! ```
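//!
//! As one example that exercises most of this grammar at once, the spec below
//! combines a fill of `~`, center alignment, a sign, a width and a precision:
//!
//! ```
//! assert_eq!(format!("{:~^+10.3}", 3.14159), "~~+3.142~~");
//! ```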
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`](trait.Octal.html)
//! * `x` ⇒ [`LowerHex`](trait.LowerHex.html)
//! * `X` ⇒ [`UpperHex`](trait.UpperHex.html)
//! * `p` ⇒ [`Pointer`](trait.Pointer.html)
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`](trait.LowerExp.html)
//! * `E` ⇒ [`UpperExp`](trait.UpperExp.html)
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
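//!
//! For example, the same integer can be rendered through several of these
//! traits:
//!
//! ```
//! assert_eq!(format!("{}", 42), "42"); // Display
//! assert_eq!(format!("{:b}", 42), "101010"); // Binary
//! assert_eq!(format!("{:o}", 42), "52"); // Octal
//! assert_eq!(format!("{:x}", 42), "2a"); // LowerHex
//! ```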
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of [`Result`]`<(), `[`std::fmt::Error`]`>`. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! This is a curious macro which is used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with formatting strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
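//!
//! A minimal sketch of that pattern (the `log_to` function and its `sink`
//! parameter are hypothetical names, not an existing logging API):
//!
//! ```
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! // The sink is chosen at the last moment; no string is built up front.
//! fn log_to(sink: &mut dyn io::Write, args: fmt::Arguments) {
//!     let _ = sink.write_fmt(args);
//! }
//!
//! let mut out = io::stderr();
//! log_to(&mut out, format_args!("level={} {}", "warn", "disk almost full"));
//! ```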
//!
//! [`usize`]: ../../std/primitive.usize.html
//! [`isize`]: ../../std/primitive.isize.html
//! [`i8`]: ../../std/primitive.i8.html
//! [`Display`]: trait.Display.html
//! [`Binary`]: trait.Binary.html
//! [`fmt::Result`]: type.Result.html
//! [`Result`]: ../../std/result/enum.Result.html
//! [`std::fmt::Error`]: struct.Error.html
//! [`Formatter`]: struct.Formatter.html
//! [`write!`]: ../../std/macro.write.html
//! [`Debug`]: trait.Debug.html
//! [`format!`]: ../../std/macro.format.html
//! [`to_string`]: ../../std/string/trait.ToString.html
//! [`writeln!`]: ../../std/macro.writeln.html
//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]: ../../std/io/trait.Write.html
//! [`print!`]: ../../std/macro.print.html
//! [`println!`]: ../../std/macro.println.html
//! [`eprint!`]: ../../std/macro.eprint.html
//! [`eprintln!`]: ../../std/macro.eprintln.html
//! [`write!`]: ../../std/macro.write.html
//! [`format_args!`]: ../../std/macro.format_args.html
//! [`fmt::Arguments`]: struct.Arguments.html
//! [`write`]: fn.write.html
//! [`format`]: fn.format.html
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`Arguments`]: struct.Arguments.html
/// [`format_args!`]: ../../std/macro.format_args.html
/// [`format!`]: ../../std/macro.format.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments<'_>) -> string::String {
let capacity = a | rgs.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
}
| identifier_body |
|
fmt.rs | //! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the arguments. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the arguments has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters which explicitly name their argument do not affect
//! parameters which do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
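//!
//! For example, reusing an argument is fine, while leaving one unused is a
//! compile-time error:
//!
//! ```
//! assert_eq!(format!("{0} {0}", "echo"), "echo echo"); // reuse is allowed
//! // format!("{}", 1, 2); // would not compile: argument never used
//! ```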
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension which allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named arguments:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments which have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in the syntax above). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//!
//! ```
//! // All of these print "Hello x    !"
//! println!("Hello {:5}!", "x");
//! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
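//!
//! As a quick sketch of that rule: below, the named `width$` reference does
//! not consume an argument, so the trailing `{}` still takes argument 1.
//!
//! ```
//! assert_eq!(format!("{:width$} {}", "x", 7, width = 5), "x     7");
//! ```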
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
//! ```
//!
//! The optional fill character and alignment are provided normally in conjunction with the
//! [`width`](#width) parameter. They must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The default for numeric formatters is also a space but with
//! right-alignment. If the `0` flag (see below) is specified for numerics, then
//! the implicit fill character is `0`.
//!
//! Note that alignment may not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. By default, positive signs are never printed
//! and a negative sign is emitted only for signed numeric types. This flag
//! forces the correct sign (`+` or `-`) to be printed in every case.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeroes are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeroes are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
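//!
//! A short illustration of the sign- and prefix-aware zero padding described
//! above:
//!
//! ```
//! assert_eq!(format!("{:08}", -1), "-0000001"); // sign first, zeros after
//! assert_eq!(format!("{:#010b}", 5), "0b00000101"); // prefix first, zeros after
//! ```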
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
//! first input holds the `usize` precision, and the second holds the value to print. Note that
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print three significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, `     123` has 3 right-aligned characters
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale, and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := <text> [ maybe-format <text> ] *
//! maybe-format := '{' '{' | '}' '}' | <format>
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision][type]
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := identifier | '?' | ''
//! count := parameter | integer
//! parameter := argument '$'
//! ```
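//!
//! As one example that exercises several of these grammar pieces at once,
//! the spec below combines a fill of `0`, right alignment, a width and a
//! precision:
//!
//! ```
//! assert_eq!(format!("{:0>5.1}", 2.71828), "002.7");
//! ```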
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`](trait.Octal.html)
//! * `x` ⇒ [`LowerHex`](trait.LowerHex.html)
//! * `X` ⇒ [`UpperHex`](trait.UpperHex.html)
//! * `p` ⇒ [`Pointer`](trait.Pointer.html)
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`](trait.LowerExp.html)
//! * `E` ⇒ [`UpperExp`](trait.UpperExp.html)
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
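//!
//! For example, the same values can be rendered through several of these
//! traits:
//!
//! ```
//! assert_eq!(format!("{:e}", 1234.5), "1.2345e3"); // LowerExp
//! assert_eq!(format!("{:X}", 255), "FF"); // UpperHex
//! assert_eq!(format!("{:#?}", vec![1]), "[\n    1,\n]"); // pretty-printed Debug
//! ```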
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of [`Result`]`<(), `[`std::fmt::Error`]`>`. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! This is a curious macro which is used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with formatting strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
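//!
//! A minimal sketch of that pattern (`render` is a hypothetical helper, not
//! an existing API):
//!
//! ```
//! use std::fmt::{self, Write};
//!
//! fn render(args: fmt::Arguments) -> String {
//!     let mut buf = String::new();
//!     buf.write_fmt(args).expect("writing to a String never fails");
//!     buf
//! }
//!
//! assert_eq!(render(format_args!("{} + {} = {}", 1, 2, 1 + 2)), "1 + 2 = 3");
//! ```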
//!
//! [`usize`]: ../../std/primitive.usize.html
//! [`isize`]: ../../std/primitive.isize.html
//! [`i8`]: ../../std/primitive.i8.html
//! [`Display`]: trait.Display.html
//! [`Binary`]: trait.Binary.html
//! [`fmt::Result`]: type.Result.html
//! [`Result`]: ../../std/result/enum.Result.html
//! [`std::fmt::Error`]: struct.Error.html
//! [`Formatter`]: struct.Formatter.html
//! [`write!`]: ../../std/macro.write.html
//! [`Debug`]: trait.Debug.html
//! [`format!`]: ../../std/macro.format.html
//! [`to_string`]: ../../std/string/trait.ToString.html
//! [`writeln!`]: ../../std/macro.writeln.html
//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]: ../../std/io/trait.Write.html
//! [`print!`]: ../../std/macro.print.html
//! [`println!`]: ../../std/macro.println.html
//! [`eprint!`]: ../../std/macro.eprint.html
//! [`eprintln!`]: ../../std/macro.eprintln.html
//! [`write!`]: ../../std/macro.write.html
//! [`format_args!`]: ../../std/macro.format_args.html
//! [`fmt::Arguments`]: struct.Arguments.html
//! [`write`]: fn.write.html
//! [`format`]: fn.format.html
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`Arguments`]: struct.Arguments.html
/// [`format_args!`]: ../../std/macro.format_args.html
/// [`format!`]: ../../std/macro.format.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments | -> string::String {
let capacity = args.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
}
| <'_>) | identifier_name |
index.ts | import BScroll, { MountedBScrollHTMLElement } from '@better-scroll/core'
import {
Direction,
EventEmitter,
extend,
warn,
findIndex,
} from '@better-scroll/shared-utils'
import BScrollFamily from './BScrollFamily'
import propertiesConfig from './propertiesConfig'
export const DEFAUL_GROUP_ID = 'INTERNAL_NESTED_SCROLL'
export type NestedScrollGroupId = string | number
export interface NestedScrollConfig {
groupId: NestedScrollGroupId
}
export type NestedScrollOptions = NestedScrollConfig | true
declare module '@better-scroll/core' {
interface CustomOptions {
nestedScroll?: NestedScrollOptions
}
interface CustomAPI {
nestedScroll: PluginAPI
}
}
interface PluginAPI {
purgeNestedScroll(groupId: NestedScrollGroupId): void
}
interface NestedScrollInstancesMap {
[key: string]: NestedScroll
[index: number]: NestedScroll
}
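// Stop any in-flight momentum animation so a new gesture starts from rest.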
const forceScrollStopHandler = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
if (scroll.pending) {
scroll.stop()
scroll.resetPosition()
}
})
}
const enableScrollHander = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
scroll.enable()
})
}
const disableScrollHander = (scrolls: BScroll[], currentScroll: BScroll) => {
scrolls.forEach((scroll) => {
if (
scroll.hasHorizontalScroll === currentScroll.hasHorizontalScroll ||
scroll.hasVerticalScroll === currentScroll.hasVerticalScroll
) {
scroll.disable()
}
})
}
const syncTouchstartData = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
const { actions, scrollBehaviorX, scrollBehaviorY } = scroll.scroller
    // prevent click from triggering many times
actions.fingerMoved = true
actions.contentMoved = false
actions.directionLockAction.reset()
scrollBehaviorX.start()
scrollBehaviorY.start()
scrollBehaviorX.resetStartPos()
scrollBehaviorY.resetStartPos()
actions.startTime = +new Date()
})
}
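// A scroll is "out of boundary" when it already sits at an edge and keeps
// moving further in that direction, which is the cue to hand the gesture
// over to its parent scroll.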
const isOutOfBoundary = (scroll: BScroll): boolean => {
const { | hasVerticalScroll,
x,
y,
minScrollX,
maxScrollX,
minScrollY,
maxScrollY,
movingDirectionX,
movingDirectionY,
} = scroll
let ret = false
const outOfLeftBoundary =
x >= minScrollX && movingDirectionX === Direction.Negative
const outOfRightBoundary =
x <= maxScrollX && movingDirectionX === Direction.Positive
const outOfTopBoundary =
y >= minScrollY && movingDirectionY === Direction.Negative
const outOfBottomBoundary =
y <= maxScrollY && movingDirectionY === Direction.Positive
if (hasVerticalScroll) {
ret = outOfTopBoundary || outOfBottomBoundary
} else if (hasHorizontalScroll) {
ret = outOfLeftBoundary || outOfRightBoundary
}
return ret
}
const isResettingPosition = (scroll: BScroll): boolean => {
const {
hasHorizontalScroll,
hasVerticalScroll,
x,
y,
minScrollX,
maxScrollX,
minScrollY,
maxScrollY,
} = scroll
let ret = false
const outOfLeftBoundary = x > minScrollX
const outOfRightBoundary = x < maxScrollX
const outOfTopBoundary = y > minScrollY
const outOfBottomBoundary = y < maxScrollY
if (hasVerticalScroll) {
ret = outOfTopBoundary || outOfBottomBoundary
} else if (hasHorizontalScroll) {
ret = outOfLeftBoundary || outOfRightBoundary
}
return ret
}
const resetPositionHandler = (scroll: BScroll) => {
scroll.scroller.reflow()
scroll.resetPosition(0 /* Immediately */)
}
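// Count how many DOM levels childNode sits below parentNode; used to rank
// ancestors and descendants by proximity.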
const calculateDistance = (
childNode: HTMLElement,
parentNode: HTMLElement
): number => {
let distance = 0
let parent = childNode.parentNode
while (parent && parent !== parentNode) {
distance++
parent = parent.parentNode
}
return distance
}
export default class NestedScroll implements PluginAPI {
static pluginName = 'nestedScroll'
static instancesMap: NestedScrollInstancesMap = {}
store: BScrollFamily[]
options: NestedScrollConfig
private hooksFn: Array<[EventEmitter, string, Function]>
constructor(scroll: BScroll) {
const groupId = this.handleOptions(scroll)
let instance = NestedScroll.instancesMap[groupId]
if (!instance) {
instance = NestedScroll.instancesMap[groupId] = this
instance.store = []
instance.hooksFn = []
}
instance.init(scroll)
return instance
}
static getAllNestedScrolls(): NestedScroll[] {
const instancesMap = NestedScroll.instancesMap
return Object.keys(instancesMap).map((key) => instancesMap[key])
}
static purgeAllNestedScrolls() {
const nestedScrolls = NestedScroll.getAllNestedScrolls()
nestedScrolls.forEach((ns) => ns.purgeNestedScroll())
}
private handleOptions(scroll: BScroll): number | string {
const userOptions = (scroll.options.nestedScroll === true
? {}
: scroll.options.nestedScroll) as NestedScrollConfig
const defaultOptions: NestedScrollConfig = {
groupId: DEFAUL_GROUP_ID,
}
this.options = extend(defaultOptions, userOptions)
const groupIdType = typeof this.options.groupId
if (groupIdType !== 'string' && groupIdType !== 'number') {
warn('groupId must be string or number for NestedScroll plugin')
}
return this.options.groupId
}
private init(scroll: BScroll) {
scroll.proxy(propertiesConfig)
this.addBScroll(scroll)
this.buildBScrollGraph()
this.analyzeBScrollGraph()
this.ensureEventInvokeSequence()
this.handleHooks(scroll)
}
private handleHooks(scroll: BScroll) {
this.registerHooks(scroll.hooks, scroll.hooks.eventTypes.destroy, () => {
this.deleteScroll(scroll)
})
}
deleteScroll(scroll: BScroll) {
const wrapper = scroll.wrapper as MountedBScrollHTMLElement
wrapper.isBScrollContainer = undefined
const store = this.store
const hooksFn = this.hooksFn
const i = findIndex(store, (bscrollFamily) => {
return bscrollFamily.selfScroll === scroll
})
if (i > -1) {
const bscrollFamily = store[i]
bscrollFamily.purge()
store.splice(i, 1)
}
const k = findIndex(hooksFn, ([hooks]) => {
return hooks === scroll.hooks
})
if (k > -1) {
const [hooks, eventType, handler] = hooksFn[k]
hooks.off(eventType, handler)
hooksFn.splice(k, 1)
}
}
addBScroll(scroll: BScroll) {
this.store.push(BScrollFamily.create(scroll))
}
private buildBScrollGraph() {
const store = this.store
let bf1: BScrollFamily
let bf2: BScrollFamily
let wrapper1: MountedBScrollHTMLElement
let wrapper2: MountedBScrollHTMLElement
let len = this.store.length
// build graph
for (let i = 0; i < len; i++) {
bf1 = store[i]
wrapper1 = bf1.selfScroll.wrapper
for (let j = 0; j < len; j++) {
bf2 = store[j]
wrapper2 = bf2.selfScroll.wrapper
// same bs
if (bf1 === bf2) continue
if (!wrapper1.contains(wrapper2)) continue
// bs1 contains bs2
const distance = calculateDistance(wrapper2, wrapper1)
if (!bf1.hasDescendants(bf2)) {
bf1.addDescendant(bf2, distance)
}
if (!bf2.hasAncestors(bf1)) {
bf2.addAncestor(bf1, distance)
}
}
}
}
private analyzeBScrollGraph() {
this.store.forEach((bscrollFamily) => {
if (bscrollFamily.analyzed) {
return
}
const {
ancestors,
descendants,
selfScroll: currentScroll,
} = bscrollFamily
const beforeScrollStartHandler = () => {
// always get the latest scroll
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const descendantScrolls = descendants.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
forceScrollStopHandler([...ancestorScrolls, ...descendantScrolls])
if (isResettingPosition(currentScroll)) {
resetPositionHandler(currentScroll)
}
syncTouchstartData(ancestorScrolls)
disableScrollHander(ancestorScrolls, currentScroll)
}
const touchEndHandler = () => {
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const descendantScrolls = descendants.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
enableScrollHander([...ancestorScrolls, ...descendantScrolls])
}
bscrollFamily.registerHooks(
currentScroll,
currentScroll.eventTypes.beforeScrollStart,
beforeScrollStartHandler
)
bscrollFamily.registerHooks(
currentScroll,
currentScroll.eventTypes.touchEnd,
touchEndHandler
)
const selfActionsHooks = currentScroll.scroller.actions.hooks
bscrollFamily.registerHooks(
selfActionsHooks,
selfActionsHooks.eventTypes.detectMovingDirection,
() => {
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const parentScroll = ancestorScrolls[0]
const otherAncestorScrolls = ancestorScrolls.slice(1)
const contentMoved = currentScroll.scroller.actions.contentMoved
const isTopScroll = ancestorScrolls.length === 0
if (contentMoved) {
disableScrollHander(ancestorScrolls, currentScroll)
} else if (!isTopScroll) {
if (isOutOfBoundary(currentScroll)) {
disableScrollHander([currentScroll], currentScroll)
if (parentScroll) {
enableScrollHander([parentScroll])
}
disableScrollHander(otherAncestorScrolls, currentScroll)
return true
}
}
}
)
bscrollFamily.setAnalyzed(true)
})
}
  // make sure touchmove|touchend handlers are invoked from child to parent
private ensureEventInvokeSequence() {
const copied = this.store.slice()
const sequencedScroll = copied.sort((a, b) => {
return a.descendants.length - b.descendants.length
})
sequencedScroll.forEach((bscrollFamily) => {
const scroll = bscrollFamily.selfScroll
scroll.scroller.actionsHandler.rebindDOMEvents()
})
}
private registerHooks(hooks: EventEmitter, name: string, handler: Function) {
hooks.on(name, handler, this)
this.hooksFn.push([hooks, name, handler])
}
purgeNestedScroll() {
const groupId = this.options.groupId
this.store.forEach((bscrollFamily) => {
bscrollFamily.purge()
})
this.store = []
this.hooksFn.forEach(([hooks, eventType, handler]) => {
hooks.off(eventType, handler)
})
this.hooksFn = []
delete NestedScroll.instancesMap[groupId]
}
} | hasHorizontalScroll, | random_line_split |
index.ts | import BScroll, { MountedBScrollHTMLElement } from '@better-scroll/core'
import {
Direction,
EventEmitter,
extend,
warn,
findIndex,
} from '@better-scroll/shared-utils'
import BScrollFamily from './BScrollFamily'
import propertiesConfig from './propertiesConfig'
export const DEFAUL_GROUP_ID = 'INTERNAL_NESTED_SCROLL'
export type NestedScrollGroupId = string | number
export interface NestedScrollConfig {
groupId: NestedScrollGroupId
}
export type NestedScrollOptions = NestedScrollConfig | true
declare module '@better-scroll/core' {
interface CustomOptions {
nestedScroll?: NestedScrollOptions
}
interface CustomAPI {
nestedScroll: PluginAPI
}
}
interface PluginAPI {
purgeNestedScroll(groupId: NestedScrollGroupId): void
}
interface NestedScrollInstancesMap {
[key: string]: NestedScroll
[index: number]: NestedScroll
}
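// Stop any in-flight momentum animation so a new gesture starts from rest.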
const forceScrollStopHandler = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
if (scroll.pending) {
scroll.stop()
scroll.resetPosition()
}
})
}
const enableScrollHander = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
scroll.enable()
})
}
const disableScrollHander = (scrolls: BScroll[], currentScroll: BScroll) => {
scrolls.forEach((scroll) => {
if (
scroll.hasHorizontalScroll === currentScroll.hasHorizontalScroll ||
scroll.hasVerticalScroll === currentScroll.hasVerticalScroll
) {
scroll.disable()
}
})
}
const syncTouchstartData = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
const { actions, scrollBehaviorX, scrollBehaviorY } = scroll.scroller
    // prevent click from triggering many times
actions.fingerMoved = true
actions.contentMoved = false
actions.directionLockAction.reset()
scrollBehaviorX.start()
scrollBehaviorY.start()
scrollBehaviorX.resetStartPos()
scrollBehaviorY.resetStartPos()
actions.startTime = +new Date()
})
}
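// A scroll is "out of boundary" when it already sits at an edge and keeps
// moving further in that direction, which is the cue to hand the gesture
// over to its parent scroll.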
const isOutOfBoundary = (scroll: BScroll): boolean => {
const {
hasHorizontalScroll,
hasVerticalScroll,
x,
y,
minScrollX,
maxScrollX,
minScrollY,
maxScrollY,
movingDirectionX,
movingDirectionY,
} = scroll
let ret = false
const outOfLeftBoundary =
x >= minScrollX && movingDirectionX === Direction.Negative
const outOfRightBoundary =
x <= maxScrollX && movingDirectionX === Direction.Positive
const outOfTopBoundary =
y >= minScrollY && movingDirectionY === Direction.Negative
const outOfBottomBoundary =
y <= maxScrollY && movingDirectionY === Direction.Positive
if (hasVerticalScroll) {
ret = outOfTopBoundary || outOfBottomBoundary
} else if (hasHorizontalScroll) {
ret = outOfLeftBoundary || outOfRightBoundary
}
return ret
}
const isResettingPosition = (scroll: BScroll): boolean => {
const {
hasHorizontalScroll,
hasVerticalScroll,
x,
y,
minScrollX,
maxScrollX,
minScrollY,
maxScrollY,
} = scroll
let ret = false
const outOfLeftBoundary = x > minScrollX
const outOfRightBoundary = x < maxScrollX
const outOfTopBoundary = y > minScrollY
const outOfBottomBoundary = y < maxScrollY
if (hasVerticalScroll) {
ret = outOfTopBoundary || outOfBottomBoundary
} else if (hasHorizontalScroll) {
ret = outOfLeftBoundary || outOfRightBoundary
}
return ret
}
const resetPositionHandler = (scroll: BScroll) => {
scroll.scroller.reflow()
scroll.resetPosition(0 /* Immediately */)
}
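// Count how many DOM levels childNode sits below parentNode; used to rank
// ancestors and descendants by proximity.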
const calculateDistance = (
childNode: HTMLElement,
parentNode: HTMLElement
): number => {
let distance = 0
let parent = childNode.parentNode
while (parent && parent !== parentNode) {
distance++
parent = parent.parentNode
}
return distance
}
export default class NestedScroll implements PluginAPI {
static pluginName = 'nestedScroll'
static instancesMap: NestedScrollInstancesMap = {}
store: BScrollFamily[]
options: NestedScrollConfig
private hooksFn: Array<[EventEmitter, string, Function]>
constructor(scroll: BScroll) {
const groupId = this.handleOptions(scroll)
let instance = NestedScroll.instancesMap[groupId]
if (!instance) {
instance = NestedScroll.instancesMap[groupId] = this
instance.store = []
instance.hooksFn = []
}
instance.init(scroll)
return instance
}
static getAllNestedScrolls(): NestedScroll[] {
const instancesMap = NestedScroll.instancesMap
return Object.keys(instancesMap).map((key) => instancesMap[key])
}
static purgeAllNestedScrolls() {
const nestedScrolls = NestedScroll.getAllNestedScrolls()
nestedScrolls.forEach((ns) => ns.purgeNestedScroll())
}
private handleOptions(scroll: BScroll): number | string {
const userOptions = (scroll.options.nestedScroll === true
? {}
: scroll.options.nestedScroll) as NestedScrollConfig
const defaultOptions: NestedScrollConfig = {
groupId: DEFAUL_GROUP_ID,
}
this.options = extend(defaultOptions, userOptions)
const groupIdType = typeof this.options.groupId
if (groupIdType !== 'string' && groupIdType !== 'number') {
warn('groupId must be string or number for NestedScroll plugin')
}
return this.options.groupId
}
private init(scroll: BScroll) {
scroll.proxy(propertiesConfig)
this.addBScroll(scroll)
this.buildBScrollGraph()
this.analyzeBScrollGraph()
this.ensureEventInvokeSequence()
this.handleHooks(scroll)
}
private handleHooks(scroll: BScroll) {
this.registerHooks(scroll.hooks, scroll.hooks.eventTypes.destroy, () => {
this.deleteScroll(scroll)
})
}
deleteScroll(scroll: BScroll) {
const wrapper = scroll.wrapper as MountedBScrollHTMLElement
wrapper.isBScrollContainer = undefined
const store = this.store
const hooksFn = this.hooksFn
const i = findIndex(store, (bscrollFamily) => {
return bscrollFamily.selfScroll === scroll
})
if (i > -1) |
const k = findIndex(hooksFn, ([hooks]) => {
return hooks === scroll.hooks
})
if (k > -1) {
const [hooks, eventType, handler] = hooksFn[k]
hooks.off(eventType, handler)
hooksFn.splice(k, 1)
}
}
addBScroll(scroll: BScroll) {
this.store.push(BScrollFamily.create(scroll))
}
private buildBScrollGraph() {
const store = this.store
let bf1: BScrollFamily
let bf2: BScrollFamily
let wrapper1: MountedBScrollHTMLElement
let wrapper2: MountedBScrollHTMLElement
let len = this.store.length
// build graph
for (let i = 0; i < len; i++) {
bf1 = store[i]
wrapper1 = bf1.selfScroll.wrapper
for (let j = 0; j < len; j++) {
bf2 = store[j]
wrapper2 = bf2.selfScroll.wrapper
// same bs
if (bf1 === bf2) continue
if (!wrapper1.contains(wrapper2)) continue
// bs1 contains bs2
const distance = calculateDistance(wrapper2, wrapper1)
if (!bf1.hasDescendants(bf2)) {
bf1.addDescendant(bf2, distance)
}
if (!bf2.hasAncestors(bf1)) {
bf2.addAncestor(bf1, distance)
}
}
}
}
private analyzeBScrollGraph() {
this.store.forEach((bscrollFamily) => {
if (bscrollFamily.analyzed) {
return
}
const {
ancestors,
descendants,
selfScroll: currentScroll,
} = bscrollFamily
const beforeScrollStartHandler = () => {
// always get the latest scroll
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const descendantScrolls = descendants.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
forceScrollStopHandler([...ancestorScrolls, ...descendantScrolls])
if (isResettingPosition(currentScroll)) {
resetPositionHandler(currentScroll)
}
syncTouchstartData(ancestorScrolls)
disableScrollHander(ancestorScrolls, currentScroll)
}
const touchEndHandler = () => {
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const descendantScrolls = descendants.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
enableScrollHander([...ancestorScrolls, ...descendantScrolls])
}
bscrollFamily.registerHooks(
currentScroll,
currentScroll.eventTypes.beforeScrollStart,
beforeScrollStartHandler
)
bscrollFamily.registerHooks(
currentScroll,
currentScroll.eventTypes.touchEnd,
touchEndHandler
)
const selfActionsHooks = currentScroll.scroller.actions.hooks
bscrollFamily.registerHooks(
selfActionsHooks,
selfActionsHooks.eventTypes.detectMovingDirection,
() => {
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const parentScroll = ancestorScrolls[0]
const otherAncestorScrolls = ancestorScrolls.slice(1)
const contentMoved = currentScroll.scroller.actions.contentMoved
const isTopScroll = ancestorScrolls.length === 0
if (contentMoved) {
disableScrollHander(ancestorScrolls, currentScroll)
} else if (!isTopScroll) {
if (isOutOfBoundary(currentScroll)) {
disableScrollHander([currentScroll], currentScroll)
if (parentScroll) {
enableScrollHander([parentScroll])
}
disableScrollHander(otherAncestorScrolls, currentScroll)
return true
}
}
}
)
bscrollFamily.setAnalyzed(true)
})
}
// make sure touchmove|touchend handlers are invoked from child to parent
private ensureEventInvokeSequence() {
const copied = this.store.slice()
const sequencedScroll = copied.sort((a, b) => {
return a.descendants.length - b.descendants.length
})
sequencedScroll.forEach((bscrollFamily) => {
const scroll = bscrollFamily.selfScroll
scroll.scroller.actionsHandler.rebindDOMEvents()
})
}
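// Sorting ascending by descendant count puts the innermost scrolls first, so
// their DOM listeners are rebound (and presumably fire) before those of their
// ancestors, matching the child-to-parent order noted above.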
private registerHooks(hooks: EventEmitter, name: string, handler: Function) {
hooks.on(name, handler, this)
this.hooksFn.push([hooks, name, handler])
}
purgeNestedScroll() {
const groupId = this.options.groupId
this.store.forEach((bscrollFamily) => {
bscrollFamily.purge()
})
this.store = []
this.hooksFn.forEach(([hooks, eventType, handler]) => {
hooks.off(eventType, handler)
})
this.hooksFn = []
delete NestedScroll.instancesMap[groupId]
}
}
| {
const bscrollFamily = store[i]
bscrollFamily.purge()
store.splice(i, 1)
} | conditional_block |
index.ts | import BScroll, { MountedBScrollHTMLElement } from '@better-scroll/core'
import {
Direction,
EventEmitter,
extend,
warn,
findIndex,
} from '@better-scroll/shared-utils'
import BScrollFamily from './BScrollFamily'
import propertiesConfig from './propertiesConfig'
export const DEFAUL_GROUP_ID = 'INTERNAL_NESTED_SCROLL'
export type NestedScrollGroupId = string | number
export interface NestedScrollConfig {
groupId: NestedScrollGroupId
}
export type NestedScrollOptions = NestedScrollConfig | true
declare module '@better-scroll/core' {
interface CustomOptions {
nestedScroll?: NestedScrollOptions
}
interface CustomAPI {
nestedScroll: PluginAPI
}
}
interface PluginAPI {
purgeNestedScroll(groupId: NestedScrollGroupId): void
}
interface NestedScrollInstancesMap {
[key: string]: NestedScroll
[index: number]: NestedScroll
}
const forceScrollStopHandler = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
if (scroll.pending) {
scroll.stop()
scroll.resetPosition()
}
})
}
const enableScrollHander = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
scroll.enable()
})
}
const disableScrollHander = (scrolls: BScroll[], currentScroll: BScroll) => {
scrolls.forEach((scroll) => {
if (
scroll.hasHorizontalScroll === currentScroll.hasHorizontalScroll ||
scroll.hasVerticalScroll === currentScroll.hasVerticalScroll
) {
scroll.disable()
}
})
}
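// Only scrolls sharing a scroll axis with the current one are disabled here;
// e.g. a horizontal child nested in a vertical-only parent leaves that parent
// enabled, so both directions keep working.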
const syncTouchstartData = (scrolls: BScroll[]) => {
scrolls.forEach((scroll) => {
const { actions, scrollBehaviorX, scrollBehaviorY } = scroll.scroller
// prevent click from being triggered multiple times
actions.fingerMoved = true
actions.contentMoved = false
actions.directionLockAction.reset()
scrollBehaviorX.start()
scrollBehaviorY.start()
scrollBehaviorX.resetStartPos()
scrollBehaviorY.resetStartPos()
actions.startTime = +new Date()
})
}
const isOutOfBoundary = (scroll: BScroll): boolean => {
const {
hasHorizontalScroll,
hasVerticalScroll,
x,
y,
minScrollX,
maxScrollX,
minScrollY,
maxScrollY,
movingDirectionX,
movingDirectionY,
} = scroll
let ret = false
const outOfLeftBoundary =
x >= minScrollX && movingDirectionX === Direction.Negative
const outOfRightBoundary =
x <= maxScrollX && movingDirectionX === Direction.Positive
const outOfTopBoundary =
y >= minScrollY && movingDirectionY === Direction.Negative
const outOfBottomBoundary =
y <= maxScrollY && movingDirectionY === Direction.Positive
if (hasVerticalScroll) {
ret = outOfTopBoundary || outOfBottomBoundary
} else if (hasHorizontalScroll) {
ret = outOfLeftBoundary || outOfRightBoundary
}
return ret
}
const isResettingPosition = (scroll: BScroll): boolean => {
const {
hasHorizontalScroll,
hasVerticalScroll,
x,
y,
minScrollX,
maxScrollX,
minScrollY,
maxScrollY,
} = scroll
let ret = false
const outOfLeftBoundary = x > minScrollX
const outOfRightBoundary = x < maxScrollX
const outOfTopBoundary = y > minScrollY
const outOfBottomBoundary = y < maxScrollY
if (hasVerticalScroll) {
ret = outOfTopBoundary || outOfBottomBoundary
} else if (hasHorizontalScroll) {
ret = outOfLeftBoundary || outOfRightBoundary
}
return ret
}
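// Unlike isOutOfBoundary above, this check ignores the moving direction and
// only asks whether the current position already lies outside the scrollable
// min/max range.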
const resetPositionHandler = (scroll: BScroll) => {
scroll.scroller.reflow()
scroll.resetPosition(0 /* Immediately */)
}
const calculateDistance = (
childNode: HTMLElement,
parentNode: HTMLElement
): number => {
let distance = 0
let parent = childNode.parentNode
while (parent && parent !== parentNode) {
distance++
parent = parent.parentNode
}
return distance
}
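// Illustrative example (not from the original source): if inner sits directly
// inside outer, the distance is 0; with one wrapper element in between, as in
// <div id="outer"><div><div id="inner"></div></div></div>, it returns 1.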
export default class NestedScroll implements PluginAPI {
static pluginName = 'nestedScroll'
static instancesMap: NestedScrollInstancesMap = {}
store: BScrollFamily[]
options: NestedScrollConfig
private hooksFn: Array<[EventEmitter, string, Function]>
constructor(scroll: BScroll) {
const groupId = this.handleOptions(scroll)
let instance = NestedScroll.instancesMap[groupId]
if (!instance) {
instance = NestedScroll.instancesMap[groupId] = this
instance.store = []
instance.hooksFn = []
}
instance.init(scroll)
return instance
}
static | (): NestedScroll[] {
const instancesMap = NestedScroll.instancesMap
return Object.keys(instancesMap).map((key) => instancesMap[key])
}
static purgeAllNestedScrolls() {
const nestedScrolls = NestedScroll.getAllNestedScrolls()
nestedScrolls.forEach((ns) => ns.purgeNestedScroll())
}
private handleOptions(scroll: BScroll): number | string {
const userOptions = (scroll.options.nestedScroll === true
? {}
: scroll.options.nestedScroll) as NestedScrollConfig
const defaultOptions: NestedScrollConfig = {
groupId: DEFAUL_GROUP_ID,
}
this.options = extend(defaultOptions, userOptions)
const groupIdType = typeof this.options.groupId
if (groupIdType !== 'string' && groupIdType !== 'number') {
warn('groupId must be string or number for NestedScroll plugin')
}
return this.options.groupId
}
private init(scroll: BScroll) {
scroll.proxy(propertiesConfig)
this.addBScroll(scroll)
this.buildBScrollGraph()
this.analyzeBScrollGraph()
this.ensureEventInvokeSequence()
this.handleHooks(scroll)
}
private handleHooks(scroll: BScroll) {
this.registerHooks(scroll.hooks, scroll.hooks.eventTypes.destroy, () => {
this.deleteScroll(scroll)
})
}
deleteScroll(scroll: BScroll) {
const wrapper = scroll.wrapper as MountedBScrollHTMLElement
wrapper.isBScrollContainer = undefined
const store = this.store
const hooksFn = this.hooksFn
const i = findIndex(store, (bscrollFamily) => {
return bscrollFamily.selfScroll === scroll
})
if (i > -1) {
const bscrollFamily = store[i]
bscrollFamily.purge()
store.splice(i, 1)
}
const k = findIndex(hooksFn, ([hooks]) => {
return hooks === scroll.hooks
})
if (k > -1) {
const [hooks, eventType, handler] = hooksFn[k]
hooks.off(eventType, handler)
hooksFn.splice(k, 1)
}
}
addBScroll(scroll: BScroll) {
this.store.push(BScrollFamily.create(scroll))
}
private buildBScrollGraph() {
const store = this.store
let bf1: BScrollFamily
let bf2: BScrollFamily
let wrapper1: MountedBScrollHTMLElement
let wrapper2: MountedBScrollHTMLElement
let len = this.store.length
// build graph
for (let i = 0; i < len; i++) {
bf1 = store[i]
wrapper1 = bf1.selfScroll.wrapper
for (let j = 0; j < len; j++) {
bf2 = store[j]
wrapper2 = bf2.selfScroll.wrapper
// skip when comparing a scroll instance with itself
if (bf1 === bf2) continue
if (!wrapper1.contains(wrapper2)) continue
// bs1 contains bs2
const distance = calculateDistance(wrapper2, wrapper1)
if (!bf1.hasDescendants(bf2)) {
bf1.addDescendant(bf2, distance)
}
if (!bf2.hasAncestors(bf1)) {
bf2.addAncestor(bf1, distance)
}
}
}
}
private analyzeBScrollGraph() {
this.store.forEach((bscrollFamily) => {
if (bscrollFamily.analyzed) {
return
}
const {
ancestors,
descendants,
selfScroll: currentScroll,
} = bscrollFamily
const beforeScrollStartHandler = () => {
// always get the latest scroll
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const descendantScrolls = descendants.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
forceScrollStopHandler([...ancestorScrolls, ...descendantScrolls])
if (isResettingPosition(currentScroll)) {
resetPositionHandler(currentScroll)
}
syncTouchstartData(ancestorScrolls)
disableScrollHander(ancestorScrolls, currentScroll)
}
const touchEndHandler = () => {
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const descendantScrolls = descendants.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
enableScrollHander([...ancestorScrolls, ...descendantScrolls])
}
bscrollFamily.registerHooks(
currentScroll,
currentScroll.eventTypes.beforeScrollStart,
beforeScrollStartHandler
)
bscrollFamily.registerHooks(
currentScroll,
currentScroll.eventTypes.touchEnd,
touchEndHandler
)
const selfActionsHooks = currentScroll.scroller.actions.hooks
bscrollFamily.registerHooks(
selfActionsHooks,
selfActionsHooks.eventTypes.detectMovingDirection,
() => {
const ancestorScrolls = ancestors.map(
([bscrollFamily]) => bscrollFamily.selfScroll
)
const parentScroll = ancestorScrolls[0]
const otherAncestorScrolls = ancestorScrolls.slice(1)
const contentMoved = currentScroll.scroller.actions.contentMoved
const isTopScroll = ancestorScrolls.length === 0
if (contentMoved) {
disableScrollHander(ancestorScrolls, currentScroll)
} else if (!isTopScroll) {
if (isOutOfBoundary(currentScroll)) {
disableScrollHander([currentScroll], currentScroll)
if (parentScroll) {
enableScrollHander([parentScroll])
}
disableScrollHander(otherAncestorScrolls, currentScroll)
return true
}
}
}
)
bscrollFamily.setAnalyzed(true)
})
}
// make sure touchmove|touchend handlers are invoked from child to parent
private ensureEventInvokeSequence() {
const copied = this.store.slice()
const sequencedScroll = copied.sort((a, b) => {
return a.descendants.length - b.descendants.length
})
sequencedScroll.forEach((bscrollFamily) => {
const scroll = bscrollFamily.selfScroll
scroll.scroller.actionsHandler.rebindDOMEvents()
})
}
private registerHooks(hooks: EventEmitter, name: string, handler: Function) {
hooks.on(name, handler, this)
this.hooksFn.push([hooks, name, handler])
}
purgeNestedScroll() {
const groupId = this.options.groupId
this.store.forEach((bscrollFamily) => {
bscrollFamily.purge()
})
this.store = []
this.hooksFn.forEach(([hooks, eventType, handler]) => {
hooks.off(eventType, handler)
})
this.hooksFn = []
delete NestedScroll.instancesMap[groupId]
}
}
| getAllNestedScrolls | identifier_name |
sha_256.rs | //! This module is an implementation of the SHA-256 hashing algorithm
use padding::PaddingScheme;
use padding::merkle_damgard::MDPadding512u32;
// Logical functions used by SHA-256 (function names taken from NIST standard)
fn ch(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (!x & z)
}
//
fn maj(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (x & z) ^ (y & z)
}
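// Illustrative reading: ch(x, y, z) picks bits from y where x is 1 and from z
// where x is 0; maj(x, y, z) is a bitwise majority vote of the three words.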
//
fn capital_sigma_0(x: u32) -> u32 {
x.rotate_right(2) ^ x.rotate_right(13) ^ x.rotate_right(22)
}
//
fn capital_sigma_1(x: u32) -> u32 {
x.rotate_right(6) ^ x.rotate_right(11) ^ x.rotate_right(25)
}
//
fn sigma_0(x: u32) -> u32 {
x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3)
}
//
fn | (x: u32) -> u32 {
x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10)
}
// Constants used by SHA-256
const K: [u32; 64] = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2];
// Initial hash value of SHA-256
const H_0: [u32; 8] = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19];
// SHA-256 digests will be emitted in the following format
pub const DIGEST_LEN: usize = 256/8;
pub type Digest = [u8; DIGEST_LEN];
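// Illustrative usage (not part of the original source): hashing the ASCII
// bytes of "abc" matches the NIST test vector checked in the tests below.
// let digest: Digest = sha_256(b"abc");
// assert_eq!(digest[0], 0xba);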
// Compute the SHA-256 hash of any message
pub fn sha_256(message: &[u8]) -> Digest {
// Set the initial hash value
let mut hash = H_0;
// Parse and pad the message into 512-bit blocks of 32-bit words, then
// iterate over the resulting message blocks
for message_block in MDPadding512u32::new(message) {
// Prepare the message schedule
let mut w = [0; 64];
w[0..16].copy_from_slice(&message_block[..]);
for t in 16..64 {
w[t] = sigma_1(w[t-2]).wrapping_add(w[t-7])
.wrapping_add(sigma_0(w[t-15]))
.wrapping_add(w[t-16]);
}
// Initialize the eight working variables from the previous hash value
let (mut a, mut b, mut c, mut d) = (hash[0], hash[1], hash[2], hash[3]);
let (mut e, mut f, mut g, mut h) = (hash[4], hash[5], hash[6], hash[7]);
// Compute the hash increment
for t in 0..64 {
let t_1 = h.wrapping_add(capital_sigma_1(e))
.wrapping_add(ch(e, f, g))
.wrapping_add(K[t])
.wrapping_add(w[t]);
let t_2 = capital_sigma_0(a).wrapping_add(maj(a, b, c));
h = g;
g = f;
f = e;
e = d.wrapping_add(t_1);
d = c;
c = b;
b = a;
a = t_1.wrapping_add(t_2);
}
// Update the hash value
hash[0] = hash[0].wrapping_add(a);
hash[1] = hash[1].wrapping_add(b);
hash[2] = hash[2].wrapping_add(c);
hash[3] = hash[3].wrapping_add(d);
hash[4] = hash[4].wrapping_add(e);
hash[5] = hash[5].wrapping_add(f);
hash[6] = hash[6].wrapping_add(g);
hash[7] = hash[7].wrapping_add(h);
}
// Output the final hash value
let mut result = [0u8; DIGEST_LEN];
for (input, outputs) in hash.iter().zip(result.chunks_mut(4)) {
outputs.copy_from_slice(&[(*input >> 24) as u8,
((*input >> 16) & 0xff) as u8,
((*input >> 8) & 0xff) as u8,
(*input & 0xff) as u8]);
};
result
}
#[cfg(test)]
mod tests {
use hash::sha_256::sha_256;
#[test]
fn one_block_message_sample() {
let input = [0x61, 0x62, 0x63];
let hash = sha_256(&input);
assert_eq!(hash, [0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad]);
}
#[test]
fn two_block_message_sample() {
let input = [0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71];
let hash = sha_256(&input);
assert_eq!(hash, [0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8,
0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39,
0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67,
0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1]);
}
#[test]
fn one_byte() {
let input = [0xbd];
let hash = sha_256(&input);
assert_eq!(hash, [0x68, 0x32, 0x57, 0x20, 0xaa, 0xbd, 0x7c, 0x82,
0xf3, 0x0f, 0x55, 0x4b, 0x31, 0x3d, 0x05, 0x70,
0xc9, 0x5a, 0xcc, 0xbb, 0x7d, 0xc4, 0xb5, 0xaa,
0xe1, 0x12, 0x04, 0xc0, 0x8f, 0xfe, 0x73, 0x2b]);
}
#[test]
fn four_bytes() {
let input = [0xc9, 0x8c, 0x8e, 0x55];
let hash = sha_256(&input);
assert_eq!(hash, [0x7a, 0xbc, 0x22, 0xc0, 0xae, 0x5a, 0xf2, 0x6c,
0xe9, 0x3d, 0xbb, 0x94, 0x43, 0x3a, 0x0e, 0x0b,
0x2e, 0x11, 0x9d, 0x01, 0x4f, 0x8e, 0x7f, 0x65,
0xbd, 0x56, 0xc6, 0x1c, 0xcc, 0xcd, 0x95, 0x04]);
}
#[test]
fn fifty_five_zeros() {
let input = [0; 55];
let hash = sha_256(&input);
assert_eq!(hash, [0x02, 0x77, 0x94, 0x66, 0xcd, 0xec, 0x16, 0x38,
0x11, 0xd0, 0x78, 0x81, 0x5c, 0x63, 0x3f, 0x21,
0x90, 0x14, 0x13, 0x08, 0x14, 0x49, 0x00, 0x2f,
0x24, 0xaa, 0x3e, 0x80, 0xf0, 0xb8, 0x8e, 0xf7]);
}
#[test]
fn fifty_six_zeros() {
let input = [0; 56];
let hash = sha_256(&input);
assert_eq!(hash, [0xd4, 0x81, 0x7a, 0xa5, 0x49, 0x76, 0x28, 0xe7,
0xc7, 0x7e, 0x6b, 0x60, 0x61, 0x07, 0x04, 0x2b,
0xbb, 0xa3, 0x13, 0x08, 0x88, 0xc5, 0xf4, 0x7a,
0x37, 0x5e, 0x61, 0x79, 0xbe, 0x78, 0x9f, 0xbb]);
}
#[test]
fn fifty_seven_zeros() {
let input = [0; 57];
let hash = sha_256(&input);
assert_eq!(hash, [0x65, 0xa1, 0x6c, 0xb7, 0x86, 0x13, 0x35, 0xd5,
0xac, 0xe3, 0xc6, 0x07, 0x18, 0xb5, 0x05, 0x2e,
0x44, 0x66, 0x07, 0x26, 0xda, 0x4c, 0xd1, 0x3b,
0xb7, 0x45, 0x38, 0x1b, 0x23, 0x5a, 0x17, 0x85]);
}
#[test]
fn sixty_four_zeros() {
let input = [0; 64];
let hash = sha_256(&input);
assert_eq!(hash, [0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30,
0x27, 0x98, 0xef, 0x6e, 0xd3, 0x09, 0x97, 0x9b,
0x43, 0x00, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8,
0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x4b]);
}
#[test]
fn a_thousand_zeros() {
let input = [0; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0x54, 0x1b, 0x3e, 0x9d, 0xaa, 0x09, 0xb2, 0x0b,
0xf8, 0x5f, 0xa2, 0x73, 0xe5, 0xcb, 0xd3, 0xe8,
0x01, 0x85, 0xaa, 0x4e, 0xc2, 0x98, 0xe7, 0x65,
0xdb, 0x87, 0x74, 0x2b, 0x70, 0x13, 0x8a, 0x53]);
}
#[test]
fn a_thousand_41() {
let input = [0x41; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0xe6, 0x86, 0x82, 0x34, 0x89, 0xce, 0xd2,
0x01, 0x7f, 0x60, 0x59, 0xb8, 0xb2, 0x39, 0x31,
0x8b, 0x63, 0x64, 0xf6, 0xdc, 0xd8, 0x35, 0xd0,
0xa5, 0x19, 0x10, 0x5a, 0x1e, 0xad, 0xd6, 0xe4]);
}
#[test]
fn a_thousand_and_five_55() {
let input = [0x55; 1005];
let hash = sha_256(&input);
assert_eq!(hash, [0xf4, 0xd6, 0x2d, 0xde, 0xc0, 0xf3, 0xdd, 0x90,
0xea, 0x13, 0x80, 0xfa, 0x16, 0xa5, 0xff, 0x8d,
0xc4, 0xc5, 0x4b, 0x21, 0x74, 0x06, 0x50, 0xf2,
0x4a, 0xfc, 0x41, 0x20, 0x90, 0x35, 0x52, 0xb0]);
}
#[test]
fn a_million_zeros() {
let input = vec![0; 1_000_000];
let hash = sha_256(&input);
assert_eq!(hash, [0xd2, 0x97, 0x51, 0xf2, 0x64, 0x9b, 0x32, 0xff,
0x57, 0x2b, 0x5e, 0x0a, 0x9f, 0x54, 0x1e, 0xa6,
0x60, 0xa5, 0x0f, 0x94, 0xff, 0x0b, 0xee, 0xdf,
0xb0, 0xb6, 0x92, 0xb9, 0x24, 0xcc, 0x80, 0x25]);
}
// The following tests are highly resource-intensive and should only be
// run in release mode, which is why they are ignored by default.
#[test]
#[ignore]
fn half_a_billion_5a() {
let input = vec![0x5a; 0x2000_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x15, 0xa1, 0x86, 0x8c, 0x12, 0xcc, 0x53, 0x95,
0x1e, 0x18, 0x23, 0x44, 0x27, 0x74, 0x47, 0xcd,
0x09, 0x79, 0x53, 0x6b, 0xad, 0xcc, 0x51, 0x2a,
0xd2, 0x4c, 0x67, 0xe9, 0xb2, 0xd4, 0xf3, 0xdd]);
}
//
#[test]
#[ignore]
fn a_billion_zeros() {
let input = vec![0; 0x4100_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x46, 0x1c, 0x19, 0xa9, 0x3b, 0xd4, 0x34, 0x4f,
0x92, 0x15, 0xf5, 0xec, 0x64, 0x35, 0x70, 0x90,
0x34, 0x2b, 0xc6, 0x6b, 0x15, 0xa1, 0x48, 0x31,
0x7d, 0x27, 0x6e, 0x31, 0xcb, 0xc2, 0x0b, 0x53]);
}
//
#[test]
#[ignore]
fn two_billions_42() {
let input = vec![0x42; 0x6000_003e];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0x3c, 0xe8, 0xa7, 0x89, 0x5f, 0x4b, 0x21,
0xec, 0x0d, 0xaf, 0x37, 0x92, 0x0a, 0xc0, 0xa2,
0x62, 0xa2, 0x20, 0x04, 0x5a, 0x03, 0xeb, 0x2d,
0xfe, 0xd4, 0x8e, 0xf9, 0xb0, 0x5a, 0xab, 0xea]);
}
}
| sigma_1 | identifier_name |
sha_256.rs | //! This module is an implementation of the SHA-256 hashing algorithm
use padding::PaddingScheme;
use padding::merkle_damgard::MDPadding512u32;
// Logical functions used by SHA-256 (function names taken from NIST standard)
fn ch(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (!x & z)
}
//
fn maj(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (x & z) ^ (y & z)
}
//
fn capital_sigma_0(x: u32) -> u32 {
x.rotate_right(2) ^ x.rotate_right(13) ^ x.rotate_right(22)
}
//
fn capital_sigma_1(x: u32) -> u32 {
x.rotate_right(6) ^ x.rotate_right(11) ^ x.rotate_right(25)
}
//
fn sigma_0(x: u32) -> u32 |
//
fn sigma_1(x: u32) -> u32 {
x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10)
}
// Constants used by SHA-256
const K: [u32; 64] = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2];
// Initial hash value of SHA-256
const H_0: [u32; 8] = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19];
// SHA-256 digests will be emitted in the following format
pub const DIGEST_LEN: usize = 256/8;
pub type Digest = [u8; DIGEST_LEN];
// Compute the SHA-256 hash of any message
pub fn sha_256(message: &[u8]) -> Digest {
// Set the initial hash value
let mut hash = H_0;
// Parse and pad the message into 512-bit blocks of 32-bit words, then
// iterate over the resulting message blocks
for message_block in MDPadding512u32::new(message) {
// Prepare the message schedule
let mut w = [0; 64];
w[0..16].copy_from_slice(&message_block[..]);
for t in 16..64 {
w[t] = sigma_1(w[t-2]).wrapping_add(w[t-7])
.wrapping_add(sigma_0(w[t-15]))
.wrapping_add(w[t-16]);
}
// Initialize the eight working variables from the previous hash value
let (mut a, mut b, mut c, mut d) = (hash[0], hash[1], hash[2], hash[3]);
let (mut e, mut f, mut g, mut h) = (hash[4], hash[5], hash[6], hash[7]);
// Compute the hash increment
for t in 0..64 {
let t_1 = h.wrapping_add(capital_sigma_1(e))
.wrapping_add(ch(e, f, g))
.wrapping_add(K[t])
.wrapping_add(w[t]);
let t_2 = capital_sigma_0(a).wrapping_add(maj(a, b, c));
h = g;
g = f;
f = e;
e = d.wrapping_add(t_1);
d = c;
c = b;
b = a;
a = t_1.wrapping_add(t_2);
}
// Update the hash value
hash[0] = hash[0].wrapping_add(a);
hash[1] = hash[1].wrapping_add(b);
hash[2] = hash[2].wrapping_add(c);
hash[3] = hash[3].wrapping_add(d);
hash[4] = hash[4].wrapping_add(e);
hash[5] = hash[5].wrapping_add(f);
hash[6] = hash[6].wrapping_add(g);
hash[7] = hash[7].wrapping_add(h);
}
// Output the final hash value
let mut result = [0u8; DIGEST_LEN];
for (input, outputs) in hash.iter().zip(result.chunks_mut(4)) {
outputs.copy_from_slice(&[(*input >> 24) as u8,
((*input >> 16) & 0xff) as u8,
((*input >> 8) & 0xff) as u8,
(*input & 0xff) as u8]);
};
result
}
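// Quick sanity check (illustrative, not from the original source): the empty
// message hashes to the well-known digest starting 0xe3, 0xb0, 0xc4, 0x42.
// let empty_digest = sha_256(&[]);
// assert_eq!(&empty_digest[..4], &[0xe3, 0xb0, 0xc4, 0x42]);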
#[cfg(test)]
mod tests {
use hash::sha_256::sha_256;
#[test]
fn one_block_message_sample() {
let input = [0x61, 0x62, 0x63];
let hash = sha_256(&input);
assert_eq!(hash, [0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad]);
}
#[test]
fn two_block_message_sample() {
let input = [0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71];
let hash = sha_256(&input);
assert_eq!(hash, [0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8,
0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39,
0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67,
0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1]);
}
#[test]
fn one_byte() {
let input = [0xbd];
let hash = sha_256(&input);
assert_eq!(hash, [0x68, 0x32, 0x57, 0x20, 0xaa, 0xbd, 0x7c, 0x82,
0xf3, 0x0f, 0x55, 0x4b, 0x31, 0x3d, 0x05, 0x70,
0xc9, 0x5a, 0xcc, 0xbb, 0x7d, 0xc4, 0xb5, 0xaa,
0xe1, 0x12, 0x04, 0xc0, 0x8f, 0xfe, 0x73, 0x2b]);
}
#[test]
fn four_bytes() {
let input = [0xc9, 0x8c, 0x8e, 0x55];
let hash = sha_256(&input);
assert_eq!(hash, [0x7a, 0xbc, 0x22, 0xc0, 0xae, 0x5a, 0xf2, 0x6c,
0xe9, 0x3d, 0xbb, 0x94, 0x43, 0x3a, 0x0e, 0x0b,
0x2e, 0x11, 0x9d, 0x01, 0x4f, 0x8e, 0x7f, 0x65,
0xbd, 0x56, 0xc6, 0x1c, 0xcc, 0xcd, 0x95, 0x04]);
}
#[test]
fn fifty_five_zeros() {
let input = [0; 55];
let hash = sha_256(&input);
assert_eq!(hash, [0x02, 0x77, 0x94, 0x66, 0xcd, 0xec, 0x16, 0x38,
0x11, 0xd0, 0x78, 0x81, 0x5c, 0x63, 0x3f, 0x21,
0x90, 0x14, 0x13, 0x08, 0x14, 0x49, 0x00, 0x2f,
0x24, 0xaa, 0x3e, 0x80, 0xf0, 0xb8, 0x8e, 0xf7]);
}
#[test]
fn fifty_six_zeros() {
let input = [0; 56];
let hash = sha_256(&input);
assert_eq!(hash, [0xd4, 0x81, 0x7a, 0xa5, 0x49, 0x76, 0x28, 0xe7,
0xc7, 0x7e, 0x6b, 0x60, 0x61, 0x07, 0x04, 0x2b,
0xbb, 0xa3, 0x13, 0x08, 0x88, 0xc5, 0xf4, 0x7a,
0x37, 0x5e, 0x61, 0x79, 0xbe, 0x78, 0x9f, 0xbb]);
}
#[test]
fn fifty_seven_zeros() {
let input = [0; 57];
let hash = sha_256(&input);
assert_eq!(hash, [0x65, 0xa1, 0x6c, 0xb7, 0x86, 0x13, 0x35, 0xd5,
0xac, 0xe3, 0xc6, 0x07, 0x18, 0xb5, 0x05, 0x2e,
0x44, 0x66, 0x07, 0x26, 0xda, 0x4c, 0xd1, 0x3b,
0xb7, 0x45, 0x38, 0x1b, 0x23, 0x5a, 0x17, 0x85]);
}
#[test]
fn sixty_four_zeros() {
let input = [0; 64];
let hash = sha_256(&input);
assert_eq!(hash, [0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30,
0x27, 0x98, 0xef, 0x6e, 0xd3, 0x09, 0x97, 0x9b,
0x43, 0x00, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8,
0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x4b]);
}
#[test]
fn a_thousand_zeros() {
let input = [0; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0x54, 0x1b, 0x3e, 0x9d, 0xaa, 0x09, 0xb2, 0x0b,
0xf8, 0x5f, 0xa2, 0x73, 0xe5, 0xcb, 0xd3, 0xe8,
0x01, 0x85, 0xaa, 0x4e, 0xc2, 0x98, 0xe7, 0x65,
0xdb, 0x87, 0x74, 0x2b, 0x70, 0x13, 0x8a, 0x53]);
}
#[test]
fn a_thousand_41() {
let input = [0x41; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0xe6, 0x86, 0x82, 0x34, 0x89, 0xce, 0xd2,
0x01, 0x7f, 0x60, 0x59, 0xb8, 0xb2, 0x39, 0x31,
0x8b, 0x63, 0x64, 0xf6, 0xdc, 0xd8, 0x35, 0xd0,
0xa5, 0x19, 0x10, 0x5a, 0x1e, 0xad, 0xd6, 0xe4]);
}
#[test]
fn a_thousand_and_five_55() {
let input = [0x55; 1005];
let hash = sha_256(&input);
assert_eq!(hash, [0xf4, 0xd6, 0x2d, 0xde, 0xc0, 0xf3, 0xdd, 0x90,
0xea, 0x13, 0x80, 0xfa, 0x16, 0xa5, 0xff, 0x8d,
0xc4, 0xc5, 0x4b, 0x21, 0x74, 0x06, 0x50, 0xf2,
0x4a, 0xfc, 0x41, 0x20, 0x90, 0x35, 0x52, 0xb0]);
}
#[test]
fn a_million_zeros() {
let input = vec![0; 1_000_000];
let hash = sha_256(&input);
assert_eq!(hash, [0xd2, 0x97, 0x51, 0xf2, 0x64, 0x9b, 0x32, 0xff,
0x57, 0x2b, 0x5e, 0x0a, 0x9f, 0x54, 0x1e, 0xa6,
0x60, 0xa5, 0x0f, 0x94, 0xff, 0x0b, 0xee, 0xdf,
0xb0, 0xb6, 0x92, 0xb9, 0x24, 0xcc, 0x80, 0x25]);
}
// The following tests are highly resource-intensive and should only be
// run in release mode, which is why they are ignored by default.
#[test]
#[ignore]
fn half_a_billion_5a() {
let input = vec![0x5a; 0x2000_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x15, 0xa1, 0x86, 0x8c, 0x12, 0xcc, 0x53, 0x95,
0x1e, 0x18, 0x23, 0x44, 0x27, 0x74, 0x47, 0xcd,
0x09, 0x79, 0x53, 0x6b, 0xad, 0xcc, 0x51, 0x2a,
0xd2, 0x4c, 0x67, 0xe9, 0xb2, 0xd4, 0xf3, 0xdd]);
}
//
#[test]
#[ignore]
fn a_billion_zeros() {
let input = vec![0; 0x4100_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x46, 0x1c, 0x19, 0xa9, 0x3b, 0xd4, 0x34, 0x4f,
0x92, 0x15, 0xf5, 0xec, 0x64, 0x35, 0x70, 0x90,
0x34, 0x2b, 0xc6, 0x6b, 0x15, 0xa1, 0x48, 0x31,
0x7d, 0x27, 0x6e, 0x31, 0xcb, 0xc2, 0x0b, 0x53]);
}
//
#[test]
#[ignore]
fn two_billions_42() {
let input = vec![0x42; 0x6000_003e];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0x3c, 0xe8, 0xa7, 0x89, 0x5f, 0x4b, 0x21,
0xec, 0x0d, 0xaf, 0x37, 0x92, 0x0a, 0xc0, 0xa2,
0x62, 0xa2, 0x20, 0x04, 0x5a, 0x03, 0xeb, 0x2d,
0xfe, 0xd4, 0x8e, 0xf9, 0xb0, 0x5a, 0xab, 0xea]);
}
}
| {
x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3)
} | identifier_body |
sha_256.rs | //! This module is an implementation of the SHA-256 hashing algorithm
use padding::PaddingScheme;
use padding::merkle_damgard::MDPadding512u32;
// Logical functions used by SHA-256 (function names taken from NIST standard)
fn ch(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (!x & z)
}
//
fn maj(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (x & z) ^ (y & z)
}
//
fn capital_sigma_0(x: u32) -> u32 {
x.rotate_right(2) ^ x.rotate_right(13) ^ x.rotate_right(22)
}
//
fn capital_sigma_1(x: u32) -> u32 {
x.rotate_right(6) ^ x.rotate_right(11) ^ x.rotate_right(25)
}
//
fn sigma_0(x: u32) -> u32 {
x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3)
}
//
fn sigma_1(x: u32) -> u32 {
x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10)
}
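// Note: the capital-sigma functions above (rotations only) mix the working
// variables inside the compression loop, while these lowercase sigma functions
// (rotations plus a plain shift) expand the 16 input words into the 64-word
// message schedule.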
// Constants used by SHA-256
const K: [u32; 64] = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2];
// Initial hash value of SHA-256
const H_0: [u32; 8] = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19];
// SHA-256 digests will be emitted in the following format
pub const DIGEST_LEN: usize = 256/8;
pub type Digest = [u8; DIGEST_LEN];
// Compute the SHA-256 hash of any message
pub fn sha_256(message: &[u8]) -> Digest {
// Set the initial hash value
let mut hash = H_0;
// Parse and pad the message into 512-bit blocks of 32-bit words, then
// iterate over the resulting message blocks
for message_block in MDPadding512u32::new(message) {
// Prepare the message schedule
let mut w = [0; 64];
w[0..16].copy_from_slice(&message_block[..]);
for t in 16..64 {
w[t] = sigma_1(w[t-2]).wrapping_add(w[t-7])
.wrapping_add(sigma_0(w[t-15]))
.wrapping_add(w[t-16]);
}
// Initialize the eight working variables from the previous hash value
let (mut a, mut b, mut c, mut d) = (hash[0], hash[1], hash[2], hash[3]);
let (mut e, mut f, mut g, mut h) = (hash[4], hash[5], hash[6], hash[7]);
// Compute the hash increment
for t in 0..64 {
let t_1 = h.wrapping_add(capital_sigma_1(e)) | g = f;
f = e;
e = d.wrapping_add(t_1);
d = c;
c = b;
b = a;
a = t_1.wrapping_add(t_2);
}
// Update the hash value
hash[0] = hash[0].wrapping_add(a);
hash[1] = hash[1].wrapping_add(b);
hash[2] = hash[2].wrapping_add(c);
hash[3] = hash[3].wrapping_add(d);
hash[4] = hash[4].wrapping_add(e);
hash[5] = hash[5].wrapping_add(f);
hash[6] = hash[6].wrapping_add(g);
hash[7] = hash[7].wrapping_add(h);
}
// Output the final hash value
let mut result = [0u8; DIGEST_LEN];
for (input, outputs) in hash.iter().zip(result.chunks_mut(4)) {
outputs.copy_from_slice(&[(*input >> 24) as u8,
((*input >> 16) & 0xff) as u8,
((*input >> 8) & 0xff) as u8,
(*input & 0xff) as u8]);
};
result
}
#[cfg(test)]
mod tests {
use hash::sha_256::sha_256;
#[test]
fn one_block_message_sample() {
let input = [0x61, 0x62, 0x63];
let hash = sha_256(&input);
assert_eq!(hash, [0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad]);
}
#[test]
fn two_block_message_sample() {
let input = [0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71];
let hash = sha_256(&input);
assert_eq!(hash, [0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8,
0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39,
0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67,
0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1]);
}
#[test]
fn one_byte() {
let input = [0xbd];
let hash = sha_256(&input);
assert_eq!(hash, [0x68, 0x32, 0x57, 0x20, 0xaa, 0xbd, 0x7c, 0x82,
0xf3, 0x0f, 0x55, 0x4b, 0x31, 0x3d, 0x05, 0x70,
0xc9, 0x5a, 0xcc, 0xbb, 0x7d, 0xc4, 0xb5, 0xaa,
0xe1, 0x12, 0x04, 0xc0, 0x8f, 0xfe, 0x73, 0x2b]);
}
#[test]
fn four_bytes() {
let input = [0xc9, 0x8c, 0x8e, 0x55];
let hash = sha_256(&input);
assert_eq!(hash, [0x7a, 0xbc, 0x22, 0xc0, 0xae, 0x5a, 0xf2, 0x6c,
0xe9, 0x3d, 0xbb, 0x94, 0x43, 0x3a, 0x0e, 0x0b,
0x2e, 0x11, 0x9d, 0x01, 0x4f, 0x8e, 0x7f, 0x65,
0xbd, 0x56, 0xc6, 0x1c, 0xcc, 0xcd, 0x95, 0x04]);
}
#[test]
fn fifty_five_zeros() {
let input = [0; 55];
let hash = sha_256(&input);
assert_eq!(hash, [0x02, 0x77, 0x94, 0x66, 0xcd, 0xec, 0x16, 0x38,
0x11, 0xd0, 0x78, 0x81, 0x5c, 0x63, 0x3f, 0x21,
0x90, 0x14, 0x13, 0x08, 0x14, 0x49, 0x00, 0x2f,
0x24, 0xaa, 0x3e, 0x80, 0xf0, 0xb8, 0x8e, 0xf7]);
}
#[test]
fn fifty_six_zeros() {
let input = [0; 56];
let hash = sha_256(&input);
assert_eq!(hash, [0xd4, 0x81, 0x7a, 0xa5, 0x49, 0x76, 0x28, 0xe7,
0xc7, 0x7e, 0x6b, 0x60, 0x61, 0x07, 0x04, 0x2b,
0xbb, 0xa3, 0x13, 0x08, 0x88, 0xc5, 0xf4, 0x7a,
0x37, 0x5e, 0x61, 0x79, 0xbe, 0x78, 0x9f, 0xbb]);
}
#[test]
fn fifty_seven_zeros() {
let input = [0; 57];
let hash = sha_256(&input);
assert_eq!(hash, [0x65, 0xa1, 0x6c, 0xb7, 0x86, 0x13, 0x35, 0xd5,
0xac, 0xe3, 0xc6, 0x07, 0x18, 0xb5, 0x05, 0x2e,
0x44, 0x66, 0x07, 0x26, 0xda, 0x4c, 0xd1, 0x3b,
0xb7, 0x45, 0x38, 0x1b, 0x23, 0x5a, 0x17, 0x85]);
}
#[test]
fn sixty_four_zeros() {
let input = [0; 64];
let hash = sha_256(&input);
assert_eq!(hash, [0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30,
0x27, 0x98, 0xef, 0x6e, 0xd3, 0x09, 0x97, 0x9b,
0x43, 0x00, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8,
0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x4b]);
}
#[test]
fn a_thousand_zeros() {
let input = [0; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0x54, 0x1b, 0x3e, 0x9d, 0xaa, 0x09, 0xb2, 0x0b,
0xf8, 0x5f, 0xa2, 0x73, 0xe5, 0xcb, 0xd3, 0xe8,
0x01, 0x85, 0xaa, 0x4e, 0xc2, 0x98, 0xe7, 0x65,
0xdb, 0x87, 0x74, 0x2b, 0x70, 0x13, 0x8a, 0x53]);
}
#[test]
fn a_thousand_41() {
let input = [0x41; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0xe6, 0x86, 0x82, 0x34, 0x89, 0xce, 0xd2,
0x01, 0x7f, 0x60, 0x59, 0xb8, 0xb2, 0x39, 0x31,
0x8b, 0x63, 0x64, 0xf6, 0xdc, 0xd8, 0x35, 0xd0,
0xa5, 0x19, 0x10, 0x5a, 0x1e, 0xad, 0xd6, 0xe4]);
}
#[test]
fn a_thousand_and_five_55() {
let input = [0x55; 1005];
let hash = sha_256(&input);
assert_eq!(hash, [0xf4, 0xd6, 0x2d, 0xde, 0xc0, 0xf3, 0xdd, 0x90,
0xea, 0x13, 0x80, 0xfa, 0x16, 0xa5, 0xff, 0x8d,
0xc4, 0xc5, 0x4b, 0x21, 0x74, 0x06, 0x50, 0xf2,
0x4a, 0xfc, 0x41, 0x20, 0x90, 0x35, 0x52, 0xb0]);
}
#[test]
fn a_million_zeros() {
let input = vec![0; 1_000_000];
let hash = sha_256(&input);
assert_eq!(hash, [0xd2, 0x97, 0x51, 0xf2, 0x64, 0x9b, 0x32, 0xff,
0x57, 0x2b, 0x5e, 0x0a, 0x9f, 0x54, 0x1e, 0xa6,
0x60, 0xa5, 0x0f, 0x94, 0xff, 0x0b, 0xee, 0xdf,
0xb0, 0xb6, 0x92, 0xb9, 0x24, 0xcc, 0x80, 0x25]);
}
// The following tests are highly resource-intensive and should only be
// run in release mode, which is why they are ignored by default.
#[test]
#[ignore]
fn half_a_billion_5a() {
let input = vec![0x5a; 0x2000_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x15, 0xa1, 0x86, 0x8c, 0x12, 0xcc, 0x53, 0x95,
0x1e, 0x18, 0x23, 0x44, 0x27, 0x74, 0x47, 0xcd,
0x09, 0x79, 0x53, 0x6b, 0xad, 0xcc, 0x51, 0x2a,
0xd2, 0x4c, 0x67, 0xe9, 0xb2, 0xd4, 0xf3, 0xdd]);
}
//
#[test]
#[ignore]
fn a_billion_zeros() {
let input = vec![0; 0x4100_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x46, 0x1c, 0x19, 0xa9, 0x3b, 0xd4, 0x34, 0x4f,
0x92, 0x15, 0xf5, 0xec, 0x64, 0x35, 0x70, 0x90,
0x34, 0x2b, 0xc6, 0x6b, 0x15, 0xa1, 0x48, 0x31,
0x7d, 0x27, 0x6e, 0x31, 0xcb, 0xc2, 0x0b, 0x53]);
}
//
#[test]
#[ignore]
fn two_billions_42() {
let input = vec![0x42; 0x6000_003e];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0x3c, 0xe8, 0xa7, 0x89, 0x5f, 0x4b, 0x21,
0xec, 0x0d, 0xaf, 0x37, 0x92, 0x0a, 0xc0, 0xa2,
0x62, 0xa2, 0x20, 0x04, 0x5a, 0x03, 0xeb, 0x2d,
0xfe, 0xd4, 0x8e, 0xf9, 0xb0, 0x5a, 0xab, 0xea]);
}
} | .wrapping_add(ch(e, f, g))
.wrapping_add(K[t])
.wrapping_add(w[t]);
let t_2 = capital_sigma_0(a).wrapping_add(maj(a, b, c));
h = g; | random_line_split |
transformer_models.py | import json
import random
import argparse
import sys
from sklearn.metrics import precision_recall_fscore_support, classification_report
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import BigBirdTokenizer, BigBirdForSequenceClassification, BigBirdConfig
import torch
import os
from torch.utils.data import Dataset, DataLoader
from transformers import AdamW, get_linear_schedule_with_warmup
from tqdm import tqdm
from transformers import DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
class SequenceClassificationDataset(Dataset):
def __init__(self, x, y, tokenizer):
self.examples = list(zip(x, y))
self.tokenizer = tokenizer
self.device = "cuda" if torch.cuda.is_available() else "cpu"
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
def collate_fn(self, batch):
model_inputs = self.tokenizer([i[0] for i in batch], return_tensors="pt", padding=True, truncation=True,
max_length=64).to(self.device)
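# Note: max_length=64 truncates aggressively; anything past the first 64
# tokens of an input is discarded, which matters for the document-level subtask.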
labels = torch.tensor([i[1] for i in batch]).to(self.device)
return {"model_inputs": model_inputs, "label": labels}
class RandomModel():
def __init__(self):
pass
def fit(self, data):
"""
Learns the seed for future prediction.
Doesn't use the given data.
"""
self.seed = random.choice(range(100))
def predict(self, test_data):
"""
Takes some data and makes predictions based on the seed which was learnt in the fit() part.
Returns the predictions.
"""
random.seed(self.seed)
preds = [{"id": instance['id'], "prediction": random.choice([0, 1])} for instance in test_data]
return preds
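# Illustrative usage (sketch, not from the original source):
# baseline = RandomModel(); baseline.fit(train_data)
# preds = baseline.predict(test_data) # [{"id": ..., "prediction": 0 or 1}, ...]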
def read(path):
"""
Reads the file from the given path (a JSON-lines file, one JSON object per line).
Returns a list of instance dictionaries.
"""
data = []
with open(path, "r", encoding="utf-8") as file:
for instance in file:
data.append(json.loads(instance))
return data
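# Illustrative usage (the file name is hypothetical): read("en-train.json")
# returns a list of dicts such as {"id": ..., "text" or "sentence": ..., "label": ...}.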
def evaluate_epoch(model, dataset):
model.eval()
targets = []
outputs = []
with torch.no_grad():
for batch in tqdm(DataLoader(dataset, batch_size=args.batch_size, collate_fn=dataset.collate_fn)):
output = model(**batch["model_inputs"]) | targets.extend(batch['label'].float().tolist())
outputs.extend(logits.argmax(dim=1).tolist())
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(targets, outputs, labels=[0, 1],
average="macro")
print(precision_macro, recall_macro)
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return outputs
def evaluate_old(goldfile, sysfile):
"""
Takes goldfile (json) and sysfile (json) paths.
Prints out the results on the terminal.
The metric used is the F1-macro implementation from the sklearn library (documented at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function is the exact way the subtask1 submissions will be evaluated.
"""
gold = {i["id"]: i["label"] for i in read(goldfile)}
sys = {i["id"]: i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(sys[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
def evaluate(gold, predictions):
"""
Takes a dict of gold labels and a dict of predictions, both keyed by instance id.
Prints out the results on the terminal.
The metric used is the F1-macro implementation from the sklearn library (documented at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function mirrors the exact way the subtask1 submissions will be evaluated.
"""
# gold = {i["id"]:i["label"] for i in read(goldfile)}
# sys = {i["id"]:i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(predictions[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return preds
def main(train_file, test_file=None):
train_data = read(train_file)
# subtask 1 files use "text" while subtask 2 files use "sentence"
try:
X = [i["text"] for i in train_data]
except KeyError:
X = [i["sentence"] for i in train_data]
y = [i["label"] for i in train_data]
idx = [i["id"] for i in train_data]
# if we only provide train file, we do train-test split
X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(X, y, idx, test_size=0.33, random_state=42)
if test_file != train_file:
# else, we are in inference mode and predict the testset
test_data = read(test_file)
try:
X_test = [i["text"] for i in test_data]
except KeyError:
X_test = [i["sentence"] for i in test_data]
y_test = [0] * len(X_test) # dummy labels; the test set has no gold labels here
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "/cluster/work/lawecon/Work/dominik/roberta-base"
model_name = "/cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-base"
# model_name = "roberta-base"
model_name = args.model
if "bigbird" in model_name:
config = BigBirdConfig.from_pretrained(model_name)
config.gradient_checkpointing = True
model = BigBirdForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = BigBirdTokenizer.from_pretrained(model_name)
elif "roberta" in model_name:
model = RobertaForSequenceClassification.from_pretrained(model_name).to(device)
tokenizer = RobertaTokenizer.from_pretrained(model_name)
elif "deberta" in model_name:
# DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
config = DebertaV2Config.from_pretrained(model_name)
config.gradient_checkpointing = True
config.num_classes = 2
model = DebertaV2ForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = DebertaV2Tokenizer.from_pretrained(model_name)
trainset = SequenceClassificationDataset(X_train, y_train, tokenizer)
devset = SequenceClassificationDataset(X_test, y_test, tokenizer)
warmup_steps = 0
train_dataloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, collate_fn=trainset.collate_fn)
t_total = int(len(train_dataloader) * args.num_epochs / args.gradient_accumulation_steps)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
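# NOTE: both parameter groups below use weight_decay 0.0, so the no_decay split
# is currently a no-op; setting the first group to e.g. 0.01 would apply decay.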
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
model.zero_grad()
optimizer.zero_grad()
# Use AMP only on GPUs with compute capability >= 8.0 (Ampere or newer); guard
# the capability query so the script still runs on CPU-only machines.
if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8:
use_amp = True
else:
use_amp = False
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
if args.only_prediction is not None:
preds = evaluate_epoch(model, devset)
save_path = os.path.join(args.dest)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
sys.exit(0)
for epoch in range(args.num_epochs):
model.train()
t = tqdm(train_dataloader)
for i, batch in enumerate(t):
with torch.cuda.amp.autocast(enabled=use_amp):
output = model(**batch["model_inputs"], labels=batch['label'])
loss = output.loss / args.gradient_accumulation_steps
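# Scaling the loss down keeps the accumulated gradients comparable to one large
# batch; the effective batch size is batch_size * gradient_accumulation_steps.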
scaler.scale(loss).backward()
if (i + 1) % args.gradient_accumulation_steps == 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
scheduler.step() # Update learning rate schedule
optimizer.zero_grad()
acc = (output.logits.argmax(axis=-1).detach() == batch["label"]).float().mean()
t.set_description(f'Epoch {epoch}, iter {i}, loss: {round(loss.item(), 4)}, acc: {round(acc.item(), 4)}')
preds = evaluate_epoch(model, devset)
# Save
save_path = os.path.join(args.dest)
os.makedirs(save_path, exist_ok=True)
tokenizer.save_pretrained(save_path)
model.save_pretrained(save_path)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-train_file', '--train_file', required=True, help="The path to the training data json file")
parser.add_argument('-test_file', '--test_file', required=True, help="The path to the test data json file")
parser.add_argument('--dest', type=str, required=True, help='Folder to save the weights')
parser.add_argument('--model', type=str, default='roberta-large')
parser.add_argument('--num_epochs', type=int, default=3)
parser.add_argument("--gradient_accumulation_steps", type=int, default=8,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--batch_size", default=16, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--learning_rate", default=2e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--only_prediction", default=None, type=str,
help="Epsilon for Adam optimizer.")
args = parser.parse_args()
main(args.train_file, args.test_file)
# subtask 1 roberta, just change model names if we use different model
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../20210312/subtask1-document/en-train.json --dest roberta-task1 --model /cluster/work/lawecon/Work/dominik/roberta-base
# evaluate
# subtask 2 roberta
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask2-sentence/en-train.json --test_file ../20210312/subtask2-sentence/en-train.json --dest roberta-task2 --model /cluster/work/lawecon/Work/dominik/roberta-base
# subtask 2 deberta
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask2-sentence/en-train.json --test_file ../20210312/subtask2-sentence/en-train.json --dest debertav2-task2 --model ../../../deberta-v2-xlarge-mnli/ --batch_size 1 --gradient_accumulation_steps 16
# bigbird-roberta-large
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../20210312/subtask1-document/en-train.json --dest bigbird-large-task1 --gradient_accumulation_steps 16 --batch_size 2 --model /cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-large
# evaluate
# bsub -n 1 -R "rusage[mem=12800,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../test.json --dest bigbird-large-task1 --batch_size 2 --model bigbird-large-task1
# evaluate subtask 2
# bsub -n 1 -R "rusage[mem=12800,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../test_subtask2.json --dest debertav2-task2 --batch_size 2 --model debertav2-task2 | logits = output.logits | random_line_split |
transformer_models.py | import json
import random
import argparse
import sys
from sklearn.metrics import precision_recall_fscore_support, classification_report
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import BigBirdTokenizer, BigBirdForSequenceClassification, BigBirdConfig
import torch
import os
from torch.utils.data import Dataset, DataLoader
from transformers import AdamW, get_linear_schedule_with_warmup
from tqdm import tqdm
from transformers import DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
class SequenceClassificationDataset(Dataset):
def __init__(self, x, y, tokenizer):
self.examples = list(zip(x, y))
self.tokenizer = tokenizer
self.device = "cuda" if torch.cuda.is_available() else "cpu"
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
def collate_fn(self, batch):
model_inputs = self.tokenizer([i[0] for i in batch], return_tensors="pt", padding=True, truncation=True,
max_length=64).to(self.device)
labels = torch.tensor([i[1] for i in batch]).to(self.device)
return {"model_inputs": model_inputs, "label": labels}
class RandomModel():
def __init__(self):
pass
def fit(self, data):
"""
Learns the seed for future prediction.
Doesn't use the given data.
"""
self.seed = random.choice(range(100))
def predict(self, test_data):
"""
Takes some data and makes predictions based on the seed which was learnt in the fit() part.
Returns the predictions.
"""
random.seed(self.seed)
preds = [{"id": instance['id'], "prediction": random.choice([0, 1])} for instance in test_data]
return preds
def | (path):
"""
Reads the file from the given path (a JSON-lines file, one JSON object per line).
Returns a list of instance dictionaries.
"""
data = []
with open(path, "r", encoding="utf-8") as file:
for instance in file:
data.append(json.loads(instance))
return data
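# The files are JSON-lines (one JSON object per line), not a single JSON array,
# hence the per-line json.loads instead of a single json.load.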
def evaluate_epoch(model, dataset):
model.eval()
targets = []
outputs = []
with torch.no_grad():
for batch in tqdm(DataLoader(dataset, batch_size=args.batch_size, collate_fn=dataset.collate_fn)):
output = model(**batch["model_inputs"])
logits = output.logits
targets.extend(batch['label'].float().tolist())
outputs.extend(logits.argmax(dim=1).tolist())
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(targets, outputs, labels=[0, 1],
average="macro")
print(precision_macro, recall_macro)
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return outputs
def evaluate_old(goldfile, sysfile):
"""
Takes goldfile (json) and sysfile (json) paths.
Prints out the results on the terminal.
The metric used is the F1-macro implementation from the sklearn library (documented at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function is the exact way the subtask1 submissions will be evaluated.
"""
gold = {i["id"]: i["label"] for i in read(goldfile)}
sys = {i["id"]: i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(sys[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
def evaluate(gold, predictions):
"""
Takes a dict of gold labels and a dict of predictions, both keyed by instance id.
Prints out the results on the terminal.
The metric used is the F1-macro implementation from the sklearn library (documented at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function mirrors the exact way the subtask1 submissions will be evaluated.
"""
# gold = {i["id"]:i["label"] for i in read(goldfile)}
# sys = {i["id"]:i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(predictions[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return preds
def main(train_file, test_file=None):
train_data = read(train_file)
# subtask 1 files use "text" while subtask 2 files use "sentence"
try:
X = [i["text"] for i in train_data]
except KeyError:
X = [i["sentence"] for i in train_data]
y = [i["label"] for i in train_data]
idx = [i["id"] for i in train_data]
# if we only provide train file, we do train-test split
X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(X, y, idx, test_size=0.33, random_state=42)
if test_file != train_file:
# else, we are in inference mode and predict the testset
test_data = read(test_file)
try:
X_test = [i["text"] for i in test_data]
except KeyError:
X_test = [i["sentence"] for i in test_data]
y_test = [0] * len(X_test) # dummy labels; the test set has no gold labels here
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "/cluster/work/lawecon/Work/dominik/roberta-base"
model_name = "/cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-base"
# model_name = "roberta-base"
model_name = args.model
if "bigbird" in model_name:
config = BigBirdConfig.from_pretrained(model_name)
config.gradient_checkpointing = True
model = BigBirdForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = BigBirdTokenizer.from_pretrained(model_name)
elif "roberta" in model_name:
model = RobertaForSequenceClassification.from_pretrained(model_name).to(device)
tokenizer = RobertaTokenizer.from_pretrained(model_name)
elif "deberta" in model_name:
# DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
config = DebertaV2Config.from_pretrained(model_name)
config.gradient_checkpointing = True
config.num_classes = 2
model = DebertaV2ForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = DebertaV2Tokenizer.from_pretrained(model_name)
trainset = SequenceClassificationDataset(X_train, y_train, tokenizer)
devset = SequenceClassificationDataset(X_test, y_test, tokenizer)
warmup_steps = 0
train_dataloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, collate_fn=trainset.collate_fn)
t_total = int(len(train_dataloader) * args.num_epochs / args.gradient_accumulation_steps)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
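# NOTE: both parameter groups below use weight_decay 0.0, so the no_decay split
# currently has no effect; a nonzero value (e.g. 0.01) in the first group would.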
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
model.zero_grad()
optimizer.zero_grad()
# Use AMP only on GPUs with compute capability >= 8.0 (Ampere or newer); guard
# the capability query so the script still runs on CPU-only machines.
if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8:
use_amp = True
else:
use_amp = False
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
if args.only_prediction is not None:
preds = evaluate_epoch(model, devset)
save_path = os.path.join(args.dest)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
sys.exit(0)
for epoch in range(args.num_epochs):
model.train()
t = tqdm(train_dataloader)
for i, batch in enumerate(t):
with torch.cuda.amp.autocast(enabled=use_amp):
output = model(**batch["model_inputs"], labels=batch['label'])
loss = output.loss / args.gradient_accumulation_steps
scaler.scale(loss).backward()
if (i + 1) % args.gradient_accumulation_steps == 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
scheduler.step() # Update learning rate schedule
optimizer.zero_grad()
acc = (output.logits.argmax(axis=-1).detach() == batch["label"]).float().mean()
t.set_description(f'Epoch {epoch}, iter {i}, loss: {round(loss.item(), 4)}, acc: {round(acc.item(), 4)}')
preds = evaluate_epoch(model, devset)
# Save
save_path = os.path.join(args.dest)
os.makedirs(save_path, exist_ok=True)
tokenizer.save_pretrained(save_path)
model.save_pretrained(save_path)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-train_file', '--train_file', required=True, help="The path to the training data json file")
    parser.add_argument('-test_file', '--test_file', required=True, help="The path to the test data json file")
parser.add_argument('--dest', type=str, required=True, help='Folder to save the weights')
parser.add_argument('--model', type=str, default='roberta-large')
parser.add_argument('--num_epochs', type=int, default=3)
parser.add_argument("--gradient_accumulation_steps", type=int, default=8,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--batch_size", default=16, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--learning_rate", default=2e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--only_prediction", default=None, type=str,
help="Epsilon for Adam optimizer.")
args = parser.parse_args()
main(args.train_file, args.test_file)
# subtask 1 roberta, just change model names if we use different model
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../20210312/subtask1-document/en-train.json --dest roberta-task1 --model /cluster/work/lawecon/Work/dominik/roberta-base
# evaluate
# subtask 2 roberta
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask2-sentence/en-train.json --test_file ../20210312/subtask2-sentence/en-train.json --dest roberta-task2 --model /cluster/work/lawecon/Work/dominik/roberta-base
# subtask 2 deberta
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask2-sentence/en-train.json --test_file ../20210312/subtask2-sentence/en-train.json --dest debertav2-task2 --model ../../../deberta-v2-xlarge-mnli/ --batch_size 1 --gradient_accumulation_steps 16
# bigbird-roberta-large
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../20210312/subtask1-document/en-train.json --dest bigbird-large-task1 --gradient_accumulation_steps 16 --batch_size 2 --model /cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-large
# evaluate
# bsub -n 1 -R "rusage[mem=12800,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../test.json --dest bigbird-large-task1 --batch_size 2 --model bigbird-large-task1
# evaluate subtask 2
# bsub -n 1 -R "rusage[mem=12800,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../test_subtask2.json --dest debertav2-task2 --batch_size 2 --model debertav2-task2
| read | identifier_name |
transformer_models.py | import json
import random
import argparse
import sys
from sklearn.metrics import precision_recall_fscore_support, classification_report
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import BigBirdTokenizer, BigBirdForSequenceClassification, BigBirdConfig
import torch
import os
from torch.utils.data import Dataset, DataLoader
from transformers import AdamW, get_linear_schedule_with_warmup
from tqdm import tqdm
from transformers import DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
class SequenceClassificationDataset(Dataset):
def __init__(self, x, y, tokenizer):
self.examples = list(zip(x, y))
self.tokenizer = tokenizer
self.device = "cuda" if torch.cuda.is_available() else "cpu"
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
def collate_fn(self, batch):
model_inputs = self.tokenizer([i[0] for i in batch], return_tensors="pt", padding=True, truncation=True,
max_length=64).to(self.device)
labels = torch.tensor([i[1] for i in batch]).to(self.device)
return {"model_inputs": model_inputs, "label": labels}
class RandomModel():
def __init__(self):
pass
def fit(self, data):
"""
Learns the seed for future prediction.
Doesn't use the given data.
"""
self.seed = random.choice(range(100))
def predict(self, test_data):
"""
Takes some data and makes predictions based on the seed which was learnt in the fit() part.
Returns the predictions.
"""
random.seed(self.seed)
preds = [{"id": instance['id'], "prediction": random.choice([0, 1])} for instance in test_data]
return preds
def read(path):
"""
Reads the file from the given path (json file).
Returns list of instance dictionaries.
"""
data = []
with open(path, "r", encoding="utf-8") as file:
for instance in file:
data.append(json.loads(instance))
return data
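# Expected input is JSON-lines, one instance per line, e.g. (hypothetical record):
#   {"id": 17, "text": "...", "label": 1}
# main() falls back to a "sentence" key when "text" is absent (subtask 2 files).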
def evaluate_epoch(model, dataset):
model.eval()
targets = []
outputs = []
with torch.no_grad():
for batch in tqdm(DataLoader(dataset, batch_size=args.batch_size, collate_fn=dataset.collate_fn)):
output = model(**batch["model_inputs"])
logits = output.logits
targets.extend(batch['label'].float().tolist())
outputs.extend(logits.argmax(dim=1).tolist())
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(targets, outputs, labels=[0, 1],
average="macro")
print(precision_macro, recall_macro)
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return outputs
def evaluate_old(goldfile, sysfile):
"""
Takes goldfile (json) and sysfile (json) paths.
Prints out the results on the terminal.
The metric used is F1-Macro implementation from sklearn library (Its documentation is at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function is the exact way the subtask1's submissions will be evaluated.
"""
gold = {i["id"]: i["label"] for i in read(goldfile)}
sys = {i["id"]: i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
        preds.append(sys_preds[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
def evaluate(gold, predictions):
"""
    Takes gold labels and system predictions as dicts keyed by instance id.
Prints out the results on the terminal.
The metric used is F1-Macro implementation from sklearn library (Its documentation is at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function is the exact way the subtask1's submissions will be evaluated.
"""
# gold = {i["id"]:i["label"] for i in read(goldfile)}
# sys = {i["id"]:i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(predictions[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return preds
def main(train_file, test_file=None):
train_data = read(train_file)
try:
X = [i["text"] for i in train_data]
    except KeyError:
X = [i["sentence"] for i in train_data]
y = [i["label"] for i in train_data]
idx = [i["id"] for i in train_data]
# if we only provide train file, we do train-test split
X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(X, y, idx, test_size=0.33, random_state=42)
if test_file != train_file:
# else, we are in inference mode and predict the testset
test_data = read(test_file)
try:
X_test = [i["text"] for i in test_data]
        except KeyError:
X_test = [i["sentence"] for i in test_data]
y_test = [0] * len(X_test)
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "/cluster/work/lawecon/Work/dominik/roberta-base"
model_name = "/cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-base"
# model_name = "roberta-base"
model_name = args.model
if "bigbird" in model_name:
|
elif "roberta" in model_name:
model = RobertaForSequenceClassification.from_pretrained(model_name).to(device)
tokenizer = RobertaTokenizer.from_pretrained(model_name)
elif "deberta" in model_name:
# DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
config = DebertaV2Config.from_pretrained(model_name)
config.gradient_checkpointing = True
        config.num_labels = 2  # HF sequence-classification heads read num_labels; num_classes is not a recognized config field
model = DebertaV2ForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = DebertaV2Tokenizer.from_pretrained(model_name)
trainset = SequenceClassificationDataset(X_train, y_train, tokenizer)
devset = SequenceClassificationDataset(X_test, y_test, tokenizer)
warmup_steps = 0
train_dataloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, collate_fn=trainset.collate_fn)
t_total = int(len(train_dataloader) * args.num_epochs / args.gradient_accumulation_steps)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.0},  # NOTE: both groups use 0.0, so the decay/no-decay split is currently a no-op
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
model.zero_grad()
optimizer.zero_grad()
    # torch.cuda.get_device_capability() raises when no GPU is present, so guard the AMP check
    if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8:
        use_amp = True
    else:
        use_amp = False
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
if args.only_prediction is not None:
preds = evaluate_epoch(model, devset)
        save_path = os.path.join(args.dest)
        os.makedirs(save_path, exist_ok=True)  # dest may not exist yet in prediction-only runs
        with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
sys.exit(0)
for epoch in range(args.num_epochs):
model.train()
t = tqdm(train_dataloader)
for i, batch in enumerate(t):
with torch.cuda.amp.autocast(enabled=use_amp):
output = model(**batch["model_inputs"], labels=batch['label'])
loss = output.loss / args.gradient_accumulation_steps
scaler.scale(loss).backward()
if (i + 1) % args.gradient_accumulation_steps == 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
scheduler.step() # Update learning rate schedule
optimizer.zero_grad()
acc = (output.logits.argmax(axis=-1).detach() == batch["label"]).float().mean()
t.set_description(f'Epoch {epoch}, iter {i}, loss: {round(loss.item(), 4)}, acc: {round(acc.item(), 4)}')
preds = evaluate_epoch(model, devset)
# Save
save_path = os.path.join(args.dest)
os.makedirs(save_path, exist_ok=True)
tokenizer.save_pretrained(save_path)
model.save_pretrained(save_path)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-train_file', '--train_file', required=True, help="The path to the training data json file")
    parser.add_argument('-test_file', '--test_file', required=True, help="The path to the test data json file")
parser.add_argument('--dest', type=str, required=True, help='Folder to save the weights')
parser.add_argument('--model', type=str, default='roberta-large')
parser.add_argument('--num_epochs', type=int, default=3)
parser.add_argument("--gradient_accumulation_steps", type=int, default=8,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--batch_size", default=16, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--learning_rate", default=2e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--only_prediction", default=None, type=str,
help="Epsilon for Adam optimizer.")
args = parser.parse_args()
main(args.train_file, args.test_file)
# subtask 1 roberta, just change model names if we use different model
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../20210312/subtask1-document/en-train.json --dest roberta-task1 --model /cluster/work/lawecon/Work/dominik/roberta-base
# evaluate
# subtask 2 roberta
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask2-sentence/en-train.json --test_file ../20210312/subtask2-sentence/en-train.json --dest roberta-task2 --model /cluster/work/lawecon/Work/dominik/roberta-base
# subtask 2 deberta
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask2-sentence/en-train.json --test_file ../20210312/subtask2-sentence/en-train.json --dest debertav2-task2 --model ../../../deberta-v2-xlarge-mnli/ --batch_size 1 --gradient_accumulation_steps 16
# bigbird-roberta-large
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../20210312/subtask1-document/en-train.json --dest bigbird-large-task1 --gradient_accumulation_steps 16 --batch_size 2 --model /cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-large
# evaluate
# bsub -n 1 -R "rusage[mem=12800,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../test.json --dest bigbird-large-task1 --batch_size 2 --model bigbird-large-task1
# evaluate subtask 2
# bsub -n 1 -R "rusage[mem=12800,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../test_subtask2.json --dest debertav2-task2 --batch_size 2 --model debertav2-task2
| config = BigBirdConfig.from_pretrained(model_name)
config.gradient_checkpointing = True
model = BigBirdForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = BigBirdTokenizer.from_pretrained(model_name) | conditional_block |
transformer_models.py | import json
import random
import argparse
import sys
from sklearn.metrics import precision_recall_fscore_support, classification_report
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from transformers import BigBirdTokenizer, BigBirdForSequenceClassification, BigBirdConfig
import torch
import os
from torch.utils.data import Dataset, DataLoader
from transformers import AdamW, get_linear_schedule_with_warmup
from tqdm import tqdm
from transformers import DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
class SequenceClassificationDataset(Dataset):
def __init__(self, x, y, tokenizer):
self.examples = list(zip(x, y))
self.tokenizer = tokenizer
self.device = "cuda" if torch.cuda.is_available() else "cpu"
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
|
def collate_fn(self, batch):
model_inputs = self.tokenizer([i[0] for i in batch], return_tensors="pt", padding=True, truncation=True,
max_length=64).to(self.device)
labels = torch.tensor([i[1] for i in batch]).to(self.device)
return {"model_inputs": model_inputs, "label": labels}
class RandomModel():
def __init__(self):
pass
def fit(self, data):
"""
Learns the seed for future prediction.
Doesn't use the given data.
"""
self.seed = random.choice(range(100))
def predict(self, test_data):
"""
Takes some data and makes predictions based on the seed which was learnt in the fit() part.
Returns the predictions.
"""
random.seed(self.seed)
preds = [{"id": instance['id'], "prediction": random.choice([0, 1])} for instance in test_data]
return preds
def read(path):
"""
Reads the file from the given path (json file).
Returns list of instance dictionaries.
"""
data = []
with open(path, "r", encoding="utf-8") as file:
for instance in file:
data.append(json.loads(instance))
return data
def evaluate_epoch(model, dataset):
model.eval()
targets = []
outputs = []
with torch.no_grad():
for batch in tqdm(DataLoader(dataset, batch_size=args.batch_size, collate_fn=dataset.collate_fn)):
output = model(**batch["model_inputs"])
logits = output.logits
targets.extend(batch['label'].float().tolist())
outputs.extend(logits.argmax(dim=1).tolist())
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(targets, outputs, labels=[0, 1],
average="macro")
print(precision_macro, recall_macro)
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return outputs
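# Minimal, self-contained sanity check of the macro-F1 computation above
# (hypothetical gold/pred values; reuses the module-level sklearn import):
def _macro_f1_example():
    gold, pred = [0, 1, 1, 0], [0, 1, 0, 0]
    _, _, f1, _ = precision_recall_fscore_support(gold, pred, labels=[0, 1], average="macro")
    return f1  # class-0 F1 = 0.8, class-1 F1 = 2/3 -> macro F1 ~= 0.733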
def evaluate_old(goldfile, sysfile):
"""
Takes goldfile (json) and sysfile (json) paths.
Prints out the results on the terminal.
The metric used is F1-Macro implementation from sklearn library (Its documentation is at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function is the exact way the subtask1's submissions will be evaluated.
"""
gold = {i["id"]: i["label"] for i in read(goldfile)}
sys = {i["id"]: i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
        preds.append(sys_preds[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
def evaluate(gold, predictions):
"""
    Takes gold labels and system predictions as dicts keyed by instance id.
Prints out the results on the terminal.
The metric used is F1-Macro implementation from sklearn library (Its documentation is at https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_fscore_support.html).
This function is the exact way the subtask1's submissions will be evaluated.
"""
# gold = {i["id"]:i["label"] for i in read(goldfile)}
# sys = {i["id"]:i["prediction"] for i in read(sysfile)}
labels, preds = [], []
for idx in gold:
labels.append(gold[idx])
preds.append(predictions[idx])
precision_macro, recall_macro, f1_macro, _ = precision_recall_fscore_support(labels, preds, labels=[0, 1],
average="macro")
print("F1-macro score for test data predictions are: %.4f" % f1_macro)
return preds
def main(train_file, test_file=None):
train_data = read(train_file)
try:
X = [i["text"] for i in train_data]
    except KeyError:
X = [i["sentence"] for i in train_data]
y = [i["label"] for i in train_data]
idx = [i["id"] for i in train_data]
# if we only provide train file, we do train-test split
X_train, X_test, y_train, y_test, idx_train, idx_test = train_test_split(X, y, idx, test_size=0.33, random_state=42)
if test_file != train_file:
# else, we are in inference mode and predict the testset
test_data = read(test_file)
try:
X_test = [i["text"] for i in test_data]
        except KeyError:
X_test = [i["sentence"] for i in test_data]
y_test = [0] * len(X_test)
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "/cluster/work/lawecon/Work/dominik/roberta-base"
model_name = "/cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-base"
# model_name = "roberta-base"
model_name = args.model
if "bigbird" in model_name:
config = BigBirdConfig.from_pretrained(model_name)
config.gradient_checkpointing = True
model = BigBirdForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = BigBirdTokenizer.from_pretrained(model_name)
elif "roberta" in model_name:
model = RobertaForSequenceClassification.from_pretrained(model_name).to(device)
tokenizer = RobertaTokenizer.from_pretrained(model_name)
elif "deberta" in model_name:
# DebertaV2Tokenizer, DebertaV2Model, DebertaV2ForSequenceClassification, DebertaV2Config
config = DebertaV2Config.from_pretrained(model_name)
config.gradient_checkpointing = True
        config.num_labels = 2  # HF sequence-classification heads read num_labels; num_classes is not a recognized config field
model = DebertaV2ForSequenceClassification.from_pretrained(model_name, config=config).to(device)
tokenizer = DebertaV2Tokenizer.from_pretrained(model_name)
trainset = SequenceClassificationDataset(X_train, y_train, tokenizer)
devset = SequenceClassificationDataset(X_test, y_test, tokenizer)
warmup_steps = 0
train_dataloader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, collate_fn=trainset.collate_fn)
t_total = int(len(train_dataloader) * args.num_epochs / args.gradient_accumulation_steps)
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.0},  # NOTE: both groups use 0.0, so the decay/no-decay split is currently a no-op
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
model.zero_grad()
optimizer.zero_grad()
    # torch.cuda.get_device_capability() raises when no GPU is present, so guard the AMP check
    if torch.cuda.is_available() and torch.cuda.get_device_capability()[0] >= 8:
        use_amp = True
    else:
        use_amp = False
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)
if args.only_prediction is not None:
preds = evaluate_epoch(model, devset)
        save_path = os.path.join(args.dest)
        os.makedirs(save_path, exist_ok=True)  # dest may not exist yet in prediction-only runs
        with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
sys.exit(0)
for epoch in range(args.num_epochs):
model.train()
t = tqdm(train_dataloader)
for i, batch in enumerate(t):
with torch.cuda.amp.autocast(enabled=use_amp):
output = model(**batch["model_inputs"], labels=batch['label'])
loss = output.loss / args.gradient_accumulation_steps
scaler.scale(loss).backward()
if (i + 1) % args.gradient_accumulation_steps == 0:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
scaler.step(optimizer)
scaler.update()
scheduler.step() # Update learning rate schedule
optimizer.zero_grad()
acc = (output.logits.argmax(axis=-1).detach() == batch["label"]).float().mean()
t.set_description(f'Epoch {epoch}, iter {i}, loss: {round(loss.item(), 4)}, acc: {round(acc.item(), 4)}')
preds = evaluate_epoch(model, devset)
# Save
save_path = os.path.join(args.dest)
os.makedirs(save_path, exist_ok=True)
tokenizer.save_pretrained(save_path)
model.save_pretrained(save_path)
with open(os.path.join(save_path, "dev_preds.txt"), "w") as f:
for i in preds:
f.write(str(i) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-train_file', '--train_file', required=True, help="The path to the training data json file")
    parser.add_argument('-test_file', '--test_file', required=True, help="The path to the test data json file")
parser.add_argument('--dest', type=str, required=True, help='Folder to save the weights')
parser.add_argument('--model', type=str, default='roberta-large')
parser.add_argument('--num_epochs', type=int, default=3)
parser.add_argument("--gradient_accumulation_steps", type=int, default=8,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--batch_size", default=16, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--learning_rate", default=2e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--only_prediction", default=None, type=str,
help="Epsilon for Adam optimizer.")
args = parser.parse_args()
main(args.train_file, args.test_file)
# subtask 1 roberta, just change model names if we use different model
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../20210312/subtask1-document/en-train.json --dest roberta-task1 --model /cluster/work/lawecon/Work/dominik/roberta-base
# evaluate
# subtask 2 roberta
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask2-sentence/en-train.json --test_file ../20210312/subtask2-sentence/en-train.json --dest roberta-task2 --model /cluster/work/lawecon/Work/dominik/roberta-base
# subtask 2 deberta
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask2-sentence/en-train.json --test_file ../20210312/subtask2-sentence/en-train.json --dest debertav2-task2 --model ../../../deberta-v2-xlarge-mnli/ --batch_size 1 --gradient_accumulation_steps 16
# bigbird-roberta-large
# bsub -n 1 -R "rusage[mem=25600,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../20210312/subtask1-document/en-train.json --dest bigbird-large-task1 --gradient_accumulation_steps 16 --batch_size 2 --model /cluster/work/lawecon/Work/dominik/FEVER_bigbird/bigbird-roberta-large
# evaluate
# bsub -n 1 -R "rusage[mem=12800,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../test.json --dest bigbird-large-task1 --batch_size 2 --model bigbird-large-task1
# evaluate subtask 2
# bsub -n 1 -R "rusage[mem=12800,ngpus_excl_p=1]" -R "select[gpu_mtotal0>=10240]" python task1/subtask1/transformer_models.py --train_file ../20210312/subtask1-document/en-train.json --test_file ../test_subtask2.json --dest debertav2-task2 --batch_size 2 --model debertav2-task2
| return self.examples[idx] | identifier_body |
smartnic.go | // {C} Copyright 2019 Pensando Systems Inc. All rights reserved.
package impl
import (
"context"
"fmt"
"reflect"
"strings"
"github.com/pensando/sw/api"
"github.com/pensando/sw/api/generated/apiclient"
"github.com/pensando/sw/api/generated/cluster"
diagapi "github.com/pensando/sw/api/generated/diagnostics"
"github.com/pensando/sw/api/generated/network"
apiintf "github.com/pensando/sw/api/interfaces"
apiutils "github.com/pensando/sw/api/utils"
apisrvpkg "github.com/pensando/sw/venice/apiserver/pkg"
"github.com/pensando/sw/venice/globals"
"github.com/pensando/sw/venice/utils/diagnostics"
"github.com/pensando/sw/venice/utils/kvstore"
)
func (cl *clusterHooks) checkNonUserModifiableSmartNICFields(updObj, curObj *cluster.DistributedServiceCard) []string {
NUMFields := []string{"ID", "NetworkMode", "MgmtVlan", "Controllers"}
var errs []string
updSpec := reflect.Indirect(reflect.ValueOf(updObj)).FieldByName("Spec")
curSpec := reflect.Indirect(reflect.ValueOf(curObj)).FieldByName("Spec")
for _, fn := range NUMFields {
updField := updSpec.FieldByName(fn).Interface()
curField := curSpec.FieldByName(fn).Interface()
if !reflect.DeepEqual(updField, curField) {
errs = append(errs, fn)
}
}
// if ipconfig field is not empty and non nil (old or new) then do the check
updIPConfig := updSpec.FieldByName("IPConfig").Interface()
curIPConfig := curSpec.FieldByName("IPConfig").Interface()
emptyIPConfig := cluster.IPConfig{}
// Reflect can't distinguish between empty object and nil object. So adding this additional check for IP Config
// We perform the deepequal check only if either of the old or new spec are not nil and not empty
if ((updIPConfig != nil) && !reflect.DeepEqual(updIPConfig, emptyIPConfig)) || ((curIPConfig != nil) && !reflect.DeepEqual(curIPConfig, emptyIPConfig)) {
if !reflect.DeepEqual(updIPConfig, curIPConfig) {
errs = append(errs, "IPConfig")
}
}
return errs
}
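// The reflection pattern above extends to any Spec field shared by both objects;
// a minimal standalone sketch (field name illustrative):
//
//	upd := reflect.Indirect(reflect.ValueOf(updObj)).FieldByName("Spec")
//	cur := reflect.Indirect(reflect.ValueOf(curObj)).FieldByName("Spec")
//	changed := !reflect.DeepEqual(upd.FieldByName("ID").Interface(), cur.FieldByName("ID").Interface())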
func (cl *clusterHooks) smartNICPreCommitHook(ctx context.Context, kvs kvstore.Interface, txn kvstore.Txn, key string, oper apiintf.APIOperType, dryrun bool, i interface{}) (interface{}, bool, error) {
updNIC, ok := i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
// Current configurable PolicerAttachTenant field supports default tenant only. This restriction will be removed in future
if updNIC.Spec.PolicerAttachTenant == "" {
updNIC.Spec.PolicerAttachTenant = "default"
} else if updNIC.Spec.PolicerAttachTenant != "default" {
cl.logger.Errorf("PolicerAttachTenant is supported for default tenant only")
return i, true, fmt.Errorf("PolicerAttachTenant is supported for default tenant only")
}
if oper == apiintf.CreateOper {
var nicIPAddr string // create modules with empty IP address if smartnic doesn't have an IP yet
if updNIC.Status.IPConfig != nil {
nicIPAddr = updNIC.Status.IPConfig.IPAddress
}
modObjs := diagnostics.NewNaplesModules(updNIC.Name, nicIPAddr, apisrvpkg.MustGetAPIServer().GetVersion())
for _, modObj := range modObjs {
if err := txn.Create(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for smart nic [%s] creation", modObj.Name, updNIC.Name), "error", err)
continue // TODO: throw an event
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
}
return i, true, nil
}
if ctx == nil || kvs == nil {
return i, false, fmt.Errorf("smartNICPreCommitHook called with NIL parameter, ctx: %p, kvs: %p", ctx, kvs)
}
curNIC := &cluster.DistributedServiceCard{}
// Get from the persisted DB here.
pctx := apiutils.SetVar(ctx, apiutils.CtxKeyGetPersistedKV, true)
err := kvs.Get(pctx, key, curNIC)
if err != nil {
cl.logger.Errorf("Error getting DistributedServiceCard with key [%s] in API server smartNICPreCommitHook pre-commit hook", key)
return i, false, fmt.Errorf("Error getting object: %v", err)
}
nwManaged := curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String()
admitted := curNIC.Status.AdmissionPhase == cluster.DistributedServiceCardStatus_ADMITTED.String()
switch oper {
case apiintf.DeleteOper:
// Prevent deletion of DistributedServiceCard object if MgmtMode = NETWORK && Phase = ADMITTED
if nwManaged && admitted {
errStr := fmt.Sprintf("Cannot delete DistributedServiceCard Object because it is in %s phase. Please decommission before deleting.", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// delete module objects for processes running on the smart nic
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
break
}
for _, modObj := range into.Items {
if err := txn.Delete(modObj.MakeKey("diagnostics")); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("deleting module: %s", modObj.Name))
}
case apiintf.UpdateOper:
var ok bool
updNIC, ok = i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
if updNIC.Status.IPConfig != nil { // update IP address of smartnic in module object
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
}
for _, modObj := range into.Items {
modObj.Status.Node = updNIC.Status.IPConfig.IPAddress
// not doing CAS, don't want smart nic updates to fail, we are ok with module object reverting to older version
if err := txn.Update(modObj.MakeKey("diagnostics"), modObj); err != nil |
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("updating module: %s with IP: %s", modObj.Name, modObj.Status.Node))
}
}
oldprofname := curNIC.Spec.DSCProfile
//TODO:revisit once the feature stabilises
var oldProfile cluster.DSCProfile
if oldprofname != "" {
oldProfile = cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: oldprofname,
},
}
err := kvs.Get(ctx, oldProfile.MakeKey("cluster"), &oldProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find old profile")
}
}
updprofname := updNIC.Spec.DSCProfile
if updprofname == "" {
return i, false, fmt.Errorf("updprofilename is nil")
}
updProfile := cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: updprofname,
},
}
err = kvs.Get(ctx, updProfile.MakeKey("cluster"), &updProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find the new profile")
}
if oldprofname != "" && admitted { // validate update of profile only if the NIC is admitted and not decomissioned
errStr := fmt.Sprintf("Profile old : %v %v new: %v %v ", oldProfile.Spec.DeploymentTarget, oldProfile.Spec.FeatureSet, updProfile.Spec.DeploymentTarget, updProfile.Spec.FeatureSet)
cl.logger.Errorf(errStr)
err = verifyAllowedProfile(oldProfile, updProfile)
if err != nil {
return i, false, fmt.Errorf("error in validating profile: %v", err)
}
}
// Reject user-initiated modifications of Spec fields like ID and NetworkMode, as NMD currently
// does not have code to react to the changes.
if apiutils.IsUserRequestCtx(ctx) {
// Prevent mode change if NIC is NOT admitted
if !admitted && updNIC.Spec.MgmtMode != curNIC.Spec.MgmtMode {
errStr := fmt.Sprintf("Management mode change not allowed for DistributedServiceCard because it is not in %s phase", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// Workaround for ...
// Once the SmartNIC is admitted, disallow flipping "Spec.Admit" back to false
if admitted && updNIC.Spec.Admit == false && curNIC.Spec.Admit == true {
return i, true, fmt.Errorf("Spec.Admit cannot be changed to false once the DistributedServiceCard is admitted")
}
// For unified mode, if we are decommissioning, we need to prevent the card from rejoining, so we set admit=false
if admitted && updNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_HOST.String() && curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String() {
updNIC.Spec.Admit = false
}
errs := cl.checkNonUserModifiableSmartNICFields(&updNIC, curNIC)
if len(errs) > 0 {
return i, true, fmt.Errorf("Modification of DistributedServiceCard object fields %s is not allowed", strings.Join(errs, ", "))
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
}
}
// Add a comparator for CAS
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(key), "=", curNIC.ResourceVersion))
// We need to return updNIC, not i, because updNIC is a copy and we may have modified it
return updNIC, true, nil
}
func getFieldSelector(nic string) string {
modules := diagnostics.ListSupportedNaplesModules()
var moduleNames []string
for _, val := range modules {
moduleNames = append(moduleNames, fmt.Sprintf("%s-%s", nic, val))
}
return fmt.Sprintf("meta.name in (%s),status.category=%s", strings.Join(moduleNames, ","), diagapi.ModuleStatus_Naples.String())
}
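// Illustrative result only (actual module names come from diagnostics.ListSupportedNaplesModules();
// the ones below are hypothetical):
//   getFieldSelector("dsc-1") -> `meta.name in (dsc-1-modA,dsc-1-modB),status.category=Naples`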
func verifyAllowedProfile(oldProfile, newProfile cluster.DSCProfile) error {
return nil
}
| {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
} | conditional_block |
smartnic.go | // {C} Copyright 2019 Pensando Systems Inc. All rights reserved.
package impl
import (
"context"
"fmt"
"reflect"
"strings"
"github.com/pensando/sw/api"
"github.com/pensando/sw/api/generated/apiclient"
"github.com/pensando/sw/api/generated/cluster"
diagapi "github.com/pensando/sw/api/generated/diagnostics"
"github.com/pensando/sw/api/generated/network"
apiintf "github.com/pensando/sw/api/interfaces"
apiutils "github.com/pensando/sw/api/utils"
apisrvpkg "github.com/pensando/sw/venice/apiserver/pkg"
"github.com/pensando/sw/venice/globals"
"github.com/pensando/sw/venice/utils/diagnostics"
"github.com/pensando/sw/venice/utils/kvstore"
)
func (cl *clusterHooks) checkNonUserModifiableSmartNICFields(updObj, curObj *cluster.DistributedServiceCard) []string {
NUMFields := []string{"ID", "NetworkMode", "MgmtVlan", "Controllers"}
var errs []string
updSpec := reflect.Indirect(reflect.ValueOf(updObj)).FieldByName("Spec")
curSpec := reflect.Indirect(reflect.ValueOf(curObj)).FieldByName("Spec")
for _, fn := range NUMFields {
updField := updSpec.FieldByName(fn).Interface()
curField := curSpec.FieldByName(fn).Interface()
if !reflect.DeepEqual(updField, curField) {
errs = append(errs, fn)
}
}
// if ipconfig field is not empty and non nil (old or new) then do the check
updIPConfig := updSpec.FieldByName("IPConfig").Interface()
curIPConfig := curSpec.FieldByName("IPConfig").Interface()
emptyIPConfig := cluster.IPConfig{}
// Reflect can't distinguish between empty object and nil object. So adding this additional check for IP Config
// We perform the deepequal check only if either of the old or new spec are not nil and not empty
if ((updIPConfig != nil) && !reflect.DeepEqual(updIPConfig, emptyIPConfig)) || ((curIPConfig != nil) && !reflect.DeepEqual(curIPConfig, emptyIPConfig)) {
if !reflect.DeepEqual(updIPConfig, curIPConfig) {
errs = append(errs, "IPConfig")
}
}
return errs
}
func (cl *clusterHooks) | (ctx context.Context, kvs kvstore.Interface, txn kvstore.Txn, key string, oper apiintf.APIOperType, dryrun bool, i interface{}) (interface{}, bool, error) {
updNIC, ok := i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
// Current configurable PolicerAttachTenant field supports default tenant only. This restriction will be removed in future
if updNIC.Spec.PolicerAttachTenant == "" {
updNIC.Spec.PolicerAttachTenant = "default"
} else if updNIC.Spec.PolicerAttachTenant != "default" {
cl.logger.Errorf("PolicerAttachTenant is supported for default tenant only")
return i, true, fmt.Errorf("PolicerAttachTenant is supported for default tenant only")
}
if oper == apiintf.CreateOper {
var nicIPAddr string // create modules with empty IP address if smartnic doesn't have an IP yet
if updNIC.Status.IPConfig != nil {
nicIPAddr = updNIC.Status.IPConfig.IPAddress
}
modObjs := diagnostics.NewNaplesModules(updNIC.Name, nicIPAddr, apisrvpkg.MustGetAPIServer().GetVersion())
for _, modObj := range modObjs {
if err := txn.Create(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for smart nic [%s] creation", modObj.Name, updNIC.Name), "error", err)
continue // TODO: throw an event
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
}
return i, true, nil
}
if ctx == nil || kvs == nil {
return i, false, fmt.Errorf("smartNICPreCommitHook called with NIL parameter, ctx: %p, kvs: %p", ctx, kvs)
}
curNIC := &cluster.DistributedServiceCard{}
// Get from the persisted DB here.
pctx := apiutils.SetVar(ctx, apiutils.CtxKeyGetPersistedKV, true)
err := kvs.Get(pctx, key, curNIC)
if err != nil {
cl.logger.Errorf("Error getting DistributedServiceCard with key [%s] in API server smartNICPreCommitHook pre-commit hook", key)
return i, false, fmt.Errorf("Error getting object: %v", err)
}
nwManaged := curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String()
admitted := curNIC.Status.AdmissionPhase == cluster.DistributedServiceCardStatus_ADMITTED.String()
switch oper {
case apiintf.DeleteOper:
// Prevent deletion of DistributedServiceCard object if MgmtMode = NETWORK && Phase = ADMITTED
if nwManaged && admitted {
errStr := fmt.Sprintf("Cannot delete DistributedServiceCard Object because it is in %s phase. Please decommission before deleting.", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// delete module objects for processes running on the smart nic
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
break
}
for _, modObj := range into.Items {
if err := txn.Delete(modObj.MakeKey("diagnostics")); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("deleting module: %s", modObj.Name))
}
case apiintf.UpdateOper:
var ok bool
updNIC, ok = i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
if updNIC.Status.IPConfig != nil { // update IP address of smartnic in module object
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
}
for _, modObj := range into.Items {
modObj.Status.Node = updNIC.Status.IPConfig.IPAddress
// not doing CAS, don't want smart nic updates to fail, we are ok with module object reverting to older version
if err := txn.Update(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("updating module: %s with IP: %s", modObj.Name, modObj.Status.Node))
}
}
oldprofname := curNIC.Spec.DSCProfile
//TODO:revisit once the feature stabilises
var oldProfile cluster.DSCProfile
if oldprofname != "" {
oldProfile = cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: oldprofname,
},
}
err := kvs.Get(ctx, oldProfile.MakeKey("cluster"), &oldProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find old profile")
}
}
updprofname := updNIC.Spec.DSCProfile
if updprofname == "" {
return i, false, fmt.Errorf("updprofilename is nil")
}
updProfile := cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: updprofname,
},
}
err = kvs.Get(ctx, updProfile.MakeKey("cluster"), &updProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find the new profile")
}
if oldprofname != "" && admitted { // validate update of profile only if the NIC is admitted and not decomissioned
errStr := fmt.Sprintf("Profile old : %v %v new: %v %v ", oldProfile.Spec.DeploymentTarget, oldProfile.Spec.FeatureSet, updProfile.Spec.DeploymentTarget, updProfile.Spec.FeatureSet)
cl.logger.Errorf(errStr)
err = verifyAllowedProfile(oldProfile, updProfile)
if err != nil {
return i, false, fmt.Errorf("error in validating profile: %v", err)
}
}
// Reject user-initiated modifications of Spec fields like ID and NetworkMode, as NMD currently
// does not have code to react to the changes.
if apiutils.IsUserRequestCtx(ctx) {
// Prevent mode change if NIC is NOT admitted
if !admitted && updNIC.Spec.MgmtMode != curNIC.Spec.MgmtMode {
errStr := fmt.Sprintf("Management mode change not allowed for DistributedServiceCard because it is not in %s phase", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// Workaround for ...
// Once the SmartNIC is admitted, disallow flipping "Spec.Admit" back to false
if admitted && updNIC.Spec.Admit == false && curNIC.Spec.Admit == true {
return i, true, fmt.Errorf("Spec.Admit cannot be changed to false once the DistributedServiceCard is admitted")
}
// For unified mode, if we are decommissioning, we need to prevent the card from rejoining, so we set admit=false
if admitted && updNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_HOST.String() && curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String() {
updNIC.Spec.Admit = false
}
errs := cl.checkNonUserModifiableSmartNICFields(&updNIC, curNIC)
if len(errs) > 0 {
return i, true, fmt.Errorf("Modification of DistributedServiceCard object fields %s is not allowed", strings.Join(errs, ", "))
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
}
}
// Add a comparator for CAS
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(key), "=", curNIC.ResourceVersion))
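	// (CAS: the commit succeeds only while curNIC.ResourceVersion is unchanged since the
	// read above; a concurrent write makes the transaction fail cleanly instead of losing an update.)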
// We need to return updNIC, not i, because updNIC is a copy and we may have modified it
return updNIC, true, nil
}
func getFieldSelector(nic string) string {
modules := diagnostics.ListSupportedNaplesModules()
var moduleNames []string
for _, val := range modules {
moduleNames = append(moduleNames, fmt.Sprintf("%s-%s", nic, val))
}
return fmt.Sprintf("meta.name in (%s),status.category=%s", strings.Join(moduleNames, ","), diagapi.ModuleStatus_Naples.String())
}
func verifyAllowedProfile(oldProfile, newProfile cluster.DSCProfile) error {
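	// Stub while the DSC profile feature stabilises (see the TODO above): every
	// profile transition is currently allowed.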
return nil
}
| smartNICPreCommitHook | identifier_name |
smartnic.go | // {C} Copyright 2019 Pensando Systems Inc. All rights reserved.
package impl
import (
"context"
"fmt"
"reflect"
"strings"
"github.com/pensando/sw/api"
"github.com/pensando/sw/api/generated/apiclient"
"github.com/pensando/sw/api/generated/cluster"
diagapi "github.com/pensando/sw/api/generated/diagnostics"
"github.com/pensando/sw/api/generated/network"
apiintf "github.com/pensando/sw/api/interfaces"
apiutils "github.com/pensando/sw/api/utils"
apisrvpkg "github.com/pensando/sw/venice/apiserver/pkg"
"github.com/pensando/sw/venice/globals"
"github.com/pensando/sw/venice/utils/diagnostics"
"github.com/pensando/sw/venice/utils/kvstore"
)
func (cl *clusterHooks) checkNonUserModifiableSmartNICFields(updObj, curObj *cluster.DistributedServiceCard) []string |
func (cl *clusterHooks) smartNICPreCommitHook(ctx context.Context, kvs kvstore.Interface, txn kvstore.Txn, key string, oper apiintf.APIOperType, dryrun bool, i interface{}) (interface{}, bool, error) {
updNIC, ok := i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
// Current configurable PolicerAttachTenant field supports default tenant only. This restriction will be removed in future
if updNIC.Spec.PolicerAttachTenant == "" {
updNIC.Spec.PolicerAttachTenant = "default"
} else if updNIC.Spec.PolicerAttachTenant != "default" {
cl.logger.Errorf("PolicerAttachTenant is supported for default tenant only")
return i, true, fmt.Errorf("PolicerAttachTenant is supported for default tenant only")
}
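	// e.g. a request with an empty Spec.PolicerAttachTenant is normalized to "default"
	// above, while any other tenant is rejected until multi-tenant policer support lands.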
if oper == apiintf.CreateOper {
var nicIPAddr string // create modules with empty IP address if smartnic doesn't have an IP yet
if updNIC.Status.IPConfig != nil {
nicIPAddr = updNIC.Status.IPConfig.IPAddress
}
modObjs := diagnostics.NewNaplesModules(updNIC.Name, nicIPAddr, apisrvpkg.MustGetAPIServer().GetVersion())
for _, modObj := range modObjs {
if err := txn.Create(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for smart nic [%s] creation", modObj.Name, updNIC.Name), "error", err)
continue // TODO: throw an event
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
}
return i, true, nil
}
if ctx == nil || kvs == nil {
return i, false, fmt.Errorf("smartNICPreCommitHook called with NIL parameter, ctx: %p, kvs: %p", ctx, kvs)
}
curNIC := &cluster.DistributedServiceCard{}
// Get from the persisted DB here.
pctx := apiutils.SetVar(ctx, apiutils.CtxKeyGetPersistedKV, true)
err := kvs.Get(pctx, key, curNIC)
if err != nil {
cl.logger.Errorf("Error getting DistributedServiceCard with key [%s] in API server smartNICPreCommitHook pre-commit hook", key)
return i, false, fmt.Errorf("Error getting object: %v", err)
}
nwManaged := curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String()
admitted := curNIC.Status.AdmissionPhase == cluster.DistributedServiceCardStatus_ADMITTED.String()
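	// These two flags drive the checks below: deletion is blocked and most Spec
	// fields are frozen only while a network-managed DSC is still admitted.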
switch oper {
case apiintf.DeleteOper:
// Prevent deletion of DistributedServiceCard object if MgmtMode = NETWORK && Phase = ADMITTED
if nwManaged && admitted {
errStr := fmt.Sprintf("Cannot delete DistributedServiceCard Object because it is in %s phase. Please decommission before deleting.", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// delete module objects for processes running on the smart nic
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
break
}
for _, modObj := range into.Items {
if err := txn.Delete(modObj.MakeKey("diagnostics")); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("deleting module: %s", modObj.Name))
}
case apiintf.UpdateOper:
var ok bool
updNIC, ok = i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
if updNIC.Status.IPConfig != nil { // update IP address of smartnic in module object
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
}
for _, modObj := range into.Items {
modObj.Status.Node = updNIC.Status.IPConfig.IPAddress
// not doing CAS, don't want smart nic updates to fail, we are ok with module object reverting to older version
if err := txn.Update(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("updating module: %s with IP: %s", modObj.Name, modObj.Status.Node))
}
}
oldprofname := curNIC.Spec.DSCProfile
//TODO:revisit once the feature stabilises
var oldProfile cluster.DSCProfile
if oldprofname != "" {
oldProfile = cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: oldprofname,
},
}
err := kvs.Get(ctx, oldProfile.MakeKey("cluster"), &oldProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find old profile")
}
}
updprofname := updNIC.Spec.DSCProfile
if updprofname == "" {
return i, false, fmt.Errorf("updprofilename is nil")
}
updProfile := cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: updprofname,
},
}
err = kvs.Get(ctx, updProfile.MakeKey("cluster"), &updProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find the new profile")
}
if oldprofname != "" && admitted { // validate update of profile only if the NIC is admitted and not decomissioned
errStr := fmt.Sprintf("Profile old : %v %v new: %v %v ", oldProfile.Spec.DeploymentTarget, oldProfile.Spec.FeatureSet, updProfile.Spec.DeploymentTarget, updProfile.Spec.FeatureSet)
cl.logger.Errorf(errStr)
err = verifyAllowedProfile(oldProfile, updProfile)
if err != nil {
return i, false, fmt.Errorf("error in validating profile: %v", err)
}
}
// Reject user-initiated modifications of Spec fields like ID and NetworkMode, as NMD currently
// does not have code to react to the changes.
if apiutils.IsUserRequestCtx(ctx) {
// Prevent mode change if NIC is NOT admitted
if !admitted && updNIC.Spec.MgmtMode != curNIC.Spec.MgmtMode {
errStr := fmt.Sprintf("Management mode change not allowed for DistributedServiceCard because it is not in %s phase", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// Workaround for ...
// Once the SmartNIC is admitted, disallow flipping "Spec.Admit" back to false
if admitted && updNIC.Spec.Admit == false && curNIC.Spec.Admit == true {
return i, true, fmt.Errorf("Spec.Admit cannot be changed to false once the DistributedServiceCard is admitted")
}
// For unified mode, if we are decommissioning, we need to prevent the card from rejoining, so we set admit=false
if admitted && updNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_HOST.String() && curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String() {
updNIC.Spec.Admit = false
}
errs := cl.checkNonUserModifiableSmartNICFields(&updNIC, curNIC)
if len(errs) > 0 {
return i, true, fmt.Errorf("Modification of DistributedServiceCard object fields %s is not allowed", strings.Join(errs, ", "))
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
}
}
// Add a comparator for CAS
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(key), "=", curNIC.ResourceVersion))
// We need to return updNIC, not i, because updNIC is a copy and we may have modified it
return updNIC, true, nil
}
func getFieldSelector(nic string) string {
modules := diagnostics.ListSupportedNaplesModules()
var moduleNames []string
for _, val := range modules {
moduleNames = append(moduleNames, fmt.Sprintf("%s-%s", nic, val))
}
return fmt.Sprintf("meta.name in (%s),status.category=%s", strings.Join(moduleNames, ","), diagapi.ModuleStatus_Naples.String())
}
func verifyAllowedProfile(oldProfile, newProfile cluster.DSCProfile) error {
return nil
}
| {
NUMFields := []string{"ID", "NetworkMode", "MgmtVlan", "Controllers"}
var errs []string
updSpec := reflect.Indirect(reflect.ValueOf(updObj)).FieldByName("Spec")
curSpec := reflect.Indirect(reflect.ValueOf(curObj)).FieldByName("Spec")
for _, fn := range NUMFields {
updField := updSpec.FieldByName(fn).Interface()
curField := curSpec.FieldByName(fn).Interface()
if !reflect.DeepEqual(updField, curField) {
errs = append(errs, fn)
}
}
// if ipconfig field is not empty and non nil (old or new) then do the check
updIPConfig := updSpec.FieldByName("IPConfig").Interface()
curIPConfig := curSpec.FieldByName("IPConfig").Interface()
emptyIPConfig := cluster.IPConfig{}
// Reflect can't distinguish between empty object and nil object. So adding this additional check for IP Config
// We perform the deepequal check only if either of the old or new spec are not nil and not empty
if ((updIPConfig != nil) && !reflect.DeepEqual(updIPConfig, emptyIPConfig)) || ((curIPConfig != nil) && !reflect.DeepEqual(curIPConfig, emptyIPConfig)) {
if !reflect.DeepEqual(updIPConfig, curIPConfig) {
errs = append(errs, "IPConfig")
}
}
return errs
} | identifier_body |
smartnic.go | // {C} Copyright 2019 Pensando Systems Inc. All rights reserved.
package impl
import (
"context"
"fmt"
"reflect"
"strings"
"github.com/pensando/sw/api"
"github.com/pensando/sw/api/generated/apiclient"
"github.com/pensando/sw/api/generated/cluster"
diagapi "github.com/pensando/sw/api/generated/diagnostics"
"github.com/pensando/sw/api/generated/network"
apiintf "github.com/pensando/sw/api/interfaces"
apiutils "github.com/pensando/sw/api/utils"
apisrvpkg "github.com/pensando/sw/venice/apiserver/pkg"
"github.com/pensando/sw/venice/globals"
"github.com/pensando/sw/venice/utils/diagnostics"
"github.com/pensando/sw/venice/utils/kvstore"
)
func (cl *clusterHooks) checkNonUserModifiableSmartNICFields(updObj, curObj *cluster.DistributedServiceCard) []string {
NUMFields := []string{"ID", "NetworkMode", "MgmtVlan", "Controllers"}
var errs []string
updSpec := reflect.Indirect(reflect.ValueOf(updObj)).FieldByName("Spec")
curSpec := reflect.Indirect(reflect.ValueOf(curObj)).FieldByName("Spec")
for _, fn := range NUMFields {
updField := updSpec.FieldByName(fn).Interface()
curField := curSpec.FieldByName(fn).Interface()
if !reflect.DeepEqual(updField, curField) {
errs = append(errs, fn)
}
}
// if ipconfig field is not empty and non nil (old or new) then do the check
updIPConfig := updSpec.FieldByName("IPConfig").Interface()
curIPConfig := curSpec.FieldByName("IPConfig").Interface()
emptyIPConfig := cluster.IPConfig{}
// Reflect can't distinguish between empty object and nil object. So adding this additional check for IP Config
// We perform the deepequal check only if either of the old or new spec are not nil and not empty
if ((updIPConfig != nil) && !reflect.DeepEqual(updIPConfig, emptyIPConfig)) || ((curIPConfig != nil) && !reflect.DeepEqual(curIPConfig, emptyIPConfig)) {
if !reflect.DeepEqual(updIPConfig, curIPConfig) {
errs = append(errs, "IPConfig")
}
}
return errs
}
func (cl *clusterHooks) smartNICPreCommitHook(ctx context.Context, kvs kvstore.Interface, txn kvstore.Txn, key string, oper apiintf.APIOperType, dryrun bool, i interface{}) (interface{}, bool, error) {
updNIC, ok := i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
// The configurable PolicerAttachTenant field currently supports the default tenant only. This restriction will be removed in the future.
if updNIC.Spec.PolicerAttachTenant == "" { | updNIC.Spec.PolicerAttachTenant = "default"
} else if updNIC.Spec.PolicerAttachTenant != "default" {
cl.logger.Errorf("PolicerAttachTenant is supported for default tenant only")
return i, true, fmt.Errorf("PolicerAttachTenant is supported for default tenant only")
}
if oper == apiintf.CreateOper {
var nicIPAddr string // create modules with empty IP address if smartnic doesn't have an IP yet
if updNIC.Status.IPConfig != nil {
nicIPAddr = updNIC.Status.IPConfig.IPAddress
}
modObjs := diagnostics.NewNaplesModules(updNIC.Name, nicIPAddr, apisrvpkg.MustGetAPIServer().GetVersion())
for _, modObj := range modObjs {
if err := txn.Create(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for smart nic [%s] creation", modObj.Name, updNIC.Name), "error", err)
continue // TODO: throw an event
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
}
return i, true, nil
}
if ctx == nil || kvs == nil {
return i, false, fmt.Errorf("smartNICPreCommitHook called with NIL parameter, ctx: %p, kvs: %p", ctx, kvs)
}
curNIC := &cluster.DistributedServiceCard{}
// Get from the persisted DB here.
pctx := apiutils.SetVar(ctx, apiutils.CtxKeyGetPersistedKV, true)
err := kvs.Get(pctx, key, curNIC)
if err != nil {
cl.logger.Errorf("Error getting DistributedServiceCard with key [%s] in API server smartNICPreCommitHook pre-commit hook", key)
return i, false, fmt.Errorf("Error getting object: %v", err)
}
nwManaged := curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String()
admitted := curNIC.Status.AdmissionPhase == cluster.DistributedServiceCardStatus_ADMITTED.String()
switch oper {
case apiintf.DeleteOper:
// Prevent deletion of DistributedServiceCard object if MgmtMode = NETWORK && Phase = ADMITTED
if nwManaged && admitted {
errStr := fmt.Sprintf("Cannot delete DistributedServiceCard Object because it is in %s phase. Please decommission before deleting.", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// delete module objects for processes running on the smart nic
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
break
}
for _, modObj := range into.Items {
if err := txn.Delete(modObj.MakeKey("diagnostics")); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("deleting module: %s", modObj.Name))
}
case apiintf.UpdateOper:
var ok bool
updNIC, ok = i.(cluster.DistributedServiceCard)
if !ok {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("called for invalid object type [%#v]", i))
return i, true, errInvalidInputType
}
if updNIC.Status.IPConfig != nil { // update IP address of smartnic in module object
into := &diagapi.ModuleList{}
nctx := apiutils.SetVar(ctx, apiutils.CtxKeyObjKind, fmt.Sprintf("%s.%s", "diagnostics", string(diagapi.KindModule)))
if err := kvs.ListFiltered(nctx, globals.ModulesKey, into, api.ListWatchOptions{
FieldSelector: getFieldSelector(curNIC.Name),
}); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("unable to list module objects for smart nic [%s], id [%s]", curNIC.Name, curNIC.Spec.ID), "error", err)
}
for _, modObj := range into.Items {
modObj.Status.Node = updNIC.Status.IPConfig.IPAddress
// not doing CAS, don't want smart nic updates to fail, we are ok with module object reverting to older version
if err := txn.Update(modObj.MakeKey("diagnostics"), modObj); err != nil {
cl.logger.ErrorLog("method", "smartNICPreCommitHook", "msg",
fmt.Sprintf("error adding module obj [%s] to transaction for deletion", modObj.Name), "error", err)
continue
}
cl.logger.DebugLog("method", "smartNICPreCommitHook", "msg", fmt.Sprintf("updating module: %s with IP: %s", modObj.Name, modObj.Status.Node))
}
}
oldprofname := curNIC.Spec.DSCProfile
//TODO:revisit once the feature stabilises
var oldProfile cluster.DSCProfile
if oldprofname != "" {
oldProfile = cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: oldprofname,
},
}
err := kvs.Get(ctx, oldProfile.MakeKey("cluster"), &oldProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find old profile")
}
}
updprofname := updNIC.Spec.DSCProfile
if updprofname == "" {
return i, false, fmt.Errorf("updprofilename is nil")
}
updProfile := cluster.DSCProfile{
ObjectMeta: api.ObjectMeta{
Name: updprofname,
},
}
err = kvs.Get(ctx, updProfile.MakeKey("cluster"), &updProfile)
if err != nil {
return i, false, fmt.Errorf("unable to find the new profile")
}
if oldprofname != "" && admitted { // validate update of profile only if the NIC is admitted and not decomissioned
errStr := fmt.Sprintf("Profile old : %v %v new: %v %v ", oldProfile.Spec.DeploymentTarget, oldProfile.Spec.FeatureSet, updProfile.Spec.DeploymentTarget, updProfile.Spec.FeatureSet)
cl.logger.Errorf(errStr)
err = verifyAllowedProfile(oldProfile, updProfile)
if err != nil {
return i, false, fmt.Errorf("error in validating profile: %v", err)
}
}
// Reject user-initiated modifications of Spec fields like ID and NetworkMode, as NMD currently
// does not have code to react to the changes.
if apiutils.IsUserRequestCtx(ctx) {
// Prevent mode change if NIC is NOT admitted
if !admitted && updNIC.Spec.MgmtMode != curNIC.Spec.MgmtMode {
errStr := fmt.Sprintf("Management mode change not allowed for DistributedServiceCard because it is not in %s phase", cluster.DistributedServiceCardStatus_ADMITTED.String())
cl.logger.Errorf(errStr)
return i, true, fmt.Errorf(errStr)
}
// Workaround for ...
// Once the SmartNIC is admitted, disallow flipping "Spec.Admit" back to false
if admitted && updNIC.Spec.Admit == false && curNIC.Spec.Admit == true {
return i, true, fmt.Errorf("Spec.Admit cannot be changed to false once the DistributedServiceCard is admitted")
}
// For unified mode, if we are decommissioning, we need to prevent the card from rejoining, so we set admit=false
if admitted && updNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_HOST.String() && curNIC.Spec.MgmtMode == cluster.DistributedServiceCardSpec_NETWORK.String() {
updNIC.Spec.Admit = false
}
errs := cl.checkNonUserModifiableSmartNICFields(&updNIC, curNIC)
if len(errs) > 0 {
return i, true, fmt.Errorf("Modification of DistributedServiceCard object fields %s is not allowed", strings.Join(errs, ", "))
}
}
if updNIC.Spec.RoutingConfig != "" {
rtCfg := network.RoutingConfig{
ObjectMeta: api.ObjectMeta{
Name: updNIC.Spec.RoutingConfig,
},
}
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(rtCfg.MakeKey(string(apiclient.GroupNetwork))), ">", 0))
}
}
// Add a comparator for CAS
txn.AddComparator(kvstore.Compare(kvstore.WithVersion(key), "=", curNIC.ResourceVersion))
// We need to return updNIC, not i, because updNIC is a copy and we may have modified it
return updNIC, true, nil
}
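// getFieldSelector returns a field selector matching the diagnostics Module objects that belong to the given DSC (one module object per supported Naples module).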
func getFieldSelector(nic string) string {
modules := diagnostics.ListSupportedNaplesModules()
var moduleNames []string
for _, val := range modules {
moduleNames = append(moduleNames, fmt.Sprintf("%s-%s", nic, val))
}
return fmt.Sprintf("meta.name in (%s),status.category=%s", strings.Join(moduleNames, ","), diagapi.ModuleStatus_Naples.String())
}
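// verifyAllowedProfile checks whether a DSC may transition from oldProfile to newProfile; currently all transitions are allowed.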
func verifyAllowedProfile(oldProfile, newProfile cluster.DSCProfile) error {
return nil
} | random_line_split |
|
main.1.rs | extern crate brainfuck;
extern crate rand;
#[macro_use]
extern crate log;
extern crate env_logger;
mod context;
use brainfuck::parser;
use context::Context;
use std::time::{Duration, Instant};
use rand::random;
use log::LevelFilter;
use std::sync::mpsc::channel;
use std::thread;
use std::sync::{Arc, Mutex};
/*
A Vec of f64 values represents a genome; each f64 (8 bytes, one gene) encodes one instruction.
The AI program works as follows:
A genome consists of an array of f64 values.
Each gene corresponds to one instruction of the Brainf-ck programming language.
Start from a population of random genomes.
Convert each f64 into the corresponding instruction, assemble the resulting program, and execute it.
Derive a fitness score for each program from its console output and rank the programs.
Pair the best genomes using roulette-wheel selection, crossover and mutation to produce a new generation.
Repeat the process with the new generation until the target fitness score is reached.
Instruction set
Brainf-ck consists of the following instructions:
1 > increment the pointer.
2 < decrement the pointer.
3 + increment the byte at the pointer.
4 - decrement the byte at the pointer.
5 . output the byte at the pointer.
6 , read one byte of input and store it in the byte at the pointer.
7 [ if the byte at the pointer is zero, jump forward past the matching ].
8 ] jump back to the matching [ unless the byte at the pointer is zero.
*/
const MUTATION_RATE: f64 = 0.05;
const CROSSOVER_RATE: f64 = 0.80;
const INITIAL_GENOME_SIZE: usize = 100;
const NUM_ELITE: usize = 4;// number of elite genomes selected per generation
const NUM_COPIES_ELITE: usize = 1; // number of copies kept of each elite genome
const NUM_THREAD: usize = 2;// number of worker threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE;// population size
// genome
// note: Genome cannot derive Copy because Vec<f64> is not Copy; Clone is implemented manually below
pub struct Genome {
fitness: f64,
genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
fn random() -> Genome{
Genome{
fitness: 1.0,
genes: vec![random(); INITIAL_GENOME_SIZE]
}
}
/*
Mutation happens via insertion, replacement, deletion or shifting.
- Pick an index in the genome.
- Insertion: a mutated gene is inserted at that position; the remaining genes move up one index and the last gene is dropped.
- Replacement: a mutated gene is written at that position.
- Deletion: all genes from that position move down by one; a mutated gene is appended at the end of the array.
- Shifting: all genes move up or down starting from position 0. Moving up, the last gene wraps to the front; moving down, the first gene wraps to the end.
*/
fn mutate(&mut self){
for pos in 0..self.genes.len(){
if random::<f64>() < MUTATION_RATE{
//选择变异类型
let r = random::<f64>();
if r <= 0.25 {
//插入突变
let mutation_index = pos;
//变异之前备份当前位
let mut shift_bit = self.genes[mutation_index];
//在变异位设置随机数
self.genes[mutation_index] = random();
//将位向上或向下凹陷1。
let up = random::<f64>() >= 0.5;
if up{//插入并删除末尾
for i in mutation_index+1..self.length{
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}else{//插入并删除第一个
for i in (0..=mutation_index).rev(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}
}else if r <= 0.5{
//删除突变
let mutation_index = pos;
let up = random::<f64>() >= 0.5;
if up{//删除并在开头插入
for i in (1..=mutation_index).rev(){
self.genes[i] = self.genes[i-1];
}
self.genes[0] = random();
}else{//删除并在末尾插入
for i in mutation_index..self.length-1{
self.genes[i] = self.genes[i+1]
}
self.genes[self.length-1] = random();
}
}else if r <= 0.75{
//转移/旋转突变
let up = random::<f64>() >= 0.5;
if up{
// 1,2,3 => 3,1,2
let mut shift_bit = self.genes[0];
for i in 0..self.length{
if i>0{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[self.length-1];
}
}
}else{
// 1,2,3 => 2,3,1
let mut shift_bit = self.genes[self.length-1];
for i in (0..=self.length-1).rev(){
if i<self.length-1{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[0];
}
}
}
}else{
//替换突变
self.genes[pos] = random();
}
}
}
}
fn crossover(&self, genome:&Genome) -> (Genome, Genome){
if random::<f64>()>CROSSOVER_RATE{
return (self.clone(), genome.clone());
}
let pos = (random::<f64>()*self.length() as f64) as usize;
// children start as zero-filled genomes of the same length so they can be indexed below
let mut child1 = Genome{ fitness: 1.0, genes: vec![0.0; self.length()] };
let mut child2 = Genome{ fitness: 1.0, genes: vec![0.0; self.length()] };
for i in 0..self.length(){
if i<pos{
child1.genes[i] = self.genes[i];
child2.genes[i] = genome.genes[i];
}else{
child1.genes[i] = genome.genes[i];
child2.genes[i] = self.genes[i];
}
}
(child1, child2)
}
fn to_bf(&self) -> String {
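// map each gene in [0,1] onto one Brainf-ck instruction using eight equal-width bins; ',' is disabled and emits '.' instead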
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//bf.push(',');
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
// generate a random number between 0 and the total fitness
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
// if the accumulated fitness exceeds the random slice, select the chromosome at this position
if fitness_total > slice{
selected_pos = i;
break;
}
}
selected_pos
}
// advance to the next generation
fn epoch(&mut self){
// compute the total fitness
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
// sort by fitness score
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
// the new population
let mut new_pop = vec![];
// elitism: carry the best genomes over unchanged
for i in 0..NUM_ELITE{
for _ in 0. |
new_pop.push(self.populations[i].clone());
}
}
let (tx, rx) = channel();
let elite_count = new_pop.len();
let new_pop = Arc::new(Mutex::new(new_pop));
for tid in 0..NUM_THREAD{
let target = self.target.clone();
let child_count = (POPULATION_SIZE-elite_count)/NUM_THREAD;
let mut parents = vec![];
for _ in 0..child_count/2{
// select two parents for each pair of children
parents.push(self.populations[self.roulette_selection()].clone());
parents.push(self.populations[self.roulette_selection()].clone());
}
let tx = tx.clone();
let new_pop_clone = new_pop.clone();
thread::spawn(move || {
let mut childs = vec![];
//println!("{}.start", tid);
// two children are produced per iteration
while childs.len()<child_count as usize{
let mum = parents.pop().unwrap();
let dad = parents.pop().unwrap();
// crossover
let (mut baby1, mut baby2) = mum.crossover(&dad);
// mutation
baby1.mutate();
baby2.mutate();
// compute the fitness scores
baby1.calc_fitness(&target);
baby2.calc_fitness(&target);
childs.push(baby1);
childs.push(baby2);
}
//println!("{}.end", tid);
let mut new_pop = new_pop_clone.lock().unwrap();
new_pop.append(&mut childs);
//println!("{}.new_pop.len()={}", tid, new_pop.len());
if new_pop.len() == POPULATION_SIZE{
let mut pops = vec![];
pops.append(&mut new_pop);
tx.send(pops).unwrap();
}
});
}
// replace the old population with the new one
self.populations.clear();
self.populations.append(&mut rx.recv().unwrap());
self.generations += 1;
}
}
fn main() {
env_logger::Builder::from_default_env()
//.default_format_timestamp(false)
.filter_level(LevelFilter::Info)
.init();
//let hello_world = include_bytes!("../hello_world.bf");
//let program = b"++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.";
let mut ga = GA::new("Hello");
for _ in 0..50000{
ga.epoch();
}
// for i in 0..ga.populations.len(){
// ga.populations[i].calc_fitness("Hello World!");
// }
// let dad = ga.populations[ga.roulette_selection()];
// let mum = ga.populations[ga.roulette_selection()];
// let (mut child1, mut child2) = dad.crossover(&mum);
// child1.calc_fitness("Hello World!");
// child2.calc_fitness("Hello World!");
println!("end.");
//let now = Instant::now();
// let fitness = ga.calc_fitness(program, "Hello World!");
// println!("耗时:{}ms", duration_to_milis(&now.elapsed()));
// println!("fitness:{}", fitness);
}
pub fn duration_to_milis(duration: &Duration) -> f64 {
duration.as_secs() as f64 * 1000.0 + duration.subsec_nanos() as f64 / 1_000_000.0
}
| .NUM_COPIES_ELITE{ | identifier_name |
main.1.rs | extern crate brainfuck;
extern crate rand;
#[macro_use]
extern crate log;
extern crate env_logger;
mod context;
use brainfuck::parser;
use context::Context;
use std::time::{Duration, Instant};
use rand::random;
use log::LevelFilter;
use std::sync::mpsc::channel;
use std::thread;
use std::sync::{Arc, Mutex};
/*
A Vec of f64 values represents a genome; each f64 (8 bytes, one gene) encodes one instruction.
The AI program works as follows:
A genome consists of an array of f64 values.
Each gene corresponds to one instruction of the Brainf-ck programming language.
Start from a population of random genomes.
Convert each f64 into the corresponding instruction, assemble the resulting program, and execute it.
Derive a fitness score for each program from its console output and rank the programs.
Pair the best genomes using roulette-wheel selection, crossover and mutation to produce a new generation.
Repeat the process with the new generation until the target fitness score is reached.
Instruction set
Brainf-ck consists of the following instructions:
1 > increment the pointer.
2 < decrement the pointer.
3 + increment the byte at the pointer.
4 - decrement the byte at the pointer.
5 . output the byte at the pointer.
6 , read one byte of input and store it in the byte at the pointer.
7 [ if the byte at the pointer is zero, jump forward past the matching ].
8 ] jump back to the matching [ unless the byte at the pointer is zero.
*/
const MUTATION_RATE: f64 = 0.05;
const CROSSOVER_RATE: f64 = 0.80;
const INITIAL_GENOME_SIZE: usize = 100;
const NUM_ELITE: usize = 4;// number of elite genomes selected per generation
const NUM_COPIES_ELITE: usize = 1; // number of copies kept of each elite genome
const NUM_THREAD: usize = 2;// number of worker threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE;// population size
// genome
// note: Genome cannot derive Copy because Vec<f64> is not Copy; Clone is implemented manually below
pub struct Genome {
fitness: f64,
genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
fn random() -> Genome{
Genome{
fitness: 1.0,
genes: vec![random(); INITIAL_GENOME_SIZE]
}
}
/*
Mutation happens via insertion, replacement, deletion or shifting.
- Pick an index in the genome.
- Insertion: a mutated gene is inserted at that position; the remaining genes move up one index and the last gene is dropped.
- Replacement: a mutated gene is written at that position.
- Deletion: all genes from that position move down by one; a mutated gene is appended at the end of the array.
- Shifting: all genes move up or down starting from position 0. Moving up, the last gene wraps to the front; moving down, the first gene wraps to the end.
*/
fn mutate(&mut self){
for pos in 0..self.genes.len(){
if random::<f64>() < MUTATION_RATE{
//选择变异类型
let r = random::<f64>();
if r <= 0.25 {
//插入突变
let mutation_index = pos;
//变异之前备份当前位
let mut shift_bit = self.genes[mutation_index];
//在变异位设置随机数
self.genes[mutation_index] = random();
//将位向上或向下凹陷1。
let up = random::<f64>() >= 0.5;
if up{//插入并删除末尾
for i in mutation_index+1..self.length{
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}else{//插入并删除第一个
for i in (0..=mutation_index).rev(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}
}else if r <= 0.5{
//删除突变
let mutation_index = pos;
let up = random::<f64>() >= 0.5;
if up{//删除并在开头插入
for i in (1..=mutation_index).rev(){
self.genes[i] = self.genes[i-1];
}
self.genes[0] = random();
}else{//删除并在末尾插入
for i in mutation_index..self.length-1{
self.genes[i] = self.genes[i+1]
}
self.genes[self.length-1] = random();
}
}else if r <= 0.75{
//转移/旋转突变
let up = random::<f64>() >= 0.5;
if up{
// 1,2,3 => 3,1,2
let mut shift_bit = self.genes[0];
for i in 0..self.length{
if i>0{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[self.length-1];
}
}
}else{
// 1,2,3 => 2,3,1
let mut shift_bit = self.genes[self.length-1];
for i in (0..=self.length-1).rev(){
if i<self.length-1{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[0];
}
}
}
}else{
//替换突变
self.genes[pos] = random();
}
}
}
}
fn crossover(&self, genome:&Genome) -> (Genome, Genome){
if random::<f64>()>CROSSOVER_RATE{
return (self.clone(), genome.clone());
}
let pos = (random::<f64>()*self.length() as f64) as usize;
// children start as zero-filled genomes of the same length so they can be indexed below
let mut child1 = Genome{ fitness: 1.0, genes: vec![0.0; self.length()] };
let mut child2 = Genome{ fitness: 1.0, genes: vec![0.0; self.length()] };
for i in 0..self.length(){
if i<pos{
child1.genes[i] = self.genes[i];
child2.genes[i] = genome.genes[i];
}else{
child1.genes[i] = genome.genes[i];
child2.genes[i] = self.genes[i];
}
}
(child1, child2)
}
fn to_bf(&self) -> String {
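// map each gene in [0,1] onto one Brainf-ck instruction using eight equal-width bins; ',' is disabled and emits '.' instead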
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//bf.push(',');
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
// generate a random number between 0 and the total fitness
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
// if the accumulated fitness exceeds the random slice, select the chromosome at this position
if fitness_total > slice{
selected_pos = i;
break;
}
}
selected_pos
}
// advance to the next generation
fn epoch(&mut self){
// compute the total fitness
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
// sort by fitness score
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
// the new population
let mut new_pop = vec![];
// elitism: carry the best genomes over unchanged
for i in 0..NUM_ELITE{
for _ in 0..NUM_COPIES_ELITE{
| oulette_selection()]);
parents.push(self.populations[self.roulette_selection()].clone());
}
let tx = tx.clone();
let new_pop_clone = new_pop.clone();
thread::spawn(move || {
let mut childs = vec![];
//println!("{}.start", tid);
// two children are produced per iteration
while childs.len()<child_count as usize{
let mum = parents.pop().unwrap();
let dad = parents.pop().unwrap();
// crossover
let (mut baby1, mut baby2) = mum.crossover(&dad);
// mutation
baby1.mutate();
baby2.mutate();
// compute the fitness scores
baby1.calc_fitness(&target);
baby2.calc_fitness(&target);
childs.push(baby1);
childs.push(baby2);
}
//println!("{}.end", tid);
let mut new_pop = new_pop_clone.lock().unwrap();
new_pop.append(&mut childs);
//println!("{}.new_pop.len()={}", tid, new_pop.len());
if new_pop.len() == POPULATION_SIZE{
let mut pops = vec![];
pops.append(&mut new_pop);
tx.send(pops).unwrap();
}
});
}
// replace the old population with the new one
self.populations.clear();
self.populations.append(&mut rx.recv().unwrap());
self.generations += 1;
}
}
fn main() {
env_logger::Builder::from_default_env()
//.default_format_timestamp(false)
.filter_level(LevelFilter::Info)
.init();
//let hello_world = include_bytes!("../hello_world.bf");
//let program = b"++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.";
let mut ga = GA::new("Hello");
for _ in 0..50000{
ga.epoch();
}
// for i in 0..ga.populations.len(){
// ga.populations[i].calc_fitness("Hello World!");
// }
// let dad = ga.populations[ga.roulette_selection()];
// let mum = ga.populations[ga.roulette_selection()];
// let (mut child1, mut child2) = dad.crossover(&mum);
// child1.calc_fitness("Hello World!");
// child2.calc_fitness("Hello World!");
println!("end.");
//let now = Instant::now();
// let fitness = ga.calc_fitness(program, "Hello World!");
// println!("耗时:{}ms", duration_to_milis(&now.elapsed()));
// println!("fitness:{}", fitness);
}
pub fn duration_to_milis(duration: &Duration) -> f64 {
duration.as_secs() as f64 * 1000.0 + duration.subsec_nanos() as f64 / 1_000_000.0
}
| new_pop.push(self.populations[i]);
}
}
let (tx, rx) = channel();
let elite_count = new_pop.len();
let new_pop = Arc::new(Mutex::new(new_pop));
for tid in 0..NUM_THREAD{
let target = self.target.clone();
let child_count = (POPULATION_SIZE-elite_count)/NUM_THREAD;
let mut parents = vec![];
for _ in 0..child_count/2{
// select two parents for each pair of children
parents.push(self.populations[self.r | identifier_body |
main.1.rs | extern crate brainfuck;
extern crate rand;
#[macro_use]
extern crate log;
extern crate env_logger;
mod context;
use brainfuck::parser;
use context::Context;
use std::time::{Duration, Instant};
use rand::random;
use log::LevelFilter;
use std::sync::mpsc::channel;
use std::thread;
use std::sync::{Arc, Mutex};
/*
A Vec of f64 values represents a genome; each f64 (8 bytes, one gene) encodes one instruction.
The AI program works as follows:
A genome consists of an array of f64 values.
Each gene corresponds to one instruction of the Brainf-ck programming language.
Start from a population of random genomes.
Convert each f64 into the corresponding instruction, assemble the resulting program, and execute it.
Derive a fitness score for each program from its console output and rank the programs.
Pair the best genomes using roulette-wheel selection, crossover and mutation to produce a new generation.
Repeat the process with the new generation until the target fitness score is reached.
Instruction set
Brainf-ck consists of the following instructions:
1 > increment the pointer.
2 < decrement the pointer.
3 + increment the byte at the pointer.
4 - decrement the byte at the pointer.
5 . output the byte at the pointer.
6 , read one byte of input and store it in the byte at the pointer.
7 [ if the byte at the pointer is zero, jump forward past the matching ].
8 ] jump back to the matching [ unless the byte at the pointer is zero.
*/
const MUTATION_RATE: f64 = 0.05;
const CROSSOVER_RATE: f64 = 0.80;
const INITIAL_GENOME_SIZE: usize = 100;
const NUM_ELITE: usize = 4;// number of elite genomes selected per generation
const NUM_COPIES_ELITE: usize = 1; // number of copies kept of each elite genome
const NUM_THREAD: usize = 2;// number of worker threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE;// population size
// genome
// note: Genome cannot derive Copy because Vec<f64> is not Copy; Clone is implemented manually below
pub struct Genome {
fitness: f64,
genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
fn random() -> Genome{
Genome{
fitness: 1.0,
genes: vec![random(); INITIAL_GENOME_SIZE]
}
}
/*
Mutation happens via insertion, replacement, deletion or shifting.
- Pick an index in the genome.
- Insertion: a mutated gene is inserted at that position; the remaining genes move up one index and the last gene is dropped.
- Replacement: a mutated gene is written at that position.
- Deletion: all genes from that position move down by one; a mutated gene is appended at the end of the array.
- Shifting: all genes move up or down starting from position 0. Moving up, the last gene wraps to the front; moving down, the first gene wraps to the end.
*/
fn mutate(&mut self){
for pos in 0..self.genes.len(){
if random::<f64>() < MUTATION_RATE{
//选择变异类型
let r = random::<f64>();
if r <= 0.25 {
//插入突变
let mutation_index = pos;
//变异之前备份当前位
let mut shift_bit = self.genes[mutation_index];
//在变异位设置随机数
self.genes[mutation_index] = random();
//将位向上或向下凹陷1。
let up = random::<f64>() >= 0.5;
if up{//插入并删除末尾
for i in mutation_index+1..self.length{
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}else{//插入并删除第一个
for i in (0..=mutation_index).rev(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}
}else if r <= 0.5{
//删除突变
let mutation_index = pos;
let up = random::<f64>() >= 0.5;
if up{//删除并在开头插入
for i in (1..=mutation_index).rev(){
self.genes[i] = self.genes[i-1];
}
self.genes[0] = random();
}else{//删除并在末尾插入
for i in mutation_index..self.length-1{
self.genes[i] = self.genes[i+1]
}
self.genes[self.length-1] = random();
}
}else if r <= 0.75{
//转移/旋转突变
let up = random::<f64>() >= 0.5;
if up{
// 1,2,3 => 3,1,2
let mut shift_bit = self.genes[0];
for i in 0..self.length{ | self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[self.length()-1];
}
}
}else{
// 1,2,3 => 2,3,1
let mut shift_bit = self.genes[self.length()-1];
for i in (0..=self.length()-1).rev(){
if i<self.length()-1{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[0];
}
}
}
}else{
// replacement mutation
self.genes[pos] = random();
}
}
}
}
fn crossover(&self, genome:&Genome) -> (Genome, Genome){
if random::<f64>()>CROSSOVER_RATE{
return (self.clone(), genome.clone());
}
let pos = (random::<f64>()*self.length() as f64) as usize;
// children start as zero-filled genomes of the same length so they can be indexed below
let mut child1 = Genome{ fitness: 1.0, genes: vec![0.0; self.length()] };
let mut child2 = Genome{ fitness: 1.0, genes: vec![0.0; self.length()] };
for i in 0..self.length(){
if i<pos{
child1.genes[i] = self.genes[i];
child2.genes[i] = genome.genes[i];
}else{
child1.genes[i] = genome.genes[i];
child2.genes[i] = self.genes[i];
}
}
(child1, child2)
}
fn to_bf(&self) -> String {
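// map each gene in [0,1] onto one Brainf-ck instruction using eight equal-width bins; ',' is disabled and emits '.' instead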
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//bf.push(',');
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
// generate a random number between 0 and the total fitness
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
// if the accumulated fitness exceeds the random slice, select the chromosome at this position
if fitness_total > slice{
selected_pos = i;
break;
}
}
selected_pos
}
// advance to the next generation
fn epoch(&mut self){
// compute the total fitness
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
// sort by fitness score
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
// the new population
let mut new_pop = vec![];
// elitism: carry the best genomes over unchanged
for i in 0..NUM_ELITE{
for _ in 0..NUM_COPIES_ELITE{
new_pop.push(self.populations[i].clone());
}
}
let (tx, rx) = channel();
let elite_count = new_pop.len();
let new_pop = Arc::new(Mutex::new(new_pop));
for tid in 0..NUM_THREAD{
let target = self.target.clone();
let child_count = (POPULATION_SIZE-elite_count)/NUM_THREAD;
let mut parents = vec![];
for _ in 0..child_count/2{
// select two parents for each pair of children
parents.push(self.populations[self.roulette_selection()].clone());
parents.push(self.populations[self.roulette_selection()].clone());
}
let tx = tx.clone();
let new_pop_clone = new_pop.clone();
thread::spawn(move || {
let mut childs = vec![];
//println!("{}.start", tid);
// two children are produced per iteration
while childs.len()<child_count as usize{
let mum = parents.pop().unwrap();
let dad = parents.pop().unwrap();
// crossover
let (mut baby1, mut baby2) = mum.crossover(&dad);
// mutation
baby1.mutate();
baby2.mutate();
// compute the fitness scores
baby1.calc_fitness(&target);
baby2.calc_fitness(&target);
childs.push(baby1);
childs.push(baby2);
}
//println!("{}.end", tid);
let mut new_pop = new_pop_clone.lock().unwrap();
new_pop.append(&mut childs);
//println!("{}.new_pop.len()={}", tid, new_pop.len());
if new_pop.len() == POPULATION_SIZE{
let mut pops = vec![];
pops.append(&mut new_pop);
tx.send(pops).unwrap();
}
});
}
// replace the old population with the new one
self.populations.clear();
self.populations.append(&mut rx.recv().unwrap());
self.generations += 1;
}
}
fn main() {
env_logger::Builder::from_default_env()
//.default_format_timestamp(false)
.filter_level(LevelFilter::Info)
.init();
//let hello_world = include_bytes!("../hello_world.bf");
//let program = b"++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.";
let mut ga = GA::new("Hello");
for _ in 0..50000{
ga.epoch();
}
// for i in 0..ga.populations.len(){
// ga.populations[i].calc_fitness("Hello World!");
// }
// let dad = ga.populations[ga.roulette_selection()];
// let mum = ga.populations[ga.roulette_selection()];
// let (mut child1, mut child2) = dad.crossover(&mum);
// child1.calc_fitness("Hello World!");
// child2.calc_fitness("Hello World!");
println!("end.");
//let now = Instant::now();
// let fitness = ga.calc_fitness(program, "Hello World!");
// println!("耗时:{}ms", duration_to_milis(&now.elapsed()));
// println!("fitness:{}", fitness);
}
pub fn duration_to_milis(duration: &Duration) -> f64 {
duration.as_secs() as f64 * 1000.0 + duration.subsec_nanos() as f64 / 1_000_000.0
} | if i>0{
let temp = self.genes[i]; | random_line_split |
main.1.rs | extern crate brainfuck;
extern crate rand;
#[macro_use]
extern crate log;
extern crate env_logger;
mod context;
use brainfuck::parser;
use context::Context;
use std::time::{Duration, Instant};
use rand::random;
use log::LevelFilter;
use std::sync::mpsc::channel;
use std::thread;
use std::sync::{Arc, Mutex};
/*
A Vec of f64 values represents a genome; each f64 (8 bytes, one gene) encodes one instruction.
The AI program works as follows:
A genome consists of an array of f64 values.
Each gene corresponds to one instruction of the Brainf-ck programming language.
Start from a population of random genomes.
Convert each f64 into the corresponding instruction, assemble the resulting program, and execute it.
Derive a fitness score for each program from its console output and rank the programs.
Pair the best genomes using roulette-wheel selection, crossover and mutation to produce a new generation.
Repeat the process with the new generation until the target fitness score is reached.
Instruction set
Brainf-ck consists of the following instructions:
1 > increment the pointer.
2 < decrement the pointer.
3 + increment the byte at the pointer.
4 - decrement the byte at the pointer.
5 . output the byte at the pointer.
6 , read one byte of input and store it in the byte at the pointer.
7 [ if the byte at the pointer is zero, jump forward past the matching ].
8 ] jump back to the matching [ unless the byte at the pointer is zero.
*/
const MUTATION_RATE: f64 = 0.05;
const CROSSOVER_RATE: f64 = 0.80;
const INITIAL_GENOME_SIZE: usize = 100;
const NUM_ELITE: usize = 4;// number of elite genomes selected per generation
const NUM_COPIES_ELITE: usize = 1; // number of copies kept of each elite genome
const NUM_THREAD: usize = 2;// number of worker threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE;// population size
// genome
// note: Genome cannot derive Copy because Vec<f64> is not Copy; Clone is implemented manually below
pub struct Genome {
fitness: f64,
genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
fn random() -> Genome{
Genome{
fitness: 1.0,
genes: vec![random(); INITIAL_GENOME_SIZE]
}
}
/*
Mutation happens via insertion, replacement, deletion or shifting.
- Pick an index in the genome.
- Insertion: a mutated gene is inserted at that position; the remaining genes move up one index and the last gene is dropped.
- Replacement: a mutated gene is written at that position.
- Deletion: all genes from that position move down by one; a mutated gene is appended at the end of the array.
- Shifting: all genes move up or down starting from position 0. Moving up, the last gene wraps to the front; moving down, the first gene wraps to the end.
*/
fn mutate(&mut self){
for pos in 0..self.genes.len(){
if random::<f64>() < MUTATION_RATE{
//选择变异类型
let r = random::<f64>();
if r <= 0.25 {
//插入突变
let mutation_index = pos;
//变异之前备份当前位
let mut shift_bit = self.genes[mutation_index];
//在变异位设置随机数
self.genes[mutation_index] = random();
//将位向上或向下凹陷1。
let up = random::<f64>() >= 0.5;
if up{//插入并删除末尾
for i in mutation_index+1..self.length{
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}else{//插入并删除第一个
for i in (0..=mutation_index).rev(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}
}else if r <= 0.5{
//删除突变
let mutation_index = pos;
let up = random::<f64>() >= 0.5;
if up{//删除并在开头插入
for i in (1..=mutation_index).rev(){
self.genes[i] = self.genes[i-1];
}
self.genes[0] = random();
}else{//删除并在末尾插入
for i in mutation_index..self.length-1{
self.genes[i] = self.genes[i+1]
}
self.genes[self.length-1] = random();
}
}else if r <= 0.75{
//转移/旋转突变
let up = random::<f64>() >= 0.5;
if up{
// 1,2,3 => 3,1,2
let mut shift_bit = self.genes[0];
for i in 0..self.length{
if i>0{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[self.length-1];
}
}
}else{
// 1,2,3 => 2,3,1
let mut shift_bit = self.genes[self.length-1];
for i in (0..=self.length-1).rev(){
if i<self.length-1{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[0];
}
}
}
}else{
//替换突变
self.genes[pos] = random();
}
}
}
}
fn crossover(&self, genome:&Genome) -> (Genome, Genome){
if random::<f64>()>CROSSOVER_RATE{
return (self.clone(), genome.clone());
}
let pos = (random::<f64>()*self.length() as f64) as usize;
// children start as zero-filled genomes of the same length so they can be indexed below
let mut child1 = Genome{ fitness: 1.0, genes: vec![0.0; self.length()] };
let mut child2 = Genome{ fitness: 1.0, genes: vec![0.0; self.length()] };
for i in 0..self.length(){
if i<pos{
child1.genes[i] = self.genes[i];
child2.genes[i] = genome.genes[i];
}else{
child1.genes[i] = genome.genes[i];
child2.genes[i] = self.genes[i];
}
}
(child1, child2)
}
fn to_bf(&self) -> String {
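// map each gene in [0,1] onto one Brainf-ck instruction using eight equal-width bins; ',' is disabled and emits '.' instead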
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//bf.push(',');
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
// generate a random number between 0 and the total fitness
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
// if the accumulated fitness exceeds the random slice, select the chromosome at this position
if fit | selected_pos
}
// advance to the next generation
fn epoch(&mut self){
// compute the total fitness
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
// sort by fitness score
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
// the new population
let mut new_pop = vec![];
// elitism: carry the best genomes over unchanged
for i in 0..NUM_ELITE{
for _ in 0..NUM_COPIES_ELITE{
new_pop.push(self.populations[i].clone());
}
}
let (tx, rx) = channel();
let elite_count = new_pop.len();
let new_pop = Arc::new(Mutex::new(new_pop));
for tid in 0..NUM_THREAD{
let target = self.target.clone();
let child_count = (POPULATION_SIZE-elite_count)/NUM_THREAD;
let mut parents = vec![];
for _ in 0..child_count/2{
// select two parents for each pair of children
parents.push(self.populations[self.roulette_selection()].clone());
parents.push(self.populations[self.roulette_selection()].clone());
}
let tx = tx.clone();
let new_pop_clone = new_pop.clone();
thread::spawn(move || {
let mut childs = vec![];
//println!("{}.start", tid);
// two children are produced per iteration
while childs.len()<child_count as usize{
let mum = parents.pop().unwrap();
let dad = parents.pop().unwrap();
// crossover
let (mut baby1, mut baby2) = mum.crossover(&dad);
// mutation
baby1.mutate();
baby2.mutate();
// compute the fitness scores
baby1.calc_fitness(&target);
baby2.calc_fitness(&target);
childs.push(baby1);
childs.push(baby2);
}
//println!("{}.end", tid);
let mut new_pop = new_pop_clone.lock().unwrap();
new_pop.append(&mut childs);
//println!("{}.new_pop.len()={}", tid, new_pop.len());
if new_pop.len() == POPULATION_SIZE{
let mut pops = vec![];
pops.append(&mut new_pop);
tx.send(pops).unwrap();
}
});
}
// replace the old population with the new one
self.populations.clear();
self.populations.append(&mut rx.recv().unwrap());
self.generations += 1;
}
}
fn main() {
env_logger::Builder::from_default_env()
//.default_format_timestamp(false)
.filter_level(LevelFilter::Info)
.init();
//let hello_world = include_bytes!("../hello_world.bf");
//let program = b"++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.";
let mut ga = GA::new("Hello");
for _ in 0..50000{
ga.epoch();
}
// for i in 0..ga.populations.len(){
// ga.populations[i].calc_fitness("Hello World!");
// }
// let dad = ga.populations[ga.roulette_selection()];
// let mum = ga.populations[ga.roulette_selection()];
// let (mut child1, mut child2) = dad.crossover(&mum);
// child1.calc_fitness("Hello World!");
// child2.calc_fitness("Hello World!");
println!("end.");
//let now = Instant::now();
// let fitness = ga.calc_fitness(program, "Hello World!");
// println!("耗时:{}ms", duration_to_milis(&now.elapsed()));
// println!("fitness:{}", fitness);
}
pub fn duration_to_milis(duration: &Duration) -> f64 {
duration.as_secs() as f64 * 1000.0 + duration.subsec_nanos() as f64 / 1_000_000.0
}
| ness_total > slice{
selected_pos = i;
break;
}
}
| conditional_block |
functionsSqueeze.py | import numpy as np
import matplotlib.pyplot as plt
import qutip
import scipy.special as spe
# import multiprocessing as mp
from joblib import Parallel, delayed
from qutip import *
import time
def wQP(t, args):
"""calculates and returns the modulated frequency like in "Lit early universe"
t time at which the frequency is calculated
args: a list {w0, dwQ, dtQ, dwP, dtP, delay} or a dictionary with the following keys:
w0 the unmodulated frequency
dwQ (strength) and dtQ (duration) of a gaussian shaped quench centered around t=0
dwP (strength) and dtP (duration) of a parametric modulation of frequency 2 w0 which starts at t = delay
dtP should be an integer multiple of pi/(2 w0) to avoid a discontinuity at t=delay+dtP
units: all frequencies are circular frequencies with unit MHz, times have unit \mu s"""
if type(args) == list:
w0, dwQ, dtQ, dwP, dtP, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dwQ, dtQ, dwP, dtP, delay = args['w0'], args['dwQ'], args['dtQ'], args['dwP'], args['dtP'], args['delay']
else:
return("wrong input form for args, list or dict")
# freq += dwQ/(np.sqrt(2*np.pi)*dtQ)*np.exp(-0.5*(t/dtQ)**2)
freq = w0 + dwQ*np.exp(-0.5*(t/dtQ)**2) # quench
freq += dwP*np.sin(2*w0*(t-delay))*np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1) # parametric
return(freq)
def wQPdot(t, args):
"""calculates the time derivative of w(t, args) at time t
check help(wQP) for further information on args"""
if type(args) == list:
w0, dwQ, dtQ, dwP, dtP, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dwQ, dtQ, dwP, dtP, delay = args['w0'], args['dwQ'], args['dtQ'], args['dwP'], args['dtP'], args['delay']
else:
return("wrong input form for args, list or dict")
# freqD = - dwQ/(np.sqrt(2*np.pi)*dtQ)*np.exp(-0.5*(t/dtQ)**2) * t/(dtQ**2)
freqD = - dwQ*np.exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) # quench
freqD += 2*w0*dwP*np.cos(2*w0*(t-delay))*np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1) # parametric
return(freqD)
def wQQ(t, args):
|
def wQQdot(t, args):
"""calculates the time derivative of wQQ(t, args) at time t
check help(wQQ) for further information on args"""
if type(args) == list:
w0, dw1, dt1, dw2, dt2, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
return("wrong input form for args, list or dict")
freqD = - dw1*np.exp(-0.5*(t/dt1)**2) * t/(dt1**2)
freqD += - dw2*np.exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)
return(freqD)
# defining the hamiltonian of the phonon evolution for vaiable w(t)
def H(t, args):
"""calculates the hamiltonian of a harmonic oscillator with modulated frequency
has an additional term which takes a force proportional to 1/w^2 into account
args (dictonary which carries all arguments except t):
t time at which the Hamiltonian is calculated (unit \mu s)
n dimension of the hilbert space (or cutoff dimension for the numerical calculations)
f0 proportionality constant of the additional force (unit N MHz^2)
omega(t, omegaArgs) frequency, modulated in time, described by the list of arguments omegaArgs
omegaDt(t, omegaArgs) time derivative of the frequency
=> in args you need: n, f0, omega, omegaDt, omegaArgs
This form of input is necessary to use H in further calculations (mesolve)"""
f0 = args['f0']
n = args['n']
omega = args['omega']
omegaDt = args['omegaDt']
omegaArgs = args['omegaArgs']
ad = create(n)
a = destroy(n)
# H0, for the first two terms see Silveri 2017 Quantum_systems_under_frequency_modulation
ham = omega(t, omegaArgs)*(ad*a+0.5*qeye(n))
# additional term because of w(t) not constant
ham += 1j/4*omegaDt(t, omegaArgs)/omega(t, omegaArgs)*(a*a-ad*ad)
# Force term (9**10^-9 = x0, extent of ground state wave function), see Wittmann diss
# with compensation term -f0/w0^2 (e.g. no force in the case of no modulation)
ham += 9*(f0/(omega(t, omegaArgs)**2) - f0/(omegaArgs[0]**2))*(ad + a)
# ham += (9*10**-9)/(10**6)*(f0/(omega(t, omegaArgs)**2))*(ad + a)
return(ham)
def eval_H_QP(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation is a combination of a quench and a parametric modulation
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dwQ, dtQ: strength and duration of a gaussian shaped quench centered around t=0
dwP, dtP, delay: strength and duration of a parametric modulation of frequency 2 w0 which starts at t = delay
dtP shoud be an integer multiple of pi/(2 w0) to avoid uncontinuity at t=delay+dtP
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQP = 'w0 + dwQ*exp(-0.5*(t/dtQ)**2) + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)
# time derivative of the time-dependent frequency
strDWQP = '- dwQ*exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) + (2*w0*dwP*cos(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + 2*w0*dwP*cos(2*w0*t) if t > delay and t < delay+dtP else 0)
# *np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1)
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQP]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQP + ')/(' + strWQP + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQP + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
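# Example usage (sketch; the numbers below are illustrative assumptions, not reference values):
# n = 60
# psi0 = coherent(n, 1.0)
# times = np.linspace(-2, 10, 400)
# args = {'n': n, 'w0': 2*np.pi*2.0, 'dwQ': 2*np.pi*0.5, 'dtQ': 0.1,
#         'dwP': 2*np.pi*0.05, 'dtP': 5.0, 'delay': 1.0, 'f0': 0.0}
# states = eval_H_QP(psi0, times, args).states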
def eval_H_QQ(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation consists of two gaussian quenches
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dw1, dt1: strength and duration of the first quench centered around t=0
dw2, dt2, delay: strength and duration of the second quench centered around t=delay
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQQ = 'w0 + dw1*exp(-0.5*(t/dt1)**2) + dw2*exp(-0.5*((t-delay)/dt2)**2)'
# time derivative of the time-dependent frequency
strDWQQ = '- dw1*exp(-0.5*(t/dt1)**2) * t/(dt1**2) - dw2*exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)'
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQQ]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQQ + ')/(' + strWQQ + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQQ + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
def getParams(psi, calculate_nT = True, order_SD = False):
"""calculates for a given state psi (assumes that the thermal excitation is close to the vacuum):
alpha: the coherent displacement parameter
xi: the squeezing parameter
nBar: the mean photon number
nT: the photon number due to the thermal excitation DM_t
calculate_nT: bool, decides if nT will be calculated (takes time), default set to True
if calculate_nT = False, xi is only correct modulo complex conjugation, nT is set to 0!!!
order_SD: bool, changes order in displacement and squeezing
if True: assumes that psi can be written as DM_psi = S(xi) D(alpha) DM_t D(alpha).dag() S(xi).dag()
nT will automatically be calculated, regardless calculate_nT (is needed for the commutation of S and D)
if False: assumes that psi can be written as DM_psi = D(alpha) S(xi) DM_t S(xi).dag() D(alpha).dag()
returns alpha, xi, nBar, nT"""
n = psi.dims[0][0]
ad = create(n)
a = destroy(n)
x = (ad + a)
p = 1j*(ad - a)
xV = variance(x, psi)
pV = variance(p, psi)
# calculated by hand, assuming t = 0 (e.g. DM_t = |0><0|)
xiR = np.arcsinh(0.5*np.sqrt(xV + pV - 2 +0j)) # avoid NANs
if (np.cosh(xiR)*np.sinh(xiR))==0:
xiT1 = 0
else:
xiT1 = 0.25*(pV - xV)/(np.cosh(xiR)*np.sinh(xiR))
# cos is symmetric about x=0, therefore the inverse is +/- arccos(...)
# xiT = np.sign(xiT1)*np.arccos(xiT1)
xiT = np.sign(xiT1)*np.arccos(xiT1)
xi = xiR*np.exp(1j*xiT)
# alpha = 0.5*np.sqrt(xV + pV)
alpha = expect(a, psi)
# print(alpha)
nBar = np.abs(expect(num(n), psi))
# print(nBar)
# calculates the thermal excitation (assuming DM_psi = D S DM_t S.dag() D.dag())
if calculate_nT or order_SD:
psiT = squeeze(n, xi).dag()*displace(n, alpha).dag()*psi*displace(n, alpha)*squeeze(n, xi)
nT = np.abs(expect(num(n), psiT))
xic = np.conj(xi)
psiTc = squeeze(n, xic).dag()*displace(n, alpha).dag()*psi*displace(n, alpha)*squeeze(n, xic)
nTc = np.abs(expect(num(n), psiTc))
if nTc < nT:
nT, xi = nTc, xic
# formula used to commute D and S: https://en.wikipedia.org/wiki/Squeeze_operator
if order_SD:
alpha = alpha*np.cosh(xiR) + np.conj(alpha)*xi/xiR*np.sinh(xiR)
return(alpha, xi, nBar, nT)
else:
return(alpha, xi, nBar, 0)
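# Example usage (sketch; illustrative values): extract the parameters of a displaced squeezed vacuum
# psi = displace(40, 1.5)*squeeze(40, 0.3)*basis(40, 0)
# alpha, xi, nBar, nT = getParams(psi)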
def plotResults(times, result, args, calculate_nT = True, order_SD = False, nSkipp = 1, showProgress = False):
"""plots the development of the coherent displacement alpha,
squeezing parameter r, mean excitation number nBar, thermal excitation nT (see help(getParams))
together with the time dependant frequency and the force
arguments:
times: list of times for which the values should be calculated
results: list of states (as returned from mesolve) corresponding to times
args: arguments given to H in the calculation of the dynamics
calculate_nT = True: bool, if nT should be calculated as well (takes time)
nSkipp = 1: number of states that should be skipped between each plotted point (speeds it up)"""
t1 = time.time()
times = times[::nSkipp]
if 'omegaArgs' in args:
wList = args['omega'](times, args['omegaArgs'])
fList = args['f0']/wList**2 - args['f0']/args['omegaArgs'][0]**2
else:
wList = args['omega'](times, args)
fList = args['f0']/wList**2 - args['f0']/args['w0']**2
masterList = [[],[],[],[]]
nStates = len(result.states[::nSkipp])
progress = 0
for psi in result.states[::nSkipp]:
alpha, xi, nBar, nT = getParams(psi, calculate_nT = calculate_nT, order_SD = order_SD)
masterList[0].append(np.abs(alpha))
masterList[1].append(np.abs(xi))
masterList[2].append(nBar)
masterList[3].append(nT)
if showProgress:
progress += 1
print('\r', "Progress:", round(100*progress/nStates), "%, processing time:", round(time.time() - t1), "s", end = '')
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5)
fig.set_size_inches(15.5, 7.5, forward=True)
ax1.plot(times, masterList[0], label = r'$|\alpha |$')
ax1.legend()
ax2.plot(times, masterList[1], label = "r")
ax2.legend()
ax3.plot(times, masterList[2], label = "nBar")
if calculate_nT:
ax3.plot(times, masterList[3], label = "nT")
ax3.legend()
ax4.plot(times, wList, label = "w(t)")
ax4.legend()
ax5.plot(times, fList, label = r'$10^{-15} F/\hbar$ in N/(Js)')
ax5.legend()
plt.show()
return(0)
def scanAlphaXiN(H, psi0, times, args, valueList, whichVal, showProgress = True, skippInLoop = 0):
"""returns quentity of interest (alpha and/or xi and/or nBar) for a given list of valueList
arguments:
H: Hamiltonian which governs the time evolution
psi0: initial states
times: list of times used to calculate the time evolution
args: dictionary of arguments given to the hamiltonian (check help(H))
valueList: list of values for which the quantity should be calculated
whichVal: index of the value in args['omegaArgs'] which should be changed according to valueList
skippInLoop: number of timesteps which should be calculated before the loop over valueList
this means that these timesteps are calculated only for the value args['omegaArgs'] given in args (not for all values in valueList)
Not yet implemented: scanA = True, scanX = False, scanN = False
"""
t1 = time.time()
alphaList = []
xiList = []
nList = []
# if skippInLoop > 0: calculate the first skippInLoop steps only once (and do the loop only over the rest)
if skippInLoop > 0:
times1 = times[:skippInLoop]
times2 = times[skippInLoop:]
results = mesolve(H, psi0, times1, args=args)
psi1 = results.states[-1]
else:
times2 = times
psi1 = psi0
# calculate time evolution for all values in valueList
for val in valueList:
args['omegaArgs'][whichVal] = val # change the value that needs changing
results = mesolve(H, psi1, times2, args=args) # calculate time evolution
psi2 = results.states[-1] # final state
alpha,xi,nBar,_ = getParams(psi2, False) # get alpha
# alpha = np.sqrt(np.abs(expect(x, psi2)**2) + np.abs(expect(p, psi2)**2)) # get alpha
alphaList.append(alpha) # save alpha
xiList.append(xi) # save xi
nList.append(nBar) # save nBar
if showProgress:
print('\r', "Progress: ", round(100*(val-valueList[0])/(valueList[-1]-valueList[0])), "%, processing time:", round(time.time() - t1), "s", end = '')
return(alphaList, xiList, nList)
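# Illustrative usage sketch (not part of the original module): scanning the
# quench strength dwQ, i.e. index 1 of omegaArgs for wQP. All numerical
# values below are assumptions chosen only for the example.
def _demo_scanAlphaXiN():
    psi0 = basis(20, 0)
    args = {'n': 20, 'f0': 0.0, 'omega': wQP, 'omegaDt': wQPdot,
            'omegaArgs': [5.0, 0.5, 0.2, 0.0, 1.0, 3.0]}
    times = np.linspace(-1.0, 4.0, 200)
    return scanAlphaXiN(H, psi0, times, args, np.linspace(0.1, 1.0, 5), 1)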
# make a function to generate a random number of a two-poissonian distribution
def rand_2Poisson(mu1, mu2, P_down=0.5, G_leak=0.):
"""simulates the counts after one single experiment of probing the spin state of one single ion
assumes a two poissonian distribution P(k) = P_down P_mu2(k) + (1-P_down) P_mu1(k)
parameters:
mu1: expected counts for the dark state
mu2: expected counts for the bright state (=> mu1 < mu2)
P_down: probability of being in the bright state
G_leak: rate of leaking from the dark state into the bright state (default 0, no leaking)
(P(leak) = exp(-G_leak))
"""
ru = np.random.rand()
rleak = np.random.rand()
if P_down < ru:
# poisson for low counts
if rleak < 1-np.exp(-G_leak):
return -np.log(1-rleak)/G_leak*np.random.poisson(mu1) + (1+np.log(1-rleak)/G_leak)*np.random.poisson(mu2)
else:
return np.random.poisson(mu1)
else:
# poisson for high counts
return np.random.poisson(mu2)
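# Illustrative sanity check (assumed parameter values): with G_leak = 0 the
# sample mean of rand_2Poisson should approach P_down*mu2 + (1-P_down)*mu1.
def _demo_rand_2Poisson(n_samples=10000):
    samples = [rand_2Poisson(1.0, 20.0, P_down=0.3) for _ in range(n_samples)]
    return np.mean(samples)  # expected near 0.3*20 + 0.7*1 = 6.7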
# make a function to generate a random count from two ions, each following a two-poissonian distribution
def rand_4Poisson(mu_U, mu_D, alpha=0, P_D1=0.5, P_D2=0.5, sup = 1, G_leak=0.):
"""generates a random count, taken two bright ions, each emitting light following a two poissonian distribution
parameters:
mu_U: low count rate (when the ion is in its dark, spin up state)
mu_D: high count rate (when the ion is in its bright, spin down state)
alpha: counts, by which the counts of the two bright states differ
P_D1/P_D2: probability of ion one/two to be in the down (bright) state
sup: factor, by which the count rate of ion 1 is suppressed"""
ret = 0
# counts by ion 1
ret += sup*rand_2Poisson(mu_U, mu_D - 0.5*alpha, P_down=P_D1, G_leak=G_leak)
# counts by ion 2
ret += rand_2Poisson(mu_U, mu_D + 0.5*alpha, P_down=P_D2, G_leak=G_leak)
return ret
def rand_4Poisson_hist(mu_U, mu_D, alpha=0, P_D1=0.5, P_D2=0.5, sup = 1, G_leak=0., repetitions=1000, parallel=False):
"""gives a list of random numbers (default 1000) distributed by rand_4Poisson"""
# print("ddd")
if parallel:
hist = Parallel(n_jobs=8)(delayed(rand_4Poisson)(mu_U, mu_D, alpha, P_D1, P_D2, sup, G_leak) for i in range(repetitions))
return hist
else:
hist = [rand_4Poisson(mu_U, mu_D, alpha, P_D1, P_D2, sup, G_leak) for i in range(repetitions)]
return hist
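# Illustrative usage sketch: simulate and plot a count histogram. The
# parameter values are assumptions chosen only for the example.
def _demo_rand_4Poisson_hist():
    counts = rand_4Poisson_hist(1.0, 25.0, alpha=4, repetitions=2000)
    plt.hist(counts, bins=range(0, 70))
    plt.xlabel('photon counts')
    plt.show()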
| """calculates and returns the modulated (two quenches) frequency like in 'Lit early universe'
t time at which the frequency is calculated
args: a list {w0, dw1, dt1, dw2, dt2, delay} or a dictionary with the following keys:
w0 the unmodulated frequency
dw1/2 (strength) and dt1/2 (duration) of the first/second gaussian shaped quench
delay: time between the two quenches
units: all frequencies are circular frequencies with unit MHz, times have unit \mu s"""
if type(args) == list:
w0, dw1, dt1, dw2, dt2, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
return("wrong input form for args, list or dict")
freq = w0
freq += dw1*np.exp(-0.5*(t/dt1)**2)
freq += dw2*np.exp(-0.5*((t-delay)/dt2)**2)
return(freq) | identifier_body |
functionsSqueeze.py | import numpy as np
import matplotlib.pyplot as plt
import qutip
import scipy.special as spe
# import multiprocessing as mp
from joblib import Parallel, delayed
from qutip import *
import time
# testing atom on new pc
def wQP(t, args):
"""calculates and returns the modulated frequency like in "Lit early universe"
t time at which the frequency is calculated
args: a list {w0, dwQ, dtQ, dwP, dtP, delay} or a dictionary with the following keys:
w0 the unmodulated frequency
dwQ (strength) and dtQ (duration) of a gaussian shaped quench centered around t=0
dwP (strength) and dtP (duration) of a parametric modulation of frequency 2 w0 which starts at t = delay
dtP should be an integer multiple of pi/(2 w0) to avoid a discontinuity at t=delay+dtP
units: all frequencies are circular frequencies with unit MHz, times have unit \mu s"""
if type(args) == list:
w0, dwQ, dtQ, dwP, dtP, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dwQ, dtQ, dwP, dtP, delay = args['w0'], args['dwQ'], args['dtQ'], args['dwP'], args['dtP'], args['delay']
else:
return("wrong input form for args, list or dict")
# freq += dwQ/(np.sqrt(2*np.pi)*dtQ)*np.exp(-0.5*(t/dtQ)**2)
freq = w0 + dwQ*np.exp(-0.5*(t/dtQ)**2) # quench
freq += dwP*np.sin(2*w0*(t-delay))*np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1) # parametric
return(freq)
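# Illustrative check (assumed numbers): at t=0 the quench contributes fully
# and the parametric drive has not started, so wQP returns w0 + dwQ there;
# far before both modulations it returns w0. _exampleArgsQP is hypothetical.
_exampleArgsQP = {'w0': 5.0, 'dwQ': 0.5, 'dtQ': 0.2,
                  'dwP': 0.1, 'dtP': 2.0, 'delay': 3.0}
# wQP(0.0, _exampleArgsQP) -> 5.5 and wQP(-100.0, _exampleArgsQP) -> 5.0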
def wQPdot(t, args):
"""calculates the time derivative of w(t, args) at time t
check help(wQP) for further information on args"""
if type(args) == list:
w0, dwQ, dtQ, dwP, dtP, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dwQ, dtQ, dwP, dtP, delay = args['w0'], args['dwQ'], args['dtQ'], args['dwP'], args['dtP'], args['delay']
else:
return("wrong input form for args, list or dict")
# freqD = - dwQ/(np.sqrt(2*np.pi)*dtQ)*np.exp(-0.5*(t/dtQ)**2) * t/(dtQ**2)
freqD = - dwQ*np.exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) # quench
freqD += 2*w0*dwP*np.cos(2*w0*(t-delay))*np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1) # parametric
return(freqD)
def wQQ(t, args):
"""calculates and returns the modulated (two quenches) frequency like in 'Lit early universe'
t time at which the frequency is calculated
args: a list {w0, dw1, dt1, dw2, dt2, delay} or a dictionary with the following keys:
w0 the unmodulated frequency
dw1/2 (strength) and dt1/2 (duration) of the first/second gaussian shaped quench
delay: time between the two quenches
units: all frequencies are circular frequencies with unit MHz, times have unit \mu s"""
if type(args) == list:
w0, dw1, dt1, dw2, dt2, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
return("wrong input form for args, list or dict")
freq = w0
freq += dw1*np.exp(-0.5*(t/dt1)**2)
freq += dw2*np.exp(-0.5*((t-delay)/dt2)**2)
return(freq)
def wQQdot(t, args):
"""calculates the time derivative of wQQ(t, args) at time t
check help(wQQ) for further information on args"""
if type(args) == list:
w0, dw1, dt1, dw2, dt2, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
|
freqD = - dw1*np.exp(-0.5*(t/dt1)**2) * t/(dt1**2)
freqD += - dw2*np.exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)
return(freqD)
# defining the Hamiltonian of the phonon evolution for variable w(t)
def H(t, args):
"""calculates the hamiltonian of a harmonic oscillator with modulated frequency
has an additional term which takes a force proportional to 1/w^2 into account
args (dictionary which carries all arguments except t):
t time at which the Hamiltonian is calculated (unit \mu s)
n dimension of the hilbert space (or cutoff dimension for the numerical calculations)
f0 proportionality constant of the additional force (unit N MHz^2)
omega(t, omegaArgs) frequency, modulated in time, described by the list of arguments omegaArgs
omegaDt(t, omegaArgs) time derivative of the frequency
=> in args you need: n, f0, omega, omegaDt, omegaArgs
This form of input is necessary to use H in further calculations (mesolve)"""
f0 = args['f0']
n = args['n']
omega = args['omega']
omegaDt = args['omegaDt']
omegaArgs = args['omegaArgs']
ad = create(n)
a = destroy(n)
# H0, for the first two terms see Silveri 2017 Quantum_systems_under_frequency_modulation
ham = omega(t, omegaArgs)*(ad*a+0.5*qeye(n))
# additional term because of w(t) not constant
ham += 1j/4*omegaDt(t, omegaArgs)/omega(t, omegaArgs)*(a*a-ad*ad)
# Force term (9*10^-9 = x0, extent of the ground state wave function), see Wittmann diss
# with compensation term -f0/w0^2 (e.g. no force in the case of no modulation)
ham += 9*(f0/(omega(t, omegaArgs)**2) - f0/(omegaArgs[0]**2))*(ad + a)
# ham += (9*10**-9)/(10**6)*(f0/(omega(t, omegaArgs)**2))*(ad + a)
return(ham)
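# Illustrative usage sketch: assembling the args dictionary that H expects
# and evaluating it once. The numerical values are assumptions; H(0, args)
# returns a qutip.Qobj acting on the truncated n-dimensional Fock space.
def _demo_H():
    args = {'n': 15, 'f0': 0.0, 'omega': wQP, 'omegaDt': wQPdot,
            'omegaArgs': [5.0, 0.5, 0.2, 0.0, 1.0, 3.0]}
    return H(0.0, args)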
def eval_H_QP(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation is a combination of a quench and a parametric modulation
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dwQ, dtQ: strength and duration of a gaussian shaped quench centered around t=0
dwP, dtP, delay: strength and duration of a parametric modulation of frequency 2 w0 which starts at t = delay
dtP should be an integer multiple of pi/(2 w0) to avoid a discontinuity at t=delay+dtP
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQP = 'w0 + dwQ*exp(-0.5*(t/dtQ)**2) + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)
# time derivative of the time-dependent frequency
strDWQP = '- dwQ*exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) + (2*w0*dwP*cos(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + 2*w0*dwP*cos(2*w0*t) if t > delay and t < delay+dtP else 0)
# *np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1)
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQP]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQP + ')/(' + strWQP + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQP + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
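# Illustrative usage sketch (assumed values): ground state through a quench
# followed by a short parametric drive; dtP = 2*pi/5 is an integer multiple
# of pi/(2*w0) for w0 = 5.0, as the docstring above requires.
def _demo_eval_H_QP():
    n = 20
    args = {'n': n, 'w0': 5.0, 'dwQ': 0.5, 'dtQ': 0.2,
            'dwP': 0.05, 'dtP': 2*np.pi/5.0, 'delay': 2.0, 'f0': 0.0}
    times = np.linspace(-1.0, 6.0, 400)
    return eval_H_QP(basis(n, 0), times, args).states[-1]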
def eval_H_QQ(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation consists of two gaussian quenches
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dw1, dt1: strength and duration of the first quench centered around t=0
dw2, dt2, delay: strength and duration of the second quench centered around t=delay
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQQ = 'w0 + dw1*exp(-0.5*(t/dt1)**2) + dw2*exp(-0.5*((t-delay)/dt2)**2)'
# time derivative of the time-dependent frequency
strDWQQ = '- dw1*exp(-0.5*(t/dt1)**2) * t/(dt1**2) - dw2*exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)'
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQQ]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQQ + ')/(' + strWQQ + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQQ + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
def getParams(psi, calculate_nT = True, order_SD = False):
"""calculates for a given state psi (assumes that the thermal excitation is close to the vacuum):
alpha: the coherent displacement parameter
xi: the squeezing parameter
nBar: the mean photon number
nT: the photon number due to the thermal excitation DM_t
calculate_nT: bool, decides if nT will be calculated (takes time), default set to True
if calculate_nT = False, xi is only correct modulo complex conjugation, nT is set to 0!!!
order_SD: bool, changes order in displacement and squeezing
if True: assumes that psi can be written as DM_psi = S(xi) D(alpha) DM_t D(alpha).dag() S(xi).dag()
nT will automatically be calculated, regardless of calculate_nT (it is needed for the commutation of S and D)
if False: assumes that psi can be written as DM_psi = D(alpha) S(xi) DM_t S(xi).dag() D(alpha).dag()
returns alpha, xi, nBar, nT"""
n = psi.dims[0][0]
ad = create(n)
a = destroy(n)
x = (ad + a)
p = 1j*(ad - a)
xV = variance(x, psi)
pV = variance(p, psi)
# calculated by hand, assuming t = 0 (e.g. DM_t = |0><0|)
xiR = np.arcsinh(0.5*np.sqrt(xV + pV - 2 +0j)) # avoid NANs
if (np.cosh(xiR)*np.sinh(xiR))==0:
xiT1 = 0
else:
xiT1 = 0.25*(pV - xV)/(np.cosh(xiR)*np.sinh(xiR))
# cos is symmetric about x=0, therefore the inverse is +/- arccos(...)
# xiT = np.sign(xiT1)*np.arccos(xiT1)
xiT = np.sign(xiT1)*np.arccos(xiT1)
xi = xiR*np.exp(1j*xiT)
# alpha = 0.5*np.sqrt(xV + pV)
alpha = expect(a, psi)
# print(alpha)
nBar = np.abs(expect(num(n), psi))
# print(nBar)
# calculates the thermal excitation (assuming DM_psi = D S DM_t S.dag() D.dag())
if calculate_nT or order_SD:
psiT = squeeze(n, xi).dag()*displace(n, alpha).dag()*psi*displace(n, alpha)*squeeze(n, xi)
nT = np.abs(expect(num(n), psiT))
xic = np.conj(xi)
psiTc = squeeze(n, xic).dag()*displace(n, alpha).dag()*psi*displace(n, alpha)*squeeze(n, xic)
nTc = np.abs(expect(num(n), psiTc))
if nTc < nT:
nT, xi = nTc, xic
# formula used to commute D and S: https://en.wikipedia.org/wiki/Squeeze_operator
if order_SD:
alpha = alpha*np.cosh(xiR) + np.conj(alpha)*xi/xiR*np.sinh(xiR)
return(alpha, xi, nBar, nT)
else:
return(alpha, xi, nBar, 0)
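# Illustrative sanity check: for a displaced squeezed vacuum built in the
# same order that getParams assumes (order_SD = False), the function should
# approximately recover the alpha and |xi| used to construct the state,
# provided the cutoff n is large enough. Values here are assumptions.
def _demo_getParams(n=40, alpha0=1.0, xi0=0.3):
    psi = displace(n, alpha0) * squeeze(n, xi0) * basis(n, 0)
    return getParams(ket2dm(psi))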
def plotResults(times, result, args, calculate_nT = True, order_SD = False, nSkipp = 1, showProgress = False):
"""plots the development of the coherent displacement alpha,
squeezing parameter r, mean excitation number nBar, thermal excitation nT (see help(getParams))
together with the time-dependent frequency and the force
arguments:
times: list of times for which the values should be calculated
result: list of states (as returned from mesolve) corresponding to times
args: arguments given to H in the calculation of the dynamics
calculate_nT = True: bool, if nT should be calculated as well (takes time)
nSkipp = 1: stride; only every nSkipp-th state is processed and plotted (speeds it up)"""
t1 = time.time()
times = times[::nSkipp]
if 'omegaArgs' in args:
wList = args['omega'](times, args['omegaArgs'])
fList = args['f0']/wList**2 - args['f0']/args['omegaArgs'][0]**2
else:
wList = args['omega'](times, args)
fList = args['f0']/wList**2 - args['f0']/args['w0']**2
masterList = [[],[],[],[]]
nStates = len(result.states[::nSkipp])
progress = 0
for psi in result.states[::nSkipp]:
alpha, xi, nBar, nT = getParams(psi, calculate_nT = calculate_nT, order_SD = order_SD)
masterList[0].append(np.abs(alpha))
masterList[1].append(np.abs(xi))
masterList[2].append(nBar)
masterList[3].append(nT)
if showProgress:
progress += 1
print('\r', "Progress:", round(100*progress/nStates), "%, processing time:", round(time.time() - t1), "s", end = '')
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5)
fig.set_size_inches(15.5, 7.5, forward=True)
ax1.plot(times, masterList[0], label = r'$|\alpha |$')
ax1.legend()
ax2.plot(times, masterList[1], label = "r")
ax2.legend()
ax3.plot(times, masterList[2], label = "nBar")
if calculate_nT:
ax3.plot(times, masterList[3], label = "nT")
ax3.legend()
ax4.plot(times, wList, label = "w(t)")
ax4.legend()
ax5.plot(times, fList, label = r'$10^{-15} F/\hbar$ in N/(Js)')
ax5.legend()
plt.show()
return(0)
def scanAlphaXiN(H, psi0, times, args, valueList, whichVal, showProgress = True, skippInLoop = 0):
"""returns quentity of interest (alpha and/or xi and/or nBar) for a given list of valueList
arguments:
H: Hamiltonian which governs the time evolution
psi0: initial state
times: list of times used to calculate the time evolution
args: dictionary of arguments given to the hamiltonian (check help(H))
valueList: list of values for which the quantity should be calculated
whichVal: index of the entry in args['omegaArgs'] which should be changed according to valueList
skippInLoop: number of timesteps which should be calculated before the loop over valueList
this means that these timesteps are calculated only for the value args['omegaArgs'] given in args (not for all values in valueList)
Not yet implemented: scanA = True, scanX = False, scanN = False
"""
t1 = time.time()
alphaList = []
xiList = []
nList = []
# if skippInLoop > 0: calculate the first skippInLoop steps only once (and do the loop only over the rest)
if skippInLoop > 0:
times1 = times[:skippInLoop]
times2 = times[skippInLoop:]
results = mesolve(H, psi0, times1, args=args)
psi1 = results.states[-1]
else:
times2 = times
psi1 = psi0
# calculate time evolution for all values in valueList
for val in valueList:
args['omegaArgs'][whichVal] = val # change the value that needs changing
results = mesolve(H, psi1, times2, args=args) # calculate time evolution
psi2 = results.states[-1] # final state
alpha,xi,nBar,_ = getParams(psi2, False) # get alpha
# alpha = np.sqrt(np.abs(expect(x, psi2)**2) + np.abs(expect(p, psi2)**2)) # get alpha
alphaList.append(alpha) # save alpha
xiList.append(xi) # save xi
nList.append(nBar) # save nBar
if showProgress:
print('\r', "Progress: ", round(100*(val-valueList[0])/(valueList[-1]-valueList[0])), "%, processing time:", round(time.time() - t1), "s", end = '')
return(alphaList, xiList, nList)
# make a function to generate a random number of a two-poissonian distribution
def rand_2Poisson(mu1, mu2, P_down=0.5, G_leak=0.):
"""simulates the counts after one single experiment of probing the spin state of one single ion
assumes a two poissonian distribution P(k) = P_down P_mu2(k) + (1-P_down) P_mu1(k)
parameters:
mu1: expected counts for the dark state
mu2: expected counts for the bright state (=> mu1 < mu2)
P_down: probability of being in the bright state
G_leak: rate of leaking from the dark state into the bright state (default 0, no leaking)
(P(leak) = exp(-G_leak))
"""
ru = np.random.rand()
rleak = np.random.rand()
if P_down < ru:
# poisson for low counts
if rleak < 1-np.exp(-G_leak):
return -np.log(1-rleak)/G_leak*np.random.poisson(mu1) + (1+np.log(1-rleak)/G_leak)*np.random.poisson(mu2)
else:
return np.random.poisson(mu1)
else:
# poisson for high counts
return np.random.poisson(mu2)
# make a function to generate a random count from two ions, each following a two-poissonian distribution
def rand_4Poisson(mu_U, mu_D, alpha=0, P_D1=0.5, P_D2=0.5, sup = 1, G_leak=0.):
"""generates a random count, taken two bright ions, each emitting light following a two poissonian distribution
parameters:
mu_U: low count rate (when the ion is in its dark, spin up state)
mu_D: high count rate (when the ion is in its bright, spin down state)
alpha: counts, by which the counts of the two bright states differ
P_D1/P_D2: probability of ion one/two to be in the down (bright) state
sup: factor, by which the count rate of ion 1 is suppressed"""
ret = 0
# counts by ion 1
ret += sup*rand_2Poisson(mu_U, mu_D - 0.5*alpha, P_down=P_D1, G_leak=G_leak)
# counts by ion 2
ret += rand_2Poisson(mu_U, mu_D + 0.5*alpha, P_down=P_D2, G_leak=G_leak)
return ret
def rand_4Poisson_hist(mu_U, mu_D, alpha=0, P_D1=0.5, P_D2=0.5, sup = 1, G_leak=0., repetitions=1000, parallel=False):
"""gives a list of random numbers (default 1000) distributed by rand_4Poisson"""
# print("ddd")
if parallel:
hist = Parallel(n_jobs=8)(delayed(rand_4Poisson)(mu_U, mu_D, alpha, P_D1, P_D2, sup, G_leak) for i in range(repetitions))
return hist
else:
hist = [rand_4Poisson(mu_U, mu_D, alpha, P_D1, P_D2, sup, G_leak) for i in range(repetitions)]
return hist
| return("wrong input form for args, list or dict") | conditional_block |
functionsSqueeze.py | import numpy as np
import matplotlib.pyplot as plt
import qutip
import scipy.special as spe
# import multiprocessing as mp
from joblib import Parallel, delayed
from qutip import *
import time
# testing atom on new pc
def wQP(t, args):
"""calculates and returns the modulated frequency like in "Lit early universe"
t time at which the frequency is calculated
args: a list {w0, dwQ, dtQ, dwP, dtP, delay} or a dictionary with the following keys:
w0 the unmodulated frequency
dwQ (strength) and dtQ (duration) of a gaussian shaped quench centered around t=0
dwP (strength) and dtP (duration) of a parametric modulation of frequency 2 w0 which starts at t = delay
dtP should be an integer multiple of pi/(2 w0) to avoid a discontinuity at t=delay+dtP
units: all frequencies are circular frequencies with unit MHz, times have unit \mu s"""
if type(args) == list:
w0, dwQ, dtQ, dwP, dtP, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dwQ, dtQ, dwP, dtP, delay = args['w0'], args['dwQ'], args['dtQ'], args['dwP'], args['dtP'], args['delay']
else:
return("wrong input form for args, list or dict")
# freq += dwQ/(np.sqrt(2*np.pi)*dtQ)*np.exp(-0.5*(t/dtQ)**2)
freq = w0 + dwQ*np.exp(-0.5*(t/dtQ)**2) # quench
freq += dwP*np.sin(2*w0*(t-delay))*np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1) # parametric
return(freq)
def wQPdot(t, args):
"""calculates the time derivative of w(t, args) at time t
check help(wQP) for further information on args"""
if type(args) == list:
w0, dwQ, dtQ, dwP, dtP, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dwQ, dtQ, dwP, dtP, delay = args['w0'], args['dwQ'], args['dtQ'], args['dwP'], args['dtP'], args['delay']
else:
return("wrong input form for args, list or dict")
# freqD = - dwQ/(np.sqrt(2*np.pi)*dtQ)*np.exp(-0.5*(t/dtQ)**2) * t/(dtQ**2)
freqD = - dwQ*np.exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) # quench
freqD += 2*w0*dwP*np.cos(2*w0*(t-delay))*np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1) # parametric
return(freqD)
def wQQ(t, args):
"""calculates and returns the modulated (two quenches) frequency like in 'Lit early universe'
t time at which the frequency is calculated
args: a list {w0, dw1, dt1, dw2, dt2, delay} or a dictionary with the following keys:
w0 the unmodulated frequency
dw1/2 (strength) and dt1/2 (duration) of the first/second gaussian shaped quench
delay: time between the two quenches
units: all frequencies are circular frequencies with unit MHz, times have unit \mu s"""
if type(args) == list:
w0, dw1, dt1, dw2, dt2, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
return("wrong input form for args, list or dict")
freq = w0
freq += dw1*np.exp(-0.5*(t/dt1)**2)
freq += dw2*np.exp(-0.5*((t-delay)/dt2)**2)
return(freq)
def | (t, args):
"""calculates the time derivative of wQQ(t, args) at time t
check help(wQQ) for further information on args"""
if type(args) == list:
w0, dw1, dt1, dw2, dt2, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
return("wrong input form for args, list or dict")
freqD = - dw1*np.exp(-0.5*(t/dt1)**2) * t/(dt1**2)
freqD += - dw2*np.exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)
return(freqD)
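# Illustrative sketch: a central finite-difference check that wQQdot is the
# derivative of wQQ. The argument list is an assumption for the example.
def _check_wQQdot(t=0.7, h=1e-6):
    a = [5.0, 0.4, 0.3, -0.2, 0.3, 2.0]  # [w0, dw1, dt1, dw2, dt2, delay]
    numeric = (wQQ(t + h, a) - wQQ(t - h, a)) / (2*h)
    return abs(numeric - wQQdot(t, a))  # should be close to 0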
# defining the Hamiltonian of the phonon evolution for variable w(t)
def H(t, args):
"""calculates the hamiltonian of a harmonic oscillator with modulated frequency
has an additional term which takes a force proportional to 1/w^2 into account
args (dictionary which carries all arguments except t):
t time at which the Hamiltonian is calculated (unit \mu s)
n dimension of the hilbert space (or cutoff dimension for the numerical calculations)
f0 proportionality constant of the additional force (unit N MHz^2)
omega(t, omegaArgs) frequency, modulated in time, described by the list of arguments omegaArgs
omegaDt(t, omegaArgs) time derivative of the frequency
=> in args you need: n, f0, omega, omegaDt, omegaArgs
This form of input is necessary to use H in further calculations (mesolve)"""
f0 = args['f0']
n = args['n']
omega = args['omega']
omegaDt = args['omegaDt']
omegaArgs = args['omegaArgs']
ad = create(n)
a = destroy(n)
# H0, for the first two terms see Silveri 2017 Quantum_systems_under_frequency_modulation
ham = omega(t, omegaArgs)*(ad*a+0.5*qeye(n))
# additional term because of w(t) not constant
ham += 1j/4*omegaDt(t, omegaArgs)/omega(t, omegaArgs)*(a*a-ad*ad)
# Force term (9*10^-9 = x0, extent of the ground state wave function), see Wittmann diss
# with compensation term -f0/w0^2 (e.g. no force in the case of no modulation)
ham += 9*(f0/(omega(t, omegaArgs)**2) - f0/(omegaArgs[0]**2))*(ad + a)
# ham += (9*10**-9)/(10**6)*(f0/(omega(t, omegaArgs)**2))*(ad + a)
return(ham)
def eval_H_QP(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation is a combination of a quench and a parametric modulation
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dwQ, dtQ: strength and duration of a gaussian shaped quench centered around t=0
dwP, dtP, delay: strength and duration of a parametric modulation of frequency 2 w0 which starts at t = delay
dtP should be an integer multiple of pi/(2 w0) to avoid a discontinuity at t=delay+dtP
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQP = 'w0 + dwQ*exp(-0.5*(t/dtQ)**2) + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)
# time derivative of the time-dependent frequency
strDWQP = '- dwQ*exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) + (2*w0*dwP*cos(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + 2*w0*dwP*cos(2*w0*t) if t > delay and t < delay+dtP else 0)
# *np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1)
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQP]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQP + ')/(' + strWQP + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQP + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
def eval_H_QQ(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation consists of two gaussian quenches
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dw1, dt1: strength and duration of the first quench centered around t=0
dw2, dt2, delay: strength and duration of the second quench centered around t=delay
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQQ = 'w0 + dw1*exp(-0.5*(t/dt1)**2) + dw2*exp(-0.5*((t-delay)/dt2)**2)'
# time derivative of the time-dependent frequency
strDWQQ = '- dw1*exp(-0.5*(t/dt1)**2) * t/(dt1**2) - dw2*exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)'
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQQ]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQQ + ')/(' + strWQQ + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQQ + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
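# Illustrative usage sketch (assumed values): evolving a small coherent
# state through two quenches, passing solver settings through the options
# argument that eval_H_QQ forwards to mesolve.
def _demo_eval_H_QQ():
    n = 25
    args = {'n': n, 'w0': 5.0, 'dw1': 0.4, 'dt1': 0.3,
            'dw2': -0.2, 'dt2': 0.3, 'delay': 2.0, 'f0': 0.0}
    times = np.linspace(-1.0, 5.0, 300)
    return eval_H_QQ(coherent(n, 0.5), times, args, options=Options(atol=1e-10)).states[-1]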
def getParams(psi, calculate_nT = True, order_SD = False):
"""calculates for a given state psi (assumes that the thermal excitation is close to the vacuum):
alpha: the coherent displacement parameter
xi: the squeezing parameter
nBar: the mean photon number
nT: the photon number due to the thermal excitation DM_t
calculate_nT: bool, decides if nT will be calculated (takes time), default set to True
if calculate_nT = False, xi is only correct modulo complex conjugation, nT is set to 0!!!
order_SD: bool, changes order in displacement and squeezing
if True: assumes that psi can be written as DM_psi = S(xi) D(alpha) DM_t D(alpha).dag() S(xi).dag()
nT will automatically be calculated, regardless of calculate_nT (it is needed for the commutation of S and D)
if False: assumes that psi can be written as DM_psi = D(alpha) S(xi) DM_t S(xi).dag() D(alpha).dag()
returns alpha, xi, nBar, nT"""
n = psi.dims[0][0]
ad = create(n)
a = destroy(n)
x = (ad + a)
p = 1j*(ad - a)
xV = variance(x, psi)
pV = variance(p, psi)
# calculated by hand, assuming t = 0 (e.g. DM_t = |0><0|)
xiR = np.arcsinh(0.5*np.sqrt(xV + pV - 2 +0j)) # avoid NANs
if (np.cosh(xiR)*np.sinh(xiR))==0:
xiT1 = 0
else:
xiT1 = 0.25*(pV - xV)/(np.cosh(xiR)*np.sinh(xiR))
# cos is symmetric about x=0, therefore the inverse is +/- arccos(...)
# xiT = np.sign(xiT1)*np.arccos(xiT1)
xiT = np.sign(xiT1)*np.arccos(xiT1)
xi = xiR*np.exp(1j*xiT)
# alpha = 0.5*np.sqrt(xV + pV)
alpha = expect(a, psi)
# print(alpha)
nBar = np.abs(expect(num(n), psi))
# print(nBar)
# calculates the thermal excitation (assuming DM_psi = D S DM_t S.dag() D.dag())
if calculate_nT or order_SD:
psiT = squeeze(n, xi).dag()*displace(n, alpha).dag()*psi*displace(n, alpha)*squeeze(n, xi)
nT = np.abs(expect(num(n), psiT))
xic = np.conj(xi)
psiTc = squeeze(n, xic).dag()*displace(n, alpha).dag()*psi*displace(n, alpha)*squeeze(n, xic)
nTc = np.abs(expect(num(n), psiTc))
if nTc < nT:
nT, xi = nTc, xic
# formula used to commute D and S: https://en.wikipedia.org/wiki/Squeeze_operator
if order_SD:
alpha = alpha*np.cosh(xiR) + np.conj(alpha)*xi/xiR*np.sinh(xiR)
return(alpha, xi, nBar, nT)
else:
return(alpha, xi, nBar, 0)
def plotResults(times, result, args, calculate_nT = True, order_SD = False, nSkipp = 1, showProgress = False):
"""plots the development of the coherent displacement alpha,
squeezing parameter r, mean excitation number nBar, thermal excitation nT (see help(getParams))
together with the time-dependent frequency and the force
arguments:
times: list of times for which the values should be calculated
result: list of states (as returned from mesolve) corresponding to times
args: arguments given to H in the calculation of the dynamics
calculate_nT = True: bool, if nT should be calculated as well (takes time)
nSkipp = 1: stride; only every nSkipp-th state is processed and plotted (speeds it up)"""
t1 = time.time()
times = times[::nSkipp]
if 'omegaArgs' in args:
wList = args['omega'](times, args['omegaArgs'])
fList = args['f0']/wList**2 - args['f0']/args['omegaArgs'][0]**2
else:
wList = args['omega'](times, args)
fList = args['f0']/wList**2 - args['f0']/args['w0']**2
masterList = [[],[],[],[]]
nStates = len(result.states[::nSkipp])
progress = 0
for psi in result.states[::nSkipp]:
alpha, xi, nBar, nT = getParams(psi, calculate_nT = calculate_nT, order_SD = order_SD)
masterList[0].append(np.abs(alpha))
masterList[1].append(np.abs(xi))
masterList[2].append(nBar)
masterList[3].append(nT)
if showProgress:
progress += 1
print('\r', "Progress:", round(100*progress/nStates), "%, processing time:", round(time.time() - t1), "s", end = '')
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5)
fig.set_size_inches(15.5, 7.5, forward=True)
ax1.plot(times, masterList[0], label = r'$|\alpha |$')
ax1.legend()
ax2.plot(times, masterList[1], label = "r")
ax2.legend()
ax3.plot(times, masterList[2], label = "nBar")
if calculate_nT:
ax3.plot(times, masterList[3], label = "nT")
ax3.legend()
ax4.plot(times, wList, label = "w(t)")
ax4.legend()
ax5.plot(times, fList, label = r'$10^{-15} F/\hbar$ in N/(Js)')
ax5.legend()
plt.show()
return(0)
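# Illustrative call chain (assumed values): plotResults needs the same args
# dictionary that drove the dynamics, because it re-evaluates args['omega']
# to draw the frequency and force panels.
def _demo_plotResults():
    args = {'n': 20, 'f0': 0.0, 'omega': wQQ, 'omegaDt': wQQdot,
            'omegaArgs': [5.0, 0.4, 0.3, -0.2, 0.3, 2.0]}
    times = np.linspace(-1.0, 5.0, 200)
    result = mesolve(H, basis(20, 0), times, args=args)
    plotResults(times, result, args, calculate_nT=False, nSkipp=4)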
def scanAlphaXiN(H, psi0, times, args, valueList, whichVal, showProgress = True, skippInLoop = 0):
"""returns quentity of interest (alpha and/or xi and/or nBar) for a given list of valueList
arguments:
H: Hamiltonian which governs the time evolution
psi0: initial state
times: list of times used to calculate the time evolution
args: dictionary of arguments given to the hamiltonian (check help(H))
valueList: list of values for which the quantity should be calculated
whichVal: index of the entry in args['omegaArgs'] which should be changed according to valueList
skippInLoop: number of timesteps which should be calculated before the loop over valueList
this means that these timesteps are calculated only for the value args['omegaArgs'] given in args (not for all values in valueList)
Not yet implemented: scanA = True, scanX = False, scanN = False
"""
t1 = time.time()
alphaList = []
xiList = []
nList = []
# if skippInLoop > 0: calculate the first skippInLoop steps only once (and do the loop only over the rest)
if skippInLoop > 0:
times1 = times[:skippInLoop]
times2 = times[skippInLoop:]
results = mesolve(H, psi0, times1, args=args)
psi1 = results.states[-1]
else:
times2 = times
psi1 = psi0
# calculate time evolution for all values in valueList
for val in valueList:
args['omegaArgs'][whichVal] = val # change the value that needs changing
results = mesolve(H, psi1, times2, args=args) # calculate time evolution
psi2 = results.states[-1] # final state
alpha,xi,nBar,_ = getParams(psi2, False) # get alpha
# alpha = np.sqrt(np.abs(expect(x, psi2)**2) + np.abs(expect(p, psi2)**2)) # get alpha
alphaList.append(alpha) # save alpha
xiList.append(xi) # save xi
nList.append(nBar) # save nBar
if showProgress:
print('\r', "Progress: ", round(100*(val-valueList[0])/(valueList[-1]-valueList[0])), "%, processing time:", round(time.time() - t1), "s", end = '')
return(alphaList, xiList, nList)
# make a function to generate a random number of a two-poissonian distribution
def rand_2Poisson(mu1, mu2, P_down=0.5, G_leak=0.):
"""simulates the counts after one single experiment of probing the spin state of one single ion
assumes a two poissonian distribution P(k) = P_down P_mu2(k) + (1-P_down) P_mu1(k)
parameters:
mu1: expected counts for the dark state
mu2: expected counts for the bright state (=> mu1 < mu2)
P_down: probability of being in the bright state
G_leak: rate of leaking from the dark state into the bright state (default 0, no leaking)
(P(leak) = exp(-G_leak))
"""
ru = np.random.rand()
rleak = np.random.rand()
if P_down < ru:
# poisson for low counts
if rleak < 1-np.exp(-G_leak):
return -np.log(1-rleak)/G_leak*np.random.poisson(mu1) + (1+np.log(1-rleak)/G_leak)*np.random.poisson(mu2)
else:
return np.random.poisson(mu1)
else:
# poisson for high counts
return np.random.poisson(mu2)
# make a function to generate a random count from two ions, each following a two-poissonian distribution
def rand_4Poisson(mu_U, mu_D, alpha=0, P_D1=0.5, P_D2=0.5, sup = 1, G_leak=0.):
"""generates a random count, taken two bright ions, each emitting light following a two poissonian distribution
parameters:
mu_U: low count rate (when the ion is in its dark, spin up state)
mu_D: high count rate (when the ion is in its bright, spin down state)
alpha: counts, by which the counts of the two bright states differ
P_D1/P_D2: probability of ion one/two to be in the down (bright) state
sup: factor, by which the count rate of ion 1 is suppressed"""
ret = 0
# counts by ion 1
ret += sup*rand_2Poisson(mu_U, mu_D - 0.5*alpha, P_down=P_D1, G_leak=G_leak)
# counts by ion 2
ret += rand_2Poisson(mu_U, mu_D + 0.5*alpha, P_down=P_D2, G_leak=G_leak)
return ret
def rand_4Poisson_hist(mu_U, mu_D, alpha=0, P_D1=0.5, P_D2=0.5, sup = 1, G_leak=0., repetitions=1000, parallel=False):
"""gives a list of random numbers (default 1000) distributed by rand_4Poisson"""
# print("ddd")
if parallel:
hist = Parallel(n_jobs=8)(delayed(rand_4Poisson)(mu_U, mu_D, alpha, P_D1, P_D2, sup, G_leak) for i in range(repetitions))
return hist
else:
hist = [rand_4Poisson(mu_U, mu_D, alpha, P_D1, P_D2, sup, G_leak) for i in range(repetitions)]
return hist
| wQQdot | identifier_name |
functionsSqueeze.py | import numpy as np
import matplotlib.pyplot as plt
import qutip
import scipy.special as spe
# import multiprocessing as mp
from joblib import Parallel, delayed
from qutip import *
import time
# testing atom on new pc
def wQP(t, args):
"""calculates and returns the modulated frequency like in "Lit early universe"
t time at which the frequency is calculated
args: a list {w0, dwQ, dtQ, dwP, dtP, delay} or a dictionary with the following keys:
w0 the unmodulated frequency
dwQ (strength) and dtQ (duration) of a gaussian shaped quench centered around t=0
dwP (strength) and dtP (duration) of a parametric modulation of frequency 2 w0 which starts at t = delay
dtP should be an integer multiple of pi/(2 w0) to avoid a discontinuity at t=delay+dtP
units: all frequencies are circular frequencies with unit MHz, times have unit \mu s"""
if type(args) == list:
w0, dwQ, dtQ, dwP, dtP, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dwQ, dtQ, dwP, dtP, delay = args['w0'], args['dwQ'], args['dtQ'], args['dwP'], args['dtP'], args['delay']
else:
return("wrong input form for args, list or dict")
# freq += dwQ/(np.sqrt(2*np.pi)*dtQ)*np.exp(-0.5*(t/dtQ)**2)
freq = w0 + dwQ*np.exp(-0.5*(t/dtQ)**2) # quench
freq += dwP*np.sin(2*w0*(t-delay))*np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1) # parametric
return(freq)
def wQPdot(t, args):
"""calculates the time derivative of w(t, args) at time t
check help(wQP) for further information on args"""
if type(args) == list:
w0, dwQ, dtQ, dwP, dtP, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dwQ, dtQ, dwP, dtP, delay = args['w0'], args['dwQ'], args['dtQ'], args['dwP'], args['dtP'], args['delay']
else:
return("wrong input form for args, list or dict")
# freqD = - dwQ/(np.sqrt(2*np.pi)*dtQ)*np.exp(-0.5*(t/dtQ)**2) * t/(dtQ**2)
freqD = - dwQ*np.exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) # quench
freqD += 2*w0*dwP*np.cos(2*w0*(t-delay))*np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1) # parametric
return(freqD)
def wQQ(t, args):
"""calculates and returns the modulated (two quenches) frequency like in 'Lit early universe'
t time at which the frequency is calculated
args: a list {w0, dw1, dt1, dw2, dt2, delay} or a dictionary with the following keys:
w0 the unmodulated frequency
dw1/2 (strength) and dt1/2 (duration) of the first/second gaussian shaped quench
delay: time between the two quenches
units: all frequencies are circular frequencies with unit MHz, times have unit \mu s"""
if type(args) == list:
w0, dw1, dt1, dw2, dt2, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
return("wrong input form for args, list or dict")
freq = w0
freq += dw1*np.exp(-0.5*(t/dt1)**2)
freq += dw2*np.exp(-0.5*((t-delay)/dt2)**2)
return(freq)
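# Illustrative check (assumed numbers): for well separated quenches the
# second gaussian's tail is negligible at t=0, so wQQ(0, args) is
# approximately w0 + dw1. _exampleArgsQQ is hypothetical.
_exampleArgsQQ = [5.0, 0.4, 0.3, -0.2, 0.3, 2.0]  # [w0, dw1, dt1, dw2, dt2, delay]
# wQQ(0.0, _exampleArgsQQ) -> about 5.4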
def wQQdot(t, args):
"""calculates the time derivative of wQQ(t, args) at time t
check help(wQQ) for further information on args"""
if type(args) == list:
w0, dw1, dt1, dw2, dt2, delay = args[0], args[1], args[2], args[3], args[4], args[5]
elif type(args) == dict:
w0, dw1, dt1, dw2, dt2, delay = args['w0'], args['dw1'], args['dt1'], args['dw2'], args['dt2'], args['delay']
else:
return("wrong input form for args, list or dict")
freqD = - dw1*np.exp(-0.5*(t/dt1)**2) * t/(dt1**2)
freqD += - dw2*np.exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)
return(freqD)
# defining the Hamiltonian of the phonon evolution for variable w(t)
def H(t, args):
"""calculates the hamiltonian of a harmonic oscillator with modulated frequency
has an additional term which takes a force proportional to 1/w^2 into account
args (dictionary which carries all arguments except t):
t time at which the Hamiltonian is calculated (unit \mu s)
n dimension of the hilbert space (or cutoff dimension for the numerical calculations)
f0 proportionality constant of the additional force (unit N MHz^2)
omega(t, omegaArgs) frequency, modulated in time, described by the list of arguments omegaArgs
omegaDt(t, omegaArgs) time derivative of the frequency
=> in args you need: n, f0, omega, omegaDt, omegaArgs
This form of input is necessary to use H in further calculations (mesolve)"""
f0 = args['f0']
n = args['n']
omega = args['omega']
omegaDt = args['omegaDt']
omegaArgs = args['omegaArgs']
ad = create(n)
a = destroy(n)
# H0, for the first two terms see Silveri 2017 Quantum_systems_under_frequency_modulation
ham = omega(t, omegaArgs)*(ad*a+0.5*qeye(n))
# additional term because of w(t) not constant
ham += 1j/4*omegaDt(t, omegaArgs)/omega(t, omegaArgs)*(a*a-ad*ad)
# Force term (9*10^-9 = x0, extent of the ground state wave function), see Wittmann diss
# with compensation term -f0/w0^2 (e.g. no force in the case of no modulation)
ham += 9*(f0/(omega(t, omegaArgs)**2) - f0/(omegaArgs[0]**2))*(ad + a)
# ham += (9*10**-9)/(10**6)*(f0/(omega(t, omegaArgs)**2))*(ad + a)
return(ham)
def eval_H_QP(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation is a combination of a quench and a parametric modulation
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dwQ, dtQ: strength and duration of a gaussian shaped quench centered around t=0
dwP, dtP, delay: strength and duration of a parametric modulation of frequency 2 w0 which starts at t = delay
dtP should be an integer multiple of pi/(2 w0) to avoid a discontinuity at t=delay+dtP
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQP = 'w0 + dwQ*exp(-0.5*(t/dtQ)**2) + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + (dwP*sin(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)
# time derivative of the time-dependent frequency
strDWQP = '- dwQ*exp(-0.5*(t/dtQ)**2) * t/(dtQ**2) + (2*w0*dwP*cos(2*w0*(t-delay)) if t > delay and t < delay+dtP else 0)'
# + 2*w0*dwP*cos(2*w0*t) if t > delay and t < delay+dtP else 0)
# *np.heaviside(t-delay,1)*np.heaviside(dtP-(t-delay),1)
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQP]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQP + ')/(' + strWQP + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQP + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
def eval_H_QQ(psi, times, args, options=0):
"""evaluates the time evolution of the state psi in a harmonic oscillator with modulated frequency
frequency modulation consists of two gaussian quenches
the hamiltonian has an additional term which takes a force proportional to 1/w^2 into account
parameters:
psi: initial state for the time evolution (should have dimension n, see below)
times: list of times for which the state should be calculated
args: a dictionary with the following entries:
n: dimension of the hilbert space (or cutoff dimension for the numerical calculations)
w0: the unmodulated frequency
dw1, dt1: strength and duration of the first quench centered around t=0
dw2, dt2, delay: strength and duration of the second quench centered around t=delay
f0: proportionality constant of the additional force (unit 10^-15 N MHz^2)
options: possible options for the solver (mesolve)
returns:
a list of states (evolution of psi, one for each t in times)"""
n = args['n']
ad = create(n)
a = destroy(n)
# the following string describes the time-dependent frequency
strWQQ = 'w0 + dw1*exp(-0.5*(t/dt1)**2) + dw2*exp(-0.5*((t-delay)/dt2)**2)'
# time derivative of the time-dependent frequency
strDWQQ = '- dw1*exp(-0.5*(t/dt1)**2) * t/(dt1**2) - dw2*exp(-0.5*((t-delay)/dt2)**2) * (t-delay)/(dt2**2)'
# Hamiltonian in string format, see Silveri 2017 Quantum_systems_under_frequency_modulation
Hc = [[ad*a+0.5*qeye(n), strWQQ]]
Hc.append([a*a-ad*ad, '1j/4*(' + strDWQQ + ')/(' + strWQQ + ')'])
Hc.append([ad+a, '9*(f0/((' + strWQQ + ')**2) - f0/(w0**2))'])
# do the time evolution
if options==0:
results = mesolve(Hc, psi, times, args = args)
else:
results = mesolve(Hc, psi, times, args = args, options=options)
return(results)
def getParams(psi, calculate_nT = True, order_SD = False):
"""calculates for a given state psi (assumes that the thermal excitation is close to the vacuum):
alpha: the coherent displacement parameter
xi: the squeezing parameter
nBar: the mean photon number
nT: the photon number due to the thermal excitation DM_t
calculate_nT: bool, decides if nT will be calculated (takes time), default set to True
if calculate_nT = False, xi is only correct modulo complex conjugation, nT is set to 0!!!
order_SD: bool, changes order in displacement and squeezing
if True: assumes that psi can be written as DM_psi = S(xi) D(alpha) DM_t D(alpha).dag() S(xi).dag()
nT will automatically be calculated, regardless of calculate_nT (it is needed for the commutation of S and D)
if False: assumes that psi can be written as DM_psi = D(alpha) S(xi) DM_t S(xi).dag() D(alpha).dag()
returns alpha, xi, nBar, nT"""
n = psi.dims[0][0]
ad = create(n)
a = destroy(n)
x = (ad + a)
p = 1j*(ad - a)
xV = variance(x, psi)
pV = variance(p, psi)
# calculated by hand, assuming t = 0 (e.g. DM_t = |0><0|)
xiR = np.arcsinh(0.5*np.sqrt(xV + pV - 2 +0j)) # avoid NANs
if (np.cosh(xiR)*np.sinh(xiR))==0:
xiT1 = 0
else:
xiT1 = 0.25*(pV - xV)/(np.cosh(xiR)*np.sinh(xiR))
# cos is symmetric about x=0, therefore the inverse is +/- arccos(...)
# xiT = np.sign(xiT1)*np.arccos(xiT1)
xiT = np.sign(xiT1)*np.arccos(xiT1)
xi = xiR*np.exp(1j*xiT)
# alpha = 0.5*np.sqrt(xV + pV)
alpha = expect(a, psi)
# print(alpha)
nBar = np.abs(expect(num(n), psi))
# print(nBar)
# calculates the thermal excitation (assuming DM_psi = D S DM_t S.dag() D.dag())
if calculate_nT or order_SD:
psiT = squeeze(n, xi).dag()*displace(n, alpha).dag()*psi*displace(n, alpha)*squeeze(n, xi)
nT = np.abs(expect(num(n), psiT))
xic = np.conj(xi)
psiTc = squeeze(n, xic).dag()*displace(n, alpha).dag()*psi*displace(n, alpha)*squeeze(n, xic)
nTc = np.abs(expect(num(n), psiTc))
if nTc < nT:
nT, xi = nTc, xic
# formula used to commute D and S: https://en.wikipedia.org/wiki/Squeeze_operator
if order_SD:
alpha = alpha*np.cosh(xiR) + np.conj(alpha)*xi/xiR*np.sinh(xiR)
return(alpha, xi, nBar, nT)
else:
return(alpha, xi, nBar, 0)
def plotResults(times, result, args, calculate_nT = True, order_SD = False, nSkipp = 1, showProgress = False):
"""plots the development of the coherent displacement alpha,
squeezing parameter r, mean excitation number nBar, thermal excitation nT (see help(getParams))
together with the time-dependent frequency and the force
arguments:
times: list of times for which the values should be calculated
result: list of states (as returned from mesolve) corresponding to times
args: arguments given to H in the calculation of the dynamics
calculate_nT = True: bool, if nT should be calculated as well (takes time)
nSkipp = 1: stride; only every nSkipp-th state is processed and plotted (speeds it up)"""
t1 = time.time()
times = times[::nSkipp]
if 'omegaArgs' in args:
wList = args['omega'](times, args['omegaArgs'])
fList = args['f0']/wList**2 - args['f0']/args['omegaArgs'][0]**2
else:
wList = args['omega'](times, args)
fList = args['f0']/wList**2 - args['f0']/args['w0']**2
masterList = [[],[],[],[]]
nStates = len(result.states[::nSkipp])
progress = 0
for psi in result.states[::nSkipp]:
alpha, xi, nBar, nT = getParams(psi, calculate_nT = calculate_nT, order_SD = order_SD)
masterList[0].append(np.abs(alpha)) | masterList[2].append(nBar)
masterList[3].append(nT)
if showProgress:
progress += 1
print('\r', "Progress:", round(100*progress/nStates), "%, processing time:", round(time.time() - t1), "s", end = '')
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5)
fig.set_size_inches(15.5, 7.5, forward=True)
ax1.plot(times, masterList[0], label = r'$|\alpha |$')
ax1.legend()
ax2.plot(times, masterList[1], label = "r")
ax2.legend()
ax3.plot(times, masterList[2], label = "nBar")
if calculate_nT:
ax3.plot(times, masterList[3], label = "nT")
ax3.legend()
ax4.plot(times, wList, label = "w(t)")
ax4.legend()
ax5.plot(times, fList, label = r'$10^{-15} F/\hbar$ in N/(Js)')
ax5.legend()
plt.show()
return(0)
def scanAlphaXiN(H, psi0, times, args, valueList, whichVal, showProgress = True, skippInLoop = 0):
"""returns quentity of interest (alpha and/or xi and/or nBar) for a given list of valueList
arguments:
H: Hamiltonian which governs the time evolution
psi0: initial state
times: list of times used to calculate the time evolution
args: dictionary of arguments given to the hamiltonian (check help(H))
valueList: list of values for which the quantity should be calculated
whichVal: index of the entry in args['omegaArgs'] which should be changed according to valueList
skippInLoop: number of timesteps which should be calculated before the loop over valueList
this means that these timesteps are calculated only for the value args['omegaArgs'] given in args (not for all values in valueList)
Not yet implemented: scanA = True, scanX = False, scanN = False
"""
t1 = time.time()
alphaList = []
xiList = []
nList = []
# if skippInLoop > 0: calculate the first skippInLoop steps only once (and do the loop only over the rest)
if skippInLoop > 0:
times1 = times[:skippInLoop]
times2 = times[skippInLoop:]
results = mesolve(H, psi0, times1, args=args)
psi1 = results.states[-1]
else:
times2 = times
psi1 = psi0
# calculate time evolution for all values in valueList
for val in valueList:
args['omegaArgs'][whichVal] = val # change the value that needs changing
results = mesolve(H, psi1, times2, args=args) # calculate time evolution
psi2 = results.states[-1] # final state
alpha,xi,nBar,_ = getParams(psi2, False) # get alpha
# alpha = np.sqrt(np.abs(expect(x, psi2)**2) + np.abs(expect(p, psi2)**2)) # get alpha
alphaList.append(alpha) # save alpha
xiList.append(xi) # save xi
nList.append(nBar) # save nBar
if showProgress:
print('\r', "Progress: ", round(100*(val-valueList[0])/(valueList[-1]-valueList[0])), "%, processing time:", round(time.time() - t1), "s", end = '')
return(alphaList, xiList, nList)
# make a function to generate a random number of a two-poissonian distribution
def rand_2Poisson(mu1, mu2, P_down=0.5, G_leak=0.):
"""simulates the counts after one single experiment of probing the spin state of one single ion
assumes a two poissonian distribution P(k) = P_down P_mu2(k) + (1-P_down) P_mu1(k)
parameters:
mu1: expected counts for the dark state
mu2: expected counts for the bright state (=> mu1 < mu2)
P_down: probability of being in the bright state
G_leak: rate of leaking from the dark state into the bright state (default 0, no leaking)
(P(leak) = exp(-G_leak))
"""
ru = np.random.rand()
rleak = np.random.rand()
if P_down < ru:
# poisson for low counts
if rleak < 1-np.exp(-G_leak):
return -np.log(1-rleak)/G_leak*np.random.poisson(mu1) + (1+np.log(1-rleak)/G_leak)*np.random.poisson(mu2)
else:
return np.random.poisson(mu1)
else:
# poisson for high counts
return np.random.poisson(mu2)
# make a function to generate a random count from two ions, each following a two-poissonian distribution
def rand_4Poisson(mu_U, mu_D, alpha=0, P_D1=0.5, P_D2=0.5, sup = 1, G_leak=0.):
"""generates a random count, taken two bright ions, each emitting light following a two poissonian distribution
parameters:
mu_U: low count rate (when the ion is in its dark, spin up state)
mu_D: high count rate (when the ion is in its bright, spin down state)
alpha: counts, by which the counts of the two bright states differ
P_D1/P_D2: probability of ion one/two to be in the down (bright) state
sup: factor, by which the count rate of ion 1 is suppressed"""
ret = 0
# counts by ion 1
ret += sup*rand_2Poisson(mu_U, mu_D - 0.5*alpha, P_down=P_D1, G_leak=G_leak)
# counts by ion 2
ret += rand_2Poisson(mu_U, mu_D + 0.5*alpha, P_down=P_D2, G_leak=G_leak)
return ret
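# Illustrative sanity check (assumed values): with both ions certainly
# bright (P_D1 = P_D2 = 1), no suppression and no leaking, the mean count is
# (mu_D - 0.5*alpha) + (mu_D + 0.5*alpha) = 2*mu_D.
def _demo_rand_4Poisson(n_samples=5000):
    draws = [rand_4Poisson(1.0, 20.0, alpha=4, P_D1=1, P_D2=1) for _ in range(n_samples)]
    return np.mean(draws)  # expected near 40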
def rand_4Poisson_hist(mu_U, mu_D, alpha=0, P_D1=0.5, P_D2=0.5, sup = 1, G_leak=0., repetitions=1000, parallel=False):
"""gives a list of random numbers (default 1000) distributed by rand_4Poisson"""
# print("ddd")
if parallel:
hist = Parallel(n_jobs=8)(delayed(rand_4Poisson)(mu_U, mu_D, alpha, P_D1, P_D2, sup, G_leak) for i in range(repetitions))
return hist
else:
hist = [rand_4Poisson(mu_U, mu_D, alpha, P_D1, P_D2, sup, G_leak) for i in range(repetitions)]
return hist | masterList[1].append(np.abs(xi)) | random_line_split |
DanhSachChoDuyet.page.ts | import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { UtilService } from '../../../../service/util.service';
import { AppConsts } from '../../../../shared/AppConsts';
import * as moment from 'moment';
import { AlertController, LoadingController } from '@ionic/angular';
// import { AnnouncementServiceProxy, TokenAuthServiceProxy, WorkTimeServiceProxy, HrWorkTime_ENTITY } from '../../../../shared/service-proxies/service-proxies';
import { AnnouncementServiceProxy, HrWorkTime_ENTITY, HrWorkTimeRequest_ENTITY, SendAnnouncement_ENTITY, TokenAuthServiceProxy, WorkTimeServiceProxy, HrWorkTime } from '../../../../shared/service-proxies/service-proxies';
import {LocalStorageService } from '../../../core/local-storage/local-storage.service';
import 'rxjs/add/operator/timeout';
import { Plugins } from '@capacitor/core';
import { MenuController } from '@ionic/angular';
import { SignalRService } from '../../../../service/signalR.service';
// import { Subscription } from 'rxjs';
import { HttpClient, HttpErrorResponse } from '@angular/common/http';
import { AppSettings } from '../../../../AppSettings';
const { App } = Plugins;
export const AUTH_KEY = 'AUTH';
@Component({
selector: 'danhsachchoduyet',
templateUrl: 'DanhSachChoDuyet.page.html',
styleUrls: ['DanhSachChoDuyet.page.scss'],
})
export class DanhSachChoDuyet implements OnInit {
userId: number;
branchType: any;
maChamCong: any;
moment = moment();
checkPauseResume = false;
hoVaTen: any;
creatorUserId: number;
isLoading = false;
tenCty: any;
globalUrlAPI : string = AppConsts.remoteServiceBaseUrl;
image: string;
avatar: string;
receiveId: any;
masterSelected:boolean = false;
checkList = [];
checkedList: any = [];
totalUnred: number = 0;
page: number;
pageSize: number = 10;
constructor(
private _utilService: UtilService,
private _router: Router,
public _alertController: AlertController,
private _loadingCtrl: LoadingController,
private _localStorageService: LocalStorageService,
private _tokenAuthServiceProxy: TokenAuthServiceProxy,
public _menu: MenuController,
public _signalRSevice: SignalRService,
private _announcementServiceProxy: AnnouncementServiceProxy,
private workTimeServiceProxy: WorkTimeServiceProxy,
private http: HttpClient,
) {
this.userId = this._localStorageService.getItem(AUTH_KEY).userId;
this._signalRSevice.retrieveMappedObject().subscribe(
(message) => {
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
}
});
});
}
ngOnInit() {
this.receiveId = <string>this._localStorageService.getItem(AUTH_KEY).userId;
this.hoVaTen = this._localStorageService.getItem(AUTH_KEY).hoVaTen;
this.tenCty = this._localStorageService.getItem(AUTH_KEY).tenCty;
this.avatar = this._localStorageService.getItem(AUTH_KEY).image;
}
ionViewWillEnter() | kTimeUnread(){
this.page = 1;
this.workTimeServiceProxy.getWorkTimeUnCheck(this.receiveId, this.page, this.pageSize).subscribe({
next: (res: any) => {
this.checkList = res;
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))){
this.checkList[index].isSelected = false;
}
this.masterSelected = false;
this.dismissLoading();
},
error: (err: any) => {
this.dismissLoading();
console.log(err);
}
});
}
viewDetail(id: any){
if (id) {
this._router.navigate(['app/main/quanly-congtac'], {
queryParams: { id: id }
});
}
}
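// duyetDon ("approve request"): loads one request's details by id, then submits the approval.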
duyetDon(id: any){
this.workTimeServiceProxy.getWorkTimeDetail(id).subscribe({
next: async (res: HrWorkTime) => {
let thongtinWorkTime : HrWorkTime = new HrWorkTime();
thongtinWorkTime = res;
await this.onCreateOrEdit(id, thongtinWorkTime);
},
error: (err: HttpErrorResponse) => {
this.dismissLoading();
console.log(err);
}
});
}
onCreateOrEdit(id: any, thongtinWorkTime: HrWorkTime){
this.loadingDefault();
let formData = new FormData;
formData.append('Id',id);
formData.append('NextApproverId', thongtinWorkTime.nextApproverId.toString());
formData.append('Reasons', thongtinWorkTime.reasons);
formData.append('Image', this.avatar);
formData.append('HoVaTen', this.hoVaTen);
formData.append('TenCty', this.tenCty);
formData.append('DocumentType', thongtinWorkTime.documentType);
formData.append('CreatorUserId', thongtinWorkTime.creatorUserId.toString());
formData.append('Email', thongtinWorkTime.emailAddress);
if(thongtinWorkTime.truongNhomId) formData.append('TruongNhomId', thongtinWorkTime.truongNhomId.toString());
if(thongtinWorkTime.truongPhongId) formData.append('TruongPhongId', thongtinWorkTime.truongPhongId.toString());
if(thongtinWorkTime.giamDocKhoiId) formData.append('GiamDocKhoiId', thongtinWorkTime.giamDocKhoiId.toString());
if(thongtinWorkTime.tcnsId) formData.append('TcnsId', thongtinWorkTime.tcnsId.toString());
if(thongtinWorkTime.giamDocDieuHanhId) formData.append('GiamDocDieuHanhId', thongtinWorkTime.giamDocDieuHanhId.toString());
formData.append('TimeFrom', thongtinWorkTime.timeFrom.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeFrom.clone().locale('vi').format('HH:mm:ss'));
formData.append('TimeTo', thongtinWorkTime.timeTo.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeTo.clone().locale('vi').format('HH:mm:ss'));
formData.append('DocumentType', thongtinWorkTime.documentType);
this.http.post(AppSettings.API_ENDPOINT + "/api/WorkTime/CreateOrEditForMobile", formData).subscribe({
next: async(res: any) => {
let sendUserDto: SendAnnouncement_ENTITY = new SendAnnouncement_ENTITY();
sendUserDto = res.result;
this._announcementServiceProxy.sendMessageToClient(sendUserDto).subscribe({
next: (res) => {}
});
this.getAllWorkTimeUnread();
this.showAlertController('Bạn đã duyệt đơn thành công!');
}, error: (err: any) => {
console.log(err);
this.showAlertController('Lỗi xuất hiện, vui lòng kiểm tra lại');
},
});
}
async showAlertController(message: string){
this.dismissLoading();
const alertController = this._alertController;
let alert = await alertController.create({
header: 'Thông báo',
message: message,
buttons: ['OK']
});
await alert.present();
}
async loadingDefault(){
this.isLoading = true;
return await this._loadingCtrl.create({
// message: 'Đang xử lý........',
// duration: 3000
}).then(a => {
a.present().then(() => {
if (!this.isLoading) {
a.dismiss().then(() => {});
}
});
});
// loading.present();
}
async dismissLoading() {
this.isLoading = false;
return await this._loadingCtrl.dismiss().then(() => {});
}
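// revieceAll: batch-approves every request the user has ticked, in one API call.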
async revieceAll(){
this.loadingDefault();
let hrWorkTimeDtoRequestArray: HrWorkTimeRequest_ENTITY[] = [];
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))) {
// only process rows the user has selected
if(value.isSelected){
let hrWorkTimeDtoRequest = new HrWorkTimeRequest_ENTITY();
hrWorkTimeDtoRequest.id = value.id;
hrWorkTimeDtoRequest.hoVaTen = this.hoVaTen;
hrWorkTimeDtoRequest.tenCty = this.tenCty;
if(value.truongNhomId) hrWorkTimeDtoRequest.truongNhomId = value.truongNhomId;
if(value.truongPhongId) hrWorkTimeDtoRequest.truongPhongId = value.truongPhongId;
if(value.giamDocKhoiId) hrWorkTimeDtoRequest.giamDocKhoiId = value.giamDocKhoiId;
if(value.tcnsId) hrWorkTimeDtoRequest.tcnsId = value.tcnsId;
if(value.giamDocDieuHanhId) hrWorkTimeDtoRequest.giamDocDieuHanhId = value.giamDocDieuHanhId;
hrWorkTimeDtoRequest.timeFrom = value.timeFrom;
hrWorkTimeDtoRequest.timeTo = value.timeTo;
hrWorkTimeDtoRequest.documentType = value.documentType;
hrWorkTimeDtoRequest.creatorUserId = value.creatorUserId;
hrWorkTimeDtoRequest.reasons = value.reasons;
hrWorkTimeDtoRequest.status = value.status;
hrWorkTimeDtoRequest.nextApproverId = value.nextApproverId;
hrWorkTimeDtoRequest.image = this.avatar;
hrWorkTimeDtoRequestArray.push(hrWorkTimeDtoRequest);
}
}
this.workTimeServiceProxy.editAllForMobile(hrWorkTimeDtoRequestArray).subscribe({
next: (res: any) => {
let sendUserDto: SendAnnouncement_ENTITY = new SendAnnouncement_ENTITY();
sendUserDto = res;
this._announcementServiceProxy.sendMessageToClient(sendUserDto).subscribe({
next: (res) => {}
});
this.getAllWorkTimeUnread();
this.showAlertController('Bạn đã duyệt đơn thành công!');
},
error: (err:any) => {
this.dismissLoading();
console.log(err);
}
})
}
loadData(event) {
setTimeout(() => {
event.target.complete();
// App logic to determine if all data is loaded
// and disable the infinite scroll
this.page += 1;
this.workTimeServiceProxy.getWorkTimeUnCheck(this.receiveId, this.page, this.pageSize).subscribe({
next: (res: any) => {
let list = res;
for (const { index, value } of list.map((value, index) => ({ index, value }))){
list[index].isSelected = false;
list[index].userReadId = this.creatorUserId;
this.checkList.push(list[index]);
}
},
error: (err) => {
console.log(err);
}
});
if (this.checkList.length == 100) {
event.target.disabled = true;
}
}, 500);
}
// Select all / Deselect all
checkUncheckAll() {
for (var i = 0; i < this.checkList.length; i++) {
this.checkList[i].isSelected = this.masterSelected;
}
this.getCheckedItemList();
}
isAllSelected(i: any) {
this.masterSelected = this.checkList.every(function(item:any) {
return item.isSelected == true;
})
this.getCheckedItemList();
}
getCheckedItemList(){
this.checkedList = [];
for (var i = 0; i < this.checkList.length; i++) {
if(this.checkList[i].isSelected)
this.checkedList.push(this.checkList[i]);
}
// this.checkedList = JSON.stringify(this.checkedList);
}
public greaterThan(sub: number, num: number): boolean {
return sub <= num;
}
backToHome() {
setTimeout(() => {
this._router.navigateByUrl('app/main/home');
}, 1000);
}
}
| {
this.loadingDefault();
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
},
error: (err) => {
this.showAlertController('Lỗi kết nối mạng, vui lòng thử lại.');
return;
}
});
this.getAllWorkTimeUnread();
}
getAllWor | identifier_body |
DanhSachChoDuyet.page.ts | import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { UtilService } from '../../../../service/util.service';
import { AppConsts } from '../../../../shared/AppConsts';
import * as moment from 'moment';
import { AlertController, LoadingController } from '@ionic/angular';
// import { AnnouncementServiceProxy, TokenAuthServiceProxy, WorkTimeServiceProxy, HrWorkTime_ENTITY } from '../../../../shared/service-proxies/service-proxies';
import { AnnouncementServiceProxy, HrWorkTime_ENTITY, HrWorkTimeRequest_ENTITY, SendAnnouncement_ENTITY, TokenAuthServiceProxy, WorkTimeServiceProxy, HrWorkTime } from '../../../../shared/service-proxies/service-proxies';
import {LocalStorageService } from '../../../core/local-storage/local-storage.service';
import 'rxjs/add/operator/timeout';
import { Plugins } from '@capacitor/core';
import { MenuController } from '@ionic/angular';
import { SignalRService } from '../../../../service/signalR.service';
// import { Subscription } from 'rxjs';
import { HttpClient, HttpErrorResponse } from '@angular/common/http';
import { AppSettings } from '../../../../AppSettings';
const { App } = Plugins;
export const AUTH_KEY = 'AUTH';
@Component({
selector: 'danhsachchoduyet',
templateUrl: 'DanhSachChoDuyet.page.html',
styleUrls: ['DanhSachChoDuyet.page.scss'],
})
export class DanhSachChoDuyet implements OnInit {
userId: number;
branchType: any;
maChamCong: any;
moment = moment();
checkPauseResume = false;
hoVaTen: any;
creatorUserId: number;
isLoading = false;
tenCty: any;
globalUrlAPI : string = AppConsts.remoteServiceBaseUrl;
image: string;
avatar: string;
receiveId: any;
masterSelected:boolean = false;
checkList = [];
checkedList: any = [];
totalUnred: number = 0;
page: number;
pageSize: number = 10;
constructor(
private _utilService: UtilService,
private _router: Router,
public _alertController: AlertController,
private _loadingCtrl: LoadingController,
private _localStorageService: LocalStorageService,
private _tokenAuthServiceProxy: TokenAuthServiceProxy,
public _menu: MenuController,
public _signalRSevice: SignalRService,
private _announcementServiceProxy: AnnouncementServiceProxy,
private workTimeServiceProxy: WorkTimeServiceProxy,
private http: HttpClient,
) {
this.userId = this._localStorageService.getItem(AUTH_KEY).userId;
this._signalRSevice.retrieveMappedObject().subscribe(
(message) => {
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
}
});
});
}
ngOnInit() {
this.receiveId = <string>this._localStorageService.getItem(AUTH_KEY).userId;
this.hoVaTen = this._localStorageService.getItem(AUTH_KEY).hoVaTen;
this.tenCty = this._localStorageService.getItem(AUTH_KEY).tenCty;
this.avatar = this._localStorageService.getItem(AUTH_KEY).image;
}
ionViewWillEnter(){
this.loadingDefault();
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
},
error: (err) => {
this.showAlertController('Lỗi kết nối mạng, vui lòng thử lại.');
return;
}
});
this.getAllWorkTimeUnread();
}
getAllWorkTimeUnread(){
this.page = 1;
this.workTimeServiceProxy.getWorkTimeUnCheck(this.receiveId, this.page, this.pageSize).subscribe({
next: (res: any) => {
this.checkList = res;
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))){
this.checkList[index].isSelected = false;
}
this.masterSelected = false;
this.dismissLoading();
},
error: (err: any) => {
this.dismissLoading();
console.log(err);
}
});
}
viewDetail(id: any){
if (id) {
this._router.navigate(['app/main/quanly-congtac'], {
queryParams: { id: id }
});
}
}
duyetDon(id: any){
this.workTimeServiceProxy.getWorkTimeDetail(id).subscribe({
next: async (res: HrWorkTime) => {
let thongtinWorkTime : HrWorkTime = new HrWorkTime();
thongtinWorkTime = res;
await this.onCreateOrEdit(id, thongtinWorkTime);
},
error: (err: HttpErrorResponse) => {
this.dismissLoading();
console.log(err);
}
});
}
onCreateOrEdit(id: any, thongtinWorkTime: HrWorkTime){
this.loadingDefault();
let formData = new FormData;
formData.append('Id',id);
formData.append('NextApproverId', thongtinWorkTime.nextApproverId.toString());
formData.append('Reasons', thongtinWorkTime.reasons);
formData.append('Image', this.avatar);
formData.append('HoVaTen', this.hoVaTen);
formData.append('TenCty', this.tenCty);
formData.append('DocumentType', thongtinWorkTime.documentType);
formData.append('CreatorUserId', thongtinWorkTime.creatorUserId.toString());
formData.append('Email', thongtinWorkTime.emailAddress);
if(thongtinWorkTime.truongNhomId) formData.append('TruongNhomId', thongtinWorkTime.truongNhomId.toString());
if(thongtinWorkTime.truongPhongId) formData.append('TruongPhongId', thongtinWorkTime.truongPhongId.toString());
if(thongtinWorkTime.giamDocKhoiId) formData.append('GiamDocKhoiId', thongtinWorkTime.giamDocKhoiId.toString());
if(thongtinWorkTime.tcnsId) formData.append('TcnsId', thongtinWorkTime.tcnsId.toString());
if(thongtinWorkTime.giamDocDieuHanhId) formData.append('GiamDocDieuHanhId', thongtinWorkTime.giamDocDieuHanhId.toString());
formData.append('TimeFrom', thongtinWorkTime.timeFrom.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeFrom.clone().locale('vi').format('HH:mm:ss'));
formData.append('TimeTo', thongtinWorkTime.timeTo.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeTo.clone().locale('vi').format('HH:mm:ss'));
formData.append('DocumentType', thongtinWorkTime.documentType);
this.http.post(AppSettings.API_ENDPOINT + "/api/WorkTime/CreateOrEditForMobile", formData).subscribe({
next: async(res: any) => {
let sendUserDto: SendAnnouncement_ENTITY = new SendAnnouncement_ENTITY();
sendUserDto = res.result;
this._announcementServiceProxy.sendMessageToClient(sendUserDto).subscribe({
next: (res) => {}
});
this.getAllWorkTimeUnread();
this.showAlertController('Bạn đã duyệt đơn thành công!');
}, error: (err: any) => {
console.log(err);
this.showAlertController('Lỗi xuất hiện, vui lòng kiểm tra lại');
},
});
}
async showAlertController(message: string){
this.dismissLoading();
const alertController = this._alertController;
let alert = await alertController.create({
header: 'Thông báo',
message: message, | }
async loadingDefault(){
this.isLoading = true;
return await this._loadingCtrl.create({
// message: 'Đang xử lý........',
// duration: 3000
}).then(a => {
a.present().then(() => {
if (!this.isLoading) {
a.dismiss().then(() => {});
}
});
});
// loading.present();
}
async dismissLoading() {
this.isLoading = false;
return await this._loadingCtrl.dismiss().then(() => {});
}
async revieceAll(){
this.loadingDefault();
let hrWorkTimeDtoRequestArray: HrWorkTimeRequest_ENTITY[] = [];
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))) {
// only process rows the user has selected
if(value.isSelected){
let hrWorkTimeDtoRequest = new HrWorkTimeRequest_ENTITY();
hrWorkTimeDtoRequest.id = value.id;
hrWorkTimeDtoRequest.hoVaTen = this.hoVaTen;
hrWorkTimeDtoRequest.tenCty = this.tenCty;
if(value.truongNhomId) hrWorkTimeDtoRequest.truongNhomId = value.truongNhomId;
if(value.truongPhongId) hrWorkTimeDtoRequest.truongPhongId = value.truongPhongId;
if(value.giamDocKhoiId) hrWorkTimeDtoRequest.giamDocKhoiId = value.giamDocKhoiId;
if(value.tcnsId) hrWorkTimeDtoRequest.tcnsId = value.tcnsId;
if(value.giamDocDieuHanhId) hrWorkTimeDtoRequest.giamDocDieuHanhId = value.giamDocDieuHanhId;
hrWorkTimeDtoRequest.timeFrom = value.timeFrom;
hrWorkTimeDtoRequest.timeTo = value.timeTo;
hrWorkTimeDtoRequest.documentType = value.documentType;
hrWorkTimeDtoRequest.creatorUserId = value.creatorUserId;
hrWorkTimeDtoRequest.reasons = value.reasons;
hrWorkTimeDtoRequest.status = value.status;
hrWorkTimeDtoRequest.nextApproverId = value.nextApproverId;
hrWorkTimeDtoRequest.image = this.avatar;
hrWorkTimeDtoRequestArray.push(hrWorkTimeDtoRequest);
}
}
this.workTimeServiceProxy.editAllForMobile(hrWorkTimeDtoRequestArray).subscribe({
next: (res: any) => {
let sendUserDto: SendAnnouncement_ENTITY = new SendAnnouncement_ENTITY();
sendUserDto = res;
this._announcementServiceProxy.sendMessageToClient(sendUserDto).subscribe({
next: (res) => {}
});
this.getAllWorkTimeUnread();
this.showAlertController('Bạn đã duyệt đơn thành công!');
},
error: (err:any) => {
this.dismissLoading();
console.log(err);
}
})
}
loadData(event) {
setTimeout(() => {
event.target.complete();
// App logic to determine if all data is loaded
// and disable the infinite scroll
this.page += 1;
this.workTimeServiceProxy.getWorkTimeUnCheck(this.receiveId, this.page, this.pageSize).subscribe({
next: (res: any) => {
let list = res;
for (const { index, value } of list.map((value, index) => ({ index, value }))){
list[index].isSelected = false;
list[index].userReadId = this.creatorUserId;
this.checkList.push(list[index]);
}
},
error: (err) => {
console.log(err);
}
});
if (this.checkList.length == 100) {
event.target.disabled = true;
}
}, 500);
}
// Select all / Deselect all
checkUncheckAll() {
for (var i = 0; i < this.checkList.length; i++) {
this.checkList[i].isSelected = this.masterSelected;
}
this.getCheckedItemList();
}
isAllSelected(i: any) {
this.masterSelected = this.checkList.every(function(item:any) {
return item.isSelected == true;
})
this.getCheckedItemList();
}
getCheckedItemList(){
this.checkedList = [];
for (var i = 0; i < this.checkList.length; i++) {
if(this.checkList[i].isSelected)
this.checkedList.push(this.checkList[i]);
}
// this.checkedList = JSON.stringify(this.checkedList);
}
public greaterThan(sub: number, num: number): boolean {
return sub <= num;
}
backToHome() {
setTimeout(() => {
this._router.navigateByUrl('app/main/home');
}, 1000);
}
} | buttons: ['OK']
});
await alert.present(); | random_line_split |
DanhSachChoDuyet.page.ts | import { Component, OnInit } from '@angular/core';
import { Router } from '@angular/router';
import { UtilService } from '../../../../service/util.service';
import { AppConsts } from '../../../../shared/AppConsts';
import * as moment from 'moment';
import { AlertController, LoadingController } from '@ionic/angular';
// import { AnnouncementServiceProxy, TokenAuthServiceProxy, WorkTimeServiceProxy, HrWorkTime_ENTITY } from '../../../../shared/service-proxies/service-proxies';
import { AnnouncementServiceProxy, HrWorkTime_ENTITY, HrWorkTimeRequest_ENTITY, SendAnnouncement_ENTITY, TokenAuthServiceProxy, WorkTimeServiceProxy, HrWorkTime } from '../../../../shared/service-proxies/service-proxies';
import {LocalStorageService } from '../../../core/local-storage/local-storage.service';
import 'rxjs/add/operator/timeout';
import { Plugins } from '@capacitor/core';
import { MenuController } from '@ionic/angular';
import { SignalRService } from '../../../../service/signalR.service';
// import { Subscription } from 'rxjs';
import { HttpClient, HttpErrorResponse } from '@angular/common/http';
import { AppSettings } from '../../../../AppSettings';
const { App } = Plugins;
export const AUTH_KEY = 'AUTH';
@Component({
selector: 'danhsachchoduyet',
templateUrl: 'DanhSachChoDuyet.page.html',
styleUrls: ['DanhSachChoDuyet.page.scss'],
})
export class DanhSachChoDuyet implements OnInit {
userId: number;
branchType: any;
maChamCong: any;
moment = moment();
checkPauseResume = false;
hoVaTen: any;
creatorUserId: number;
isLoading = false;
tenCty: any;
globalUrlAPI : string = AppConsts.remoteServiceBaseUrl;
image: string;
avatar: string;
receiveId: any;
masterSelected:boolean = false;
checkList = [];
checkedList: any = [];
totalUnred: number = 0;
page: number;
pageSize: number = 10;
constructor(
private _utilService: UtilService,
private _router: Router,
public _alertController: AlertController,
private _loadingCtrl: LoadingController,
private _localStorageService: LocalStorageService,
private _tokenAuthServiceProxy: TokenAuthServiceProxy,
public _menu: MenuController,
public _signalRSevice: SignalRService,
private _announcementServiceProxy: AnnouncementServiceProxy,
private workTimeServiceProxy: WorkTimeServiceProxy,
private http: HttpClient,
) {
this.userId = this._localStorageService.getItem(AUTH_KEY).userId;
this._signalRSevice.retrieveMappedObject().subscribe(
(message) => {
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
}
});
});
}
ngOnInit() {
this.receiveId = <string>this._localStorageService.getItem(AUTH_KEY).userId;
this.hoVaTen = this._localStorageService.getItem(AUTH_KEY).hoVaTen;
this.tenCty = this._localStorageService.getItem(AUTH_KEY).tenCty;
this.avatar = this._localStorageService.getItem(AUTH_KEY).image;
}
ionViewWillEnter(){
this.loadingDefault();
this._announcementServiceProxy.getAllUnRead(this.userId).subscribe({
next: (res) => {
if (res) {
this.totalUnred = res.length;
}
},
error: (err) => {
this.showAlertController('Lỗi kết nối mạng, vui lòng thử lại.');
return;
}
});
this.getAllWorkTimeUnread();
}
getAllWorkTim | page = 1;
this.workTimeServiceProxy.getWorkTimeUnCheck(this.receiveId, this.page, this.pageSize).subscribe({
next: (res: any) => {
this.checkList = res;
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))){
this.checkList[index].isSelected = false;
}
this.masterSelected = false;
this.dismissLoading();
},
error: (err: any) => {
this.dismissLoading();
console.log(err);
}
});
}
viewDetail(id: any){
if (id) {
this._router.navigate(['app/main/quanly-congtac'], {
queryParams: { id: id }
});
}
}
duyetDon(id: any){
this.workTimeServiceProxy.getWorkTimeDetail(id).subscribe({
next: async (res: HrWorkTime) => {
let thongtinWorkTime : HrWorkTime = new HrWorkTime();
thongtinWorkTime = res;
await this.onCreateOrEdit(id, thongtinWorkTime);
},
error: (err: HttpErrorResponse) => {
this.dismissLoading();
console.log(err);
}
});
}
onCreateOrEdit(id: any, thongtinWorkTime: HrWorkTime){
this.loadingDefault();
let formData = new FormData;
formData.append('Id',id);
formData.append('NextApproverId', thongtinWorkTime.nextApproverId.toString());
formData.append('Reasons', thongtinWorkTime.reasons);
formData.append('Image', this.avatar);
formData.append('HoVaTen', this.hoVaTen);
formData.append('TenCty', this.tenCty);
formData.append('DocumentType', thongtinWorkTime.documentType);
formData.append('CreatorUserId', thongtinWorkTime.creatorUserId.toString());
formData.append('Email', thongtinWorkTime.emailAddress);
if(thongtinWorkTime.truongNhomId) formData.append('TruongNhomId', thongtinWorkTime.truongNhomId.toString());
if(thongtinWorkTime.truongPhongId) formData.append('TruongPhongId', thongtinWorkTime.truongPhongId.toString());
if(thongtinWorkTime.giamDocKhoiId) formData.append('GiamDocKhoiId', thongtinWorkTime.giamDocKhoiId.toString());
if(thongtinWorkTime.tcnsId) formData.append('TcnsId', thongtinWorkTime.tcnsId.toString());
if(thongtinWorkTime.giamDocDieuHanhId) formData.append('GiamDocDieuHanhId', thongtinWorkTime.giamDocDieuHanhId.toString());
formData.append('TimeFrom', thongtinWorkTime.timeFrom.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeFrom.clone().locale('vi').format('HH:mm:ss'));
formData.append('TimeTo', thongtinWorkTime.timeTo.clone().locale('vi').format('YYYY-MM-DD') +'T'+ thongtinWorkTime.timeTo.clone().locale('vi').format('HH:mm:ss'));
formData.append('DocumentType', thongtinWorkTime.documentType);
this.http.post(AppSettings.API_ENDPOINT + "/api/WorkTime/CreateOrEditForMobile", formData).subscribe({
next: async(res: any) => {
let sendUserDto: SendAnnouncement_ENTITY = new SendAnnouncement_ENTITY();
sendUserDto = res.result;
this._announcementServiceProxy.sendMessageToClient(sendUserDto).subscribe({
next: (res) => {}
});
this.getAllWorkTimeUnread();
this.showAlertController('Bạn đã duyệt đơn thành công!');
}, error: (err: any) => {
console.log(err);
this.showAlertController('Lỗi xuất hiện, vui lòng kiểm tra lại');
},
});
}
async showAlertController(message: string){
this.dismissLoading();
const alertController = this._alertController;
let alert = await alertController.create({
header: 'Thông báo',
message: message,
buttons: ['OK']
});
await alert.present();
}
async loadingDefault(){
this.isLoading = true;
return await this._loadingCtrl.create({
// message: 'Đang xử lý........',
// duration: 3000
}).then(a => {
a.present().then(() => {
if (!this.isLoading) {
a.dismiss().then(() => {});
}
});
});
// loading.present();
}
async dismissLoading() {
this.isLoading = false;
return await this._loadingCtrl.dismiss().then(() => {});
}
async revieceAll(){
this.loadingDefault();
let hrWorkTimeDtoRequestArray: HrWorkTimeRequest_ENTITY[] = [];
for (const { index, value } of this.checkList.map((value, index) => ({ index, value }))) {
// only process rows the user has selected
if(value.isSelected){
let hrWorkTimeDtoRequest = new HrWorkTimeRequest_ENTITY();
hrWorkTimeDtoRequest.id = value.id;
hrWorkTimeDtoRequest.hoVaTen = this.hoVaTen;
hrWorkTimeDtoRequest.tenCty = this.tenCty;
if(value.truongNhomId) hrWorkTimeDtoRequest.truongNhomId = value.truongNhomId;
if(value.truongPhongId) hrWorkTimeDtoRequest.truongPhongId = value.truongPhongId;
if(value.giamDocKhoiId) hrWorkTimeDtoRequest.giamDocKhoiId = value.giamDocKhoiId;
if(value.tcnsId) hrWorkTimeDtoRequest.tcnsId = value.tcnsId;
if(value.giamDocDieuHanhId) hrWorkTimeDtoRequest.giamDocDieuHanhId = value.giamDocDieuHanhId;
hrWorkTimeDtoRequest.timeFrom = value.timeFrom;
hrWorkTimeDtoRequest.timeTo = value.timeTo;
hrWorkTimeDtoRequest.documentType = value.documentType;
hrWorkTimeDtoRequest.creatorUserId = value.creatorUserId;
hrWorkTimeDtoRequest.reasons = value.reasons;
hrWorkTimeDtoRequest.status = value.status;
hrWorkTimeDtoRequest.nextApproverId = value.nextApproverId;
hrWorkTimeDtoRequest.image = this.avatar;
hrWorkTimeDtoRequestArray.push(hrWorkTimeDtoRequest);
}
}
this.workTimeServiceProxy.editAllForMobile(hrWorkTimeDtoRequestArray).subscribe({
next: (res: any) => {
let sendUserDto: SendAnnouncement_ENTITY = new SendAnnouncement_ENTITY();
sendUserDto = res;
this._announcementServiceProxy.sendMessageToClient(sendUserDto).subscribe({
next: (res) => {}
});
this.getAllWorkTimeUnread();
this.showAlertController('Bạn đã duyệt đơn thành công!');
},
error: (err:any) => {
this.dismissLoading();
console.log(err);
}
})
}
loadData(event) {
setTimeout(() => {
event.target.complete();
// App logic to determine if all data is loaded
// and disable the infinite scroll
this.page += 1;
this.workTimeServiceProxy.getWorkTimeUnCheck(this.receiveId, this.page, this.pageSize).subscribe({
next: (res: any) => {
let list = res;
for (const { index, value } of list.map((value, index) => ({ index, value }))){
list[index].isSelected = false;
list[index].userReadId = this.creatorUserId;
this.checkList.push(list[index]);
}
},
error: (err) => {
console.log(err);
}
});
if (this.checkList.length == 100) {
event.target.disabled = true;
}
}, 500);
}
// Select all / Deselect all
checkUncheckAll() {
for (var i = 0; i < this.checkList.length; i++) {
this.checkList[i].isSelected = this.masterSelected;
}
this.getCheckedItemList();
}
isAllSelected(i: any) {
this.masterSelected = this.checkList.every(function(item:any) {
return item.isSelected == true;
})
this.getCheckedItemList();
}
getCheckedItemList(){
this.checkedList = [];
for (var i = 0; i < this.checkList.length; i++) {
if(this.checkList[i].isSelected)
this.checkedList.push(this.checkList[i]);
}
// this.checkedList = JSON.stringify(this.checkedList);
}
public greaterThan(sub: number, num: number): boolean {
return sub <= num;
}
backToHome() {
setTimeout(() => {
this._router.navigateByUrl('app/main/home');
}, 1000);
}
}
| eUnread(){
this. | identifier_name |
main.rs | //! Urbit Nock 4K data structures, with basic parsing, and evaluation.
//! <https://urbit.org/docs/learn/arvo/nock/>
#![feature(never_type, exact_size_is_empty)]
use byteorder::{ByteOrder, LittleEndian};
use derive_more::Constructor;
use env_logger;
use log::{debug, error, info, log, trace, warn};
use std::{clone::Clone, error::Error, fmt::Display, rc::Rc};
pub fn | () -> Result<(), Box<dyn std::error::Error>> {
env_logger::try_init()?;
let subject = list(&[cell(atom(11), atom(12)), atom(2), atom(3), atom(4), atom(5)]);
let formula = cell(atom(0), atom(7));
info!("subject: {}", subject);
info!("formula: {}", formula);
let product = nock(subject.clone(), formula.try_cell()?)?;
info!("product: {}.", product);
println!("*[{} {}] = {}", subject, formula, product);
Ok(())
}
/* Data structures * * * * * * * * * * * * * * * * * */
/// A Nock Noun can be any Nock value, either an Atom or a Cell.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub enum Noun {
Atom(Atom),
Cell(Cell),
}
/// A Nock Cell is an ordered pair of Nouns, implemented as a tuple.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Cell {
head: Rc<Noun>,
tail: Rc<Noun>,
}
/// A Nock Atom is an arbitrarily-large unsigned integer.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub struct Atom {
bytes_le: Vec<u8>,
}
/// Evaluating a Nock expression that contains an invalid, undefined, infinite,
/// nonterminating, or irreducible subexpression produces a Crash.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Crash {
message: String,
}
/// The result of evaluating/nocking/tarring a Noun: a product Noun or a Crash.
pub type NockResult = Result<Rc<Noun>, Crash>;
/* Atom encoding and decoding * * * * * * * * * * * * * * * * * */
impl Atom {
/// Construct a new Atom from a little-endian byte slice.
pub fn new(bytes_le: &[u8]) -> Self {
// Strip irrelevent trailing zero bytes to normalize Atom for Hash and Eq.
let mut len = bytes_le.len();
while len > 0 && bytes_le[len - 1] == 0x00 {
len -= 1;
}
Self { bytes_le: bytes_le[..len].to_vec() }
}
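// Normalization makes e.g. Atom::new(&[7, 0, 0]) equal to Atom::new(&[7]).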
/// Whether this Atom is zero, which is the truthy value in Nock.
pub fn is_zero(&self) -> bool {
self.bytes_le.len() == 0
}
/// Returns the value of this atom as a little-endian byte slice.
pub fn as_bytes_le(&self) -> &[u8] {
&self.bytes_le
}
/// Returns the value of the atom as Some(u128) if it fits, else None.
pub fn try_u128(&self) -> Option<u128> {
if self.as_bytes_le().is_empty() {
Some(0)
} else if self.bytes_le.len() <= 16 {
Some(LittleEndian::read_uint128(&self.bytes_le, self.bytes_le.len()))
} else {
None
}
}
}
impl From<bool> for Atom {
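// Nock booleans: 0 encodes "yes"/true and 1 encodes "no"/false.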
fn from(b: bool) -> Atom {
if b {
Atom::new(&[0])
} else {
Atom::new(&[1])
}
}
}
impl From<u128> for Atom {
fn from(n: u128) -> Atom {
let mut bytes = [0u8; 16];
LittleEndian::write_u128(&mut bytes, n);
Atom::new(&bytes)
}
}
/* Noun construction conveniences * * * * * * * * * * * * * * * * * */
/// Creates a new cons Cell Noun containing two existing Nouns.
pub fn cell(left: Rc<Noun>, right: Rc<Noun>) -> Rc<Noun> {
Rc::new(Noun::Cell(Cell::new(left, right)))
}
/// Creates a new unsigned integer Atom Noun.
pub fn atom<T: Into<Atom>>(atom: T) -> Rc<Noun> {
Rc::new(Noun::Atom(atom.into()))
}
/// Groups a nonempty slice of Nouns into Cells, from right-to-left, returning a Noun.
///
/// `list(&[a, b, c, d, ...])` = `cell(a, cell(b, cell(c, cell(d, ...))))`
pub fn list(nouns: &[Rc<Noun>]) -> Rc<Noun> {
let nouns = nouns.to_vec();
if nouns.is_empty() {
panic!("can't have an empty list")
}
let mut nouns_rev = nouns.into_iter().rev();
let mut result = nouns_rev.next().unwrap();
for outer in nouns_rev {
result = cell(outer, result);
}
result
}
/* Formatting nouns and errors * * * * * * * * * * * * * * * * * */
impl Display for Noun {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Noun::Atom(atom) => match atom.try_u128() {
// If it fits in u128 we'll display it as an ordinary decimal integer
// literal.
Some(n) => write!(f, "{}", n),
// For larger values, we'll use a hex integer literal.
None => {
write!(f, "0x")?;
for byte in atom.as_bytes_le().iter().rev() {
write!(f, "{:02x}", byte)?;
}
Ok(())
}
},
Noun::Cell(cell) => write!(f, "[{} {}]", cell.head, cell.tail),
}
}
}
impl Error for Crash {}
impl Display for Crash {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Nock Crash: {}", self.message)
}
}
impl From<&str> for Crash {
fn from(message: &str) -> Crash {
Crash::new(message.to_string())
}
}
/* Nock evaluation * * * * * * * * * * * * * * * * * */
/// The Nock function interprets a formula Cell and applies it a subject Noun.
pub fn nock(subject: Rc<Noun>, formula: &Cell) -> NockResult {
let operation = formula.head();
let parameter = formula.tail();
match &*operation {
Noun::Cell(operation) => {
let f = operation.head();
let g = operation.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
Ok(cell(fp, gp))
}
Noun::Atom(operation) => {
match operation.try_u128().ok_or(Crash::from("opcode > u128"))? {
// A formula [0 b] reduces to the noun at tree address b in the subject.
// *[a 0 b] -> /[b a]
0 => cell(parameter, subject.clone()).net(),
// A formula [1 b] reduces to the constant noun b.
// *[a 1 b] -> b
1 => Ok(parameter),
// A formula [2 b c] treats b and c as formulas, resolves each against the
// subject, then computes Nock again with the product of b as the subject, c
// as the formula. *[a 2 b c] -> *[*[a b] *[a c]]
2 => {
let parameter = parameter.try_cell()?;
let f = parameter.head();
let g = parameter.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
let gp = gp.try_cell()?;
nock(fp, gp)
}
// In formulas [3 b] and [4 b], b is another formula, whose product against
// the subject becomes the input to an axiomatic operator. 3 is ? and 4 is +
// *[a 3 b] -> ?*[a b]
3 => Ok(cell(subject, parameter).tar()?.wut()),
// *[a 4 b] -> +*[a b]
4 => Ok(cell(subject, parameter).tar()?.lus()?),
// A formula [5 b c] treats b and c as formulas that become the input to
// another axiomatic operator, =. *[a 5 b c] -> =[*[a b]
// *[a c]]
5 => unimplemented!(),
// Instructions 6 through 11 are not strictly necessary for Turing
// completeness; deleting them from Nock would decrease compactness, but not
// expressiveness. [6 b c d] is if b, then c, else d. Each
// of b, c, d is a formula against the subject. Remember that 0 is true and
// 1 is false. *[a 6 b c d] -> *[a *[[c d] 0 *[[2 3] 0 *[a
// 4 4 b]]]]
6 => unimplemented!(),
// [7 b c] composes the formulas b and c.
// *[a 7 b c] -> *[*[a b] c]
7 => unimplemented!(),
// [8 b c] produces the product of formula c, against a subject whose head
// is the product of formula b with the original subject, and whose tail is
// the original subject. (Think of 8 as a “variable declaration” or “stack
// push.”) *[a 8 b c] -> *[[*[a b] a] c]
8 => unimplemented!(),
// [9 b c] computes the product of formula c with the current subject; from
// that product d it extracts a formula e at tree address b, then computes
// *[d e]. (9 is designed to fit Hoon; d is a core (object), e points to an
// arm (method).) *[a 9 b c] -> *[*[a c] 2 [0 1] 0 b]
9 => unimplemented!(),
// In a formula [10 [b c] d], c and d are computed with the current subject,
// and then b of the product of d is replaced with the product of c.
// *[a 10 [b c] d] -> #[b *[a c] *[a d]]
10 => unimplemented!(),
// [11 b c] is a hint semantically equivalent to the formula c. If b is an
// atom, it's a static hint, which is just discarded. If b is a cell, it's a
// dynamic hint; the head of b is discarded, and the tail of b is executed
// as a formula against the current subject; the product of this is
// discarded. *[a 11 b c] -> *[a c]
// [11 hint formula]
11 => {
let parameter = parameter.try_cell()?;
let _hint = parameter.head();
let formula = parameter.tail.try_cell()?;
nock(subject, formula)
}
_ => Err(Crash::from("opcode > 11")),
}
}
}
}
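// Worked example added for illustration (it exercises only the opcodes
// implemented above): *[[42 43] [0 2]] looks up slot 2, the head of the
// subject, so the product is 42.
#[test]
fn slot_formula_fetches_the_head() {
    let subject = cell(atom(42), atom(43));
    let formula = cell(atom(0), atom(2));
    assert_eq!(nock(subject, formula.try_cell().unwrap()).unwrap(), atom(42));
}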
impl Noun {
/// Returns a reference to the Atom in this Noun, or a Crash if it's a cell.
pub fn try_atom(&self) -> Result<&Atom, Crash> {
match self {
Noun::Atom(atom) => Ok(atom),
Noun::Cell(_) => Err(Crash::from("required atom, had cell")),
}
}
/// Returns a reference to the Cell in this Noun, or a Crash if it's an atom.
pub fn try_cell(&self) -> Result<&Cell, Crash> {
match self {
Noun::Cell(cell) => Ok(cell),
Noun::Atom(_) => Err(Crash::from("required cell, had atom")),
}
}
/// `*[subject formula]` nock formula application.
pub fn tar(&self) -> NockResult {
trace!("*{}", self);
let self_cell = self.try_cell()?;
let subject = self_cell.head();
let formula = self_cell.tail();
let formula = formula.try_cell()?;
nock(subject, formula)
}
/// `?noun` noun type operator.
pub fn wut(&self) -> Rc<Noun> {
trace!("?{}", self);
Rc::new(Noun::Atom(Atom::from(match self {
Noun::Cell(_) => true,
Noun::Atom(_) => false,
})))
}
/// `=[head tail]` noun equality operator.
pub fn tis(&self) -> NockResult {
trace!("={}", self);
let self_cell = self.try_cell()?;
Ok(atom(Atom::from(self_cell.head == self_cell.tail)))
}
/// `+number` atom increment operator.
pub fn lus(&self) -> NockResult {
trace!("+{}", self);
let self_atom = self.try_atom()?;
let mut incremented_bytes = self_atom.as_bytes_le().to_vec();
incremented_bytes.push(0x00);
for byte in incremented_bytes.iter_mut() {
if *byte == 0xFF {
*byte = 0x00;
continue;
} else {
*byte += 1;
break;
}
}
Ok(atom(Atom::new(&incremented_bytes)))
}
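// Carry behaviour, for illustration: +(255) is 256, i.e. the little-endian
// byte string [0xFF] grows to [0x00, 0x01].
#[test]
fn lus_carries_across_a_byte_boundary() {
    assert_eq!(atom(255).lus().unwrap(), atom(256));
}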
/// `/[index root]`, `*[root 0 index]` cell tree slot indexing operator.
pub fn net(&self) -> NockResult {
trace!("/{}", self);
let self_cell = self.try_cell()?;
let index = self_cell.head();
let index = index.try_atom()?;
let root = self_cell.tail();
if index.is_zero() {
return Err(Crash::from("index in /[index root] must be > 0"));
}
let mut result = root;
for (byte_index, byte) in index.as_bytes_le().iter().rev().enumerate() {
let skip_bits = if byte_index == 0 {
byte.leading_zeros() + 1
} else {
0
};
for bit_index in skip_bits..8 {
result = if ((byte >> (7 - bit_index)) & 1) == 0 {
result.try_cell()?.head()
} else {
result.try_cell()?.tail()
};
}
}
Ok(result)
}
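// Tree addressing, for illustration: in /[5 [[4 5] 6]] the index 5 is 0b101;
// after the leading 1, bit 0 picks the head and bit 1 picks the tail, so the
// product is 5.
#[test]
fn net_follows_the_index_bits() {
    let root = cell(cell(atom(4), atom(5)), atom(6));
    assert_eq!(cell(atom(5), root).net().unwrap(), atom(5));
}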
/// `#[index root replacement]` edit cell tree index modification operator.
pub fn hax(&self) -> NockResult {
trace!("#{}", self);
unimplemented!()
}
}
impl Cell {
pub fn head(&self) -> Rc<Noun> {
self.head.clone()
}
pub fn tail(&self) -> Rc<Noun> {
self.tail.clone()
}
}
| main | identifier_name |
main.rs | //! Urbit Nock 4K data structures, with basic parsing, and evaluation.
//! <https://urbit.org/docs/learn/arvo/nock/>
#![feature(never_type, exact_size_is_empty)]
use byteorder::{ByteOrder, LittleEndian};
use derive_more::Constructor;
use env_logger;
use log::{debug, error, info, log, trace, warn};
use std::{clone::Clone, error::Error, fmt::Display, rc::Rc};
pub fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::try_init()?;
let subject = list(&[cell(atom(11), atom(12)), atom(2), atom(3), atom(4), atom(5)]);
let formula = cell(atom(0), atom(7));
info!("subject: {}", subject);
info!("formula: {}", formula);
let product = nock(subject.clone(), formula.try_cell()?)?;
info!("product: {}.", product);
println!("*[{} {}] = {}", subject, formula, product);
Ok(())
}
/* Data structures * * * * * * * * * * * * * * * * * */
/// A Nock Noun can be any Nock value, either an Atom or a Cell.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub enum Noun {
Atom(Atom),
Cell(Cell),
}
/// A Nock Cell is an ordered pair of Nouns, implemented as a tuple.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Cell {
head: Rc<Noun>,
tail: Rc<Noun>,
}
/// A Nock Atom is an arbitrarily-large unsigned integer.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub struct Atom {
bytes_le: Vec<u8>,
}
/// Evaluating a Nock expression that contains an invalid, undefined, infinite,
/// nonterminating, or irreducible subexpression produces a Crash.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Crash {
message: String,
}
/// The result of evaluating/nocking/tarring a Noun: a product Noun or a Crash.
pub type NockResult = Result<Rc<Noun>, Crash>;
/* Atom encoding and decoding * * * * * * * * * * * * * * * * * */
impl Atom {
/// Construct a new Atom from a little-endian byte slice.
pub fn new(bytes_le: &[u8]) -> Self {
// Strip irrelevent trailing zero bytes to normalize Atom for Hash and Eq.
let mut len = bytes_le.len();
while len > 0 && bytes_le[len - 1] == 0x00 {
len -= 1;
}
Self { bytes_le: bytes_le[..len].to_vec() }
}
/// Whether this Atom is zero, which is the truthy value in Nock.
pub fn is_zero(&self) -> bool {
self.bytes_le.len() == 0
}
/// Returns the value of this atom as a little-endian byte slice.
pub fn as_bytes_le(&self) -> &[u8] {
&self.bytes_le
}
/// Returns the value of the atom as Some(u128) if it fits, else None.
pub fn try_u128(&self) -> Option<u128> {
if self.as_bytes_le().is_empty() {
Some(0)
} else if self.bytes_le.len() <= 16 {
Some(LittleEndian::read_uint128(&self.bytes_le, self.bytes_le.len()))
} else {
None
}
}
}
impl From<bool> for Atom {
fn from(b: bool) -> Atom {
if b {
Atom::new(&[0])
} else {
Atom::new(&[1])
}
}
}
impl From<u128> for Atom {
fn from(n: u128) -> Atom {
let mut bytes = [0u8; 16];
LittleEndian::write_u128(&mut bytes, n);
Atom::new(&bytes)
}
}
/* Noun construction conveniences * * * * * * * * * * * * * * * * * */
/// Creates a new cons Cell Noun containing two existing Nouns.
pub fn cell(left: Rc<Noun>, right: Rc<Noun>) -> Rc<Noun> {
Rc::new(Noun::Cell(Cell::new(left, right)))
}
/// Creates a new unsigned integer Atom Noun.
pub fn atom<T: Into<Atom>>(atom: T) -> Rc<Noun> {
Rc::new(Noun::Atom(atom.into()))
}
/// Groups a nonempty slice of Nouns into Cells, from right-to-left, returning a Noun.
///
/// `list(&[a, b, c, d, ...])` = `cell(a, cell(b, cell(c, cell(d, ...))))`
pub fn list(nouns: &[Rc<Noun>]) -> Rc<Noun> {
let nouns = nouns.to_vec();
if nouns.is_empty() {
panic!("can't have an empty list")
}
let mut nouns_rev = nouns.into_iter().rev();
let mut result = nouns_rev.next().unwrap();
for outer in nouns_rev {
result = cell(outer, result);
}
result
}
/* Formatting nouns and errors * * * * * * * * * * * * * * * * * */
impl Display for Noun {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Noun::Atom(atom) => match atom.try_u128() {
// If it fits in u128 we'll display it as an ordinary decimal integer
// literal.
Some(n) => write!(f, "{}", n),
// For larger values, we'll use a hex integer literal.
None => {
write!(f, "0x")?;
for byte in atom.as_bytes_le().iter().rev() {
write!(f, "{:02x}", byte)?;
}
Ok(())
}
},
Noun::Cell(cell) => write!(f, "[{} {}]", cell.head, cell.tail),
}
}
}
impl Error for Crash {}
impl Display for Crash {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Nock Crash: {}", self.message)
}
}
impl From<&str> for Crash {
fn from(message: &str) -> Crash {
Crash::new(message.to_string())
}
}
/* Nock evaluation * * * * * * * * * * * * * * * * * */
/// The Nock function interprets a formula Cell and applies it a subject Noun.
pub fn nock(subject: Rc<Noun>, formula: &Cell) -> NockResult {
let operation = formula.head();
let parameter = formula.tail();
match &*operation {
Noun::Cell(operation) => {
let f = operation.head();
let g = operation.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
Ok(cell(fp, gp))
}
Noun::Atom(operation) => {
match operation.try_u128().ok_or(Crash::from("opcode > u128"))? {
// A formula [0 b] reduces to the noun at tree address b in the subject.
// *[a 0 b] -> /[b a]
0 => cell(parameter, subject.clone()).net(),
// A formula [1 b] reduces to the constant noun b.
// *[a 1 b] -> b
1 => Ok(parameter),
// A formula [2 b c] treats b and c as formulas, resolves each against the
// subject, then computes Nock again with the product of b as the subject, c
// as the formula. *[a 2 b c] -> *[*[a b] *[a c]]
2 => {
let parameter = parameter.try_cell()?;
let f = parameter.head();
let g = parameter.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
let gp = gp.try_cell()?;
nock(fp, gp) | // *[a 4 b] -> +*[a b]
3 => Ok(cell(subject, parameter).tar()?.lus()?),
// A formula [5 b c] treats b and c as formulas that become the input to
// another axiomatic operator, =. *[a 5 b c] -> =[*[a b]
// *[a c]]
5 => unimplemented!(),
// Instructions 6 through 11 are not strictly necessary for Turing
// completeness; deleting them from Nock would decrease compactness, but not
// expressiveness. [6 b c d] is if b, then c, else d. Each
// of b, c, d is a formula against the subject. Remember that 0 is true and
// 1 is false. *[a 6 b c d] -> *[a *[[c d] 0 *[[2 3] 0 *[a
// 4 4 b]]]]
6 => unimplemented!(),
// [7 b c] composes the formulas b and c.
// *[a 7 b c] -> *[*[a b] c]
7 => unimplemented!(),
// [8 b c] produces the product of formula c, against a subject whose head
// is the product of formula b with the original subject, and whose tail is
// the original subject. (Think of 8 as a “variable declaration” or “stack
// push.”) *[a 8 b c] -> *[[*[a b] a] c]
8 => unimplemented!(),
// [9 b c] computes the product of formula c with the current subject; from
// that product d it extracts a formula e at tree address b, then computes
// *[d e]. (9 is designed to fit Hoon; d is a core (object), e points to an
// arm (method).) *[a 9 b c] -> *[*[a c] 2 [0 1] 0 b]
9 => unimplemented!(),
// In a formula [10 [b c] d], c and d are computed with the current subject,
// and then b of the product of d is replaced with the product of c.
// *[a 10 [b c] d] -> #[b *[a c] *[a d]]
10 => unimplemented!(),
// [11 b c] is a hint semantically equivalent to the formula c. If b is an
// atom, it's a static hint, which is just discarded. If b is a cell, it's a
// dynamic hint; the head of b is discarded, and the tail of b is executed
// as a formula against the current subject; the product of this is
// discarded. *[a 11 b c] -> *[a c]
// [11 hint formula]
11 => {
let parameter = parameter.try_cell()?;
let _hint = parameter.head();
let formula = parameter.tail.try_cell()?;
nock(subject, formula)
}
_ => Err(Crash::from("opcode > 11")),
}
}
}
}
impl Noun {
/// Returns a reference to the Atom in this Noun, or a Crash if it's a cell.
pub fn try_atom(&self) -> Result<&Atom, Crash> {
match self {
Noun::Atom(atom) => Ok(atom),
Noun::Cell(_) => Err(Crash::from("required atom, had cell")),
}
}
/// Returns a reference to the Cell in this Noun, or a Crash if it's an atom.
pub fn try_cell(&self) -> Result<&Cell, Crash> {
match self {
Noun::Cell(cell) => Ok(cell),
Noun::Atom(_) => Err(Crash::from("required cell, had atom")),
}
}
/// `*[subject formula]` nock formula application.
pub fn tar(&self) -> NockResult {
trace!("*{}", self);
let self_cell = self.try_cell()?;
let subject = self_cell.head();
let formula = self_cell.tail();
let formula = formula.try_cell()?;
nock(subject, formula)
}
/// `?noun` noun type operator.
pub fn wut(&self) -> Rc<Noun> {
trace!("?{}", self);
Rc::new(Noun::Atom(Atom::from(match self {
Noun::Cell(_) => true,
Noun::Atom(_) => false,
})))
}
/// `=[head tail]` noun equality operator.
pub fn tis(&self) -> NockResult {
trace!("={}", self);
let self_cell = self.try_cell()?;
Ok(atom(Atom::from(self_cell.head == self_cell.tail)))
}
/// `+number` atom increment operator.
pub fn lus(&self) -> NockResult {
trace!("+{}", self);
let self_atom = self.try_atom()?;
let mut incremented_bytes = self_atom.as_bytes_le().to_vec();
incremented_bytes.push(0x00);
for byte in incremented_bytes.iter_mut() {
if *byte == 0xFF {
*byte = 0x00;
continue;
} else {
*byte += 1;
break;
}
}
Ok(atom(Atom::new(&incremented_bytes)))
}
/// `/[index root]`, `*[root 0 index]` cell tree slot indexing operator.
pub fn net(&self) -> NockResult {
trace!("/{}", self);
let self_cell = self.try_cell()?;
let index = self_cell.head();
let index = index.try_atom()?;
let root = self_cell.tail();
if index.is_zero() {
return Err(Crash::from("index in /[index root] must be > 0"));
}
let mut result = root;
for (byte_index, byte) in index.as_bytes_le().iter().rev().enumerate() {
let skip_bits = if byte_index == 0 {
byte.leading_zeros() + 1
} else {
0
};
for bit_index in skip_bits..8 {
result = if ((byte >> (7 - bit_index)) & 1) == 0 {
result.try_cell()?.head()
} else {
result.try_cell()?.tail()
};
}
}
Ok(result)
}
/// `#[index root replacement]` edit cell tree index modification operator.
pub fn hax(&self) -> NockResult {
trace!("#{}", self);
unimplemented!()
}
}
impl Cell {
pub fn head(&self) -> Rc<Noun> {
self.head.clone()
}
pub fn tail(&self) -> Rc<Noun> {
self.tail.clone()
}
} | }
// In formulas [3 b] and [4 b], b is another formula, whose product against
// the subject becomes the input to an axiomatic operator. 3 is ? and 4 is +
// *[a 3 b] -> ?*[a b]
3 => Ok(cell(subject, parameter).tar()?.wut()), | random_line_split |
main.rs | //! Urbit Nock 4K data structures, with basic parsing, and evaluation.
//! <https://urbit.org/docs/learn/arvo/nock/>
#![feature(never_type, exact_size_is_empty)]
use byteorder::{ByteOrder, LittleEndian};
use derive_more::Constructor;
use env_logger;
use log::{debug, error, info, log, trace, warn};
use std::{clone::Clone, error::Error, fmt::Display, rc::Rc};
pub fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::try_init()?;
let subject = list(&[cell(atom(11), atom(12)), atom(2), atom(3), atom(4), atom(5)]);
let formula = cell(atom(0), atom(7));
info!("subject: {}", subject);
info!("formula: {}", formula);
let product = nock(subject.clone(), formula.try_cell()?)?;
info!("product: {}.", product);
println!("*[{} {}] = {}", subject, formula, product);
Ok(())
}
/* Data structures * * * * * * * * * * * * * * * * * */
/// A Nock Noun can be any Nock value, either an Atom or a Cell.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub enum Noun {
Atom(Atom),
Cell(Cell),
}
/// A Nock Cell is an ordered pair of Nouns, implemented as a tuple.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Cell {
head: Rc<Noun>,
tail: Rc<Noun>,
}
/// A Nock Atom is an arbitrarily-large unsigned integer.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub struct Atom {
bytes_le: Vec<u8>,
}
/// Evaluating a Nock expression that contains an invalid, undefined, infinite,
/// nonterminating, or irreducible subexpression produces a Crash.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Crash {
message: String,
}
/// The result of evaluating/nocking/tarring a Noun: a product Noun or a Crash.
pub type NockResult = Result<Rc<Noun>, Crash>;
/* Atom encoding and decoding * * * * * * * * * * * * * * * * * */
impl Atom {
/// Construct a new Atom from a little-endian byte slice.
pub fn new(bytes_le: &[u8]) -> Self {
// Strip irrelevent trailing zero bytes to normalize Atom for Hash and Eq.
let mut len = bytes_le.len();
while len > 0 && bytes_le[len - 1] == 0x00 {
len -= 1;
}
Self { bytes_le: bytes_le[..len].to_vec() }
}
/// Whether this Atom is zero, which is the truthy value in Nock.
pub fn is_zero(&self) -> bool {
self.bytes_le.len() == 0
}
/// Returns the value of this atom as a little-endian byte slice.
pub fn as_bytes_le(&self) -> &[u8] {
&self.bytes_le
}
/// Returns the value of the atom as Some(u128) if it fits, else None.
pub fn try_u128(&self) -> Option<u128> {
if self.as_bytes_le().is_empty() {
Some(0)
} else if self.bytes_le.len() <= 16 {
Some(LittleEndian::read_uint128(&self.bytes_le, self.bytes_le.len()))
} else {
None
}
}
}
impl From<bool> for Atom {
fn from(b: bool) -> Atom {
if b {
Atom::new(&[0])
} else {
Atom::new(&[1])
}
}
}
impl From<u128> for Atom {
fn from(n: u128) -> Atom {
let mut bytes = [0u8; 16];
LittleEndian::write_u128(&mut bytes, n);
Atom::new(&bytes)
}
}
/* Noun construction conveniences * * * * * * * * * * * * * * * * * */
/// Creates a new cons Cell Noun containing two existing Nouns.
pub fn cell(left: Rc<Noun>, right: Rc<Noun>) -> Rc<Noun> {
Rc::new(Noun::Cell(Cell::new(left, right)))
}
/// Creates a new unsigned integer Atom Noun.
pub fn atom<T: Into<Atom>>(atom: T) -> Rc<Noun> {
Rc::new(Noun::Atom(atom.into()))
}
/// Groups a nonempty slice of Nouns into Cells, from right-to-left, returning a Noun.
///
/// `list(&[a, b, c, d, ...])` = `cell(a, cell(b, cell(c, cell(d, ...))))`
pub fn list(nouns: &[Rc<Noun>]) -> Rc<Noun> {
let nouns = nouns.to_vec();
if nouns.is_empty() {
panic!("can't have an empty list")
}
let mut nouns_rev = nouns.into_iter().rev();
let mut result = nouns_rev.next().unwrap();
for outer in nouns_rev {
result = cell(outer, result);
}
result
}
/* Formatting nouns and errors * * * * * * * * * * * * * * * * * */
impl Display for Noun {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Noun::Atom(atom) => match atom.try_u128() {
// If it fits in u128 we'll display it as an ordinary decimal integer
// literal.
Some(n) => write!(f, "{}", n),
// For larger values, we'll use a hex integer literal.
None => {
write!(f, "0x")?;
for byte in atom.as_bytes_le().iter().rev() {
write!(f, "{:02x}", byte)?;
}
Ok(())
}
},
Noun::Cell(cell) => write!(f, "[{} {}]", cell.head, cell.tail),
}
}
}
impl Error for Crash {}
impl Display for Crash {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Nock Crash: {}", self.message)
}
}
impl From<&str> for Crash {
fn from(message: &str) -> Crash {
Crash::new(message.to_string())
}
}
/* Nock evaluation * * * * * * * * * * * * * * * * * */
/// The Nock function interprets a formula Cell and applies it a subject Noun.
pub fn nock(subject: Rc<Noun>, formula: &Cell) -> NockResult {
let operation = formula.head();
let parameter = formula.tail();
match &*operation {
Noun::Cell(operation) => {
let f = operation.head();
let g = operation.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
Ok(cell(fp, gp))
}
Noun::Atom(operation) => {
match operation.try_u128().ok_or(Crash::from("opcode > u128"))? {
// A formula [0 b] reduces to the noun at tree address b in the subject.
// *[a 0 b] -> /[b a]
0 => cell(parameter, subject.clone()).net(),
// A formula [1 b] reduces to the constant noun b.
// *[a 1 b] -> b
1 => Ok(parameter),
// A formula [2 b c] treats b and c as formulas, resolves each against the
// subject, then computes Nock again with the product of b as the subject, c
// as the formula. *[a 2 b c] -> *[*[a b] *[a c]]
2 => {
let parameter = parameter.try_cell()?;
let f = parameter.head();
let g = parameter.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
let gp = gp.try_cell()?;
nock(fp, gp)
}
// In formulas [3 b] and [4 b], b is another formula, whose product against
// the subject becomes the input to an axiomatic operator. 3 is ? and 4 is +
// *[a 3 b] -> ?*[a b]
3 => Ok(cell(subject, parameter).tar()?.wut()),
// *[a 4 b] -> +*[a b]
4 => Ok(cell(subject, parameter).tar()?.lus()?),
// A formula [5 b c] treats b and c as formulas that become the input to
// another axiomatic operator, =. *[a 5 b c] -> =[*[a b]
// *[a c]]
5 => unimplemented!(),
// Instructions 6 through 11 are not strictly necessary for Turing
// completeness; deleting them from Nock would decrease compactness, but not
// expressiveness. [6 b c d] is if b, then c, else d. Each
// of b, c, d is a formula against the subject. Remember that 0 is true and
// 1 is false. *[a 6 b c d] -> *[a *[[c d] 0 *[[2 3] 0 *[a
// 4 4 b]]]]
6 => unimplemented!(),
// [7 b c] composes the formulas b and c.
// *[a 7 b c] -> *[*[a b] c]
7 => unimplemented!(),
// [8 b c] produces the product of formula c, against a subject whose head
// is the product of formula b with the original subject, and whose tail is
// the original subject. (Think of 8 as a “variable declaration” or “stack
// push.”) *[a 8 b c] -> *[[*[a b] a] c]
8 => unimplemented!(),
// [9 b c] computes the product of formula c with the current subject; from
// that product d it extracts a formula e at tree address b, then computes
// *[d e]. (9 is designed to fit Hoon; d is a core (object), e points to an
// arm (method).) *[a 9 b c] -> *[*[a c] 2 [0 1] 0 b]
9 => unimplemented!(),
// In a formula [10 [b c] d], c and d are computed with the current subject,
// and then b of the product of d is replaced with the product of c.
// *[a 10 [b c] d] -> #[b *[a c] *[a d]]
10 => unimplemented!(),
// [11 b c] is a hint semantically equivalent to the formula c. If b is an
// atom, it's a static hint, which is just discarded. If b is a cell, it's a
// dynamic hint; the head of b is discarded, and the tail of b is executed
// as a formula against the current subject; the product of this is
// discarded. *[a 11 b c] -> *[a c]
// [11 hint formula]
11 => {
let parameter = parameter.try_cell()?;
let _hint = parameter.head;
let formula = parameter.tail.try_cell()?;
nock(subject, formula)
}
_ => Err(Crash::from("opcode > 11")),
}
}
}
}
impl Noun {
/// Returns a reference to the Atom in this Noun, or a Crash if it's a cell.
pub fn try_atom(&self) -> Result<&Atom, Crash> {
| / Returns a reference to the Cell in this Noun, or a Crash if it's an atom.
pub fn try_cell(&self) -> Result<&Cell, Crash> {
match self {
Noun::Cell(cell) => Ok(cell),
Noun::Atom(_) => Err(Crash::from("required cell, had atom")),
}
}
/// `*[subject formula]` nock formula application.
pub fn tar(&self) -> NockResult {
trace!("*{}", self);
let self_cell = self.try_cell()?;
let subject = self_cell.head();
let formula = self_cell.tail().try_cell()?;
nock(subject, formula)
}
/// `?noun` noun type operator.
pub fn wut(&self) -> Rc<Noun> {
trace!("?{}", self);
Rc::new(Noun::Atom(Atom::from(match self {
Noun::Cell(_) => true,
Noun::Atom(_) => false,
})))
}
/// `=[head tail]` noun equality operator.
pub fn tis(&self) -> NockResult {
trace!("={}", self);
let self_cell = self.try_cell()?;
Ok(atom(Atom::from(self_cell.head == self_cell.tail)))
}
/// `+number` atom increment operator.
pub fn lus(&self) -> NockResult {
trace!("+{}", self);
let self_atom = self.try_atom()?;
let mut incremented_bytes = self_atom.as_bytes_le().to_vec();
incremented_bytes.push(0x00);
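// The extra zero high byte absorbs a carry out of the top byte, so e.g.
// 0xFF increments to [0x00, 0x01], the little-endian encoding of 256.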
for byte in incremented_bytes.iter_mut() {
if *byte == 0xFF {
*byte = 0x00;
continue;
} else {
*byte += 1;
break;
}
}
Ok(atom(Atom::new(&incremented_bytes)))
}
/// `/[index root]`, `*[root 0 index]` cell tree slot indexing operator.
pub fn net(&self) -> NockResult {
trace!("/{}", self);
let self_cell = self.try_cell()?;
let index = self_cell.head().try_atom()?;
let root = self_cell.tail();
if index.is_zero() {
return Err(Crash::from("index in /[index root] must be > 0"));
}
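// Walk the index bits from the most significant end, skipping the leading
// 1 bit: each 0 bit steps into the head, each 1 bit into the tail. For
// example, index 5 = 0b101 walks head then tail, so /[5 a] is the tail of
// the head of a.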
let mut result = root;
for (byte_index, byte) in index.as_bytes_le().iter().rev().enumerate() {
let skip_bits = if byte_index == 0 {
byte.leading_zeros() + 1
} else {
0
};
for bit_index in skip_bits..8 {
result = if ((byte >> (7 - bit_index)) & 1) == 0 {
result.try_cell()?.head()
} else {
result.try_cell()?.tail()
};
}
}
Ok(result)
}
/// `#[index root replacement]` edit cell tree index modification operator.
pub fn hax(&self) -> NockResult {
trace!("#{}", self);
unimplemented!()
}
}
impl Cell {
pub fn head(&self) -> Rc<Noun> {
self.head.clone()
}
pub fn tail(&self) -> Rc<Noun> {
self.tail.clone()
}
}
| match self {
Noun::Atom(atom) => Ok(atom),
Noun::Cell(_) => Err(Crash::from("required atom, had cell")),
}
}
// | identifier_body |
symdumper.rs | use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
use std::iter::once;
use std::process::Command;
use std::io::{Error, ErrorKind};
use crate::win32::ModuleInfo;
type HANDLE = usize;
extern {
fn GetCurrentProcess() -> HANDLE;
}
#[allow(non_snake_case)]
#[repr(C)]
struct SrcCodeInfoW {
SizeOfStruct: u32,
Key: usize,
ModBase: u64,
Obj: [u16; 261],
FileName: [u16; 261],
LineNumber: u32,
Address: u64,
}
impl Default for SrcCodeInfoW {
fn default() -> Self { | SizeOfStruct: std::mem::size_of::<SrcCodeInfoW>() as u32,
Key: 0,
ModBase: 0,
Obj: [0; 261],
FileName: [0; 261],
LineNumber: 0,
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct SymbolInfoW {
SizeOfStruct: u32,
TypeIndex: u32,
Reserved: [u64; 2],
Index: u32,
Size: u32,
ModBase: u64,
Flags: u32,
Value: u64,
Address: u64,
Register: u32,
Scope: u32,
Tag: u32,
NameLen: u32,
MaxNameLen: u32,
// technically this field is dynamically sized as specified by MaxNameLen
Name: [u16; 8192],
}
impl Default for SymbolInfoW {
fn default() -> Self {
SymbolInfoW {
// Subtract off the size of the dynamic component, one byte
// already included in the structure
SizeOfStruct: std::mem::size_of::<SymbolInfoW>() as u32 - 8192*2,
TypeIndex: 0,
Reserved: [0; 2],
Index: 0,
Size: 0,
ModBase: 0,
Flags: 0,
Value: 0,
Address: 0,
Register: 0,
Scope: 0,
Tag: 0,
NameLen: 0,
MaxNameLen: 8192,
Name: [0; 8192],
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpLineW64 {
SizeOfStruct: u32,
Key: usize,
LineNumber: u32,
FileName: *const u16,
Address: u64,
}
impl Default for ImagehlpLineW64 {
fn default() -> Self {
ImagehlpLineW64 {
SizeOfStruct: std::mem::size_of::<ImagehlpLineW64>() as u32,
Key: 0,
LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpModule64W {
SizeOfStruct: u32,
BaseOfImage: u64,
ImageSize: u32,
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
/// Vector of (virtual address, symbol name, symbol size)
type Context = *mut SymbolContext;
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
let srcline = unsafe { &*srcline_info };
let context = unsafe { &mut *(context as Context) };
let mut filename = Vec::with_capacity(srcline.FileName.len());
for &val in srcline.FileName.iter() {
if val == 0 { break; }
filename.push(val);
}
let source_filename = String::from_utf16(&filename)
.expect("Failed to decode UTF-16 file name");
context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
true
}
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
let symbol = unsafe { &*sym_info };
let context = unsafe { &mut *(context as Context) };
// Technically NameLen isn't supposed to contain the null terminator... but it does.
// Yay!
if symbol.NameLen < 1 {
return true;
}
let symbol_name = String::from_utf16(&symbol.Name[..symbol.NameLen as usize - 1])
.expect("Failed to decode UTF-16 symbol name");
context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
true
}
#[link(name = "dbghelp")]
extern {
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
fn SymCleanup(hProcess: HANDLE) -> bool;
}
pub fn win16_for_str(s: &str) -> Vec<u16> {
OsStr::new(s).encode_wide().chain(once(0)).collect()
}
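// e.g. win16_for_str("calc.exe") yields the UTF-16 code units of "calc.exe"
// followed by the terminating 0 that the Win32 *W APIs expect.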
#[repr(C)]
#[derive(Clone, Default)]
pub struct SymbolContext {
pub symbols: Vec<(u64, String, u64)>,
pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
assert!(module_base != 0, "Failed to SymLoadModuleExW()");
// Get information about the module we just loaded
let mut module_info = ImagehlpModule64W::default();
assert!(SymGetModuleInfoW64(cur_process, module_base,
&mut module_info as *mut _),
"Failed to SymGetModuleInfoW64()");
// This is pedantic but we might as well check it
assert!(module_info.BaseOfImage == module_base);
assert!(SymEnumSymbolsW(cur_process, module_base, 0, sym_callback, &mut symdb as *mut _ as usize));
if !SymEnumSourceLinesW(cur_process, module_base, 0, 0, 0, 0, srcline_callback, &mut symdb as *mut _ as usize) {
// Eh just silently fail here, most people won't have private
// symbols so this would just spam
//print!("Warning: Could not enumerate sourcelines\n");
}
assert!(SymUnloadModule64(cur_process, module_base),
"Failed to SymUnloadModule64()");
assert!(SymCleanup(cur_process), "Failed to SymCleanup()");
}
symdb.symbols.sort_by_key(|x| x.0);
symdb.sourceline.sort_by_key(|x| x.0);
symdb
}
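// Hypothetical usage sketch (the path here is an assumption, purely for
// illustration):
//     let ctx = get_symbols_from_file(r"C:\Windows\System32\calc.exe");
//     for (offset, name, size) in &ctx.symbols {
//         println!("{:#x} {} ({} bytes)", offset, name, size);
//     }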
/// Get all of the symbols from a module `module_name` with a TimeDateStamp
/// and SizeOfImage from the PE header. This will automatically download the
/// module and PDB from the symbol store using symchk
pub fn get_symbols_from_module(module: &ModuleInfo)
-> std::io::Result<SymbolContext>
{
// Use symchk to download the module and symbols
let module = download_symbol(module.name(), module.time(), module.size())?;
Ok(get_symbols_from_file(&module))
}
/// Download a module and the corresponding PDB based on module_name,
/// its TimeDateStamp and SizeOfImage from its PE header
///
/// Returns a string containing a filename of the downloaded module
fn download_symbol(module_name: &str, timedatestamp: u32, sizeofimage: u32)
-> std::io::Result<String> {
let mut dir = std::env::temp_dir();
dir.push("applepie_manifest");
// Create manifest file for symchk
std::fs::write(&dir, format!("{},{:x}{:x},1\r\n",
module_name, timedatestamp, sizeofimage))?;
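    // The resulting manifest line looks like "calc.exe,8f598a9eb000,1", where
    // the middle field is the TimeDateStamp and SizeOfImage concatenated in hex.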
// Run symchk to download this module
let res = Command::new("symchk")
.arg("/v")
.arg("/im")
.arg(dir)
.output()?;
if !res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Symchk apparently ran, check output
let stderr = std::str::from_utf8(&res.stderr)
.expect("Failed to convert symchk output to utf-8");
let mut filename = None;
for line in stderr.lines() {
const PREFIX: &'static str = "DBGHELP: ";
const POSTFIX: &'static str = " - OK";
// The line that contains the filename looks like:
// DBGHELP: C:\symbols\calc.exe\8f598a9eb000\calc.exe - OK
if !line.starts_with(PREFIX) { continue; }
if !line.ends_with(POSTFIX) { continue; }
// We only expect one line of output to match the above criteria
// If there are multiple we'll need to improve this "parser"
assert!(filename.is_none(), "Multiple filenames in symchk output");
// Save the filename we found
filename = Some(&line[PREFIX.len()..line.len() - POSTFIX.len()]);
}
// Fail hard if we didn't get the output filename from symchk
let filename = filename.expect("Did not get expected symchk output");
// Run symchk to download the pdb for the file
let res = Command::new("symchk")
.arg(filename)
.output()?;
if !res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Now we have downloaded the PDB for this file :)
Ok(filename.into())
}
#[test]
fn test_symchk() {
download_symbol("calc.exe", 0x8F598A9E, 0xB000)
.expect("Failed to download symbol");
} | SrcCodeInfoW { | random_line_split |
symdumper.rs | use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
use std::iter::once;
use std::process::Command;
use std::io::{Error, ErrorKind};
use crate::win32::ModuleInfo;
type HANDLE = usize;
extern {
fn GetCurrentProcess() -> HANDLE;
}
#[allow(non_snake_case)]
#[repr(C)]
struct SrcCodeInfoW {
SizeOfStruct: u32,
Key: usize,
ModBase: u64,
Obj: [u16; 261],
FileName: [u16; 261],
LineNumber: u32,
Address: u64,
}
impl Default for SrcCodeInfoW {
fn default() -> Self {
SrcCodeInfoW {
SizeOfStruct: std::mem::size_of::<SrcCodeInfoW>() as u32,
Key: 0,
ModBase: 0,
Obj: [0; 261],
FileName: [0; 261],
LineNumber: 0,
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct SymbolInfoW {
SizeOfStruct: u32,
TypeIndex: u32,
Reserved: [u64; 2],
Index: u32,
Size: u32,
ModBase: u64,
Flags: u32,
Value: u64,
Address: u64,
Register: u32,
Scope: u32,
Tag: u32,
NameLen: u32,
MaxNameLen: u32,
// technically this field is dynamically sized as specified by MaxNameLen
Name: [u16; 8192],
}
impl Default for SymbolInfoW {
fn default() -> Self {
SymbolInfoW {
// Subtract off the size of the dynamic component, one byte
// already included in the structure
SizeOfStruct: std::mem::size_of::<SymbolInfoW>() as u32 - 8192*2,
TypeIndex: 0,
Reserved: [0; 2],
Index: 0,
Size: 0,
ModBase: 0,
Flags: 0,
Value: 0,
Address: 0,
Register: 0,
Scope: 0,
Tag: 0,
NameLen: 0,
MaxNameLen: 8192,
Name: [0; 8192],
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpLineW64 {
SizeOfStruct: u32,
Key: usize,
LineNumber: u32,
FileName: *const u16,
Address: u64,
}
impl Default for ImagehlpLineW64 {
fn default() -> Self |
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpModule64W {
SizeOfStruct: u32,
BaseOfImage: u64,
ImageSize: u32,
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
/// Vector of (virtual address, symbol name, symbol size)
type Context = *mut SymbolContext;
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
let srcline = unsafe { &*srcline_info };
let context = unsafe { &mut *(context as Context) };
let mut filename = Vec::with_capacity(srcline.FileName.len());
for &val in srcline.FileName.iter() {
if val == 0 { break; }
filename.push(val);
}
let source_filename = String::from_utf16(&filename)
.expect("Failed to decode UTF-16 file name");
context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
true
}
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
let symbol = unsafe { &*sym_info };
let context = unsafe { &mut *(context as Context) };
// Technically NameLen isn't supposed to contain the null terminator... but it does.
// Yay!
if symbol.NameLen < 1 {
return true;
}
let symbol_name = String::from_utf16(&symbol.Name[..symbol.NameLen as usize - 1])
.expect("Failed to decode UTF-16 symbol name");
context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
true
}
#[link(name = "dbghelp")]
extern {
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
fn SymCleanup(hProcess: HANDLE) -> bool;
}
pub fn win16_for_str(s: &str) -> Vec<u16> {
OsStr::new(s).encode_wide().chain(once(0)).collect()
}
#[repr(C)]
#[derive(Clone, Default)]
pub struct SymbolContext {
pub symbols: Vec<(u64, String, u64)>,
pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
assert!(module_base != 0, "Failed to SymLoadModuleExW()");
// Get information about the module we just loaded
let mut module_info = ImagehlpModule64W::default();
assert!(SymGetModuleInfoW64(cur_process, module_base,
&mut module_info as *mut _),
"Failed to SymGetModuleInfoW64()");
// This is pedantic but we might as well check it
assert!(module_info.BaseOfImage == module_base);
assert!(SymEnumSymbolsW(cur_process, module_base, 0, sym_callback, &mut symdb as *mut _ as usize));
if !SymEnumSourceLinesW(cur_process, module_base, 0, 0, 0, 0, srcline_callback, &mut symdb as *mut _ as usize) {
// Eh just silently fail here, most people won't have private
// symbols so this would just spam
//print!("Warning: Could not enumerate sourcelines\n");
}
assert!(SymUnloadModule64(cur_process, module_base),
"Failed to SymUnloadModule64()");
assert!(SymCleanup(cur_process), "Failed to SymCleanup()");
}
symdb.symbols.sort_by_key(|x| x.0);
symdb.sourceline.sort_by_key(|x| x.0);
symdb
}
/// Get all of the symbols from a module `module_name` with a TimeDateStamp
/// and SizeOfImage from the PE header. This will automatically download the
/// module and PDB from the symbol store using symchk
pub fn get_symbols_from_module(module: &ModuleInfo)
-> std::io::Result<SymbolContext>
{
// Use symchk to download the module and symbols
let module = download_symbol(module.name(), module.time(), module.size())?;
Ok(get_symbols_from_file(&module))
}
/// Download a module and the corresponding PDB based on module_name,
/// its TimeDateStamp and SizeOfImage from its PE header
///
/// Returns a string containing a filename of the downloaded module
fn download_symbol(module_name: &str, timedatestamp: u32, sizeofimage: u32)
-> std::io::Result<String> {
let mut dir = std::env::temp_dir();
dir.push("applepie_manifest");
// Create manifest file for symchk
std::fs::write(&dir, format!("{},{:x}{:x},1\r\n",
module_name, timedatestamp, sizeofimage))?;
// Run symchk to download this module
let res = Command::new("symchk")
.arg("/v")
.arg("/im")
.arg(dir)
.output()?;
if !res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Symchk apparently ran, check output
let stderr = std::str::from_utf8(&res.stderr)
.expect("Failed to convert symchk output to utf-8");
let mut filename = None;
for line in stderr.lines() {
const PREFIX: &'static str = "DBGHELP: ";
const POSTFIX: &'static str = " - OK";
// The line that contains the filename looks like:
// DBGHELP: C:\symbols\calc.exe\8f598a9eb000\calc.exe - OK
if !line.starts_with(PREFIX) { continue; }
if !line.ends_with(POSTFIX) { continue; }
// We only expect one line of output to match the above criteria
// If there are multiple we'll need to improve this "parser"
assert!(filename.is_none(), "Multiple filenames in symchk output");
// Save the filename we found
filename = Some(&line[PREFIX.len()..line.len() - POSTFIX.len()]);
}
// Fail hard if we didn't get the output filename from symchk
let filename = filename.expect("Did not get expected symchk output");
// Run symchk to download the pdb for the file
let res = Command::new("symchk")
.arg(filename)
.output()?;
if !res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Now we have downloaded the PDB for this file :)
Ok(filename.into())
}
#[test]
fn test_symchk() {
download_symbol("calc.exe", 0x8F598A9E, 0xB000)
.expect("Failed to download symbol");
}
| {
ImagehlpLineW64 {
SizeOfStruct: std::mem::size_of::<ImagehlpLineW64>() as u32,
Key: 0,
LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
} | identifier_body |
symdumper.rs | use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
use std::iter::once;
use std::process::Command;
use std::io::{Error, ErrorKind};
use crate::win32::ModuleInfo;
type HANDLE = usize;
extern {
fn GetCurrentProcess() -> HANDLE;
}
#[allow(non_snake_case)]
#[repr(C)]
struct | {
SizeOfStruct: u32,
Key: usize,
ModBase: u64,
Obj: [u16; 261],
FileName: [u16; 261],
LineNumber: u32,
Address: u64,
}
impl Default for SrcCodeInfoW {
fn default() -> Self {
SrcCodeInfoW {
SizeOfStruct: std::mem::size_of::<SrcCodeInfoW>() as u32,
Key: 0,
ModBase: 0,
Obj: [0; 261],
FileName: [0; 261],
LineNumber: 0,
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct SymbolInfoW {
SizeOfStruct: u32,
TypeIndex: u32,
Reserved: [u64; 2],
Index: u32,
Size: u32,
ModBase: u64,
Flags: u32,
Value: u64,
Address: u64,
Register: u32,
Scope: u32,
Tag: u32,
NameLen: u32,
MaxNameLen: u32,
// technically this field is dynamically sized as specified by MaxNameLen
Name: [u16; 8192],
}
impl Default for SymbolInfoW {
fn default() -> Self {
SymbolInfoW {
// Subtract off the size of the dynamic component, one byte
// already included in the structure
SizeOfStruct: std::mem::size_of::<SymbolInfoW>() as u32 - 8192*2,
TypeIndex: 0,
Reserved: [0; 2],
Index: 0,
Size: 0,
ModBase: 0,
Flags: 0,
Value: 0,
Address: 0,
Register: 0,
Scope: 0,
Tag: 0,
NameLen: 0,
MaxNameLen: 8192,
Name: [0; 8192],
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpLineW64 {
SizeOfStruct: u32,
Key: usize,
LineNumber: u32,
FileName: *const u16,
Address: u64,
}
impl Default for ImagehlpLineW64 {
fn default() -> Self {
ImagehlpLineW64 {
SizeOfStruct: std::mem::size_of::<ImagehlpLineW64>() as u32,
Key: 0,
LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpModule64W {
SizeOfStruct: u32,
BaseOfImage: u64,
ImageSize: u32,
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
/// Vector of (virtual address, symbol name, symbol size)
type Context = *mut SymbolContext;
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
let srcline = unsafe { &*srcline_info };
let context = unsafe { &mut *(context as Context) };
let mut filename = Vec::with_capacity(srcline.FileName.len());
for &val in srcline.FileName.iter() {
if val == 0 { break; }
filename.push(val);
}
let source_filename = String::from_utf16(&filename)
.expect("Failed to decode UTF-16 file name");
context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
true
}
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
let symbol = unsafe { &*sym_info };
let context = unsafe { &mut *(context as Context) };
// Technically NameLen isn't supposed to contain the null terminator... but it does.
// Yay!
if symbol.NameLen < 1 {
return true;
}
let symbol_name = String::from_utf16(&symbol.Name[..symbol.NameLen as usize - 1])
.expect("Failed to decode UTF-16 symbol name");
context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
true
}
#[link(name = "dbghelp")]
extern {
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
fn SymCleanup(hProcess: HANDLE) -> bool;
}
pub fn win16_for_str(s: &str) -> Vec<u16> {
OsStr::new(s).encode_wide().chain(once(0)).collect()
}
#[repr(C)]
#[derive(Clone, Default)]
pub struct SymbolContext {
pub symbols: Vec<(u64, String, u64)>,
pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
assert!(module_base != 0, "Failed to SymLoadModuleExW()");
// Get information about the module we just loaded
let mut module_info = ImagehlpModule64W::default();
assert!(SymGetModuleInfoW64(cur_process, module_base,
&mut module_info as *mut _),
"Failed to SymGetModuleInfoW64()");
// This is pedantic but we might as well check it
assert!(module_info.BaseOfImage == module_base);
assert!(SymEnumSymbolsW(cur_process, module_base, 0, sym_callback, &mut symdb as *mut _ as usize));
if !SymEnumSourceLinesW(cur_process, module_base, 0, 0, 0, 0, srcline_callback, &mut symdb as *mut _ as usize) {
// Eh just silently fail here, most people won't have private
// symbols so this would just spam
//print!("Warning: Could not enumerate sourcelines\n");
}
assert!(SymUnloadModule64(cur_process, module_base),
"Failed to SymUnloadModule64()");
assert!(SymCleanup(cur_process), "Failed to SymCleanup()");
}
symdb.symbols.sort_by_key(|x| x.0);
symdb.sourceline.sort_by_key(|x| x.0);
symdb
}
/// Get all of the symbols from a module `module_name` with a TimeDateStamp
/// and SizeOfImage from the PE header. This will automatically download the
/// module and PDB from the symbol store using symchk
pub fn get_symbols_from_module(module: &ModuleInfo)
-> std::io::Result<SymbolContext>
{
// Use symchk to download the module and symbols
let module = download_symbol(module.name(), module.time(), module.size())?;
Ok(get_symbols_from_file(&module))
}
/// Download a module and the corresponding PDB based on module_name,
/// its TimeDateStamp and SizeOfImage from its PE header
///
/// Returns a string containing a filename of the downloaded module
fn download_symbol(module_name: &str, timedatestamp: u32, sizeofimage: u32)
-> std::io::Result<String> {
let mut dir = std::env::temp_dir();
dir.push("applepie_manifest");
// Create manifest file for symchk
std::fs::write(&dir, format!("{},{:x}{:x},1\r\n",
module_name, timedatestamp, sizeofimage))?;
// Run symchk to download this module
let res = Command::new("symchk")
.arg("/v")
.arg("/im")
.arg(dir)
.output()?;
if !res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Symchk apparently ran, check output
let stderr = std::str::from_utf8(&res.stderr)
.expect("Failed to convert symchk output to utf-8");
let mut filename = None;
for line in stderr.lines() {
const PREFIX: &'static str = "DBGHELP: ";
const POSTFIX: &'static str = " - OK";
// The line that contains the filename looks like:
// DBGHELP: C:\symbols\calc.exe\8f598a9eb000\calc.exe - OK
if !line.starts_with(PREFIX) { continue; }
if !line.ends_with(POSTFIX) { continue; }
// We only expect one line of output to match the above criteria
// If there are multiple we'll need to improve this "parser"
assert!(filename.is_none(), "Multiple filenames in symchk output");
// Save the filename we found
filename = Some(&line[PREFIX.len()..line.len() - POSTFIX.len()]);
}
// Fail hard if we didn't get the output filename from symchk
let filename = filename.expect("Did not get expected symchk output");
// Run symchk to download the pdb for the file
let res = Command::new("symchk")
.arg(filename)
.output()?;
if !res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Now we have downloaded the PDB for this file :)
Ok(filename.into())
}
#[test]
fn test_symchk() {
download_symbol("calc.exe", 0x8F598A9E, 0xB000)
.expect("Failed to download symbol");
}
| SrcCodeInfoW | identifier_name |
symdumper.rs | use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
use std::iter::once;
use std::process::Command;
use std::io::{Error, ErrorKind};
use crate::win32::ModuleInfo;
type HANDLE = usize;
extern {
fn GetCurrentProcess() -> HANDLE;
}
#[allow(non_snake_case)]
#[repr(C)]
struct SrcCodeInfoW {
SizeOfStruct: u32,
Key: usize,
ModBase: u64,
Obj: [u16; 261],
FileName: [u16; 261],
LineNumber: u32,
Address: u64,
}
impl Default for SrcCodeInfoW {
fn default() -> Self {
SrcCodeInfoW {
SizeOfStruct: std::mem::size_of::<SrcCodeInfoW>() as u32,
Key: 0,
ModBase: 0,
Obj: [0; 261],
FileName: [0; 261],
LineNumber: 0,
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct SymbolInfoW {
SizeOfStruct: u32,
TypeIndex: u32,
Reserved: [u64; 2],
Index: u32,
Size: u32,
ModBase: u64,
Flags: u32,
Value: u64,
Address: u64,
Register: u32,
Scope: u32,
Tag: u32,
NameLen: u32,
MaxNameLen: u32,
// technically this field is dynamically sized as specified by MaxNameLen
Name: [u16; 8192],
}
impl Default for SymbolInfoW {
fn default() -> Self {
SymbolInfoW {
// Subtract off the size of the dynamic component, one byte
// already included in the structure
SizeOfStruct: std::mem::size_of::<SymbolInfoW>() as u32 - 8192*2,
TypeIndex: 0,
Reserved: [0; 2],
Index: 0,
Size: 0,
ModBase: 0,
Flags: 0,
Value: 0,
Address: 0,
Register: 0,
Scope: 0,
Tag: 0,
NameLen: 0,
MaxNameLen: 8192,
Name: [0; 8192],
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpLineW64 {
SizeOfStruct: u32,
Key: usize,
LineNumber: u32,
FileName: *const u16,
Address: u64,
}
impl Default for ImagehlpLineW64 {
fn default() -> Self {
ImagehlpLineW64 {
SizeOfStruct: std::mem::size_of::<ImagehlpLineW64>() as u32,
Key: 0,
LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpModule64W {
SizeOfStruct: u32,
BaseOfImage: u64,
ImageSize: u32,
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
/// Vector of (virtual address, symbol name, symbol size)
type Context = *mut SymbolContext;
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
let srcline = unsafe { &*srcline_info };
let context = unsafe { &mut *(context as Context) };
let mut filename = Vec::with_capacity(srcline.FileName.len());
for &val in srcline.FileName.iter() {
if val == 0 { break; }
filename.push(val);
}
let source_filename = String::from_utf16(&filename)
.expect("Failed to decode UTF-16 file name");
context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
true
}
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
let symbol = unsafe { &*sym_info };
let context = unsafe { &mut *(context as Context) };
// Technically NameLen isn't supposed to contain the null terminator... but it does.
// Yay!
if symbol.NameLen < 1 {
return true;
}
let symbol_name = String::from_utf16(&symbol.Name[..symbol.NameLen as usize - 1])
.expect("Failed to decode UTF-16 symbol name");
context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
true
}
#[link(name = "dbghelp")]
extern {
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
fn SymCleanup(hProcess: HANDLE) -> bool;
}
pub fn win16_for_str(s: &str) -> Vec<u16> {
OsStr::new(s).encode_wide().chain(once(0)).collect()
}
#[repr(C)]
#[derive(Clone, Default)]
pub struct SymbolContext {
pub symbols: Vec<(u64, String, u64)>,
pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
assert!(module_base != 0, "Failed to SymLoadModuleExW()");
// Get information about the module we just loaded
let mut module_info = ImagehlpModule64W::default();
assert!(SymGetModuleInfoW64(cur_process, module_base,
&mut module_info as *mut _),
"Failed to SymGetModuleInfoW64()");
// This is pedantic but we might as well check it
assert!(module_info.BaseOfImage == module_base);
assert!(SymEnumSymbolsW(cur_process, module_base, 0, sym_callback, &mut symdb as *mut _ as usize));
if !SymEnumSourceLinesW(cur_process, module_base, 0, 0, 0, 0, srcline_callback, &mut symdb as *mut _ as usize) {
// Eh just silently fail here, most people won't have private
// symbols so this would just spam
//print!("Warning: Could not enumerate sourcelines\n");
}
assert!(SymUnloadModule64(cur_process, module_base),
"Failed to SymUnloadModule64()");
assert!(SymCleanup(cur_process), "Failed to SymCleanup()");
}
symdb.symbols.sort_by_key(|x| x.0);
symdb.sourceline.sort_by_key(|x| x.0);
symdb
}
/// Get all of the symbols from a module `module_name` with a TimeDateStamp
/// and SizeOfImage from the PE header. This will automatically download the
/// module and PDB from the symbol store using symchk
pub fn get_symbols_from_module(module: &ModuleInfo)
-> std::io::Result<SymbolContext>
{
// Use symchk to download the module and symbols
let module = download_symbol(module.name(), module.time(), module.size())?;
Ok(get_symbols_from_file(&module))
}
/// Download a module and the corresponding PDB based on module_name,
/// its TimeDateStamp and SizeOfImage from its PE header
///
/// Returns a string containing a filename of the downloaded module
fn download_symbol(module_name: &str, timedatestamp: u32, sizeofimage: u32)
-> std::io::Result<String> {
let mut dir = std::env::temp_dir();
dir.push("applepie_manifest");
// Create manifest file for symchk
std::fs::write(&dir, format!("{},{:x}{:x},1\r\n",
module_name, timedatestamp, sizeofimage))?;
// Run symchk to download this module
let res = Command::new("symchk")
.arg("/v")
.arg("/im")
.arg(dir)
.output()?;
if !res.status.success() |
// Symchk apparently ran, check output
let stderr = std::str::from_utf8(&res.stderr)
.expect("Failed to convert symchk output to utf-8");
let mut filename = None;
for line in stderr.lines() {
const PREFIX: &'static str = "DBGHELP: ";
const POSTFIX: &'static str = " - OK";
// The line that contains the filename looks like:
// DBGHELP: C:\symbols\calc.exe\8f598a9eb000\calc.exe - OK
if !line.starts_with(PREFIX) { continue; }
if !line.ends_with(POSTFIX) { continue; }
// We only expect one line of output to match the above criteria
// If there are multiple we'll need to improve this "parser"
assert!(filename.is_none(), "Multiple filenames in symchk output");
// Save the filename we found
filename = Some(&line[PREFIX.len()..line.len() - POSTFIX.len()]);
}
// Fail hard if we didn't get the output filename from symchk
let filename = filename.expect("Did not get expected symchk output");
// Run symchk to download the pdb for the file
let res = Command::new("symchk")
.arg(filename)
.output()?;
if !res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Now we have downloaded the PDB for this file :)
Ok(filename.into())
}
#[test]
fn test_symchk() {
download_symbol("calc.exe", 0x8F598A9E, 0xB000)
.expect("Failed to download symbol");
}
| {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
} | conditional_block |
Initial_Breif.go | package main // we declare the main package, the entry point of a Go executable
import (
"fmt"
"strconv"
) // we import the "fmt" (format) and "strconv" packages from the standard library
func main() { // Just like C, C++, Java, Python and other languages, here also we begin with the main function
fmt.Println("Hi Buddy! I am Your GoBuddy... :)")
/* The game begins once we know all the available data types.
There are multiple ways of declaring data types.
If you are used to putting a ; at the end of a line, you can; Go has no problem with that. (AND I FORGOT, THIS IS A MULTILINE COMMENT)*/
// ret := justChecking()
// fmt.Println(ret)
// learnDataTypes()
// playWithFORLOOP()
// anonymousFuntionExample()
// funcWithParameter(456) //456 is the function parameter
// ret2 := funcWith3Parameter(10,11,12)
// fmt.Println("The result of funcWith3Parameter() is: ",ret2)
// val1,val2 := funtionReturning2values()
// fmt.Println("VAL1: ",val1,",VAL2: ",val2)
// practiceArray()
// practiceSlices()
// use_defer()
// how_to_RECOVER()
// panicAndRecover()
// learnPointers()
// ---Uncomment this part to learn the Implementation of structures and their associated functions--------------
// rec1 := Rectangle{10,20,10,20}
// //We just created an object named rec of Rectangle structure. Through this object we can access the structure data items
// fmt.Println("Side1 is: ",rec1.side1)
// fmt.Println("Side2 is: ",rec1.side2)
// fmt.Println("Side3 is: ",rec1.side3)
// fmt.Println("Side4 is: ",rec1.side4)
// fmt.Println("The area of the Rectangle is: ",rec1.Area() )
// ------------------------------------------------------------------------------------------------------------
}
type Rectangle struct{
side1 int
side2 int
side3 int
side4 int
} //We just defined a structure named Rectangle with 4 sides
// If you understand the above structure, now let's create a function associated with the Rectangle structure
func (r Rectangle) Area() int{
return r.side1 * r.side2
}
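// Note: (r Rectangle) is a value receiver, so Area works on a copy of the
// struct; a pointer receiver (r *Rectangle) would be needed to mutate it.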
func learnPointers(){
/* Pointers have always been tough for me to understand and implement, but I have now developed a method that makes them click */
valX := 123
fmt.Println("Initial Value of X: ",valX)
changeValUsingReference(&valX) // pointer parameters need the address of the variable, not the variable itself
fmt.Println("Function manipulated Value of X: ",valX)
// Another Experiment to do it
addresX := &valX
changeValUsingReference(addresX)
fmt.Println("Function manipulated Value of X: ",valX)
}
func changeValUsingReference(x *int){ //The parameter must be a pointer to accept address
fmt.Println("Location of X in memory: ",x)
*x = *x * 2 // read * as "content of": *x is the value stored at the address x points to; you will never jumble them again
// I just changed the content at that memory address, which was earlier 123 and is now 246
}
func panicAndRecover(){
// This is the Go style of TRY AND CATCH: panic with a deferred recover
defer func(){
fmt.Println(recover())
}()
panic("I m PANICCCCC!!, Sending Instructions to recover function")
}
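// recover() only stops a panic when it is called directly inside a deferred
// function, as above; called anywhere else it simply returns nil.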
func how_to_RECOVER(){
fmt.Println(perfromDivision1(100,0))
fmt.Println(perfromDivision1(100,1))
}
func perfromDivision1(n1 int, n2 int)string{
defer func(){
fmt.Println(recover())
fmt.Println("I am the saviour of this program, I didn't let the program stop :)")
}()
res := n1/n2
fmt.Println(res)
r := "The result of the Division is: "+strconv.Itoa(res) // I just converted Int64 to Sting, strconv is imported for this purpose
return r
/*After executing this function, you must have observed that a "Runtime error" would normally occur, but with the use of the built-in
recover function inside the deferred call, the flow of the program didn't stop and it continued its operation. So recover can be used wherever there is a
chance of an Error occurring*/
}
func use_defer() {
/* The defer statement in Go runs a function last, after all the other statements in this block have executed.
Consider an example to understand this: we get some books issued from the library, which are of interest to us.
Now we perform the tasks of 1) reading, 2) understanding, and 3) creating notes for the future.
After all the tasks have executed, the final task is to return the books (this is a clean-up task).
This is why defer is used: to execute the "CLEAN UP TASK".*/
defer playWithFORLOOP()
fmt.Println("-----------------------------------------------------------------")
learnDataTypes()
fmt.Println("-----------------------------------------------------------------")
ret := justChecking()
fmt.Println(ret)
fmt.Println("-----------------------------------------------------------------")
// playWithFORLOOP will execute at last
// learnDataTypes will execute first since its the first in queue Now
// justChecking will execute second
}
func practiceSlices()bool{
// Slices can be considered views into a subpart of an underlying array
exampleArray := []int{1,2,3,4,5,6}
// The literal above has no size in the brackets, so it is really a slice, which acts like a dynamic array
slice1 := exampleArray[2:4]
slice2 := exampleArray[2:]
fmt.Println(slice1)
fmt.Println(slice2)
slice3 := make([]int,5,10)
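// make([]int, 5, 10) builds a slice with length 5 (elements zero-valued) on
// top of an array with capacity 10, so appends up to 10 avoid reallocation.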
for i,value := range slice3{
fmt.Println(value,i)
}
return true
}
func practiceArray()bool {
// Below this line is the declaration of an array:
// var <array_name> [<size of array>]<data type of elements>
var exampleArray[5] int
// naive assignment method which we all are familiar with
exampleArray[0] = 1
exampleArray[1] = 12
exampleArray[2] = 123
exampleArray[3] = 234
exampleArray[4] = 345
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray)
exampleArray2 := [5]float32{1,2,3,4,5}
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray2)
// How to traverse in an array
fmt.Println("--------------------------------------------------")
for i,value := range exampleArray2{
fmt.Println(value,i) //You must have observed that there were indexes as well,
}
fmt.Println("--------------------------------------------------")
// Use this traversal if you don't want the index to be printed
for _,value := range exampleArray2{
fmt.Println(value) //You must have observed that there were indexes as well,
}
return true
}
func learnDataTypes()bool {
var int_EX1 uint8 = 10 //First way to declare data type, UNSIGNED 8-BIT INT
const int_EX2 uint16 = 10 //CONSTANT UNSIGNED 16-BIT INT
var int_EX3 uint32 = 10 //UNSIGNED 32-BIT INT
var int_EX4 uint64 = 11 //UNSIGNED 64-BIT INT
var int_EX5 int8 = 10 //SIGNED 8-BIT INT
var int_EX6 int16 = 10 //SIGNED 16-BIT INT
var int_EX7 int32 = 10 //SIGNED 32-BIT INT
var int_EX8 int64 = 10 //SIGNED 64-BIT INT
var int_EX9 int = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX10 uint = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX11 uintptr = 10 //value is 32-BIT on 32-BIT system, mine 64
var EX1 byte = 11 // alias for uint8
var EX2 rune = 89 //alias for int32
int_EX12 := 11 //DEFAULT data type SIGNED 64-BIT INT
var float_EX1 float32 = 10
var float_EX2 = 1.22 //The we are used to declare variables #DEFAULT
var bool_EX bool = true //BOOL Example
var string_EX = "Hi.. This is being Done by Nikhil Chawla" //Simple string data-type
float_EX3 := 26666666666666666666666666666666.33 //Here we are moving from right to left,variable data type is decided by the data on the right
fmt.Println("Following are the data types")
fmt.Printf("%T : %d\n",int_EX1,int_EX1)
fmt.Printf("%T : %d\n",int_EX2,int_EX2)
fmt.Printf("%T : %d\n",int_EX3,int_EX3)
fmt.Printf("%T : %d\n",int_EX4,int_EX4)
fmt.Printf("%T : %d\n",int_EX5,int_EX5)
fmt.Printf("%T : %d\n",int_EX6,int_EX6)
fmt.Printf("%T : %d\n",int_EX7,int_EX7)
fmt.Printf("%T : %d\n",int_EX8,int_EX8)
fmt.Printf("%T : %d\n",int_EX9,int_EX9)
fmt.Printf("%T : %d\n",int_EX10,int_EX10)
fmt.Printf("%T : %d\n",int_EX11,int_EX11)
fmt.Printf("%T : %d\n",EX1,EX1)
fmt.Printf("%T : %d\n",EX2,EX2)
fmt.Printf("%T : %d\n",int_EX12,int_EX12)
fmt.Printf("%T : %f\n",float_EX1,float_EX1)
fmt.Printf("%T : %f\n",float_EX2,float_EX2)
fmt.Printf("%T : %f\n",float_EX3,float_EX3)
fmt.Printf("%T : %v\n",string_EX,string_EX)
fmt.Printf("%T : %v\n",bool_EX,bool_EX)
return true
}
//I just proved that I was compiled not Interpreted .. ;)
func justChecking()string {
fmt.Println("YO .. man !.. I am A userdefined function ,, ready to GO")
return "The function is working"
}
func playWithFORLOOP()bool{
//2 WAYS TO EXECUTE
//1. TYPICAL WAY
for i := 1; i <= 10; i += 1 {
if (i % 2 == 0) || (i % 3 == 0) | else if (i % 1 == 0) && (i % 2 == 0){ // LOGICAL AND
fmt.Println("Condition 2")
fmt.Println(i)
} else {
fmt.Println("Condition DEFAULT")
fmt.Println("No If else is satisfied")
}
}
// 2. LIKE WHILE LOOP in C,C++,JAVA,PYTHON
i := 1
for i <=10{
fmt.Println(i)
i += 1
}
return true
}
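// Go has no separate while keyword: "for condition { }" as above plays that
// role, and a bare "for { }" gives an infinite loop.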
func anonymousFuntionExample()bool {
// If you've ever done JavaScript, you must be familiar with anonymous functions
num := 5
anonyEx := func ()int {
num = num*2
return num
}
fmt.Println("OUTPUT OF THE ANONYMOUS FUNCTION: ",anonyEx)
return true
}
func funcWithParameter(para int)bool {
fmt.Println("The parameter passed to the function is: ",para)
return true
}
func funcWith3Parameter(para1 int,para2 int,para3 int,)int {
fmt.Println("The 3 parameters are: ",para1,para2,para3)
// NOW I AM THINKING TO RETURN THE PRODUCT OF THESE NUMBERS
return para1*para2*para3
}
func funtionReturning2values()(string,int) {
// DON'T FORGET TO CHANGE AND INCREASE THE RETURN TYPES
ret1 := "My AGe is"
ret2 := 22
return ret1, ret2
}
| { //LOGICAL OR
fmt.Println("Condition 1")
fmt.Println(i)
} | conditional_block |
Initial_Breif.go | package main // we declare the main package, the entry point of a Go executable
import (
"fmt"
"strconv"
) // we import the "fmt" (format) and "strconv" packages from the standard library
func main() { // Just like C, C++, Java, Python and other languages, here also we begin with the main function
fmt.Println("Hi Buddy! I am Your GoBuddy... :)")
/* The game begins once we know all the available data types.
There are multiple ways of declaring data types.
If you are used to putting a ; at the end of a line, you can; Go has no problem with that. (AND I FORGOT, THIS IS A MULTILINE COMMENT)*/
// ret := justChecking()
// fmt.Println(ret)
// learnDataTypes()
// playWithFORLOOP()
// anonymousFuntionExample()
// funcWithParameter(456) //456 is the function parameter
// ret2 := funcWith3Parameter(10,11,12)
// fmt.Println("The result of funcWith3Parameter() is: ",ret2)
// val1,val2 := funtionReturning2values()
// fmt.Println("VAL1: ",val1,",VAL2: ",val2)
// practiceArray()
// practiceSlices()
// use_defer()
// how_to_RECOVER()
// panicAndRecover()
// learnPointers()
// ---Uncomment this part to learn the Implementation of structures and their associated functions--------------
// rec1 := Rectangle{10,20,10,20}
// //We just created an object named rec of Rectangle structure. Through this object we can access the structure data items
// fmt.Println("Side1 is: ",rec1.side1)
// fmt.Println("Side2 is: ",rec1.side2)
// fmt.Println("Side3 is: ",rec1.side3)
// fmt.Println("Side4 is: ",rec1.side4)
// fmt.Println("The area of the Rectangle is: ",rec1.Area() )
// ------------------------------------------------------------------------------------------------------------
}
type Rectangle struct{
side1 int
side2 int
side3 int
side4 int
} //We just defined a structure named Rectangle with 4 sides
// If you understand the above structure, now let's create a function associated with the Rectangle structure
func (r Rectangle) Area() int{
return r.side1 * r.side2
}
func learnPointers(){
/* Pointers have always been tough for me to understand and implement, but I have now developed a method that makes them click */
valX := 123
fmt.Println("Initial Value of X: ",valX)
changeValUsingReference(&valX) // pointer parameters need the address of the variable, not the variable itself
fmt.Println("Function manipulated Value of X: ",valX)
// Another Experiment to do it
addresX := &valX
changeValUsingReference(addresX)
fmt.Println("Function manipulated Value of X: ",valX)
}
func changeValUsingReference(x *int){ //The parameter must be a pointer to accept address
fmt.Println("Location of X in memory: ",x)
*x = *x * 2 // read * as "content of": *x is the value stored at the address x points to; you will never jumble them again
// I just changed the content at that memory address, which was earlier 123 and is now 246
}
func panicAndRecover(){
// This is the Go style of TRY AND CATCH: panic with a deferred recover
defer func(){
fmt.Println(recover())
}()
panic("I m PANICCCCC!!, Sending Instructions to recover function")
}
func how_to_RECOVER() |
func perfromDivision1(n1 int, n2 int)string{
defer func(){
fmt.Println(recover())
fmt.Println("I am the saviour of this program, I didn't let the program stop :)")
}()
res := n1/n2
fmt.Println(res)
r := "The result of the Division is: "+strconv.Itoa(res) // I just converted Int64 to Sting, strconv is imported for this purpose
return r
/*After executing this function, you must have observed that a "Runtime error" would normally occur, but with the use of the built-in
recover function inside the deferred call, the flow of the program didn't stop and it continued its operation. So recover can be used wherever there is a
chance of an Error occurring*/
}
func use_defer() {
/* The defer statement in Go runs a function last, after all the other statements in this block have executed.
Consider an example to understand this: we get some books issued from the library, which are of interest to us.
Now we perform the tasks of 1) reading, 2) understanding, and 3) creating notes for the future.
After all the tasks have executed, the final task is to return the books (this is a clean-up task).
This is why defer is used: to execute the "CLEAN UP TASK".*/
defer playWithFORLOOP()
fmt.Println("-----------------------------------------------------------------")
learnDataTypes()
fmt.Println("-----------------------------------------------------------------")
ret := justChecking()
fmt.Println(ret)
fmt.Println("-----------------------------------------------------------------")
// playWithFORLOOP will execute at last
// learnDataTypes will execute first since its the first in queue Now
// justChecking will execute second
}
func practiceSlices()bool{
// Slices can be considered views into a subpart of an underlying array
exampleArray := []int{1,2,3,4,5,6}
// The literal above has no size in the brackets, so it is really a slice, which acts like a dynamic array
slice1 := exampleArray[2:4]
slice2 := exampleArray[2:]
fmt.Println(slice1)
fmt.Println(slice2)
slice3 := make([]int,5,10)
for i,value := range slice3{
fmt.Println(value,i)
}
return true
}
func practiceArray()bool {
// Below this line is the declaration of an array:
// var <array_name> [<size of array>]<data type of elements>
var exampleArray[5] int
// naive assignment method which we all are familiar with
exampleArray[0] = 1
exampleArray[1] = 12
exampleArray[2] = 123
exampleArray[3] = 234
exampleArray[4] = 345
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray)
exampleArray2 := [5]float32{1,2,3,4,5}
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray2)
// How to traverse in an array
fmt.Println("--------------------------------------------------")
for i,value := range exampleArray2{
fmt.Println(value,i) //You must have observed that there were indexes as well,
}
fmt.Println("--------------------------------------------------")
// Use this traversal if you don't want the index to be printed
for _,value := range exampleArray2{
fmt.Println(value) //You must have observed that there were indexes as well,
}
return true
}
func learnDataTypes()bool {
var int_EX1 uint8 = 10 //First way to declare data type, UNSIGNED 8-BIT INT
const int_EX2 uint16 = 10 //CONSTANT UNSIGNED 16-BIT INT
var int_EX3 uint32 = 10 //UNSIGNED 32-BIT INT
var int_EX4 uint64 = 11 //UNSIGNED 64-BIT INT
var int_EX5 int8 = 10 //SIGNED 8-BIT INT
var int_EX6 int16 = 10 //SIGNED 16-BIT INT
var int_EX7 int32 = 10 //SIGNED 32-BIT INT
var int_EX8 int64 = 10 //SIGNED 64-BIT INT
var int_EX9 int = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX10 uint = 10 //value is 32-BIT on 32-BIT system, mine 64
var int_EX11 uintptr = 10 //value is 32-BIT on 32-BIT system, mine 64
var EX1 byte = 11 // alias for uint8
var EX2 rune = 89 //alias for int32
int_EX12 := 11 //DEFAULT data type SIGNED 64-BIT INT
var float_EX1 float32 = 10
var float_EX2 = 1.22 //The we are used to declare variables #DEFAULT
var bool_EX bool = true //BOOL Example
var string_EX = "Hi.. This is being Done by Nikhil Chawla" //Simple string data-type
float_EX3 := 26666666666666666666666666666666.33 //Here we are moving from right to left,variable data type is decided by the data on the right
fmt.Println("Following are the data types")
fmt.Printf("%T : %d\n",int_EX1,int_EX1)
fmt.Printf("%T : %d\n",int_EX2,int_EX2)
fmt.Printf("%T : %d\n",int_EX3,int_EX3)
fmt.Printf("%T : %d\n",int_EX4,int_EX4)
fmt.Printf("%T : %d\n",int_EX5,int_EX5)
fmt.Printf("%T : %d\n",int_EX6,int_EX6)
fmt.Printf("%T : %d\n",int_EX7,int_EX7)
fmt.Printf("%T : %d\n",int_EX8,int_EX8)
fmt.Printf("%T : %d\n",int_EX9,int_EX9)
fmt.Printf("%T : %d\n",int_EX10,int_EX10)
fmt.Printf("%T : %d\n",int_EX11,int_EX11)
fmt.Printf("%T : %d\n",EX1,EX1)
fmt.Printf("%T : %d\n",EX2,EX2)
fmt.Printf("%T : %d\n",int_EX12,int_EX12)
fmt.Printf("%T : %f\n",float_EX1,float_EX1)
fmt.Printf("%T : %f\n",float_EX2,float_EX2)
fmt.Printf("%T : %f\n",float_EX3,float_EX3)
fmt.Printf("%T : %v\n",string_EX,string_EX)
fmt.Printf("%T : %v\n",bool_EX,bool_EX)
return true
}
//I just proved that I was compiled not Interpreted .. ;)
func justChecking()string {
fmt.Println("YO .. man !.. I am A userdefined function ,, ready to GO")
return "The function is working"
}
func playWithFORLOOP()bool{
//2 WAYS TO EXECUTE
//1. TYPICAL WAY
for i := 1; i <= 10; i += 1 {
if (i % 2 == 0) || (i % 3 == 0){ //LOGICAL OR
fmt.Println("Condition 1")
fmt.Println(i)
} else if (i % 1 == 0) && (i % 2 == 0){ // LOGICAL AND
fmt.Println("Condition 2")
fmt.Println(i)
} else {
fmt.Println("Condition DEFAULT")
fmt.Println("No If else is satisfied")
}
}
// 2. LIKE WHILE LOOP in C,C++,JAVA,PYTHON
i := 1
for i <=10{
fmt.Println(i)
i += 1
}
return true
}
func anonymousFuntionExample()bool {
// If you've ever done JavaScript, you must be familiar with anonymous functions
num := 5
anonyEx := func ()int {
num = num*2
return num
}
fmt.Println("OUTPUT OF THE ANONYMOUS FUNCTION: ",anonyEx)
return true
}
func funcWithParameter(para int)bool {
fmt.Println("The parameter passed to the function is: ",para)
return true
}
func funcWith3Parameter(para1 int,para2 int,para3 int,)int {
fmt.Println("The 3 parameters are: ",para1,para2,para3)
// NOW I AM THINKING TO RETURN THE PRODUCT OF THESE NUMBERS
return para1*para2*para3
}
func funtionReturning2values()(string,int) {
// DON'T FORGET TO DECLARE EVERY RETURN TYPE IN THE SIGNATURE
ret1 := "My AGe is"
ret2 := 22
return ret1, ret2
}
| {
fmt.Println(perfromDivision1(100,0))
fmt.Println(perfromDivision1(100,1))
} | identifier_body |
Initial_Breif.go | package main //every executable Go program belongs to the main package
import (
"fmt"
"strconv"
) // we import the fmt ("format") and strconv packages from the standard library
func main() { //Just like C, C++, Java, Python and other languages, execution begins with the main function
fmt.Println("Hi Buddy! I am Your GoBuddy... :)") | There are multiple ways of declaring data types
If you are used to putting ; at the end of a line, you can; Go has no problem with that. (AND I FORGOT, THIS IS A MULTILINE COMMENT)*/
// ret := justChecking()
// fmt.Println(ret)
// learnDataTypes()
// playWithFORLOOP()
// anonymousFuntionExample()
// funcWithParameter(456) //456 is the function parameter
// ret2 := funcWith3Parameter(10,11,12)
// fmt.Println("The result of funcWith3Parameter() is: ",ret2)
// val1,val2 := funtionReturning2values()
// fmt.Println("VAL1: ",val1,",VAL2: ",val2)
// practiceArray()
// practiceSlices()
// use_defer()
// how_to_RECOVER()
// panicAndRecover()
// learnPointers()
// ---Uncomment this part to learn the Implementation of structures and their associated functions--------------
// rec1 := Rectangle{10,20,10,20}
	// //We just created a value named rec1 of the Rectangle struct. Through it we can access the struct's fields
// fmt.Println("Side1 is: ",rec1.side1)
// fmt.Println("Side2 is: ",rec1.side2)
// fmt.Println("Side3 is: ",rec1.side3)
// fmt.Println("Side4 is: ",rec1.side4)
// fmt.Println("The area of the Rectangle is: ",rec1.Area() )
// ------------------------------------------------------------------------------------------------------------
}
type Rectangle struct{
side1 int
side2 int
side3 int
side4 int
} //We just defined a structure named Rectangle with 4 sides
// If you understand the struct above, now let's create a function associated with the Rectangle struct (i.e. a method)
func (r Rectangle) Area() int{
return r.side1 * r.side2
}
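// Illustrative aside: methods can also take a pointer receiver when they need
// to modify the struct in place. A hypothetical sketch (Scale is not part of
// the original file):
func (r *Rectangle) Scale(factor int) {
	r.side1 *= factor // pointer receiver, so the caller's Rectangle is updated
	r.side2 *= factor
	r.side3 *= factor
	r.side4 *= factor
}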
func learnPointers(){
	/* Pointers have always been tough for me to understand and implement, but I eventually developed a method for making sense of them */
valX := 123
fmt.Println("Initial Value of X: ",valX)
	changeValUsingReference(&valX) // pointers require passing the address of the variable, not the variable itself
fmt.Println("Function manipulated Value of X: ",valX)
// Another Experiment to do it
addresX := &valX
changeValUsingReference(addresX)
fmt.Println("Function manipulated Value of X: ",valX)
}
func changeValUsingReference(x *int) { // the parameter must be a pointer to accept an address
fmt.Println("Location of X in memory: ",x)
	*x = *x * 2 // read * as "the content of"; *x dereferences the pointer, so you will never mix them up again
	// I just changed the content at that memory address, which was 123 before and is now 246
}
func panicAndRecover(){
// This is GOLANG style of TRY AND CATCH
defer func(){
fmt.Println(recover())
}()
panic("I m PANICCCCC!!, Sending Instructions to recover function")
}
func how_to_RECOVER(){
fmt.Println(perfromDivision1(100,0))
fmt.Println(perfromDivision1(100,1))
}
func perfromDivision1(n1 int, n2 int)string{
defer func(){
fmt.Println(recover())
fmt.Println("I am the saviour of this program, I didn't let the program stop :)")
}()
res := n1/n2
fmt.Println(res)
r := "The result of the Division is: "+strconv.Itoa(res) // I just converted Int64 to Sting, strconv is imported for this purpose
return r
	/*After executing this function you would normally see a runtime error, but thanks to the deferred "recover"
	built-in, the flow of the program didn't stop and continued its operation. So recover can be used wherever there is a
	chance of an error occurring*/
}
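// Illustrative aside: recover() returns nil when no panic is in flight, so a
// deferred handler can distinguish the two cases. A minimal sketch, not part
// of the original file:
func recoverSketch() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered from:", r)
		}
	}()
	panic("something went wrong")
}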
func use_defer() {
	/* The defer statement in Go runs a function last, after everything else in the surrounding function has executed.
	Consider an example: we get some books issued from the library, books of our interest.
	Now we perform the tasks of 1) reading 2) understanding 3) creating notes for the future.
	After all the tasks are done, the final task is to return the book (a clean-up task).
	This is why defer is used: to execute the "CLEAN UP TASK".*/
defer playWithFORLOOP()
fmt.Println("-----------------------------------------------------------------")
learnDataTypes()
fmt.Println("-----------------------------------------------------------------")
ret := justChecking()
fmt.Println(ret)
fmt.Println("-----------------------------------------------------------------")
	// playWithFORLOOP will execute last
	// learnDataTypes executes first since it is now first in the queue
	// justChecking executes second
}
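// Illustrative aside: multiple defers run in LIFO order. A minimal sketch, not
// part of the original file:
func deferOrderSketch() {
	defer fmt.Println("first deferred, printed last")
	defer fmt.Println("second deferred, printed second")
	defer fmt.Println("third deferred, printed first")
	fmt.Println("the function body runs before any deferred call")
}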
func practiceSlices()bool{
	// Slices can be considered views into an underlying array
	exampleArray := []int{1, 2, 3, 4, 5, 6}
	// the literal above actually creates a slice, Go's dynamically-sized sequence type
slice1 := exampleArray[2:4]
slice2 := exampleArray[2:]
fmt.Println(slice1)
fmt.Println(slice2)
	slice3 := make([]int, 5, 10) // length 5, capacity 10
for i,value := range slice3{
fmt.Println(value,i)
}
return true
}
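// Illustrative aside: slices share their backing array until append outgrows
// the capacity. A minimal sketch, not part of the original file:
func sliceGrowthSketch() {
	s := make([]int, 0, 2)
	fmt.Println(len(s), cap(s)) // 0 2
	s = append(s, 1, 2)
	fmt.Println(len(s), cap(s)) // 2 2
	s = append(s, 3)            // exceeds the capacity, so a new backing array is allocated
	fmt.Println(len(s), cap(s)) // 3 and a larger capacity (typically 4)
}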
func practiceArray()bool {
	//Below this line is the declaration of an array
	// var <array_name> [size]<element data-type>
var exampleArray[5] int
// naive assignment method which we all are familiar with
exampleArray[0] = 1
exampleArray[1] = 12
exampleArray[2] = 123
exampleArray[3] = 234
exampleArray[4] = 345
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray)
exampleArray2 := [5]float32{1,2,3,4,5}
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray2)
	// How to traverse an array
	fmt.Println("--------------------------------------------------")
	for i, value := range exampleArray2 {
		fmt.Println(value, i) // the index comes along with each value
	}
	fmt.Println("--------------------------------------------------")
	// Use this traversal if you don't want the index to be printed
	for _, value := range exampleArray2 {
		fmt.Println(value) // only the value; the index is discarded with _
	}
return true
}
func learnDataTypes()bool {
var int_EX1 uint8 = 10 //First way to declare data type, UNSIGNED 8-BIT INT
const int_EX2 uint16 = 10 //CONSTANT UNSIGNED 16-BIT INT
var int_EX3 uint32 = 10 //UNSIGNED 32-BIT INT
var int_EX4 uint64 = 11 //UNSIGNED 64-BIT INT
var int_EX5 int8 = 10 //SIGNED 8-BIT INT
var int_EX6 int16 = 10 //SIGNED 16-BIT INT
var int_EX7 int32 = 10 //SIGNED 32-BIT INT
var int_EX8 int64 = 10 //SIGNED 64-BIT INT
	var int_EX9 int = 10      //32-BIT on a 32-BIT system, 64-BIT on mine
	var int_EX10 uint = 10    //32-BIT on a 32-BIT system, 64-BIT on mine
	var int_EX11 uintptr = 10 //an integer wide enough to hold a pointer; 64-BIT on mine
	var EX1 byte = 11         // alias for uint8
	var EX2 rune = 89         // alias for int32
	int_EX12 := 11            //:= infers the DEFAULT type int (64-BIT on my system)
	var float_EX1 float32 = 10
	var float_EX2 = 1.22      //the way we usually declare variables; type inferred (DEFAULT float64)
	var bool_EX bool = true   //BOOL example
	var string_EX = "Hi.. This is being Done by Nikhil Chawla" //simple string data type
	float_EX3 := 26666666666666666666666666666666.33 //reading right to left: the variable's type is decided by the value on the right
fmt.Println("Following are the data types")
fmt.Printf("%T : %d\n",int_EX1,int_EX1)
fmt.Printf("%T : %d\n",int_EX2,int_EX2)
fmt.Printf("%T : %d\n",int_EX3,int_EX3)
fmt.Printf("%T : %d\n",int_EX4,int_EX4)
fmt.Printf("%T : %d\n",int_EX5,int_EX5)
fmt.Printf("%T : %d\n",int_EX6,int_EX6)
fmt.Printf("%T : %d\n",int_EX7,int_EX7)
fmt.Printf("%T : %d\n",int_EX8,int_EX8)
fmt.Printf("%T : %d\n",int_EX9,int_EX9)
fmt.Printf("%T : %d\n",int_EX10,int_EX10)
fmt.Printf("%T : %d\n",int_EX11,int_EX11)
fmt.Printf("%T : %d\n",EX1,EX1)
fmt.Printf("%T : %d\n",EX2,EX2)
fmt.Printf("%T : %d\n",int_EX12,int_EX12)
fmt.Printf("%T : %f\n",float_EX1,float_EX1)
fmt.Printf("%T : %f\n",float_EX2,float_EX2)
fmt.Printf("%T : %f\n",float_EX3,float_EX3)
fmt.Printf("%T : %v\n",string_EX,string_EX)
fmt.Printf("%T : %v\n",bool_EX,bool_EX)
return true
}
//I just proved that I am compiled, not interpreted .. ;)
func justChecking()string {
fmt.Println("YO .. man !.. I am A userdefined function ,, ready to GO")
return "The function is working"
}
func playWithFORLOOP()bool{
//2 WAYS TO EXECUTE
//1. TYPICAL WAY
for i := 1; i <= 10; i += 1 {
if (i % 2 == 0) || (i % 3 == 0){ //LOGICAL OR
fmt.Println("Condition 1")
fmt.Println(i)
} else if (i % 1 == 0) && (i % 2 == 0){ // LOGICAL AND (note: any even i already matched Condition 1, so this branch never runs)
fmt.Println("Condition 2")
fmt.Println(i)
} else {
fmt.Println("Condition DEFAULT")
fmt.Println("No If else is satisfied")
}
}
// 2. LIKE WHILE LOOP in C,C++,JAVA,PYTHON
i := 1
for i <=10{
fmt.Println(i)
i += 1
}
return true
}
func anonymousFuntionExample()bool {
	//If you've ever done JavaScript, you must be familiar with anonymous functions
	num := 5
	anonyEx := func() int {
		num = num * 2
		return num
	}
	fmt.Println("OUTPUT OF THE ANONYMOUS FUNCTION: ", anonyEx()) // call it; printing anonyEx itself would print a function value, not the result
return true
}
func funcWithParameter(para int)bool {
fmt.Println("The parameter passed to the function is: ",para)
return true
}
func funcWith3Parameter(para1 int, para2 int, para3 int) int {
fmt.Println("The 3 parameters are: ",para1,para2,para3)
// NOW I AM THINKING TO RETURN THE PRODUCT OF THESE NUMBERS
return para1*para2*para3
}
func funtionReturning2values()(string,int) {
// DON'T FORGET TO DECLARE EVERY RETURN TYPE IN THE SIGNATURE
ret1 := "My AGe is"
ret2 := 22
return ret1, ret2
} | /* The game begins If we know all the available Data Types | random_line_split |
Initial_Breif.go | package main //every executable Go program belongs to the main package
import (
"fmt"
"strconv"
) // we import the fmt ("format") and strconv packages from the standard library
func main() { //Just like C, C++, Java, Python and other languages, execution begins with the main function
fmt.Println("Hi Buddy! I am Your GoBuddy... :)")
/* The game begins If we know all the available Data Types
There are multiple ways of declaring data types
If you are used to putting ; at the end of a line, you can; Go has no problem with that. (AND I FORGOT, THIS IS A MULTILINE COMMENT)*/
// ret := justChecking()
// fmt.Println(ret)
// learnDataTypes()
// playWithFORLOOP()
// anonymousFuntionExample()
// funcWithParameter(456) //456 is the function parameter
// ret2 := funcWith3Parameter(10,11,12)
// fmt.Println("The result of funcWith3Parameter() is: ",ret2)
// val1,val2 := funtionReturning2values()
// fmt.Println("VAL1: ",val1,",VAL2: ",val2)
// practiceArray()
// practiceSlices()
// use_defer()
// how_to_RECOVER()
// panicAndRecover()
// learnPointers()
// ---Uncomment this part to learn the Implementation of structures and their associated functions--------------
// rec1 := Rectangle{10,20,10,20}
	// //We just created a value named rec1 of the Rectangle struct. Through it we can access the struct's fields
// fmt.Println("Side1 is: ",rec1.side1)
// fmt.Println("Side2 is: ",rec1.side2)
// fmt.Println("Side3 is: ",rec1.side3)
// fmt.Println("Side4 is: ",rec1.side4)
// fmt.Println("The area of the Rectangle is: ",rec1.Area() )
// ------------------------------------------------------------------------------------------------------------
}
type Rectangle struct{
side1 int
side2 int
side3 int
side4 int
} //We just defined a structure named Rectangle with 4 sides
// If you understand the struct above, now let's create a function associated with the Rectangle struct (i.e. a method)
func (r Rectangle) Area() int{
return r.side1 * r.side2
}
func learnPointers(){
	/* Pointers have always been tough for me to understand and implement, but I eventually developed a method for making sense of them */
valX := 123
fmt.Println("Initial Value of X: ",valX)
	changeValUsingReference(&valX) // pointers require passing the address of the variable, not the variable itself
fmt.Println("Function manipulated Value of X: ",valX)
// Another Experiment to do it
addresX := &valX
changeValUsingReference(addresX)
fmt.Println("Function manipulated Value of X: ",valX)
}
func changeValUsingReference(x *int) { // the parameter must be a pointer to accept an address
fmt.Println("Location of X in memory: ",x)
	*x = *x * 2 // read * as "the content of"; *x dereferences the pointer, so you will never mix them up again
	// I just changed the content at that memory address, which was 123 before and is now 246
}
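// Illustrative aside: the same pass-by-address idea lets a function swap two
// variables in place. A minimal sketch, not part of the original file:
func swapSketch(a *int, b *int) {
	*a, *b = *b, *a // dereference both pointers and exchange their contents
}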
func panicAndRecover(){
// This is GOLANG style of TRY AND CATCH
defer func(){
fmt.Println(recover())
}()
panic("I m PANICCCCC!!, Sending Instructions to recover function")
}
func how_to_RECOVER(){
fmt.Println(perfromDivision1(100,0))
fmt.Println(perfromDivision1(100,1))
}
func | (n1 int, n2 int)string{
defer func(){
fmt.Println(recover())
fmt.Println("I am the saviour of this program, I didn't let the program stop :)")
}()
res := n1/n2
fmt.Println(res)
r := "The result of the Division is: "+strconv.Itoa(res) // I just converted Int64 to Sting, strconv is imported for this purpose
return r
	/*After executing this function you would normally see a runtime error, but thanks to the deferred "recover"
	built-in, the flow of the program didn't stop and continued its operation. So recover can be used wherever there is a
	chance of an error occurring*/
}
func use_defer() {
	/* The defer statement in Go runs a function last, after everything else in the surrounding function has executed.
	Consider an example: we get some books issued from the library, books of our interest.
	Now we perform the tasks of 1) reading 2) understanding 3) creating notes for the future.
	After all the tasks are done, the final task is to return the book (a clean-up task).
	This is why defer is used: to execute the "CLEAN UP TASK".*/
defer playWithFORLOOP()
fmt.Println("-----------------------------------------------------------------")
learnDataTypes()
fmt.Println("-----------------------------------------------------------------")
ret := justChecking()
fmt.Println(ret)
fmt.Println("-----------------------------------------------------------------")
	// playWithFORLOOP will execute last
	// learnDataTypes executes first since it is now first in the queue
	// justChecking executes second
}
func practiceSlices()bool{
	// Slices can be considered views into an underlying array
	exampleArray := []int{1, 2, 3, 4, 5, 6}
	// the literal above actually creates a slice, Go's dynamically-sized sequence type
slice1 := exampleArray[2:4]
slice2 := exampleArray[2:]
fmt.Println(slice1)
fmt.Println(slice2)
	slice3 := make([]int, 5, 10) // length 5, capacity 10
for i,value := range slice3{
fmt.Println(value,i)
}
return true
}
func practiceArray()bool {
	//Below this line is the declaration of an array
	// var <array_name> [size]<element data-type>
var exampleArray[5] int
// naive assignment method which we all are familiar with
exampleArray[0] = 1
exampleArray[1] = 12
exampleArray[2] = 123
exampleArray[3] = 234
exampleArray[4] = 345
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray)
exampleArray2 := [5]float32{1,2,3,4,5}
fmt.Println("--------------------------------------------------")
fmt.Println(exampleArray2)
	// How to traverse an array
	fmt.Println("--------------------------------------------------")
	for i, value := range exampleArray2 {
		fmt.Println(value, i) // the index comes along with each value
	}
	fmt.Println("--------------------------------------------------")
	// Use this traversal if you don't want the index to be printed
	for _, value := range exampleArray2 {
		fmt.Println(value) // only the value; the index is discarded with _
	}
return true
}
func learnDataTypes()bool {
var int_EX1 uint8 = 10 //First way to declare data type, UNSIGNED 8-BIT INT
const int_EX2 uint16 = 10 //CONSTANT UNSIGNED 16-BIT INT
var int_EX3 uint32 = 10 //UNSIGNED 32-BIT INT
var int_EX4 uint64 = 11 //UNSIGNED 64-BIT INT
var int_EX5 int8 = 10 //SIGNED 8-BIT INT
var int_EX6 int16 = 10 //SIGNED 16-BIT INT
var int_EX7 int32 = 10 //SIGNED 32-BIT INT
var int_EX8 int64 = 10 //SIGNED 64-BIT INT
	var int_EX9 int = 10      //32-BIT on a 32-BIT system, 64-BIT on mine
	var int_EX10 uint = 10    //32-BIT on a 32-BIT system, 64-BIT on mine
	var int_EX11 uintptr = 10 //an integer wide enough to hold a pointer; 64-BIT on mine
	var EX1 byte = 11         // alias for uint8
	var EX2 rune = 89         // alias for int32
	int_EX12 := 11            //:= infers the DEFAULT type int (64-BIT on my system)
	var float_EX1 float32 = 10
	var float_EX2 = 1.22      //the way we usually declare variables; type inferred (DEFAULT float64)
	var bool_EX bool = true   //BOOL example
	var string_EX = "Hi.. This is being Done by Nikhil Chawla" //simple string data type
	float_EX3 := 26666666666666666666666666666666.33 //reading right to left: the variable's type is decided by the value on the right
fmt.Println("Following are the data types")
fmt.Printf("%T : %d\n",int_EX1,int_EX1)
fmt.Printf("%T : %d\n",int_EX2,int_EX2)
fmt.Printf("%T : %d\n",int_EX3,int_EX3)
fmt.Printf("%T : %d\n",int_EX4,int_EX4)
fmt.Printf("%T : %d\n",int_EX5,int_EX5)
fmt.Printf("%T : %d\n",int_EX6,int_EX6)
fmt.Printf("%T : %d\n",int_EX7,int_EX7)
fmt.Printf("%T : %d\n",int_EX8,int_EX8)
fmt.Printf("%T : %d\n",int_EX9,int_EX9)
fmt.Printf("%T : %d\n",int_EX10,int_EX10)
fmt.Printf("%T : %d\n",int_EX11,int_EX11)
fmt.Printf("%T : %d\n",EX1,EX1)
fmt.Printf("%T : %d\n",EX2,EX2)
fmt.Printf("%T : %d\n",int_EX12,int_EX12)
fmt.Printf("%T : %f\n",float_EX1,float_EX1)
fmt.Printf("%T : %f\n",float_EX2,float_EX2)
fmt.Printf("%T : %f\n",float_EX3,float_EX3)
fmt.Printf("%T : %v\n",string_EX,string_EX)
fmt.Printf("%T : %v\n",bool_EX,bool_EX)
return true
}
//I just proved that I am compiled, not interpreted .. ;)
func justChecking()string {
fmt.Println("YO .. man !.. I am A userdefined function ,, ready to GO")
return "The function is working"
}
func playWithFORLOOP()bool{
//2 WAYS TO EXECUTE
//1. TYPICAL WAY
for i := 1; i <= 10; i += 1 {
if (i % 2 == 0) || (i % 3 == 0){ //LOGICAL OR
fmt.Println("Condition 1")
fmt.Println(i)
} else if (i % 1 == 0) && (i % 2 == 0){ // LOGICAL AND (note: any even i already matched Condition 1, so this branch never runs)
fmt.Println("Condition 2")
fmt.Println(i)
} else {
fmt.Println("Condition DEFAULT")
fmt.Println("No If else is satisfied")
}
}
// 2. LIKE WHILE LOOP in C,C++,JAVA,PYTHON
i := 1
for i <=10{
fmt.Println(i)
i += 1
}
return true
}
func anonymousFuntionExample()bool {
	//If you've ever done JavaScript, you must be familiar with anonymous functions
	num := 5
	anonyEx := func() int {
		num = num * 2
		return num
	}
	fmt.Println("OUTPUT OF THE ANONYMOUS FUNCTION: ", anonyEx()) // call it; printing anonyEx itself would print a function value, not the result
return true
}
func funcWithParameter(para int)bool {
fmt.Println("The parameter passed to the function is: ",para)
return true
}
func funcWith3Parameter(para1 int, para2 int, para3 int) int {
fmt.Println("The 3 parameters are: ",para1,para2,para3)
// NOW I AM THINKING TO RETURN THE PRODUCT OF THESE NUMBERS
return para1*para2*para3
}
func funtionReturning2values()(string,int) {
// DON'T FORGET TO DECLARE EVERY RETURN TYPE IN THE SIGNATURE
ret1 := "My AGe is"
ret2 := 22
return ret1, ret2
}
| perfromDivision1 | identifier_name |
cursor_renderer.rs | use std::time::{Duration, Instant};
use skulpin::skia_safe::{Canvas, Paint, Path, Point};
use crate::renderer::CachingShaper;
use crate::editor::{EDITOR, Colors, Cursor, CursorShape};
use crate::redraw_scheduler::REDRAW_SCHEDULER;
const AVERAGE_MOTION_PERCENTAGE: f32 = 0.7;
const MOTION_PERCENTAGE_SPREAD: f32 = 0.5;
const COMMAND_LINE_DELAY_FRAMES: u64 = 5;
const DEFAULT_CELL_PERCENTAGE: f32 = 1.0 / 8.0;
const STANDARD_CORNERS: &[(f32, f32); 4] = &[(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)];
enum BlinkState {
Waiting,
On,
Off
}
struct BlinkStatus {
state: BlinkState,
last_transition: Instant,
previous_cursor: Option<Cursor>
}
impl BlinkStatus {
pub fn new() -> BlinkStatus {
BlinkStatus {
state: BlinkState::Waiting,
last_transition: Instant::now(),
previous_cursor: None
}
}
pub fn update_status(&mut self, new_cursor: &Cursor) -> bool {
if self.previous_cursor.is_none() || new_cursor != self.previous_cursor.as_ref().unwrap() {
self.previous_cursor = Some(new_cursor.clone());
self.last_transition = Instant::now();
if new_cursor.blinkwait.is_some() && new_cursor.blinkwait != Some(0) {
self.state = BlinkState::Waiting;
} else {
self.state = BlinkState::On;
}
}
if new_cursor.blinkwait == Some(0) ||
new_cursor.blinkoff == Some(0) ||
new_cursor.blinkon == Some(0) {
return true;
}
let delay = match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}.filter(|millis| millis > &0).map(|millis| Duration::from_millis(millis));
if delay.map(|delay| self.last_transition + delay < Instant::now()).unwrap_or(false) {
self.state = match self.state {
BlinkState::Waiting => BlinkState::On,
BlinkState::On => BlinkState::Off,
BlinkState::Off => BlinkState::On
};
self.last_transition = Instant::now();
}
let scheduled_frame = (match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}).map(|delay| self.last_transition + Duration::from_millis(delay));
| match self.state {
BlinkState::Waiting | BlinkState::Off => false,
BlinkState::On => true
}
}
}
#[derive(Debug, Clone)]
pub struct Corner {
pub current_position: Point,
pub relative_position: Point,
}
impl Corner {
pub fn new(relative_position: Point) -> Corner {
Corner {
current_position: Point::new(0.0, 0.0),
relative_position
}
}
pub fn update(&mut self, font_dimensions: Point, destination: Point) -> bool {
let relative_scaled_position: Point =
(self.relative_position.x * font_dimensions.x, self.relative_position.y * font_dimensions.y).into();
let corner_destination = destination + relative_scaled_position;
let delta = corner_destination - self.current_position;
if delta.length() > 0.0 {
            // Project relative_scaled_position (actual position of the corner relative to the
// center of the cursor) onto the remaining distance vector. This gives us the relative
// distance to the destination along the delta vector which we can then use to scale the
// motion_percentage.
let motion_scale = delta.dot(relative_scaled_position) / delta.length() / font_dimensions.length();
// The motion_percentage is then equal to the motion_scale factor times the
// MOTION_PERCENTAGE_SPREAD and added to the AVERAGE_MOTION_PERCENTAGE. This way all of
// the percentages are positive and spread out by the spread constant.
let motion_percentage = motion_scale * MOTION_PERCENTAGE_SPREAD + AVERAGE_MOTION_PERCENTAGE;
// Then the current_position is animated by taking the delta vector, multiplying it by
// the motion_percentage and adding the resulting value to the current position causing
// the cursor to "jump" toward the target destination. Since further away corners jump
// slower, the cursor appears to smear toward the destination in a satisfying and
// visually trackable way.
let delta = corner_destination - self.current_position;
self.current_position += delta * motion_percentage;
}
delta.length() > 0.001
}
}
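// Illustrative aside (not part of the original file): the update method above
// is typically driven once per frame until every corner settles. A minimal
// sketch, assuming a 2x3 cell and a cursor heading to grid cell (4, 2):
fn animate_corners_sketch(corners: &mut Vec<Corner>) {
    let font_dimensions: Point = (2.0, 3.0).into();
    let destination: Point = (4.0 * 2.0, 2.0 * 3.0).into();
    loop {
        let mut animating = false;
        for corner in corners.iter_mut() {
            animating |= corner.update(font_dimensions, destination);
        }
        if !animating {
            break; // every corner is within the 0.001 settling threshold
        }
    }
}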
pub struct CursorRenderer {
pub corners: Vec<Corner>,
pub previous_position: (u64, u64),
pub command_line_delay: u64,
blink_status: BlinkStatus
}
impl CursorRenderer {
pub fn new() -> CursorRenderer {
let mut renderer = CursorRenderer {
corners: vec![Corner::new((0.0, 0.0).into()); 4],
previous_position: (0, 0),
command_line_delay: 0,
blink_status: BlinkStatus::new()
};
renderer.set_cursor_shape(&CursorShape::Block, DEFAULT_CELL_PERCENTAGE);
renderer
}
fn set_cursor_shape(&mut self, cursor_shape: &CursorShape, cell_percentage: f32) {
self.corners = self.corners
.clone()
.into_iter().enumerate()
.map(|(i, corner)| {
let (x, y) = STANDARD_CORNERS[i];
Corner {
relative_position: match cursor_shape {
CursorShape::Block => (x, y).into(),
// Transform the x position so that the right side is translated over to
// the BAR_WIDTH position
CursorShape::Vertical => ((x + 0.5) * cell_percentage - 0.5, y).into(),
// Do the same as above, but flip the y coordinate and then flip the result
// so that the horizontal bar is at the bottom of the character space
// instead of the top.
CursorShape::Horizontal => (x, -((-y + 0.5) * cell_percentage - 0.5)).into()
},
.. corner
}
})
.collect::<Vec<Corner>>();
}
pub fn draw(&mut self,
cursor: Cursor, default_colors: &Colors,
font_width: f32, font_height: f32,
paint: &mut Paint, shaper: &mut CachingShaper,
canvas: &mut Canvas) {
let render = self.blink_status.update_status(&cursor);
self.previous_position = {
let editor = EDITOR.lock().unwrap();
let (_, grid_y) = cursor.position;
let (_, previous_y) = self.previous_position;
let (_, height) = editor.size;
if grid_y == height - 1 && previous_y != grid_y {
                self.command_line_delay += 1;
if self.command_line_delay < COMMAND_LINE_DELAY_FRAMES {
self.previous_position
} else {
self.command_line_delay = 0;
cursor.position
}
} else {
self.command_line_delay = 0;
cursor.position
}
};
let (grid_x, grid_y) = self.previous_position;
let (character, font_dimensions): (String, Point) = {
let editor = EDITOR.lock().unwrap();
let character = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize).cloned())
.flatten()
.map(|(character, _)| character)
.unwrap_or(' '.to_string());
let is_double = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize + 1).cloned())
.flatten()
.map(|(character, _)| character.is_empty())
.unwrap_or(false);
let font_width = match (is_double, &cursor.shape) {
(true, CursorShape::Block) => font_width * 2.0,
_ => font_width
};
(character, (font_width, font_height).into())
};
let destination: Point = (grid_x as f32 * font_width, grid_y as f32 * font_height).into();
let center_destination = destination + font_dimensions * 0.5;
self.set_cursor_shape(&cursor.shape, cursor.cell_percentage.unwrap_or(DEFAULT_CELL_PERCENTAGE));
let mut animating = false;
if !center_destination.is_zero() {
for corner in self.corners.iter_mut() {
let corner_animating = corner.update(font_dimensions, center_destination);
animating = animating || corner_animating;
}
}
if animating {
REDRAW_SCHEDULER.queue_next_frame();
}
if cursor.enabled && render {
// Draw Background
paint.set_color(cursor.background(&default_colors).to_color());
// The cursor is made up of four points, so I create a path with each of the four
// corners.
let mut path = Path::new();
path.move_to(self.corners[0].current_position);
path.line_to(self.corners[1].current_position);
path.line_to(self.corners[2].current_position);
path.line_to(self.corners[3].current_position);
path.close();
canvas.draw_path(&path, &paint);
// Draw foreground
paint.set_color(cursor.foreground(&default_colors).to_color());
canvas.save();
canvas.clip_path(&path, None, Some(false));
let blobs = &shaper.shape_cached(&character.to_string(), false, false);
for blob in blobs.iter() {
canvas.draw_text_blob(&blob, destination, &paint);
}
canvas.restore();
}
}
} |
if let Some(scheduled_frame) = scheduled_frame {
REDRAW_SCHEDULER.schedule(scheduled_frame);
}
| random_line_split |
cursor_renderer.rs | use std::time::{Duration, Instant};
use skulpin::skia_safe::{Canvas, Paint, Path, Point};
use crate::renderer::CachingShaper;
use crate::editor::{EDITOR, Colors, Cursor, CursorShape};
use crate::redraw_scheduler::REDRAW_SCHEDULER;
const AVERAGE_MOTION_PERCENTAGE: f32 = 0.7;
const MOTION_PERCENTAGE_SPREAD: f32 = 0.5;
const COMMAND_LINE_DELAY_FRAMES: u64 = 5;
const DEFAULT_CELL_PERCENTAGE: f32 = 1.0 / 8.0;
const STANDARD_CORNERS: &[(f32, f32); 4] = &[(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)];
enum BlinkState {
Waiting,
On,
Off
}
struct BlinkStatus {
state: BlinkState,
last_transition: Instant,
previous_cursor: Option<Cursor>
}
impl BlinkStatus {
pub fn new() -> BlinkStatus {
BlinkStatus {
state: BlinkState::Waiting,
last_transition: Instant::now(),
previous_cursor: None
}
}
pub fn update_status(&mut self, new_cursor: &Cursor) -> bool {
if self.previous_cursor.is_none() || new_cursor != self.previous_cursor.as_ref().unwrap() {
self.previous_cursor = Some(new_cursor.clone());
self.last_transition = Instant::now();
if new_cursor.blinkwait.is_some() && new_cursor.blinkwait != Some(0) {
self.state = BlinkState::Waiting;
} else {
self.state = BlinkState::On;
}
}
if new_cursor.blinkwait == Some(0) ||
new_cursor.blinkoff == Some(0) ||
new_cursor.blinkon == Some(0) {
return true;
}
let delay = match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}.filter(|millis| millis > &0).map(|millis| Duration::from_millis(millis));
if delay.map(|delay| self.last_transition + delay < Instant::now()).unwrap_or(false) {
self.state = match self.state {
BlinkState::Waiting => BlinkState::On,
BlinkState::On => BlinkState::Off,
BlinkState::Off => BlinkState::On
};
self.last_transition = Instant::now();
}
let scheduled_frame = (match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}).map(|delay| self.last_transition + Duration::from_millis(delay));
if let Some(scheduled_frame) = scheduled_frame {
REDRAW_SCHEDULER.schedule(scheduled_frame);
}
match self.state {
BlinkState::Waiting | BlinkState::Off => false,
BlinkState::On => true
}
}
}
#[derive(Debug, Clone)]
pub struct Corner {
pub current_position: Point,
pub relative_position: Point,
}
impl Corner {
pub fn new(relative_position: Point) -> Corner {
Corner {
current_position: Point::new(0.0, 0.0),
relative_position
}
}
pub fn update(&mut self, font_dimensions: Point, destination: Point) -> bool {
let relative_scaled_position: Point =
(self.relative_position.x * font_dimensions.x, self.relative_position.y * font_dimensions.y).into();
let corner_destination = destination + relative_scaled_position;
let delta = corner_destination - self.current_position;
if delta.length() > 0.0 {
            // Project relative_scaled_position (actual position of the corner relative to the
// center of the cursor) onto the remaining distance vector. This gives us the relative
// distance to the destination along the delta vector which we can then use to scale the
// motion_percentage.
let motion_scale = delta.dot(relative_scaled_position) / delta.length() / font_dimensions.length();
// The motion_percentage is then equal to the motion_scale factor times the
// MOTION_PERCENTAGE_SPREAD and added to the AVERAGE_MOTION_PERCENTAGE. This way all of
// the percentages are positive and spread out by the spread constant.
let motion_percentage = motion_scale * MOTION_PERCENTAGE_SPREAD + AVERAGE_MOTION_PERCENTAGE;
// Then the current_position is animated by taking the delta vector, multiplying it by
// the motion_percentage and adding the resulting value to the current position causing
// the cursor to "jump" toward the target destination. Since further away corners jump
// slower, the cursor appears to smear toward the destination in a satisfying and
// visually trackable way.
let delta = corner_destination - self.current_position;
self.current_position += delta * motion_percentage;
}
delta.length() > 0.001
}
}
pub struct CursorRenderer {
pub corners: Vec<Corner>,
pub previous_position: (u64, u64),
pub command_line_delay: u64,
blink_status: BlinkStatus
}
impl CursorRenderer {
pub fn new() -> CursorRenderer {
let mut renderer = CursorRenderer {
corners: vec![Corner::new((0.0, 0.0).into()); 4],
previous_position: (0, 0),
command_line_delay: 0,
blink_status: BlinkStatus::new()
};
renderer.set_cursor_shape(&CursorShape::Block, DEFAULT_CELL_PERCENTAGE);
renderer
}
fn set_cursor_shape(&mut self, cursor_shape: &CursorShape, cell_percentage: f32) {
self.corners = self.corners
.clone()
.into_iter().enumerate()
.map(|(i, corner)| {
let (x, y) = STANDARD_CORNERS[i];
Corner {
relative_position: match cursor_shape {
CursorShape::Block => (x, y).into(),
// Transform the x position so that the right side is translated over to
// the BAR_WIDTH position
CursorShape::Vertical => ((x + 0.5) * cell_percentage - 0.5, y).into(),
// Do the same as above, but flip the y coordinate and then flip the result
// so that the horizontal bar is at the bottom of the character space
// instead of the top.
CursorShape::Horizontal => (x, -((-y + 0.5) * cell_percentage - 0.5)).into()
},
.. corner
}
})
.collect::<Vec<Corner>>();
}
pub fn | (&mut self,
cursor: Cursor, default_colors: &Colors,
font_width: f32, font_height: f32,
paint: &mut Paint, shaper: &mut CachingShaper,
canvas: &mut Canvas) {
let render = self.blink_status.update_status(&cursor);
self.previous_position = {
let editor = EDITOR.lock().unwrap();
let (_, grid_y) = cursor.position;
let (_, previous_y) = self.previous_position;
let (_, height) = editor.size;
if grid_y == height - 1 && previous_y != grid_y {
                self.command_line_delay += 1;
if self.command_line_delay < COMMAND_LINE_DELAY_FRAMES {
self.previous_position
} else {
self.command_line_delay = 0;
cursor.position
}
} else {
self.command_line_delay = 0;
cursor.position
}
};
let (grid_x, grid_y) = self.previous_position;
let (character, font_dimensions): (String, Point) = {
let editor = EDITOR.lock().unwrap();
let character = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize).cloned())
.flatten()
.map(|(character, _)| character)
.unwrap_or(' '.to_string());
let is_double = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize + 1).cloned())
.flatten()
.map(|(character, _)| character.is_empty())
.unwrap_or(false);
let font_width = match (is_double, &cursor.shape) {
(true, CursorShape::Block) => font_width * 2.0,
_ => font_width
};
(character, (font_width, font_height).into())
};
let destination: Point = (grid_x as f32 * font_width, grid_y as f32 * font_height).into();
let center_destination = destination + font_dimensions * 0.5;
self.set_cursor_shape(&cursor.shape, cursor.cell_percentage.unwrap_or(DEFAULT_CELL_PERCENTAGE));
let mut animating = false;
if !center_destination.is_zero() {
for corner in self.corners.iter_mut() {
let corner_animating = corner.update(font_dimensions, center_destination);
animating = animating || corner_animating;
}
}
if animating {
REDRAW_SCHEDULER.queue_next_frame();
}
if cursor.enabled && render {
// Draw Background
paint.set_color(cursor.background(&default_colors).to_color());
// The cursor is made up of four points, so I create a path with each of the four
// corners.
let mut path = Path::new();
path.move_to(self.corners[0].current_position);
path.line_to(self.corners[1].current_position);
path.line_to(self.corners[2].current_position);
path.line_to(self.corners[3].current_position);
path.close();
canvas.draw_path(&path, &paint);
// Draw foreground
paint.set_color(cursor.foreground(&default_colors).to_color());
canvas.save();
canvas.clip_path(&path, None, Some(false));
let blobs = &shaper.shape_cached(&character.to_string(), false, false);
for blob in blobs.iter() {
canvas.draw_text_blob(&blob, destination, &paint);
}
canvas.restore();
}
}
}
| draw | identifier_name |
cursor_renderer.rs | use std::time::{Duration, Instant};
use skulpin::skia_safe::{Canvas, Paint, Path, Point};
use crate::renderer::CachingShaper;
use crate::editor::{EDITOR, Colors, Cursor, CursorShape};
use crate::redraw_scheduler::REDRAW_SCHEDULER;
const AVERAGE_MOTION_PERCENTAGE: f32 = 0.7;
const MOTION_PERCENTAGE_SPREAD: f32 = 0.5;
const COMMAND_LINE_DELAY_FRAMES: u64 = 5;
const DEFAULT_CELL_PERCENTAGE: f32 = 1.0 / 8.0;
const STANDARD_CORNERS: &[(f32, f32); 4] = &[(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)];
enum BlinkState {
Waiting,
On,
Off
}
struct BlinkStatus {
state: BlinkState,
last_transition: Instant,
previous_cursor: Option<Cursor>
}
impl BlinkStatus {
pub fn new() -> BlinkStatus {
BlinkStatus {
state: BlinkState::Waiting,
last_transition: Instant::now(),
previous_cursor: None
}
}
pub fn update_status(&mut self, new_cursor: &Cursor) -> bool |
}
#[derive(Debug, Clone)]
pub struct Corner {
pub current_position: Point,
pub relative_position: Point,
}
impl Corner {
pub fn new(relative_position: Point) -> Corner {
Corner {
current_position: Point::new(0.0, 0.0),
relative_position
}
}
pub fn update(&mut self, font_dimensions: Point, destination: Point) -> bool {
let relative_scaled_position: Point =
(self.relative_position.x * font_dimensions.x, self.relative_position.y * font_dimensions.y).into();
let corner_destination = destination + relative_scaled_position;
let delta = corner_destination - self.current_position;
if delta.length() > 0.0 {
            // Project relative_scaled_position (actual position of the corner relative to the
// center of the cursor) onto the remaining distance vector. This gives us the relative
// distance to the destination along the delta vector which we can then use to scale the
// motion_percentage.
let motion_scale = delta.dot(relative_scaled_position) / delta.length() / font_dimensions.length();
// The motion_percentage is then equal to the motion_scale factor times the
// MOTION_PERCENTAGE_SPREAD and added to the AVERAGE_MOTION_PERCENTAGE. This way all of
// the percentages are positive and spread out by the spread constant.
let motion_percentage = motion_scale * MOTION_PERCENTAGE_SPREAD + AVERAGE_MOTION_PERCENTAGE;
// Then the current_position is animated by taking the delta vector, multiplying it by
// the motion_percentage and adding the resulting value to the current position causing
// the cursor to "jump" toward the target destination. Since further away corners jump
// slower, the cursor appears to smear toward the destination in a satisfying and
// visually trackable way.
let delta = corner_destination - self.current_position;
self.current_position += delta * motion_percentage;
}
delta.length() > 0.001
}
}
pub struct CursorRenderer {
pub corners: Vec<Corner>,
pub previous_position: (u64, u64),
pub command_line_delay: u64,
blink_status: BlinkStatus
}
impl CursorRenderer {
pub fn new() -> CursorRenderer {
let mut renderer = CursorRenderer {
corners: vec![Corner::new((0.0, 0.0).into()); 4],
previous_position: (0, 0),
command_line_delay: 0,
blink_status: BlinkStatus::new()
};
renderer.set_cursor_shape(&CursorShape::Block, DEFAULT_CELL_PERCENTAGE);
renderer
}
fn set_cursor_shape(&mut self, cursor_shape: &CursorShape, cell_percentage: f32) {
self.corners = self.corners
.clone()
.into_iter().enumerate()
.map(|(i, corner)| {
let (x, y) = STANDARD_CORNERS[i];
Corner {
relative_position: match cursor_shape {
CursorShape::Block => (x, y).into(),
// Transform the x position so that the right side is translated over to
// the BAR_WIDTH position
CursorShape::Vertical => ((x + 0.5) * cell_percentage - 0.5, y).into(),
// Do the same as above, but flip the y coordinate and then flip the result
// so that the horizontal bar is at the bottom of the character space
// instead of the top.
CursorShape::Horizontal => (x, -((-y + 0.5) * cell_percentage - 0.5)).into()
},
.. corner
}
})
.collect::<Vec<Corner>>();
}
pub fn draw(&mut self,
cursor: Cursor, default_colors: &Colors,
font_width: f32, font_height: f32,
paint: &mut Paint, shaper: &mut CachingShaper,
canvas: &mut Canvas) {
let render = self.blink_status.update_status(&cursor);
self.previous_position = {
let editor = EDITOR.lock().unwrap();
let (_, grid_y) = cursor.position;
let (_, previous_y) = self.previous_position;
let (_, height) = editor.size;
if grid_y == height - 1 && previous_y != grid_y {
                self.command_line_delay += 1;
if self.command_line_delay < COMMAND_LINE_DELAY_FRAMES {
self.previous_position
} else {
self.command_line_delay = 0;
cursor.position
}
} else {
self.command_line_delay = 0;
cursor.position
}
};
let (grid_x, grid_y) = self.previous_position;
let (character, font_dimensions): (String, Point) = {
let editor = EDITOR.lock().unwrap();
let character = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize).cloned())
.flatten()
.map(|(character, _)| character)
.unwrap_or(' '.to_string());
let is_double = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize + 1).cloned())
.flatten()
.map(|(character, _)| character.is_empty())
.unwrap_or(false);
let font_width = match (is_double, &cursor.shape) {
(true, CursorShape::Block) => font_width * 2.0,
_ => font_width
};
(character, (font_width, font_height).into())
};
let destination: Point = (grid_x as f32 * font_width, grid_y as f32 * font_height).into();
let center_destination = destination + font_dimensions * 0.5;
self.set_cursor_shape(&cursor.shape, cursor.cell_percentage.unwrap_or(DEFAULT_CELL_PERCENTAGE));
let mut animating = false;
if !center_destination.is_zero() {
for corner in self.corners.iter_mut() {
let corner_animating = corner.update(font_dimensions, center_destination);
animating = animating || corner_animating;
}
}
if animating {
REDRAW_SCHEDULER.queue_next_frame();
}
if cursor.enabled && render {
// Draw Background
paint.set_color(cursor.background(&default_colors).to_color());
// The cursor is made up of four points, so I create a path with each of the four
// corners.
let mut path = Path::new();
path.move_to(self.corners[0].current_position);
path.line_to(self.corners[1].current_position);
path.line_to(self.corners[2].current_position);
path.line_to(self.corners[3].current_position);
path.close();
canvas.draw_path(&path, &paint);
// Draw foreground
paint.set_color(cursor.foreground(&default_colors).to_color());
canvas.save();
canvas.clip_path(&path, None, Some(false));
let blobs = &shaper.shape_cached(&character.to_string(), false, false);
for blob in blobs.iter() {
canvas.draw_text_blob(&blob, destination, &paint);
}
canvas.restore();
}
}
}
| {
if self.previous_cursor.is_none() || new_cursor != self.previous_cursor.as_ref().unwrap() {
self.previous_cursor = Some(new_cursor.clone());
self.last_transition = Instant::now();
if new_cursor.blinkwait.is_some() && new_cursor.blinkwait != Some(0) {
self.state = BlinkState::Waiting;
} else {
self.state = BlinkState::On;
}
}
if new_cursor.blinkwait == Some(0) ||
new_cursor.blinkoff == Some(0) ||
new_cursor.blinkon == Some(0) {
return true;
}
let delay = match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}.filter(|millis| millis > &0).map(|millis| Duration::from_millis(millis));
if delay.map(|delay| self.last_transition + delay < Instant::now()).unwrap_or(false) {
self.state = match self.state {
BlinkState::Waiting => BlinkState::On,
BlinkState::On => BlinkState::Off,
BlinkState::Off => BlinkState::On
};
self.last_transition = Instant::now();
}
let scheduled_frame = (match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}).map(|delay| self.last_transition + Duration::from_millis(delay));
if let Some(scheduled_frame) = scheduled_frame {
REDRAW_SCHEDULER.schedule(scheduled_frame);
}
match self.state {
BlinkState::Waiting | BlinkState::Off => false,
BlinkState::On => true
}
} | identifier_body |
detail-permission-schema.component.ts | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {Component, ElementRef, Injector, OnDestroy, OnInit, ViewChild} from '@angular/core';
import {AbstractComponent} from '@common/component/abstract.component';
import {ActivatedRoute} from '@angular/router';
import {PermissionService} from '../../../../user/service/permission.service';
import {RoleSet} from '@domain/user/role/roleSet';
import {Alert} from '@common/util/alert.util';
import {Modal} from '@common/domain/modal';
import {Location} from '@angular/common';
import {ConfirmModalComponent} from '@common/component/modal/confirm/confirm.component';
import {isUndefined} from 'util';
import {CommonUtil} from '@common/util/common.util';
import {PermissionSchemaSetComponent} from '../../../../workspace/component/permission/permission-schema-set.component';
import * as _ from 'lodash';
import {Page} from '@domain/common/page';
import {Workspace} from '@domain/workspace/workspace';
@Component({
selector: 'app-detail-permission-schema',
templateUrl: './detail-permission-schema.component.html'
})
export class DetailPermissionSchemaComponent extends AbstractComponent implements OnInit, OnDestroy {
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Private Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
  // Common confirmation popup
  @ViewChild(ConfirmModalComponent)
  private _confirmModalComponent: ConfirmModalComponent;
  // Permission schema settings component
  @ViewChild(PermissionSchemaSetComponent)
  private _permissionSchemaSetComp: PermissionSchemaSetComponent;
  // Schema ID
  private _schemaId: string;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Protected Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
  // Schema detail data
  public roleSet: RoleSet;
  // Workspaces linked to the RoleSet
  public firstWorkspace: Workspace;
  public otherWorkspaces: Workspace[] = [];
  public totalWsCnt:number = 0;
  // Edited schema name
  public editName: string;
  // Edited schema description
  public editDesc: string;
  // Schema name edit flag
  public editNameFl: boolean;
  // Schema description edit flag
  public editDescFl: boolean;
  // Whether the workspace list panel is expanded
  public isOpenWorkspaceList: boolean = false;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Constructor
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
  // Constructor
constructor(private permissionService: PermissionService,
private activatedRoute: ActivatedRoute,
private _location:Location,
protected element: ElementRef,
protected injector: Injector) {
super(element, injector);
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Override Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// Init
public ngOnInit() {
// Init
super.ngOnInit();
    // get the schema id from the url
    this.activatedRoute.params.subscribe((params) => {
      // schemaId
      this._schemaId = params['schemaId'];
      // ui init
      this._initView();
      // fetch permission schema details and the list of related workspaces
      this._getPermissionSchemaDetail(this._schemaId);
});
}
// Destroy
public ngOnDestroy() {
super.ngOnDestroy();
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
   * Handler invoked after a modal event
* @param {Modal} modal
*/
public confirmHandler(modal: Modal): void {
modal.data === 'CLONE' ? this._clonePermissionSchema(modal['schemaId']) : this._deletePermissionSchema(modal['schemaId']);
}
/**
   * Enter schema name edit mode
   */
  public schemaNameEditMode(): void {
    if( !this.roleSet.readOnly ) {
      // current group name
      this.editName = this.roleSet.name;
      // flag
      this.editNameFl = true;
}
} // function - schemaNameEditMode
/**
   * Enter schema description edit mode
   */
  public schemaDescEditMode(): void {
    if( !this.roleSet.readOnly ) {
      // current group description
      this.editDesc = this.roleSet.description;
      // flag
      this.editDescFl = true;
}
} // function - schemaDescEditMode
/**
   * Update the schema name
   */
  public updateSchemaName(): void {
    // stop event propagation
    event.stopImmediatePropagation();
    // validation
    if (this._nameValidation()) {
      const params = {
        name: this.editName
      };
      // show loading
      this.loadingShow();
      // update the schema
      this._updateSchema(params).then(() => {
        // alert
        // flag
        this.editNameFl = false;
        // re-fetch schema info
        this._getPermissionSchemaDetail(this._schemaId);
      }).catch((error) => {
        // hide loading
this.loadingHide();
// error show
if (error.hasOwnProperty('details') && error.details.includes('Duplicate')) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.used'));
}
});
}
}
/**
   * Update the schema description
   */
  public updateSchemaDesc(): void {
    // stop event propagation
    event.stopImmediatePropagation();
    // validation
    if (this._descValidation()) {
      const params = {
        description: this.editDesc
      };
      // show loading
      this.loadingShow();
      // update the schema
      this._updateSchema(params).then(() => {
        // flag
        this.editDescFl = false;
        // re-fetch schema info
        this._getPermissionSchemaDetail(this._schemaId);
      }).catch((error) => {
        // hide loading
this.loadingHide();
// error
Alert. | false;
}
      // check the name length
if (CommonUtil.getByte(this.editName.trim()) > 150) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.len'));
return false;
}
return true;
}
/**
* description validation
* @returns {boolean}
* @private
*/
private _descValidation(): boolean {
if (!isUndefined(this.editDesc) && this.editDesc.trim() !== '') {
      // check the description length
if (this.editDesc.trim() !== ''
&& CommonUtil.getByte(this.editDesc.trim()) > 450) {
Alert.warning(this.translateService.instant('msg.alert.edit.description.len'));
return false;
}
return true;
}
return true;
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - event
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
   * Back button clicked
*/
public onClickPrev(): void {
this._location.back();
}
/**
   * Delete permission schema clicked
   */
  public onClickDeletePermissionSchema(): void {
    // stop event propagation
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'DELETE';
modal.name = this.translateService.instant('msg.permission.ui.delete-schema.ph');
modal.description = this.translateService.instant('msg.permission.ui.delete-schema.ph.sub', { value : `${this.roleSet.linkedWorkspaces ? this.roleSet.linkedWorkspaces : 0}`});
if( this.firstWorkspace ) {
let wsNames = this.firstWorkspace.name;
if( 0 < this.otherWorkspaces.length ) {
wsNames = wsNames + ',' + this.otherWorkspaces.map( item => item.name ).join( ',' );
}
modal.subDescription = wsNames;
}
modal.btnName = this.translateService.instant('msg.permission.ui.delete-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
    // open the confirmation popup
this._confirmModalComponent.init(modal);
}
/**
   * Clone permission schema clicked
   */
  public onClickClonePermissionSchema(): void {
    // stop event propagation
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'CLONE';
modal.name = this.translateService.instant('msg.permission.ui.copy-schema.ph', { value : `\'${this.roleSet.name} \'`});
modal.btnName = this.translateService.instant('msg.permission.ui.copy-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
    // open the confirmation popup
this._confirmModalComponent.init(modal);
}
/**
   * Open the permission settings popup
*/
public onClickOpenPermissionSchemaSet() {
const cloneRoleSet: RoleSet = _.cloneDeep(this.roleSet);
this._permissionSchemaSetComp.init(cloneRoleSet, true, true);
} // function - onClickOpenPermissionSchemaSet
/**
   * Workspace click event
* @param {Workspace} workspace
*/
public onClickWorkspace(workspace:Workspace) {
this.router.navigate([`/admin/workspaces/shared/${workspace.id}`]).then();
} // function - onClickWorkspace
/**
   * Post-processing after updating the Role's permissions
   */
  public afterUpdatePermissionRoles() {
    // re-fetch schema info
this._getPermissionSchemaDetail(this._schemaId);
} // function - afterUpdatePermissionRoles
/**
   * Show/hide the list of other workspaces.
*/
public showHideOtherWorkspaces() {
this.isOpenWorkspaceList = !this.isOpenWorkspaceList;
if( this.isOpenWorkspaceList && 1 < this.totalWsCnt && 0 === this.otherWorkspaces.length ) {
this._getWorkspacesByRoleSet(this._schemaId, this.totalWsCnt);
}
} // function - showHideOtherWorkspaces
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Protected Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Private Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* ui init
* @private
*/
private _initView(): void {
this.roleSet = new RoleSet();
}
/**
   * Fetch schema details
* @param {string} schemaId
* @private
*/
private _getPermissionSchemaDetail(schemaId: string) {
    // show loading
    this.loadingShow();
    // fetch schema details
    this.permissionService.getRolesetDetail(schemaId)
      .then((result) => {
        // schema details
        this.roleSet = result;
        // hide loading
        this.loadingHide();
        // fetch the list of related workspaces
        this._getWorkspacesByRoleSet(schemaId);
})
.catch((error) => this.commonExceptionHandler(error));
} // function - _getPermissionSchemaDetail
/**
   * Fetch the list of workspaces linked to the RoleSet
* @param {string} roleSetId
* @param {number} pageSize
* @private
*/
private _getWorkspacesByRoleSet(roleSetId: string, pageSize:number = 1) {
this.loadingShow();
const param = new Page();
param.page = 0;
param.size = pageSize;
param.sort = 'name,asc';
if( 1 === pageSize ) {
this.firstWorkspace = null;
} else {
this.otherWorkspaces = [];
}
this.permissionService.getWorkspacesByRoleSet(roleSetId, param).then(result => {
if (result && result['_embedded'] && result['_embedded']['workspaces']) {
const wsList = result['_embedded']['workspaces'];
if( 1 === pageSize ) {
this.firstWorkspace = wsList[0];
this.totalWsCnt = ( 0 < result['page']['totalElements'] ) ? result['page']['totalElements'] : 0;
} else {
if( 1 < wsList.length ) {
wsList.shift(0);
this.otherWorkspaces = wsList;
}
}
}
this.loadingHide();
}).catch((error) => this.commonExceptionHandler(error));
} // function - _getWorkspacesByRoleSet
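  // Illustrative aside (not part of the original component): the method above is
  // called in two phases — first with the default pageSize of 1 to grab the first
  // workspace plus the total count, then with the full count to load the rest.
  // A hypothetical usage sketch:
  // this._getWorkspacesByRoleSet(roleSetId);                   // first workspace + totalWsCnt
  // this._getWorkspacesByRoleSet(roleSetId, this.totalWsCnt);  // remaining workspaces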
/**
   * Copy the permission schema
* @param {string} schemaId
* @private
*/
private _clonePermissionSchema(schemaId: string): void {
    // show loading
    this.loadingShow();
    // copy the permission schema
this.permissionService.copyRoleset(schemaId)
.then((result) => {
// alert
Alert.success(this.translateService.instant('msg.permission.alert.create.ok', {value: result.name}));
        // navigate to the cloned schema's detail page
this.router.navigate(['/admin/workspaces/permission', result.id]).then();
})
.catch((error) => this.commonExceptionHandler(error));
}
/**
   * Delete the permission schema
* @param {string} schemaId
* @private
*/
private _deletePermissionSchema(schemaId: string): void {
    // show loading
    this.loadingShow();
    // delete the permission schema
this.permissionService.deleteRoleset(schemaId).then(() => {
// alert
Alert.success(this.translateService.instant('msg.permission.alert.delete.ok'));
      // go back
this.onClickPrev();
}).catch((error) => this.commonExceptionHandler(error));
}
/**
   * Update the schema
* @param {Object} params
* @returns {Promise<any>}
* @private
*/
private _updateSchema(params: object): Promise<any> {
return new Promise((resolve, reject) => {
      // request the update
this.permissionService.updateNameAndDescRoleset(this._schemaId, params)
.then((result) => {
resolve(result);
})
.catch((error) => {
reject(error);
});
});
}
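  // Illustrative aside (not part of the original component): since the service call
  // already returns a Promise, the wrapper above could be written with async/await.
  // A hypothetical sketch — _updateSchemaAsync is not a real method of this class:
  // private async _updateSchemaAsync(params: object): Promise<any> {
  //   return this.permissionService.updateNameAndDescRoleset(this._schemaId, params);
  // }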
}
| error(error);
});
}
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - validation
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* name validation
* @returns {boolean}
* @private
*/
private _nameValidation(): boolean {
    // if the schema name is empty
if (isUndefined(this.editName) || this.editName.trim() === '') {
Alert.warning(this.translateService.instant('msg.groups.alert.name.empty'));
return | conditional_block |
detail-permission-schema.component.ts | /*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {Component, ElementRef, Injector, OnDestroy, OnInit, ViewChild} from '@angular/core';
import {AbstractComponent} from '@common/component/abstract.component';
import {ActivatedRoute} from '@angular/router';
import {PermissionService} from '../../../../user/service/permission.service';
import {RoleSet} from '@domain/user/role/roleSet';
import {Alert} from '@common/util/alert.util';
import {Modal} from '@common/domain/modal';
import {Location} from '@angular/common';
import {ConfirmModalComponent} from '@common/component/modal/confirm/confirm.component';
import {isUndefined} from 'util';
import {CommonUtil} from '@common/util/common.util';
import {PermissionSchemaSetComponent} from '../../../../workspace/component/permission/permission-schema-set.component';
import * as _ from 'lodash';
import {Page} from '@domain/common/page';
import {Workspace} from '@domain/workspace/workspace';
@Component({
selector: 'app-detail-permission-schema',
templateUrl: './detail-permission-schema.component.html'
})
export class DetailPermissionSchemaComponent extends AbstractComponent implements OnInit, OnDestroy {
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Private Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
  // Common confirmation popup
  @ViewChild(ConfirmModalComponent)
  private _confirmModalComponent: ConfirmModalComponent;
  // Permission schema settings component
  @ViewChild(PermissionSchemaSetComponent)
  private _permissionSchemaSetComp: PermissionSchemaSetComponent;
  // Schema ID
  private _schemaId: string;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Protected Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Variables
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// schema detail data
public roleSet: RoleSet;
// workspaces linked to the RoleSet
public firstWorkspace: Workspace;
public otherWorkspaces: Workspace[] = [];
public totalWsCnt: number = 0;
// schema name being edited
public editName: string;
// schema description being edited
public editDesc: string;
// schema name edit flag
public editNameFl: boolean;
// schema description edit flag
public editDescFl: boolean;
// whether the workspace list panel is expanded
public isOpenWorkspaceList: boolean = false;
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Constructor
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// constructor
constructor(private permissionService: PermissionService,
private activatedRoute: ActivatedRoute,
private _location:Location,
protected element: ElementRef,
protected injector: Injector) {
super(element, injector);
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Override Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
// Init
public ngOnInit() {
// Init
super.ngOnInit();
// read the schema id from the url
this.activatedRoute.params.subscribe((params) => {
// schemaId
this._schemaId = params['schemaId'];
// ui init
this._initView();
// fetch the permission schema detail and its linked workspaces
this._getPermissionSchemaDetail(this._schemaId);
});
}
// Destroy
public ngOnDestroy() {
super.ngOnDestroy();
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* handler invoked after the confirm modal closes
* @param {Modal} modal
*/
public confirmHandler(modal: Modal): void {
modal.data === 'CLONE' ? this._clonePermissionSchema(modal['schemaId']) : this._deletePermissionSchema(modal['schemaId']);
}
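// Flow sketch (illustration only; names are the ones defined in this file):
// onClickDeletePermissionSchema()/onClickClonePermissionSchema() build a Modal
// with data = 'DELETE' or 'CLONE' and hand it to ConfirmModalComponent; once
// the user confirms, this handler routes to _deletePermissionSchema() or
// _clonePermissionSchema() using the schemaId stashed on the modal object.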
/**
* switch the schema name to edit mode
*/
public schemaNameEditMode(): void {
if( !this.roleSet.readOnly ) {
// current group name
this.editName = this.roleSet.name;
// flag
this.editNameFl = true;
}
} // function - schemaNameEditMode
/**
* switch the schema description to edit mode
*/
public schemaDescEditMode(): void {
if( !this.roleSet.readOnly ) {
// current group description
this.editDesc = this.roleSet.description;
// flag
this.editDescFl = true;
}
} // function - schemaDescEditMode
/**
* update the schema name
*/
public updateSchemaName(): void {
// stop event propagation
event.stopImmediatePropagation();
// validation
if (this._nameValidation()) {
const params = {
name: this.editName
};
// show loading
this.loadingShow();
// update the schema
this._updateSchema(params).then(() => {
// alert
// flag
this.editNameFl = false;
// re-fetch the schema detail
this._getPermissionSchemaDetail(this._schemaId);
}).catch((error) => {
// hide loading
this.loadingHide();
// show error
if (error.hasOwnProperty('details') && error.details.includes('Duplicate')) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.used'));
}
});
}
}
/**
* update the schema description
*/
public updateSchemaDesc(): void {
// stop event propagation
event.stopImmediatePropagation();
// validation
if (this._descValidation()) {
const params = {
description: this.editDesc
};
// show loading
this.loadingShow();
// update the schema
this._updateSchema(params).then(() => {
// flag
this.editDescFl = false;
// re-fetch the schema detail
this._getPermissionSchemaDetail(this._schemaId);
}).catch((error) => {
// hide loading
this.loadingHide();
// error
Alert.error(error);
});
}
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - validation
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* name validation
* @returns {boolean}
* @private
*/
private _nameValidation(): boolean {
// if the schema name is empty
if (isUndefined(this.editName) || this.editName.trim() === '') {
Alert.warning(this.translateService.instant('msg.groups.alert.name.empty'));
return false;
}
// check the name length
if (CommonUtil.getByte(this.editName.trim()) > 150) {
Alert.warning(this.translateService.instant('msg.groups.alert.name.len'));
return false;
}
return true;
}
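// Illustration (assumes CommonUtil.getByte measures UTF-8 byte length):
// 'schema' counts as 6 bytes while '스키마' counts as 9, so the 150-byte
// cap allows roughly 50 Hangul characters but up to 150 ASCII ones.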
/**
* description validation
* @returns {boolean}
* @private
*/
private _descValidation(): boolean {
if (!isUndefined(this.editDesc) && this.editDesc.trim() !== '') {
// check the description length
if (this.editDesc.trim() !== ''
&& CommonUtil.getByte(this.editDesc.trim()) > 450) {
Alert.warning(this.translateService.instant('msg.alert.edit.description.len'));
return false;
}
return true;
}
return true;
}
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Public Method - event
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* back button click
*/
public onClickPrev(): void {
this._location.back();
}
/**
* permission schema delete click
*/
public onClickDeletePermissionSchema(): void {
// stop event propagation
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'DELETE';
modal.name = this.translateService.instant('msg.permission.ui.delete-schema.ph');
modal.description = this.translateService.instant('msg.permission.ui.delete-schema.ph.sub', { value : `${this.roleSet.linkedWorkspaces ? this.roleSet.linkedWorkspaces : 0}`});
if( this.firstWorkspace ) {
let wsNames = this.firstWorkspace.name;
if( 0 < this.otherWorkspaces.length ) {
wsNames = wsNames + ',' + this.otherWorkspaces.map( item => item.name ).join( ',' );
}
modal.subDescription = wsNames;
}
modal.btnName = this.translateService.instant('msg.permission.ui.delete-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
// open the confirm popup
this._confirmModalComponent.init(modal);
}
/**
* permission schema clone click
*/
public onClickClonePermissionSchema(): void {
// stop event propagation
event.stopImmediatePropagation();
const modal = new Modal();
modal.data = 'CLONE';
modal.name = this.translateService.instant('msg.permission.ui.copy-schema.ph', { value : `\'${this.roleSet.name} \'`});
modal.btnName = this.translateService.instant('msg.permission.ui.copy-schema');
// schema id
modal['schemaId'] = this.roleSet.id;
// open the confirm popup
this._confirmModalComponent.init(modal);
}
/**
* open the permission settings popup
*/
public onClickOpenPermissionSchemaSet() {
const cloneRoleSet: RoleSet = _.cloneDeep(this.roleSet);
this._permissionSchemaSetComp.init(cloneRoleSet, true, true);
} // function - onClickOpenPermissionSchemaSet
/**
* workspace click event
* @param {Workspace} workspace
*/
public onClickWorkspace(workspace:Workspace) {
this.router.navigate([`/admin/workspaces/shared/${workspace.id}`]).then();
} // function - onClickWorkspace
/**
* post-processing after a Role's permissions change
*/
public afterUpdatePermissionRoles() {
// re-fetch the schema detail
this._getPermissionSchemaDetail(this._schemaId);
} // function - afterUpdatePermissionRoles
/**
* show or hide the list of other workspaces.
*/
public showHideOtherWorkspaces() {
this.isOpenWorkspaceList = !this.isOpenWorkspaceList;
if( this.isOpenWorkspaceList && 1 < this.totalWsCnt && 0 === this.otherWorkspaces.length ) {
this._getWorkspacesByRoleSet(this._schemaId, this.totalWsCnt);
}
} // function - showHideOtherWorkspaces
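// Lazy-load sketch: _getPermissionSchemaDetail() triggers
// _getWorkspacesByRoleSet(id) with the default pageSize of 1, which is just
// enough to fill firstWorkspace and totalWsCnt; the remaining workspaces are
// fetched only here, on first expansion, by re-querying with pageSize = totalWsCnt.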
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Protected Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
| Private Method
|-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
* ui init
* @private
*/
private _initView(): void {
this.roleSet = new RoleSet();
}
/**
* fetch the schema detail
* @param {string} schemaId
* @private
*/
private _getPermissionSchemaDetail(schemaId: string) {
// show loading
this.loadingShow();
// fetch the schema detail
this.permissionService.getRolesetDetail(schemaId)
.then((result) => {
// schema detail
this.roleSet = result;
// hide loading
this.loadingHide();
// fetch the linked workspaces
this._getWorkspacesByRoleSet(schemaId);
})
.catch((error) => this.commonExceptionHandler(error));
} // function - _getPermissionSchemaDetail
/**
* fetch the list of workspaces linked to the RoleSet
* @param {string} roleSetId
* @param {number} pageSize
* @private
*/
private _getWorkspacesByRoleSet(roleSetId: string, pageSize:number = 1) {
this.loadingShow();
const param = new Page();
param.page = 0;
param.size = pageSize;
param.sort = 'name,asc';
if( 1 === pageSize ) {
this.firstWorkspace = null;
} else {
this.otherWorkspaces = [];
}
this.permissionService.getWorkspacesByRoleSet(roleSetId, param).then(result => {
if (result && result['_embedded'] && result['_embedded']['workspaces']) {
const wsList = result['_embedded']['workspaces'];
if( 1 === pageSize ) {
this.firstWorkspace = wsList[0];
this.totalWsCnt = ( 0 < result['page']['totalElements'] ) ? result['page']['totalElements'] : 0;
} else {
if( 1 < wsList.length ) {
wsList.shift();
this.otherWorkspaces = wsList;
}
}
}
this.loadingHide();
}).catch((error) => this.commonExceptionHandler(error));
} // function - _getWorkspacesByRoleSet
/**
* clone a permission schema
* @param {string} schemaId
* @private
*/
private _clonePermissionSchema(schemaId: string): void {
// show loading
this.loadingShow();
// copy the permission schema
this.permissionService.copyRoleset(schemaId)
.then((result) => {
// alert
Alert.success(this.translateService.instant('msg.permission.alert.create.ok', {value: result.name}));
// navigate to the detail page of the cloned schema
this.router.navigate(['/admin/workspaces/permission', result.id]).then();
})
.catch((error) => this.commonExceptionHandler(error));
}
/**
* delete a permission schema
* @param {string} schemaId
* @private
*/
private _deletePermissionSchema(schemaId: string): void {
// show loading
this.loadingShow();
// delete the permission schema
this.permissionService.deleteRoleset(schemaId).then(() => {
// alert
Alert.success(this.translateService.instant('msg.permission.alert.delete.ok'));
// leave the page
this.onClickPrev();
}).catch((error) => this.commonExceptionHandler(error));
}
/**
* update the schema
* @param {Object} params
* @returns {Promise<any>}
* @private
*/
private _updateSchema(params: object): Promise<any> {
return new Promise((resolve, reject) => {
// request the update
this.permissionService.updateNameAndDescRoleset(this._schemaId, params)
.then((result) => {
resolve(result);
})
.catch((error) => {
reject(error);
});
});
}
}
reader.rs
use std::sync::mpsc::{Receiver, Sender, SyncSender, channel};
use std::error::Error;
use item::Item;
use std::sync::{Arc, RwLock};
use std::process::{Command, Stdio, Child};
use std::io::{BufRead, BufReader};
use event::{EventSender, EventReceiver, Event, EventArg};
use std::thread::JoinHandle;
use std::thread;
use std::time::Duration;
use std::collections::HashMap;
use std::mem;
use std::fs::File;
use regex::Regex;
use sender::CachedSender;
use field::{FieldRange, parse_range};
use clap::ArgMatches;
struct ReaderOption {
pub use_ansi_color: bool,
pub default_arg: String,
pub transform_fields: Vec<FieldRange>,
pub matching_fields: Vec<FieldRange>,
pub delimiter: Regex,
pub replace_str: String,
pub line_ending: u8,
}
impl ReaderOption {
pub fn new() -> Self {
ReaderOption {
use_ansi_color: false,
default_arg: String::new(),
transform_fields: Vec::new(),
matching_fields: Vec::new(),
delimiter: Regex::new(r".*?\t").unwrap(),
replace_str: "{}".to_string(),
line_ending: b'\n',
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
if options.is_present("ansi") {
self.use_ansi_color = true;
}
if let Some(delimiter) = options.value_of("delimiter") {
self.delimiter = Regex::new(&(".*?".to_string() + delimiter))
.unwrap_or_else(|_| Regex::new(r".*?[\t ]").unwrap());
}
if let Some(transform_fields) = options.value_of("with-nth") {
self.transform_fields = transform_fields.split(',')
.filter_map(|string| {
parse_range(string)
})
.collect();
}
if let Some(matching_fields) = options.value_of("nth") {
self.matching_fields = matching_fields.split(',')
.filter_map(|string| {
parse_range(string)
}).collect();
}
if options.is_present("read0") {
self.line_ending = b'\0';
}
}
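// Example (illustrative, not from the original source): `--delimiter ,`
// yields the field regex ".*?,", a lazy match up to and including the next
// comma, while the default stays ".*?\t"; for `--with-nth 1..3`,
// parse_range("1..3") is assumed to produce a FieldRange covering fields 1-3.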
}
pub struct Reader {
rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
option: Arc<RwLock<ReaderOption>>,
real_stdin: Option<File>, // used to support piped output
}
impl Reader {
pub fn new(rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
real_stdin: Option<File>) -> Self {
Reader {
rx_cmd: rx_cmd,
tx_item: tx_item,
option: Arc::new(RwLock::new(ReaderOption::new())),
real_stdin,
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
let mut option = self.option.write().unwrap();
option.parse_options(options);
}
pub fn run(&mut self) {
// event loop
let mut thread_reader: Option<JoinHandle<()>> = None;
let mut tx_reader: Option<Sender<bool>> = None;
let mut last_command = "".to_string();
let mut last_query = "".to_string();
// start sender
let (tx_sender, rx_sender) = channel();
let tx_item = self.tx_item.clone();
let mut sender = CachedSender::new(rx_sender, tx_item);
thread::spawn(move || {
sender.run();
});
while let Ok((ev, arg)) = self.rx_cmd.recv() {
match ev {
Event::EvReaderRestart => {
// close existing command or file if exists
let (cmd, query, force_update) = *arg.downcast::<(String, String, bool)>().unwrap();
if !force_update && cmd == last_command && query == last_query { continue; }
// restart command with new `command`
if cmd != last_command {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
// create needed data for thread
let (tx, rx_reader) = channel();
tx_reader = Some(tx);
let cmd_clone = cmd.clone();
let option_clone = Arc::clone(&self.option);
let tx_sender_clone = tx_sender.clone();
let query_clone = query.clone();
let real_stdin = self.real_stdin.take();
// start the new command
thread_reader = Some(thread::spawn(move || {
let _ = tx_sender_clone.send((Event::EvReaderStarted, Box::new(true)));
let _ = tx_sender_clone.send((Event::EvSenderRestart, Box::new(query_clone)));
reader(&cmd_clone, rx_reader, &tx_sender_clone, option_clone, real_stdin);
let _ = tx_sender_clone.send((Event::EvReaderStopped, Box::new(true)));
}));
} else {
// tell sender to restart
let _ = tx_sender.send((Event::EvSenderRestart, Box::new(query.clone())));
}
last_command = cmd;
last_query = query;
}
Event::EvActAccept => {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
let tx_ack: Sender<usize> = *arg.downcast().unwrap();
let _ = tx_ack.send(0);
}
_ => {
// do nothing
}
}
}
}
}
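// Minimal wiring sketch (hypothetical; the channel aliases are assumed to
// come from event.rs and this snippet is not part of the original file):
//
// let (tx_cmd, rx_cmd) = channel(); // EventSender / EventReceiver pair
// let (tx_item, rx_item) = std::sync::mpsc::sync_channel(1024);
// let mut reader = Reader::new(rx_cmd, tx_item, None);
// thread::spawn(move || reader.run());
// let _ = tx_cmd.send((Event::EvReaderRestart,
// Box::new(("find .".to_string(), String::new(), true))));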
fn get_command_output(cmd: &str) -> Result<(Option<Child>, Box<BufRead>), Box<Error>> {
let mut command = try!(Command::new("sh")
.arg("-c")
.arg(cmd)
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn());
let stdout = try!(command.stdout.take().ok_or_else(|| "command output: unwrap failed".to_owned()));
Ok((Some(command), Box::new(BufReader::new(stdout))))
}
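// Illustrative call (hypothetical caller; error handling elided):
// let (child, mut lines) = get_command_output("find . -type f").unwrap();
// The Child handle is kept alive so the watchdog thread in `reader` below
// can kill and reap the process when the query is cancelled.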
// Consider that you invoke a command with different arguments several times
// If you select some items each time, how will skim remember it?
// => Well, we'll give each invocation a number, i.e. RUN_NUM
// What if you invoke the same command and same arguments twice?
// => We use NUM_MAP to specify the same run number.
lazy_static! {
static ref RUN_NUM: RwLock<usize> = RwLock::new(0);
static ref NUM_MAP: RwLock<HashMap<String, usize>> = RwLock::new(HashMap::new());
}
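// Worked example: the first restart with cmd = "ls" inserts ("ls", 1) into
// NUM_MAP (RUN_NUM becomes 1) and its items get ids (1, 0), (1, 1), ...;
// a later "ls -la" bumps RUN_NUM to 2, while re-running plain "ls" hits the
// cached entry and reuses run number 1, so earlier selections still match.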
fn reader(cmd: &str,
rx_cmd: Receiver<bool>,
tx_sender: &EventSender,
option: Arc<RwLock<ReaderOption>>,
source_file: Option<File>) {
debug!("reader:reader: called");
let (command, mut source): (Option<Child>, Box<BufRead>) = if source_file.is_some() {
(None, Box::new(BufReader::new(source_file.unwrap())))
} else {
get_command_output(cmd).expect("command not found")
};
let (tx_control, rx_control) = channel();
thread::spawn(move || {
// listen to `rx` for command to quit reader
// kill command if it is got
loop {
if rx_cmd.try_recv().is_ok() {
// clean up resources
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
if rx_control.try_recv().is_ok() {
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
thread::sleep(Duration::from_millis(5));
}
});
let opt = option.read().unwrap();
// set the proper run number
let run_num = {*RUN_NUM.read().unwrap()};
let run_num = *NUM_MAP.write()
.unwrap()
.entry(cmd.to_string())
.or_insert_with(|| {
*(RUN_NUM.write().unwrap()) = run_num + 1;
run_num + 1
});
let mut index = 0;
let mut item_group = Vec::new();
let mut buffer = Vec::with_capacity(100);
loop {
buffer.clear();
// start reading
match source.read_until(opt.line_ending, &mut buffer) {
Ok(n) => {
if n == 0 { break; }
debug!("reader:reader: read a new line. index = {}", index);
if buffer.ends_with(&[b'\r', b'\n']) {
buffer.pop();
buffer.pop();
} else if buffer.ends_with(&[b'\n']) || buffer.ends_with(&[b'\0']) {
buffer.pop();
}
debug!("reader:reader: create new item. index = {}", index);
let item = Item::new(String::from_utf8_lossy(&buffer),
opt.use_ansi_color,
&opt.transform_fields,
&opt.matching_fields,
&opt.delimiter,
(run_num, index));
item_group.push(Arc::new(item));
debug!("reader:reader: item created. index = {}", index);
index += 1;
// flush a batch when index % 8192 == 0 (trailing_zeros() > 12 means at least 13 trailing zero bits)
if index.trailing_zeros() > 12 {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
}
Err(_err) => {} // String not UTF8 or other error, skip.
}
}
if !item_group.is_empty() {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
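// signal the watchdog thread spawned above so it kills and reaps the child
// process; without this the spawned command could outlive the read loop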
let _ = tx_control.send(true);
}
reader.rs
use std::sync::mpsc::{Receiver, Sender, SyncSender, channel};
use std::error::Error;
use item::Item;
use std::sync::{Arc, RwLock};
use std::process::{Command, Stdio, Child};
use std::io::{BufRead, BufReader};
use event::{EventSender, EventReceiver, Event, EventArg};
use std::thread::JoinHandle;
use std::thread;
use std::time::Duration;
use std::collections::HashMap;
use std::mem;
use std::fs::File;
use regex::Regex;
use sender::CachedSender;
use field::{FieldRange, parse_range};
use clap::ArgMatches;
struct ReaderOption {
pub use_ansi_color: bool,
pub default_arg: String,
pub transform_fields: Vec<FieldRange>,
pub matching_fields: Vec<FieldRange>,
pub delimiter: Regex,
pub replace_str: String,
pub line_ending: u8,
}
impl ReaderOption {
pub fn new() -> Self {
ReaderOption {
use_ansi_color: false,
default_arg: String::new(),
transform_fields: Vec::new(),
matching_fields: Vec::new(),
delimiter: Regex::new(r".*?\t").unwrap(),
replace_str: "{}".to_string(),
line_ending: b'\n',
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
if options.is_present("ansi") {
self.use_ansi_color = true;
}
if let Some(delimiter) = options.value_of("delimiter") {
self.delimiter = Regex::new(&(".*?".to_string() + delimiter))
.unwrap_or_else(|_| Regex::new(r".*?[\t ]").unwrap());
}
if let Some(transform_fields) = options.value_of("with-nth") {
self.transform_fields = transform_fields.split(',')
.filter_map(|string| {
parse_range(string)
})
.collect();
}
if let Some(matching_fields) = options.value_of("nth") {
self.matching_fields = matching_fields.split(',')
.filter_map(|string| {
parse_range(string)
}).collect();
}
if options.is_present("read0") {
self.line_ending = b'\0';
}
}
}
pub struct Reader {
rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
option: Arc<RwLock<ReaderOption>>,
real_stdin: Option<File>, // used to support piped output
}
impl Reader {
pub fn new(rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
real_stdin: Option<File>) -> Self {
Reader {
rx_cmd: rx_cmd,
tx_item: tx_item,
option: Arc::new(RwLock::new(ReaderOption::new())),
real_stdin,
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
let mut option = self.option.write().unwrap();
option.parse_options(options);
}
pub fn run(&mut self) {
// event loop
let mut thread_reader: Option<JoinHandle<()>> = None;
let mut tx_reader: Option<Sender<bool>> = None;
let mut last_command = "".to_string();
let mut last_query = "".to_string();
// start sender
let (tx_sender, rx_sender) = channel();
let tx_item = self.tx_item.clone();
let mut sender = CachedSender::new(rx_sender, tx_item);
thread::spawn(move || {
sender.run();
});
while let Ok((ev, arg)) = self.rx_cmd.recv() {
match ev {
Event::EvReaderRestart => {
// close existing command or file if exists
let (cmd, query, force_update) = *arg.downcast::<(String, String, bool)>().unwrap();
if !force_update && cmd == last_command && query == last_query { continue; }
// restart command with new `command`
if cmd != last_command {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
// create needed data for thread
let (tx, rx_reader) = channel();
tx_reader = Some(tx);
let cmd_clone = cmd.clone();
let option_clone = Arc::clone(&self.option);
let tx_sender_clone = tx_sender.clone();
let query_clone = query.clone();
let real_stdin = self.real_stdin.take();
// start the new command
thread_reader = Some(thread::spawn(move || {
let _ = tx_sender_clone.send((Event::EvReaderStarted, Box::new(true)));
let _ = tx_sender_clone.send((Event::EvSenderRestart, Box::new(query_clone)));
reader(&cmd_clone, rx_reader, &tx_sender_clone, option_clone, real_stdin);
let _ = tx_sender_clone.send((Event::EvReaderStopped, Box::new(true)));
}));
} else {
// tell sender to restart
let _ = tx_sender.send((Event::EvSenderRestart, Box::new(query.clone())));
}
last_command = cmd;
last_query = query;
}
Event::EvActAccept => {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
let tx_ack: Sender<usize> = *arg.downcast().unwrap();
let _ = tx_ack.send(0);
}
_ => {
// do nothing
}
}
}
}
}
fn get_command_output(cmd: &str) -> Result<(Option<Child>, Box<BufRead>), Box<Error>> {
let mut command = try!(Command::new("sh")
.arg("-c")
.arg(cmd)
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn());
let stdout = try!(command.stdout.take().ok_or_else(|| "command output: unwrap failed".to_owned()));
Ok((Some(command), Box::new(BufReader::new(stdout))))
}
// Consider that you invoke a command with different arguments several times
// If you select some items each time, how will skim remember it?
// => Well, we'll give each invocation a number, i.e. RUN_NUM
// What if you invoke the same command and same arguments twice?
// => We use NUM_MAP to specify the same run number.
lazy_static! {
static ref RUN_NUM: RwLock<usize> = RwLock::new(0);
static ref NUM_MAP: RwLock<HashMap<String, usize>> = RwLock::new(HashMap::new());
}
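// Illustrative trace of the numbering above (values assumed, not taken from a test):
//   reader("ls")   -> run_num = 1 (new command: RUN_NUM is bumped, result cached in NUM_MAP)
//   reader("rg x") -> run_num = 2 (another new command)
//   reader("ls")   -> run_num = 1 (cache hit: NUM_MAP returns the stored number)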
fn reader(cmd: &str,
rx_cmd: Receiver<bool>,
tx_sender: &EventSender,
option: Arc<RwLock<ReaderOption>>,
source_file: Option<File>) {
debug!("reader:reader: called");
let (command, mut source): (Option<Child>, Box<BufRead>) = if source_file.is_some() {
(None, Box::new(BufReader::new(source_file.unwrap())))
} else {
get_command_output(cmd).expect("command not found")
};
let (tx_control, rx_control) = channel();
thread::spawn(move || {
// listen to `rx` for command to quit reader
// kill command if it is got
loop {
if rx_cmd.try_recv().is_ok() {
// clean up resources
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
if rx_control.try_recv().is_ok() {
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
thread::sleep(Duration::from_millis(5));
}
});
let opt = option.read().unwrap();
// set the proper run number
let run_num = {*RUN_NUM.read().unwrap()};
let run_num = *NUM_MAP.write()
.unwrap()
.entry(cmd.to_string())
.or_insert_with(|| {
*(RUN_NUM.write().unwrap()) = run_num + 1;
run_num + 1
});
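// Note on the locking above: the RUN_NUM read lock is taken and released up
// front; only on a cache miss does or_insert_with bump RUN_NUM under a fresh
// write lock (while the NUM_MAP write lock is still held).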
let mut index = 0;
let mut item_group = Vec::new();
let mut buffer = Vec::with_capacity(100);
loop {
buffer.clear();
// start reading
match source.read_until(opt.line_ending, &mut buffer) {
Ok(n) => {
if n == 0 { break; }
debug!("reader:reader: read a new line. index = {}", index);
if buffer.ends_with(&[b'\r', b'\n']) {
buffer.pop();
buffer.pop();
} else if buffer.ends_with(&[b'\n']) || buffer.ends_with(&[b'\0']) {
buffer.pop();
}
debug!("reader:reader: create new item. index = {}", index);
let item = Item::new(String::from_utf8_lossy(&buffer),
opt.use_ansi_color,
&opt.transform_fields,
&opt.matching_fields,
&opt.delimiter,
(run_num, index));
item_group.push(Arc::new(item));
debug!("reader:reader: item created. index = {}", index);
index += 1;
// flush a batch every 8192 lines: trailing_zeros() > 12 means index % 8192 == 0
if index.trailing_zeros() > 12 {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
}
Err(_err) => {} // String not UTF8 or other error, skip.
}
}
if !item_group.is_empty() {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
let _ = tx_control.send(true);
} | matching_fields: Vec::new(),
delimiter: Regex::new(r".*?\t").unwrap(),
replace_str: "{}".to_string(),
line_ending: b'\n', | random_line_split |
core.py | # -*- coding: utf-8 -*-
import re # used to get info from frd file
import os
import sys
import subprocess # used to check ccx version
from enum import Enum, auto
from typing import List, Tuple
import logging
from .mesh import Mesher
from .results import ResultProcessor
import gmsh
import numpy as np
class AnalysisError(Exception):
"""Exception raised for errors generated during the analysis
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
class AnalysisType(Enum):
STRUCTURAL = auto()
THERMAL = auto()
FLUID = auto()
class NodeSet:
"""
A node set is a basic entity for storing node set lists. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, nodes):
self.name = name
self._nodes = nodes
@property
def nodes(self):
"""
Nodes contains the list of Node IDs
"""
return self._nodes
@nodes.setter
def nodes(self, nodes):
self._nodes = nodes
def writeInput(self) -> str:
out = '*NSET,NSET={:s}\n'.format(self.name)
out += np.array2string(self.nodes, precision=2, separator=', ', threshold=9999999999)[1:-1]
return out
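# Usage sketch (illustrative, with assumed node IDs): a NodeSet renders
# directly to a Calculix *NSET card, e.g.
# ns = NodeSet('FixedFace', np.array([1, 2, 3, 10]))
# ns.writeInput() ->
# *NSET,NSET=FixedFace
# 1, 2, 3, 10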
class ElementSet:
"""
An element set is a basic entity for storing element set lists. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, els):
self.name = name
self._els = els
@property
def els(self):
"""
Elements contains the list of Element IDs
"""
return self._els
@els.setter
def els(self, elements):
self._els = elements
def writeInput(self) -> str:
out = '*ELSET,ELSET={:s}\n'.format(self.name)
out += np.array2string(self.els, precision=2, separator=', ', threshold=9999999999)[1:-1]
return out
class SurfaceSet:
"""
A surface set is a basic entity for storing element face lists, typically for setting directional fluxes onto
surface elements based on the element ordering. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, surfacePairs):
self.name = name
self._elSurfacePairs = surfacePairs
@property
def surfacePairs(self):
"""
Elements with the associated face orientations are specified as Nx2 numpy array, with the first column being
the element Id, and the second column the chosen face orientation
"""
return self._elSurfacePairs
@surfacePairs.setter
def surfacePairs(self, surfacePairs):
self._elSurfacePairs = surfacePairs
def writeInput(self) -> str:
out = '*SURFACE,NAME={:s}\n'.format(self.name)
for i in range(self._elSurfacePairs.shape[0]):
out += '{:d},S{:d}\n'.format(self._elSurfacePairs[i,0], self._elSurfacePairs[i,1])
return out
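# Usage sketch (illustrative values): element 101 face S2 and element 102
# face S3 grouped into one named surface.
# sp = SurfaceSet('FluxFace', np.array([[101, 2], [102, 3]]))
# sp.writeInput() ->
# *SURFACE,NAME=FluxFace
# 101,S2
# 102,S3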
class Connector:
"""
A Connector is a rigid connector between a set of nodes and an (optional) reference node.
"""
def __init__(self, name, nodes, refNode = None):
self.name = name
self._refNode = refNode
self._nodeset = None
@property
def refNode(self):
"""
Reference Node ID
"""
return self._refNode
@refNode.setter
def refNode(self, node):
self._refNode = node
@property
def nodeset(self):
"""
Nodes contains the list of Node IDs
"""
return self._nodeset
@nodeset.setter
def nodeset(self, nodes):
if isinstance(nodes, list) or isinstance(nodes,np.ndarray):
self._nodeset = NodeSet('Connector_{:s}'.format(self.name), np.array(nodes))
elif isinstance(nodes,NodeSet):
self._nodeset = nodes
else:
raise ValueError('Invalid type for nodes passed to Connector()')
def writeInput(self) -> str:
# A nodeset is automatically created from the name of the connector
strOut = '*RIGID BODY, NSET={:s}'.format(self.nodeset.name)
# A reference node is optional
if isinstance(self.refNode, int):
strOut += ',REF NODE={:d}\n'.format(self.refNode)
else:
strOut += '\n'
return strOut
class DOF:
UX = 1
UY = 2
UZ = 3
RX = 4
RY = 5
RZ = 6
T = 11
class Simulation:
"""
Provides the base class for running a Calculix simulation
"""
NUMTHREADS = 1
""" Number of Threads used by the Calculix Solver """
CALCULIX_PATH = ''
""" The Calculix directory path used for Windows platforms"""
VERBOSE_OUTPUT = True
""" When enabled, the output during the analysis is redirected to the console"""
def __init__(self, meshModel: Mesher):
self._input = ''
self._workingDirectory = ''
self._analysisCompleted = False
self.mpcSets = []
self.connectors = []
self.materials = []
self.materialAssignments = []
self.model = meshModel
self.initialTimeStep = 0.1
self.defaultTimeStep = 0.1
self.totalTime = 1.0
self.useSteadyStateAnalysis = True
self.TZERO = -273.15
self.SIGMAB = 5.669E-8
self._numThreads = 1
self.initialConditions = [] # list of dicts: node set name, condition type and value
self.loadCases = []
self._nodeSets = []
self._elSets = []
self.nodeSets = []
self.elSets = []
self.includes = []
def init(self):
self._input = ''
self._nodeSets = self.nodeSets
self._elSets = self.elSets
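# Typical driver flow (sketch; constructing the Mesher depends on the .mesh module):
# sim = Simulation(myMesher)
# sim.setWorkingDirectory('/tmp/job')
# Simulation.setNumThreads(4)
# sim.run() # writes input.inp and invokes ccx
# results = sim.results() # ResultProcessor for the generated output files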
@classmethod
def setNumThreads(cls, numThreads: int):
"""
Sets the number of simulation threads to use in Calculix
:param numThreads:
:return:
"""
cls.NUMTHREADS = numThreads
@classmethod
def getNumThreads(cls) -> int:
"""
Returns the number of threads used
:return: int:
"""
return cls.NUMTHREADS
@classmethod
def setCalculixPath(cls, calculixPath: str) -> None:
"""
Sets the path for the Calculix executable. Necessary when using Windows where there is not a default
installation procedure for Calculix
:param calculixPath: Directory containing the Calculix Executable
"""
if os.path.isdir(calculixPath) :
cls.CALCULIX_PATH = calculixPath
@classmethod
def setVerboseOuput(cls, state: bool) -> None:
"""
Sets if the output from Calculix should be verbose i.e. printed to the console
:param state:
"""
cls.VERBOSE_OUTPUT = state
def setWorkingDirectory(self, workDir):
if os.path.isdir(workDir) and os.access(workDir, os.W_OK):
self._workingDirectory = workDir
else:
raise ValueError('Working directory ({:s}) is not accessible or writable'.format(workDir))
@property
def name(self):
return self._name
def writeHeaders(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INCLUDES ')
for filename in self.includes:
self._input += '*include,input={:s}\n'.format(filename)
def prepareConnectors(self):
"""
Creates node sets for any RBE connectors used in the simulation
"""
# Kinematic Connectors require creating node sets
# These are created and added to the node set collection prior to writing
numConnectors = 1
for connector in self.connectors:
# Node sets are created and stored as an attribute of each Connector
self._nodeSets.append(connector.nodeset)
numConnectors += 1
def writeInput(self) -> str:
"""
Writes the input deck for the simulation
"""
self.init()
self.prepareConnectors()
self.writeHeaders()
self.writeMesh()
self.writeNodeSets()
self.writeElementSets()
self.writeKinematicConnectors()
self.writeMPCs()
self.writeMaterials()
self.writeMaterialAssignments()
self.writeInitialConditions()
self.writeAnalysisConditions()
self.writeLoadSteps()
return self._input
def writeElementSets(self):
if len(self._elSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ELEMENT SETS ')
for elSet in self._elSets:
self._input += os.linesep
self._input += elSet.writeInput()
#self._input += '*ELSET,ELSET={:s\n}'.format(elSet['name'])
#self._input += np.array2string(elSet['els'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeNodeSets(self):
if len(self._nodeSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' NODE SETS ')
for nodeSet in self._nodeSets:
self._input += os.linesep
self._input += nodeSet.writeInput()
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += np.array2string(nodeSet['nodes'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeKinematicConnectors(self):
if len(self.connectors) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' KINEMATIC CONNECTORS ')
for connector in self.connectors:
# A nodeset is automatically created from the name of the connector
self._input += connector.writeInput()
def writeMPCs(self):
if len(self.mpcSets) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MPCS ')
for mpcSet in self.mpcSets:
self._input += '*EQUATION\n'
self._input += '{:d}\n'.format(mpcSet['numTerms']) # number of terms in each equation (typically two)
for mpc in mpcSet['equations']:
terms = ['{:d},{:d},{:d}'.format(mpc['node'][i], mpc['dof'][i], mpc['eqn'][i]) for i in range(len(mpc['eqn']))]
self._input += ','.join(terms)
self._input += os.linesep
# *EQUATION
# 2 # number of terms in equation # typically two
# 28,2,1.,22,2,-1. # node a id, dof, node b id, dof b
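# Sketch of the mpcSet structure the loop above assumes (inferred from this
# method, not documented elsewhere): one node/dof/coefficient triple per term.
# mpcSet = {'numTerms': 2,
#           'equations': [{'node': [28, 22], 'dof': [2, 2], 'eqn': [1, -1]}]}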
def writeMaterialAssignments(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIAL ASSIGNMENTS ')
for matAssignment in self.materialAssignments:
self._input += '*solid section, elset={:s}, material={:s}\n'.format(matAssignment[0], matAssignment[1])
def writeMaterials(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIALS ')
for material in self.materials:
self._input += material.writeInput()
def writeInitialConditions(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INITIAL CONDITIONS ')
for initCond in self.initialConditions:
self._input += '*INITIAL CONDITIONS,TYPE={:s}\n'.format(initCond['type'].upper())
self._input += '{:s},{:e}\n'.format(initCond['set'], initCond['value'])
self._input += os.linesep
# Write the Physical Constants
self._input += '*PHYSICAL CONSTANTS,ABSOLUTE ZERO={:e},STEFAN BOLTZMANN={:e}\n'.format(self.TZERO, self.SIGMAB)
def writeAnalysisConditions(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ANALYSIS CONDITIONS ')
# Write the Initial Timestep
self._input += '{:.3f}, {:.3f}\n'.format(self.initialTimeStep, self.defaultTimeStep)
def writeLoadSteps(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' LOAD STEPS ')
for loadCase in self.loadCases:
self._input += loadCase.writeInput()
def writeMesh(self):
# TODO make a unique auto-generated name for the mesh
meshFilename = 'mesh.inp'
meshPath= os.path.join(self._workingDirectory, meshFilename)
self.model.writeMesh(meshPath)
self._input += '*include,input={:s}'.format(meshFilename)
def checkAnalysis(self) -> bool:
"""
Routine checks that the analysis has been correctly generated
:return: bool: True if no analysis errors occur
:raise: AnalysisError: Analysis error that occurred
"""
if len(self.materials) == 0:
raise AnalysisError('No material models have been assigned to the analysis')
for material in self.materials:
if not material.isValid():
|
return True
def version(self):
if sys.platform == 'win32':
cmdPath = os.path.join(self.CALCULIX_PATH, 'ccx.exe ')
p = subprocess.Popen([cmdPath, '-v'], stdout=subprocess.PIPE, universal_newlines=True )
stdout, stderr = p.communicate()
version = re.search(r"(\d+)\.(\d+)", stdout)
return int(version.group(1)), int(version.group(2))
elif sys.platform == 'linux':
p = subprocess.Popen(['ccx', '-v'], stdout=subprocess.PIPE, universal_newlines=True )
stdout, stderr = p.communicate()
version = re.search(r"(\d+)\.(\d+)", stdout)
return int(version.group(1)), int(version.group(2))
else:
raise NotImplementedError('Platform is not currently supported')
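# Note: 'ccx -v' prints a line such as "Version 2.17" (output format assumed
# here); version() then returns the tuple (2, 17).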
def results(self) -> ResultProcessor:
""" Returns the results obtained after running an analysis """
if self.isAnalysisCompleted():
return ResultProcessor('input')
else:
raise ValueError('Results were not available')
def isAnalysisCompleted(self) -> bool:
""" Returns if the analysis was completed successfully. """
return self._analysisCompleted
def clearAnalysis(self, includeResults:bool = False) -> None:
""" Clears any files generated from the analysis
:param includeResults: If set True will also delete the result files generated from the analysis
"""
filename = 'input' # Base filename for the analysis
files = [filename + '.inp',
filename + '.cvg',
filename + '.sta']
if includeResults:
files.append(filename + '.frd')
files.append(filename + '.dat')
try:
for file in files:
filePath = os.path.join(self._workingDirectory,file)
os.remove(filePath)
except:
pass
def run(self):
"""
Performs pre-analysis checks on the model and submits the job for Calculix to perform.
"""
# Reset analysis status
self._analysisCompleted = False
print('{:=^60}\n'.format(' RUNNING PRE-ANALYSIS CHECKS '))
self.checkAnalysis()
print('{:=^60}\n'.format(' WRITING INPUT FILE '))
inputDeckContents = self.writeInput()
inputDeckPath = os.path.join(self._workingDirectory,'input.inp')
with open(inputDeckPath, "w") as text_file:
text_file.write(inputDeckContents)
# Set environment variables for performing a multi-threaded analysis
os.environ["CCX_NPROC_STIFFNESS"] = '{:d}'.format(Simulation.NUMTHREADS)
os.environ["CCX_NPROC_EQUATION_SOLVER"] = '{:d}'.format(Simulation.NUMTHREADS)
os.environ["OMP_NUM_THREADS"] = '{:d}'.format(Simulation.NUMTHREADS)
print('\n{:=^60}\n'.format(' RUNNING CALCULIX '))
if sys.platform == 'win32':
cmdPath = os.path.join(self.CALCULIX_PATH, 'ccx.exe ')
arguments = '-i input'
cmd = cmdPath + arguments
popen = subprocess.Popen(cmd, cwd=self._workingDirectory, stdout=subprocess.PIPE, universal_newlines=True)
if self.VERBOSE_OUTPUT:
for stdout_line in iter(popen.stdout.readline, ""):
print(stdout_line, end='')
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
# Analysis was completed successfully
self._analysisCompleted = True
elif sys.platform == 'linux':
filename = 'input'
cmdSt = ['ccx', '-i', filename]
popen = subprocess.Popen(cmdSt, cwd=self._workingDirectory, stdout=subprocess.PIPE, universal_newlines=True)
if self.VERBOSE_OUTPUT:
for stdout_line in iter(popen.stdout.readline, ""):
print(stdout_line, end='')
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmdSt)
# Analysis was completed successfully
self._analysisCompleted = True
else:
raise NotImplementedError('Platform is not currently supported')
| raise AnalysisError('Material ({:s}) is not valid'.format(material.name)) | conditional_block |
core.py | # -*- coding: utf-8 -*-
import re # used to get info from frd file
import os
import sys
import subprocess # used to check ccx version
from enum import Enum, auto
from typing import List, Tuple
import logging
from .mesh import Mesher
from .results import ResultProcessor
import gmsh
import numpy as np
class AnalysisError(Exception):
"""Exception raised for errors generated during the analysis
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
class AnalysisType(Enum):
STRUCTURAL = auto()
THERMAL = auto()
FLUID = auto()
class NodeSet:
"""
A node set is a basic entity for storing node set lists. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, nodes):
self.name = name
self._nodes = nodes
@property
def nodes(self):
"""
Nodes contains the list of Node IDs
"""
return self._nodes
@nodes.setter
def nodes(self, nodes):
self._nodes = nodes
def writeInput(self) -> str:
out = '*NSET,NSET={:s}\n'.format(self.name)
out += np.array2string(self.nodes, precision=2, separator=', ', threshold=9999999999)[1:-1]
return out
class ElementSet:
"""
An element set is a basic entity for storing element set lists. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, els):
self.name = name
self._els = els
@property
def els(self):
"""
Elements contains the list of Element IDs
"""
return self._els
@els.setter
def els(self, elements):
self._els = elements
def writeInput(self) -> str:
out = '*ELSET,ELSET={:s}\n'.format(self.name)
out += np.array2string(self.els, precision=2, separator=', ', threshold=9999999999)[1:-1]
return out
class SurfaceSet:
"""
A surface set is a basic entity for storing element face lists, typically for setting directional fluxes onto
surface elements based on the element ordering. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, surfacePairs):
self.name = name
self._elSurfacePairs = surfacePairs
@property
def surfacePairs(self):
"""
Elements with the associated face orientations are specified as Nx2 numpy array, with the first column being
the element Id, and the second column the chosen face orientation
"""
return self._elSurfacePairs
@surfacePairs.setter
def surfacePairs(self, surfacePairs):
self._elSurfacePairs = surfacePairs
def writeInput(self) -> str:
out = '*SURFACE,NAME={:s}\n'.format(self.name)
for i in range(self._elSurfacePairs.shape[0]):
out += '{:d},S{:d}\n'.format(self._elSurfacePairs[i,0], self._elSurfacePairs[i,1])
return out
class Connector:
"""
A Connector is a rigid connector between a set of nodes and an (optional) reference node.
"""
def __init__(self, name, nodes, refNode = None):
self.name = name
self._refNode = refNode
self._nodeset = None
@property
def refNode(self):
"""
Reference Node ID
"""
return self._refNode
@refNode.setter
def refNode(self, node):
self._refNode = node
@property
def nodeset(self):
"""
Nodes contains the list of Node IDs
"""
return self._nodeset
@nodeset.setter
def nodeset(self, nodes):
if isinstance(nodes, list) or isinstance(nodes,np.ndarray):
self._nodeset = NodeSet('Connector_{:s}'.format(self.name), np.array(nodes))
elif isinstance(nodes,NodeSet):
self._nodeset = nodes
else:
raise ValueError('Invalid type for nodes passed to Connector()')
def writeInput(self) -> str:
# A nodeset is automatically created from the name of the connector
strOut = '*RIGID BODY, NSET={:s}'.format(self.nodeset.name)
# A reference node is optional
if isinstance(self.refNode, int):
strOut += ',REF NODE={:d}\n'.format(self.refNode)
else:
strOut += '\n'
return strOut
class DOF:
UX = 1
UY = 2
UZ = 3
RX = 4
RY = 5
RZ = 6
T = 11
class Simulation:
"""
Provides the base class for running a Calculix simulation
"""
NUMTHREADS = 1
""" Number of Threads used by the Calculix Solver """
CALCULIX_PATH = ''
""" The Calculix directory path used for Windows platforms"""
VERBOSE_OUTPUT = True
""" When enabled, the output during the analysis is redirected to the console"""
def __init__(self, meshModel: Mesher):
self._input = ''
self._workingDirectory = ''
self._analysisCompleted = False
self.mpcSets = []
self.connectors = []
self.materials = []
self.materialAssignments = []
self.model = meshModel
self.initialTimeStep = 0.1
self.defaultTimeStep = 0.1
self.totalTime = 1.0
self.useSteadyStateAnalysis = True
self.TZERO = -273.15
self.SIGMAB = 5.669E-8
self._numThreads = 1
self.initialConditions = [] # list of dicts: node set name, condition type and value
self.loadCases = []
self._nodeSets = []
self._elSets = []
self.nodeSets = []
self.elSets = []
self.includes = []
def init(self):
self._input = ''
self._nodeSets = self.nodeSets
self._elSets = self.elSets
@classmethod
def setNumThreads(cls, numThreads: int):
"""
Sets the number of simulation threads to use in Calculix
:param numThreads:
:return:
"""
cls.NUMTHREADS = numThreads
@classmethod
def getNumThreads(cls) -> int:
"""
Returns the number of threads used
:return: int:
"""
return cls.NUMTHREADS
@classmethod
def setCalculixPath(cls, calculixPath: str) -> None:
"""
Sets the path for the Calculix executable. Necessary when using Windows where there is not a default
installation procedure for Calculix
:param calculixPath: Directory containing the Calculix Executable
"""
if os.path.isdir(calculixPath) :
cls.CALCULIX_PATH = calculixPath
@classmethod
def setVerboseOuput(cls, state: bool) -> None:
"""
Sets if the output from Calculix should be verbose i.e. printed to the console
:param state:
"""
cls.VERBOSE_OUTPUT = state
def setWorkingDirectory(self, workDir):
if os.path.isdir(workDir) and os.access(workDir, os.W_OK):
self._workingDirectory = workDir
else:
raise ValueError('Working directory ({:s}) is not accessible or writable'.format(workDir))
@property
def name(self):
return self._name
def writeHeaders(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INCLUDES ')
for filename in self.includes:
self._input += '*include,input={:s}\n'.format(filename)
def prepareConnectors(self):
|
def writeInput(self) -> str:
"""
Writes the input deck for the simulation
"""
self.init()
self.prepareConnectors()
self.writeHeaders()
self.writeMesh()
self.writeNodeSets()
self.writeElementSets()
self.writeKinematicConnectors()
self.writeMPCs()
self.writeMaterials()
self.writeMaterialAssignments()
self.writeInitialConditions()
self.writeAnalysisConditions()
self.writeLoadSteps()
return self._input
def writeElementSets(self):
if len(self._elSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ELEMENT SETS ')
for elSet in self._elSets:
self._input += os.linesep
self._input += elSet.writeInput()
#self._input += '*ELSET,ELSET={:s\n}'.format(elSet['name'])
#self._input += np.array2string(elSet['els'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeNodeSets(self):
if len(self._nodeSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' NODE SETS ')
for nodeSet in self._nodeSets:
self._input += os.linesep
self._input += nodeSet.writeInput()
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += np.array2string(nodeSet['nodes'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeKinematicConnectors(self):
if len(self.connectors) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' KINEMATIC CONNECTORS ')
for connector in self.connectors:
# A nodeset is automatically created from the name of the connector
self._input += connector.writeInput()
def writeMPCs(self):
if len(self.mpcSets) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MPCS ')
for mpcSet in self.mpcSets:
self._input += '*EQUATION\n'
self._input += '{:d}\n'.format(mpcSet['numTerms']) # number of terms in each equation (typically two)
for mpc in mpcSet['equations']:
terms = ['{:d},{:d},{:d}'.format(mpc['node'][i], mpc['dof'][i], mpc['eqn'][i]) for i in range(len(mpc['eqn']))]
self._input += ','.join(terms)
self._input += os.linesep
# *EQUATION
# 2 # number of terms in equation # typically two
# 28,2,1.,22,2,-1. # node a id, dof, node b id, dof b
def writeMaterialAssignments(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIAL ASSIGNMENTS ')
for matAssignment in self.materialAssignments:
self._input += '*solid section, elset={:s}, material={:s}\n'.format(matAssignment[0], matAssignment[1])
def writeMaterials(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIALS ')
for material in self.materials:
self._input += material.writeInput()
def writeInitialConditions(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INITIAL CONDITIONS ')
for initCond in self.initialConditions:
self._input += '*INITIAL CONDITIONS,TYPE={:s}\n'.format(initCond['type'].upper())
self._input += '{:s},{:e}\n'.format(initCond['set'], initCond['value'])
self._input += os.linesep
# Write the Physical Constants
self._input += '*PHYSICAL CONSTANTS,ABSOLUTE ZERO={:e},STEFAN BOLTZMANN={:e}\n'.format(self.TZERO, self.SIGMAB)
def writeAnalysisConditions(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ANALYSIS CONDITIONS ')
# Write the Initial Timestep
self._input += '{:.3f}, {:.3f}\n'.format(self.initialTimeStep, self.defaultTimeStep)
def writeLoadSteps(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' LOAD STEPS ')
for loadCase in self.loadCases:
self._input += loadCase.writeInput()
def writeMesh(self):
# TODO make a unique auto-generated name for the mesh
meshFilename = 'mesh.inp'
meshPath= os.path.join(self._workingDirectory, meshFilename)
self.model.writeMesh(meshPath)
self._input += '*include,input={:s}'.format(meshFilename)
def checkAnalysis(self) -> bool:
"""
Routine checks that the analysis has been correctly generated
:return: bool: True if no analysis errors occur
:raise: AnalysisError: Analysis error that occurred
"""
if len(self.materials) == 0:
raise AnalysisError('No material models have been assigned to the analysis')
for material in self.materials:
if not material.isValid():
raise AnalysisError('Material ({:s}) is not valid'.format(material.name))
return True
def version(self):
if sys.platform == 'win32':
cmdPath = os.path.join(self.CALCULIX_PATH, 'ccx.exe ')
p = subprocess.Popen([cmdPath, '-v'], stdout=subprocess.PIPE, universal_newlines=True )
stdout, stderr = p.communicate()
version = re.search(r"(\d+)\.(\d+)", stdout)
return int(version.group(1)), int(version.group(2))
elif sys.platform == 'linux':
p = subprocess.Popen(['ccx', '-v'], stdout=subprocess.PIPE, universal_newlines=True )
stdout, stderr = p.communicate()
version = re.search(r"(\d+)\.(\d+)", stdout)
return int(version.group(1)), int(version.group(2))
else:
raise NotImplementedError('Platform is not currently supported')
def results(self) -> ResultProcessor:
""" Returns the results obtained after running an analysis """
if self.isAnalysisCompleted():
return ResultProcessor('input')
else:
raise ValueError('Results were not available')
def isAnalysisCompleted(self) -> bool:
""" Returns if the analysis was completed successfully. """
return self._analysisCompleted
def clearAnalysis(self, includeResults:bool = False) -> None:
""" Clears any files generated from the analysis
:param includeResults: If set True will also delete the result files generated from the analysis
"""
filename = 'input' # Base filename for the analysis
files = [filename + '.inp',
filename + '.cvg',
filename + '.sta']
if includeResults:
files.append(filename + '.frd')
files.append(filename + '.dat')
try:
for file in files:
filePath = os.path.join(self._workingDirectory,file)
os.remove(filePath)
except:
pass
def run(self):
"""
Performs pre-analysis checks on the model and submits the job for Calculix to perform.
"""
# Reset analysis status
self._analysisCompleted = False
print('{:=^60}\n'.format(' RUNNING PRE-ANALYSIS CHECKS '))
self.checkAnalysis()
print('{:=^60}\n'.format(' WRITING INPUT FILE '))
inputDeckContents = self.writeInput()
inputDeckPath = os.path.join(self._workingDirectory,'input.inp')
with open(inputDeckPath, "w") as text_file:
text_file.write(inputDeckContents)
# Set environment variables for performing a multi-threaded analysis
os.environ["CCX_NPROC_STIFFNESS"] = '{:d}'.format(Simulation.NUMTHREADS)
os.environ["CCX_NPROC_EQUATION_SOLVER"] = '{:d}'.format(Simulation.NUMTHREADS)
os.environ["OMP_NUM_THREADS"] = '{:d}'.format(Simulation.NUMTHREADS)
print('\n{:=^60}\n'.format(' RUNNING CALCULIX '))
if sys.platform == 'win32':
cmdPath = os.path.join(self.CALCULIX_PATH, 'ccx.exe ')
arguments = '-i input'
cmd = cmdPath + arguments
popen = subprocess.Popen(cmd, cwd=self._workingDirectory, stdout=subprocess.PIPE, universal_newlines=True)
if self.VERBOSE_OUTPUT:
for stdout_line in iter(popen.stdout.readline, ""):
print(stdout_line, end='')
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
# Analysis was completed successfully
self._analysisCompleted = True
elif sys.platform == 'linux':
filename = 'input'
cmdSt = ['ccx', '-i', filename]
popen = subprocess.Popen(cmdSt, cwd=self._workingDirectory, stdout=subprocess.PIPE, universal_newlines=True)
if self.VERBOSE_OUTPUT:
for stdout_line in iter(popen.stdout.readline, ""):
print(stdout_line, end='')
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmdSt)
# Analysis was completed successfully
self._analysisCompleted = True
else:
raise NotImplementedError('Platform is not currently supported')
| """
Creates node sets for any RBE connectors used in the simulation
"""
# Kinematic Connectors require creating node sets
# These are created and added to the node set collection prior to writing
numConnectors = 1
for connector in self.connectors:
# Node sets are created and stored as an attribute of each Connector
self._nodeSets.append(connector.nodeset)
numConnectors += 1 | identifier_body |
core.py | # -*- coding: utf-8 -*-
import re # used to get info from frd file
import os
import sys
import subprocess # used to check ccx version
from enum import Enum, auto
from typing import List, Tuple
import logging
from .mesh import Mesher
from .results import ResultProcessor
import gmsh
import numpy as np
class AnalysisError(Exception):
"""Exception raised for errors generated during the analysis
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
class AnalysisType(Enum):
STRUCTURAL = auto()
THERMAL = auto()
FLUID = auto()
class NodeSet:
"""
A node set is a basic entity for storing node set lists. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, nodes):
self.name = name
self._nodes = nodes
@property
def nodes(self):
"""
Nodes contains the list of Node IDs
"""
return self._nodes
@nodes.setter
def nodes(self, nodes):
self._nodes = nodes
def writeInput(self) -> str:
out = '*NSET,NSET={:s}\n'.format(self.name)
out += np.array2string(self.nodes, precision=2, separator=', ', threshold=9999999999)[1:-1]
return out
class ElementSet:
"""
An element set is a basic entity for storing element set lists. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, els):
self.name = name
self._els = els
@property
def els(self):
"""
Elements contains the list of Element IDs
"""
return self._els
@els.setter
def els(self, elements):
self._els = elements
def writeInput(self) -> str:
out = '*ELSET,ELSET={:s}\n'.format(self.name)
out += np.array2string(self.els, precision=2, separator=', ', threshold=9999999999)[1:-1]
return out
class SurfaceSet:
"""
A surface set is a basic entity for storing element face lists, typically for setting directional fluxes onto
surface elements based on the element ordering. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, surfacePairs):
self.name = name
self._elSurfacePairs = surfacePairs
@property
def surfacePairs(self):
"""
Elements with the associated face orientations are specified as Nx2 numpy array, with the first column being
the element Id, and the second column the chosen face orientation
"""
return self._elSurfacePairs
@surfacePairs.setter
def surfacePairs(self, surfacePairs):
self._elSurfacePairs = surfacePairs
def writeInput(self) -> str:
out = '*SURFACE,NAME={:s}\n'.format(self.name)
for i in range(self._elSurfacePairs.shape[0]):
out += '{:d},S{:d}\n'.format(self._elSurfacePairs[i,0], self._elSurfacePairs[i,1])
return out
class Connector:
"""
A Connector is a rigid connector between a set of nodes and an (optional) reference node.
"""
def __init__(self, name, nodes, refNode = None):
self.name = name
self._refNode = refNode
self._nodeset = None
@property
def refNode(self):
"""
Reference Node ID
"""
return self._refNode
@refNode.setter
def refNode(self, node):
self._refNode = node
@property
def nodeset(self):
"""
Nodes contains the list of Node IDs
"""
return self._nodeset
@nodeset.setter
def nodeset(self, nodes):
if isinstance(nodes, list) or isinstance(nodes,np.ndarray):
self._nodeset = NodeSet('Connector_{:s}'.format(self.name), np.array(nodes))
elif isinstance(nodes,NodeSet):
self._nodeset = nodes
else:
raise ValueError('Invalid type for nodes passed to Connector()')
def writeInput(self) -> str:
# A nodeset is automatically created from the name of the connector
strOut = '*RIGID BODY, NSET={:s}'.format(self.nodeset.name)
# A reference node is optional
if isinstance(self.refNode, int):
strOut += ',REF NODE={:d}\n'.format(self.refNode)
else:
strOut += '\n'
return strOut
class | :
UX = 1
UY = 2
UZ = 3
RX = 4
RY = 5
RZ = 6
T = 11
class Simulation:
"""
Provides the base class for running a Calculix simulation
"""
NUMTHREADS = 1
""" Number of Threads used by the Calculix Solver """
CALCULIX_PATH = ''
""" The Calculix directory path used for Windows platforms"""
VERBOSE_OUTPUT = True
""" When enabled, the output during the analysis is redirected to the console"""
def __init__(self, meshModel: Mesher):
self._input = ''
self._workingDirectory = ''
self._analysisCompleted = False
self.mpcSets = []
self.connectors = []
self.materials = []
self.materialAssignments = []
self.model = meshModel
self.initialTimeStep = 0.1
self.defaultTimeStep = 0.1
self.totalTime = 1.0
self.useSteadyStateAnalysis = True
self.TZERO = -273.15
self.SIGMAB = 5.669E-8
self._numThreads = 1
self.initialConditions = [] # list of dicts: node set name, condition type and value
self.loadCases = []
self._nodeSets = []
self._elSets = []
self.nodeSets = []
self.elSets = []
self.includes = []
def init(self):
self._input = ''
self._nodeSets = self.nodeSets
self._elSets = self.elSets
@classmethod
def setNumThreads(cls, numThreads: int):
"""
Sets the number of simulation threads to use in Calculix
:param numThreads:
:return:
"""
cls.NUMTHREADS = numThreads
@classmethod
def getNumThreads(cls) -> int:
"""
Returns the number of threads used
:return: int:
"""
return cls.NUMTHREADS
@classmethod
def setCalculixPath(cls, calculixPath: str) -> None:
"""
Sets the path for the Calculix executable. Necessary when using Windows where there is not a default
installation procedure for Calculix
:param calculixPath: Directory containing the Calculix Executable
"""
if os.path.isdir(calculixPath) :
cls.CALCULIX_PATH = calculixPath
@classmethod
def setVerboseOuput(cls, state: bool) -> None:
"""
Sets if the output from Calculix should be verbose i.e. printed to the console
:param state:
"""
cls.VERBOSE_OUTPUT = state
def setWorkingDirectory(self, workDir):
if os.path.isdir(workDir) and os.access(workDir, os.W_OK):
self._workingDirectory = workDir
else:
raise ValueError('Working directory ({:s}) is not accessible or writable'.format(workDir))
@property
def name(self):
return self._name
def writeHeaders(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INCLUDES ')
for filename in self.includes:
self._input += '*include,input={:s}\n'.format(filename)
def prepareConnectors(self):
"""
Creates node sets for any RBE connectors used in the simulation
"""
# Kinematic Connectors require creating node sets
# These are created and added to the node set collection prior to writing
numConnectors = 1
for connector in self.connectors:
# Node sets are created and stored as an attribute of each Connector
self._nodeSets.append(connector.nodeset)
numConnectors += 1
def writeInput(self) -> str:
"""
Writes the input deck for the simulation
"""
self.init()
self.prepareConnectors()
self.writeHeaders()
self.writeMesh()
self.writeNodeSets()
self.writeElementSets()
self.writeKinematicConnectors()
self.writeMPCs()
self.writeMaterials()
self.writeMaterialAssignments()
self.writeInitialConditions()
self.writeAnalysisConditions()
self.writeLoadSteps()
return self._input
def writeElementSets(self):
if len(self._elSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ELEMENT SETS ')
for elSet in self._elSets:
self._input += os.linesep
self._input += elSet.writeInput()
#self._input += '*ELSET,ELSET={:s\n}'.format(elSet['name'])
#self._input += np.array2string(elSet['els'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeNodeSets(self):
if len(self._nodeSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' NODE SETS ')
for nodeSet in self._nodeSets:
self._input += os.linesep
self._input += nodeSet.writeInput()
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += np.array2string(nodeSet['nodes'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeKinematicConnectors(self):
if len(self.connectors) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' KINEMATIC CONNECTORS ')
for connector in self.connectors:
# A nodeset is automatically created from the name of the connector
self._input += connector.writeInput()
def writeMPCs(self):
if len(self.mpcSets) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MPCS ')
for mpcSet in self.mpcSets:
self._input += '*EQUATION\n'
self._input += '{:d}\n'.format(mpcSet['numTerms']) # number of terms in each equation (typically two)
for mpc in mpcSet['equations']:
terms = ['{:d},{:d},{:d}'.format(mpc['node'][i], mpc['dof'][i], mpc['eqn'][i]) for i in range(len(mpc['eqn']))]
self._input += ','.join(terms)
self._input += os.linesep
# *EQUATION
# 2 # number of terms in equation # typically two
# 28,2,1.,22,2,-1. # node a id, dof, node b id, dof b
def writeMaterialAssignments(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIAL ASSIGNMENTS ')
for matAssignment in self.materialAssignments:
self._input += '*solid section, elset={:s}, material={:s}\n'.format(matAssignment[0], matAssignment[1])
def writeMaterials(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIALS ')
for material in self.materials:
self._input += material.writeInput()
def writeInitialConditions(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INITIAL CONDITIONS ')
for initCond in self.initialConditions:
self._input += '*INITIAL CONDITIONS,TYPE={:s}\n'.format(initCond['type'].upper())
self._input += '{:s},{:e}\n'.format(initCond['set'], initCond['value'])
self._input += os.linesep
# Write the Physical Constants
self._input += '*PHYSICAL CONSTANTS,ABSOLUTE ZERO={:e},STEFAN BOLTZMANN={:e}\n'.format(self.TZERO, self.SIGMAB)
def writeAnalysisConditions(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ANALYSIS CONDITIONS ')
# Write the Initial Timestep
self._input += '{:.3f}, {:.3f}\n'.format(self.initialTimeStep, self.defaultTimeStep)
def writeLoadSteps(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' LOAD STEPS ')
for loadCase in self.loadCases:
self._input += loadCase.writeInput()
def writeMesh(self):
# TODO make a unique auto-generated name for the mesh
meshFilename = 'mesh.inp'
meshPath= os.path.join(self._workingDirectory, meshFilename)
self.model.writeMesh(meshPath)
self._input += '*include,input={:s}'.format(meshFilename)
def checkAnalysis(self) -> bool:
"""
Routine checks that the analysis has been correctly generated
:return: bool: True if no analysis errors occur
:raise: AnalysisError: Analysis error that occurred
"""
if len(self.materials) == 0:
raise AnalysisError('No material models have been assigned to the analysis')
for material in self.materials:
if not material.isValid():
raise AnalysisError('Material ({:s}) is not valid'.format(material.name))
return True
def version(self):
if sys.platform == 'win32':
cmdPath = os.path.join(self.CALCULIX_PATH, 'ccx.exe ')
p = subprocess.Popen([cmdPath, '-v'], stdout=subprocess.PIPE, universal_newlines=True )
stdout, stderr = p.communicate()
version = re.search(r"(\d+)\.(\d+)", stdout)
return int(version.group(1)), int(version.group(2))
elif sys.platform == 'linux':
p = subprocess.Popen(['ccx', '-v'], stdout=subprocess.PIPE, universal_newlines=True )
stdout, stderr = p.communicate()
version = re.search(r"(\d+)\.(\d+)", stdout)
return int(version.group(1)), int(version.group(2))
else:
raise NotImplementedError('Platform is not currently supported')
def results(self) -> ResultProcessor:
""" Returns the results obtained after running an analysis """
if self.isAnalysisCompleted():
return ResultProcessor('input')
else:
raise ValueError('Results were not available')
def isAnalysisCompleted(self) -> bool:
""" Returns if the analysis was completed successfully. """
return self._analysisCompleted
def clearAnalysis(self, includeResults:bool = False) -> None:
""" Clears any files generated from the analysis
:param includeResults: If set True will also delete the result files generated from the analysis
"""
filename = 'input' # Base filename for the analysis
files = [filename + '.inp',
filename + '.cvg',
filename + '.sta']
if includeResults:
files.append(filename + '.frd')
files.append(filename + '.dat')
try:
for file in files:
filePath = os.path.join(self._workingDirectory,file)
os.remove(filePath)
except:
pass
def run(self):
"""
Performs pre-analysis checks on the model and submits the job for Calculix to perform.
"""
# Reset analysis status
self._analysisCompleted = False
print('{:=^60}\n'.format(' RUNNING PRE-ANALYSIS CHECKS '))
self.checkAnalysis()
print('{:=^60}\n'.format(' WRITING INPUT FILE '))
inputDeckContents = self.writeInput()
inputDeckPath = os.path.join(self._workingDirectory,'input.inp')
with open(inputDeckPath, "w") as text_file:
text_file.write(inputDeckContents)
# Set environment variables for performing a multi-threaded analysis
os.environ["CCX_NPROC_STIFFNESS"] = '{:d}'.format(Simulation.NUMTHREADS)
os.environ["CCX_NPROC_EQUATION_SOLVER"] = '{:d}'.format(Simulation.NUMTHREADS)
os.environ["OMP_NUM_THREADS"] = '{:d}'.format(Simulation.NUMTHREADS)
print('\n{:=^60}\n'.format(' RUNNING CALCULIX '))
if sys.platform == 'win32':
cmdPath = os.path.join(self.CALCULIX_PATH, 'ccx.exe ')
arguments = '-i input'
cmd = cmdPath + arguments
popen = subprocess.Popen(cmd, cwd=self._workingDirectory, stdout=subprocess.PIPE, universal_newlines=True)
if self.VERBOSE_OUTPUT:
for stdout_line in iter(popen.stdout.readline, ""):
print(stdout_line, end='')
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
# Analysis was completed successfully
self._analysisCompleted = True
elif sys.platform == 'linux':
filename = 'input'
cmdSt = ['ccx', '-i', filename]
popen = subprocess.Popen(cmdSt, cwd=self._workingDirectory, stdout=subprocess.PIPE, universal_newlines=True)
if self.VERBOSE_OUTPUT:
for stdout_line in iter(popen.stdout.readline, ""):
print(stdout_line, end='')
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmdSt)
# Analysis was completed successfully
self._analysisCompleted = True
else:
raise NotImplementedError('Platform is not currently supported')
| DOF | identifier_name |
core.py | # -*- coding: utf-8 -*-
import re # used to get info from frd file
import os
import sys
import subprocess # used to check ccx version
from enum import Enum, auto
from typing import List, Tuple
import logging
from .mesh import Mesher
from .results import ResultProcessor
import gmsh
import numpy as np
class AnalysisError(Exception):
"""Exception raised for errors generated during the analysis
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
class AnalysisType(Enum):
STRUCTURAL = auto()
THERMAL = auto()
FLUID = auto()
class NodeSet:
"""
A node set is a basic entity for storing node set lists. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, nodes):
self.name = name
self._nodes = nodes
@property
def nodes(self):
"""
Nodes contains the list of Node IDs
"""
return self._nodes
@nodes.setter
def nodes(self, nodes):
self._nodes = nodes
def writeInput(self) -> str:
out = '*NSET,NSET={:s}\n'.format(self.name)
out += np.array2string(self.nodes, precision=2, separator=', ', threshold=9999999999)[1:-1]
return out
class ElementSet:
"""
An element set is a basic entity for storing element set lists. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, els):
self.name = name
self._els = els
@property
def els(self):
"""
Elements contains the list of Element IDs
"""
return self._els
@els.setter
def els(self, elements):
self._els = elements
def writeInput(self) -> str:
out = '*ELSET,ELSET={:s}\n'.format(self.name)
out += np.array2string(self.els, precision=2, separator=', ', threshold=9999999999)[1:-1]
return out
class SurfaceSet:
"""
A surface set is a basic entity for storing element face lists, typically for setting directional fluxes onto
surface elements based on the element ordering. The set remains constant without any dynamic referencing
to any underlying geometric entities.
"""
def __init__(self, name, surfacePairs):
self.name = name
self._elSurfacePairs = surfacePairs
@property
def surfacePairs(self):
"""
Elements with the associated face orientations are specified as Nx2 numpy array, with the first column being
the element Id, and the second column the chosen face orientation
"""
return self._elSurfacePairs
@surfacePairs.setter
def surfacePairs(self, surfacePairs):
self._elSurfacePairs = surfacePairs
def writeInput(self) -> str:
out = '*SURFACE,NAME={:s}\n'.format(self.name)
for i in range(self._elSurfacePairs.shape[0]):
out += '{:d},S{:d}\n'.format(self._elSurfacePairs[i,0], self._elSurfacePairs[i,1])
return out
class Connector:
"""
A Connector is a rigid connector between a set of nodes and an (optional) reference node.
"""
def __init__(self, name, nodes, refNode = None):
self.name = name
self._refNode = refNode
self._nodeset = None
@property
def refNode(self):
"""
Reference Node ID
"""
return self._refNode
@refNode.setter
def refNode(self, node):
self._refNode = node
@property
def nodeset(self):
"""
Nodes contains the list of Node IDs
"""
return self._nodeset
@nodeset.setter
def nodeset(self, nodes):
if isinstance(nodes, list) or isinstance(nodes,np.ndarray):
self._nodeset = NodeSet('Connector_{:s}'.format(self.name), np.array(nodes))
elif isinstance(nodes,NodeSet):
self._nodeset = nodes
else:
raise ValueError('Invalid type for nodes passed to Connector()')
def writeInput(self) -> str:
# A nodeset is automatically created from the name of the connector
strOut = '*RIGID BODY, NSET={:s}'.format(self.nodeset.name)
# A reference node is optional
if isinstance(self.refNode, int):
strOut += ',REF NODE={:d}\n'.format(self.refNode)
else:
strOut += '\n'
return strOut
class DOF:
UX = 1
UY = 2
UZ = 3
RX = 4
RY = 5
RZ = 6
T = 11
class Simulation:
"""
Provides the base class for running a Calculix simulation
"""
NUMTHREADS = 1
""" Number of Threads used by the Calculix Solver """
CALCULIX_PATH = ''
""" The Calculix directory path used for Windows platforms"""
VERBOSE_OUTPUT = True
""" When enabled, the output during the analysis is redirected to the console"""
def __init__(self, meshModel: Mesher):
self._input = ''
self._workingDirectory = ''
self._analysisCompleted = False
self.mpcSets = []
self.connectors = []
self.materials = []
self.materialAssignments = []
self.model = meshModel
self.initialTimeStep = 0.1
self.defaultTimeStep = 0.1
self.totalTime = 1.0
self.useSteadyStateAnalysis = True
self.TZERO = -273.15
self.SIGMAB = 5.669E-8
self._numThreads = 1
self.initialConditions = [] # list of dicts: node set name, condition type and value
self.loadCases = []
self._nodeSets = []
self._elSets = []
self.nodeSets = []
self.elSets = []
self.includes = []
def init(self):
self._input = ''
self._nodeSets = self.nodeSets
self._elSets = self.elSets
@classmethod
def setNumThreads(cls, numThreads: int):
"""
Sets the number of simulation threads to use in Calculix
:param numThreads:
:return:
"""
cls.NUMTHREADS = numThreads
@classmethod
def getNumThreads(cls) -> int:
"""
Returns the number of threads used
:return: int:
"""
return cls.NUMTHREADS
@classmethod
def setCalculixPath(cls, calculixPath: str) -> None:
"""
Sets the path for the Calculix executable. Necessary when using Windows where there is not a default
installation procedure for Calculix
:param calculixPath: Directory containing the Calculix Executable
"""
if os.path.isdir(calculixPath) :
cls.CALCULIX_PATH = calculixPath
@classmethod
def setVerboseOuput(cls, state: bool) -> None:
"""
Sets if the output from Calculix should be verbose i.e. printed to the console
:param state:
"""
cls.VERBOSE_OUTPUT = state
def setWorkingDirectory(self, workDir):
if os.path.isdir(workDir) and os.access(workDir, os.W_OK):
self._workingDirectory = workDir
else:
raise ValueError('Working directory ({:s}) is not accessible or writable'.format(workDir))
@property
def name(self):
return self._name
def writeHeaders(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INCLUDES ')
for filename in self.includes:
self._input += '*include,input={:s}\n'.format(filename)
def prepareConnectors(self):
"""
Creates node sets for any RBE connectors used in the simulation
"""
# Kinematic Connectors require creating node sets
# These are created and added to the node set collection prior to writing
numConnectors = 1
for connector in self.connectors:
# Node sets are created and stored as an attribute of each Connector
self._nodeSets.append(connector.nodeset)
numConnectors += 1
def writeInput(self) -> str:
"""
Writes the input deck for the simulation
"""
self.init()
self.prepareConnectors()
self.writeHeaders()
self.writeMesh()
self.writeNodeSets()
self.writeElementSets()
self.writeKinematicConnectors()
self.writeMPCs()
self.writeMaterials()
self.writeMaterialAssignments()
self.writeInitialConditions()
self.writeAnalysisConditions()
self.writeLoadSteps()
return self._input
def writeElementSets(self):
if len(self._elSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ELEMENT SETS ')
for elSet in self._elSets:
self._input += os.linesep
self._input += elSet.writeInput()
#self._input += '*ELSET,ELSET={:s\n}'.format(elSet['name'])
#self._input += np.array2string(elSet['els'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeNodeSets(self):
if len(self._nodeSets) == 0:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' NODE SETS ')
for nodeSet in self._nodeSets:
self._input += os.linesep
self._input += nodeSet.writeInput()
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += '*NSET,NSET={:s}\n'.format(nodeSet['name'])
#self._input += np.array2string(nodeSet['nodes'], precision=2, separator=', ', threshold=9999999999)[1:-1]
def writeKinematicConnectors(self):
if len(self.connectors) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' KINEMATIC CONNECTORS ')
for connector in self.connectors:
# A nodeset is automatically created from the name of the connector
self._input += connector.writeInput()
def writeMPCs(self):
if len(self.mpcSets) < 1:
return
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MPCS ')
for mpcSet in self.mpcSets:
self._input += '*EQUATION\n'
self._input += '{:d}\n'.format(mpcSet['numTerms']) # number of terms in each equation (typically two)
for mpc in mpcSet['equations']:
terms = ['{:d},{:d},{:d}'.format(mpc['node'][i], mpc['dof'][i], mpc['eqn'][i]) for i in range(len(mpc['eqn']))]
self._input += ','.join(terms)
self._input += os.linesep
# *EQUATION
# 2 # number of terms in equation # typically two
# 28,2,1.,22,2,-1. # node a id, dof, node b id, dof b
def writeMaterialAssignments(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIAL ASSIGNMENTS ')
for matAssignment in self.materialAssignments:
self._input += '*solid section, elset={:s}, material={:s}\n'.format(matAssignment[0], matAssignment[1])
def writeMaterials(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' MATERIALS ')
for material in self.materials: |
def writeInitialConditions(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' INITIAL CONDITIONS ')
for initCond in self.initialConditions:
self._input += '*INITIAL CONDITIONS,TYPE={:s}\n'.format(initCond['type'].upper())
self._input += '{:s},{:e}\n'.format(initCond['set'], initCond['value'])
self._input += os.linesep
# Write the Physical Constants
self._input += '*PHYSICAL CONSTANTS,ABSOLUTE ZERO={:e},STEFAN BOLTZMANN={:e}\n'.format(self.TZERO, self.SIGMAB)
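        # Example rendered card (values assumed, SI units): TZERO=-273.15 and
        # SIGMAB=5.670374e-08 give:
        # *PHYSICAL CONSTANTS,ABSOLUTE ZERO=-2.731500e+02,STEFAN BOLTZMANN=5.670374e-08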
def writeAnalysisConditions(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' ANALYSIS CONDITIONS ')
# Write the Initial Timestep
self._input += '{:.3f}, {:.3f}\n'.format(self.initialTimeStep, self.defaultTimeStep)
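        # Example (values assumed): initialTimeStep=0.1 and defaultTimeStep=1.0
        # render as the line "0.100, 1.000".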
def writeLoadSteps(self):
self._input += os.linesep
self._input += '{:*^125}\n'.format(' LOAD STEPS ')
for loadCase in self.loadCases:
self._input += loadCase.writeInput()
def writeMesh(self):
# TODO make a unique auto-generated name for the mesh
meshFilename = 'mesh.inp'
        meshPath = os.path.join(self._workingDirectory, meshFilename)
        self.model.writeMesh(meshPath)
        self._input += '*include,input={:s}\n'.format(meshFilename)
def checkAnalysis(self) -> bool:
"""
        Routine checks that the analysis has been correctly generated
        :return: bool: True if no analysis errors occur
        :raise: AnalysisError: Analysis error that occurred
"""
if len(self.materials) == 0:
raise AnalysisError('No material models have been assigned to the analysis')
for material in self.materials:
if not material.isValid():
raise AnalysisError('Material ({:s}) is not valid'.format(material.name))
return True
def version(self):
if sys.platform == 'win32':
            cmdPath = os.path.join(self.CALCULIX_PATH, 'ccx.exe')
p = subprocess.Popen([cmdPath, '-v'], stdout=subprocess.PIPE, universal_newlines=True )
stdout, stderr = p.communicate()
            version = re.search(r"(\d+)\.(\d+)", stdout)
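            # Example (assumed banner format): output containing "Version 2.17"
            # yields the tuple (2, 17) from the two groups below.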
return int(version.group(1)), int(version.group(2))
elif sys.platform == 'linux':
p = subprocess.Popen(['ccx', '-v'], stdout=subprocess.PIPE, universal_newlines=True )
stdout, stderr = p.communicate()
            version = re.search(r"(\d+)\.(\d+)", stdout)
return int(version.group(1)), int(version.group(2))
else:
            raise NotImplementedError('Platform is not currently supported')
def results(self) -> ResultProcessor:
""" Returns the results obtained after running an analysis """
if self.isAnalysisCompleted():
return ResultProcessor('input')
else:
raise ValueError('Results were not available')
def isAnalysisCompleted(self) -> bool:
""" Returns if the analysis was completed successfully. """
return self._analysisCompleted
def clearAnalysis(self, includeResults:bool = False) -> None:
""" Clears any files generated from the analysis
        :param includeResults: If set to True, the result files generated by the analysis are deleted as well
"""
filename = 'input' # Base filename for the analysis
files = [filename + '.inp',
filename + '.cvg',
filename + '.sta']
if includeResults:
files.append(filename + '.frd')
files.append(filename + '.dat')
        for file in files:
            filePath = os.path.join(self._workingDirectory, file)
            try:
                os.remove(filePath)
            except OSError:
                pass  # a file may not exist if the analysis never generated it
def run(self):
"""
Performs pre-analysis checks on the model and submits the job for Calculix to perform.
"""
# Reset analysis status
self._analysisCompleted = False
print('{:=^60}\n'.format(' RUNNING PRE-ANALYSIS CHECKS '))
self.checkAnalysis()
print('{:=^60}\n'.format(' WRITING INPUT FILE '))
inputDeckContents = self.writeInput()
inputDeckPath = os.path.join(self._workingDirectory,'input.inp')
with open(inputDeckPath, "w") as text_file:
text_file.write(inputDeckContents)
        # Set environment variables for performing a multi-threaded analysis
os.environ["CCX_NPROC_STIFFNESS"] = '{:d}'.format(Simulation.NUMTHREADS)
os.environ["CCX_NPROC_EQUATION_SOLVER"] = '{:d}'.format(Simulation.NUMTHREADS)
os.environ["OMP_NUM_THREADS"] = '{:d}'.format(Simulation.NUMTHREADS)
print('\n{:=^60}\n'.format(' RUNNING CALCULIX '))
if sys.platform == 'win32':
            cmdPath = os.path.join(self.CALCULIX_PATH, 'ccx.exe')
            arguments = ' -i input'
            cmd = cmdPath + arguments
popen = subprocess.Popen(cmd, cwd=self._workingDirectory, stdout=subprocess.PIPE, universal_newlines=True)
if self.VERBOSE_OUTPUT:
for stdout_line in iter(popen.stdout.readline, ""):
print(stdout_line, end='')
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
# Analysis was completed successfully
self._analysisCompleted = True
elif sys.platform == 'linux':
filename = 'input'
cmdSt = ['ccx', '-i', filename]
popen = subprocess.Popen(cmdSt, cwd=self._workingDirectory, stdout=subprocess.PIPE, universal_newlines=True)
if self.VERBOSE_OUTPUT:
for stdout_line in iter(popen.stdout.readline, ""):
print(stdout_line, end='')
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmdSt)
# Analysis was completed successfully
self._analysisCompleted = True
else:
            raise NotImplementedError('Platform is not currently supported') | self._input += material.writeInput() | random_line_split |
types_string.go | // Code generated by "stringer -type=CpuType,CpuSubtypeX86,CpuSubtypeX86_64,CpuSubtypePPC,CpuSubtypeARM,CpuSubtypeARM64,Magic,FileType,SectionType,LoadCommand,SymbolType,StabType,ReferenceType -output types_string.go"; DO NOT EDIT.
package macho_widgets
import "fmt"
const (
_CpuType_name_0 = "CPU_TYPE_VAX"
_CpuType_name_1 = "CPU_TYPE_MC680x0CPU_TYPE_X86"
_CpuType_name_2 = "CPU_TYPE_MC98000CPU_TYPE_HPPACPU_TYPE_ARMCPU_TYPE_MC88000CPU_TYPE_SPARCCPU_TYPE_I860"
_CpuType_name_3 = "CPU_TYPE_POWERPC"
_CpuType_name_4 = "CPU_TYPE_X86_64"
_CpuType_name_5 = "CPU_TYPE_ARM64"
_CpuType_name_6 = "CPU_TYPE_POWERPC64"
)
var (
_CpuType_index_0 = [...]uint8{0, 12}
_CpuType_index_1 = [...]uint8{0, 16, 28}
_CpuType_index_2 = [...]uint8{0, 16, 29, 41, 57, 71, 84}
_CpuType_index_3 = [...]uint8{0, 16}
_CpuType_index_4 = [...]uint8{0, 15}
_CpuType_index_5 = [...]uint8{0, 14}
_CpuType_index_6 = [...]uint8{0, 18}
)
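// Explanatory note (not part of the generated stringer output): each
// _CpuType_name_N constant concatenates the enum value names back to back,
// and the matching _CpuType_index_N array holds cumulative byte offsets, so
// _CpuType_name_N[index[i]:index[i+1]] slices out the i-th name without any
// per-value string allocation.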
func (i CpuType) String() string {
switch {
case i == 1:
return _CpuType_name_0
case 6 <= i && i <= 7:
i -= 6
return _CpuType_name_1[_CpuType_index_1[i]:_CpuType_index_1[i+1]]
case 10 <= i && i <= 15:
i -= 10
return _CpuType_name_2[_CpuType_index_2[i]:_CpuType_index_2[i+1]]
case i == 18:
return _CpuType_name_3
case i == 16777223:
return _CpuType_name_4
case i == 16777228:
return _CpuType_name_5
case i == 16777234:
return _CpuType_name_6
default:
return fmt.Sprintf("CpuType(%d)", i)
}
}
const _CpuSubtypeX86_name = "CPU_SUBTYPE_X86_ALLCPU_SUBTYPE_X86_ARCH1"
var _CpuSubtypeX86_index = [...]uint8{0, 19, 40}
func (i CpuSubtypeX86) String() string {
i -= 3
if i >= CpuSubtypeX86(len(_CpuSubtypeX86_index)-1) {
return fmt.Sprintf("CpuSubtypeX86(%d)", i+3)
}
return _CpuSubtypeX86_name[_CpuSubtypeX86_index[i]:_CpuSubtypeX86_index[i+1]]
}
const (
_CpuSubtypeX86_64_name_0 = "CPU_SUBTYPE_X86_64_ALL"
_CpuSubtypeX86_64_name_1 = "CPU_SUBTYPE_X86_64_H"
)
var (
_CpuSubtypeX86_64_index_0 = [...]uint8{0, 22}
_CpuSubtypeX86_64_index_1 = [...]uint8{0, 20}
)
func (i CpuSubtypeX86_64) String() string {
switch {
case i == 3:
return _CpuSubtypeX86_64_name_0
case i == 8:
return _CpuSubtypeX86_64_name_1
default:
return fmt.Sprintf("CpuSubtypeX86_64(%d)", i)
}
}
const (
_CpuSubtypePPC_name_0 = "CPU_SUBTYPE_POWERPC_ALLCPU_SUBTYPE_POWERPC_601CPU_SUBTYPE_POWERPC_602CPU_SUBTYPE_POWERPC_603CPU_SUBTYPE_POWERPC_603eCPU_SUBTYPE_POWERPC_603evCPU_SUBTYPE_POWERPC_604CPU_SUBTYPE_POWERPC_604eCPU_SUBTYPE_POWERPC_620CPU_SUBTYPE_POWERPC_750CPU_SUBTYPE_POWERPC_7400CPU_SUBTYPE_POWERPC_7450"
_CpuSubtypePPC_name_1 = "CPU_SUBTYPE_POWERPC_970"
)
var (
_CpuSubtypePPC_index_0 = [...]uint16{0, 23, 46, 69, 92, 116, 141, 164, 188, 211, 234, 258, 282}
_CpuSubtypePPC_index_1 = [...]uint8{0, 23}
)
func (i CpuSubtypePPC) String() string {
switch {
case 0 <= i && i <= 11:
return _CpuSubtypePPC_name_0[_CpuSubtypePPC_index_0[i]:_CpuSubtypePPC_index_0[i+1]]
case i == 100:
return _CpuSubtypePPC_name_1
default:
return fmt.Sprintf("CpuSubtypePPC(%d)", i)
}
}
const (
_CpuSubtypeARM_name_0 = "CPU_SUBTYPE_ARM_ALL"
_CpuSubtypeARM_name_1 = "CPU_SUBTYPE_ARM_V4TCPU_SUBTYPE_ARM_V6CPU_SUBTYPE_ARM_V5TEJCPU_SUBTYPE_ARM_XSCALECPU_SUBTYPE_ARM_V7CPU_SUBTYPE_ARM_V7FCPU_SUBTYPE_ARM_V7SCPU_SUBTYPE_ARM_V7KCPU_SUBTYPE_ARM_V8CPU_SUBTYPE_ARM_V6MCPU_SUBTYPE_ARM_V7MCPU_SUBTYPE_ARM_V7EM"
)
var (
_CpuSubtypeARM_index_0 = [...]uint8{0, 19}
_CpuSubtypeARM_index_1 = [...]uint8{0, 19, 37, 58, 80, 98, 117, 136, 155, 173, 192, 211, 231}
)
func (i CpuSubtypeARM) String() string {
switch {
case i == 0:
return _CpuSubtypeARM_name_0
case 5 <= i && i <= 16:
i -= 5
return _CpuSubtypeARM_name_1[_CpuSubtypeARM_index_1[i]:_CpuSubtypeARM_index_1[i+1]]
default:
return fmt.Sprintf("CpuSubtypeARM(%d)", i)
}
}
const _CpuSubtypeARM64_name = "CPU_SUBTYPE_ARM64_ALLCPU_SUBTYPE_ARM64_V8"
var _CpuSubtypeARM64_index = [...]uint8{0, 21, 41}
func (i CpuSubtypeARM64) String() string {
if i >= CpuSubtypeARM64(len(_CpuSubtypeARM64_index)-1) |
return _CpuSubtypeARM64_name[_CpuSubtypeARM64_index[i]:_CpuSubtypeARM64_index[i+1]]
}
const (
_Magic_name_0 = "FAT_CIGAM"
_Magic_name_1 = "FAT_CIGAM_64"
_Magic_name_2 = "FAT_MAGICFAT_MAGIC_64"
_Magic_name_3 = "MH_CIGAM"
_Magic_name_4 = "MH_CIGAM_64"
_Magic_name_5 = "MH_MAGICMH_MAGIC_64"
)
var (
_Magic_index_0 = [...]uint8{0, 9}
_Magic_index_1 = [...]uint8{0, 12}
_Magic_index_2 = [...]uint8{0, 9, 21}
_Magic_index_3 = [...]uint8{0, 8}
_Magic_index_4 = [...]uint8{0, 11}
_Magic_index_5 = [...]uint8{0, 8, 19}
)
func (i Magic) String() string {
switch {
case i == 3199925962:
return _Magic_name_0
case i == 3216703178:
return _Magic_name_1
case 3405691582 <= i && i <= 3405691583:
i -= 3405691582
return _Magic_name_2[_Magic_index_2[i]:_Magic_index_2[i+1]]
case i == 3472551422:
return _Magic_name_3
case i == 3489328638:
return _Magic_name_4
case 4277009102 <= i && i <= 4277009103:
i -= 4277009102
return _Magic_name_5[_Magic_index_5[i]:_Magic_index_5[i+1]]
default:
return fmt.Sprintf("Magic(%d)", i)
}
}
const _FileType_name = "MH_OBJECTMH_EXECUTEMH_FVMLIBMH_COREMH_PRELOADMH_DYLIBMH_DYLINKERMH_BUNDLEMH_DYLIB_STUBMH_DSYMMH_KEXT_BUNDLE"
var _FileType_index = [...]uint8{0, 9, 19, 28, 35, 45, 53, 64, 73, 86, 93, 107}
func (i FileType) String() string {
i -= 1
if i >= FileType(len(_FileType_index)-1) {
return fmt.Sprintf("FileType(%d)", i+1)
}
return _FileType_name[_FileType_index[i]:_FileType_index[i+1]]
}
const _SectionType_name = "S_REGULARS_ZEROFILLS_CSTRING_LITERALSS_4BYTE_LITERALSS_8BYTE_LITERALSS_LITERAL_POINTERSS_NON_LAZY_SYMBOL_POINTERSS_LAZY_SYMBOL_POINTERSS_SYMBOL_STUBSS_MOD_INIT_FUNC_POINTERSS_MOD_TERM_FUNC_POINTERSS_COALESCEDS_GB_ZEROFILLS_INTERPOSINGS_16BYTE_LITERALSS_DTRACE_DOFS_LAZY_DYLIB_SYMBOL_POINTERSS_THREAD_LOCAL_REGULARS_THREAD_LOCAL_ZEROFILLS_THREAD_LOCAL_VARIABLESS_THREAD_LOCAL_VARIABLE_POINTERSS_THREAD_LOCAL_INIT_FUNCTION_POINTERS"
var _SectionType_index = [...]uint16{0, 9, 19, 37, 53, 69, 87, 113, 135, 149, 173, 197, 208, 221, 234, 251, 263, 291, 313, 336, 360, 392, 429}
func (i SectionType) String() string {
if i >= SectionType(len(_SectionType_index)-1) {
return fmt.Sprintf("SectionType(%d)", i)
}
return _SectionType_name[_SectionType_index[i]:_SectionType_index[i+1]]
}
const _LoadCommand_name = "LC_SEGMENTLC_SYMTABLC_SYMSEGLC_THREADLC_UNIXTHREADLC_LOADFVMLIBLC_IDFVMLIBLC_IDENTLC_FVMFILELC_PREPAGELC_DYSYMTABLC_LOAD_DYLIBLC_ID_DYLIBLC_LOAD_DYLINKERLC_ID_DYLINKERLC_PREBOUND_DYLIBLC_ROUTINESLC_SUB_FRAMEWORKLC_SUB_UMBRELLALC_SUB_CLIENTLC_SUB_LIBRARYLC_TWOLEVEL_HINTSLC_PREBIND_CKSUMLC_SEGMENT_64LC_ROUTINES_64LC_UUIDLC_CODE_SIGNATURELC_SEGMENT_SPLIT_INFOLC_LAZY_LOAD_DYLIBLC_ENCRYPTION_INFOLC_DYLD_INFOLC_VERSION_MIN_MACOSXLC_VERSION_MIN_IPHONEOSLC_FUNCTION_STARTSLC_DYLD_ENVIRONMENTLC_DATA_IN_CODELC_SOURCE_VERSIONLC_DYLIB_CODE_SIGN_DRSLC_ENCRYPTION_INFO_64LC_LINKER_OPTIONLC_LINKER_OPTIMIZATION_HINTLC_VERSION_MIN_TVOSLC_VERSION_MIN_WATCHOSLC_REQ_DYLDLC_LOAD_WEAK_DYLIBLC_RPATHLC_REEXPORT_DYLIBLC_DYLD_INFO_ONLYLC_LOAD_UPWARD_DYLIBLC_MAIN"
var _LoadCommand_map = map[LoadCommand]string{
1: _LoadCommand_name[0:10],
2: _LoadCommand_name[10:19],
3: _LoadCommand_name[19:28],
4: _LoadCommand_name[28:37],
5: _LoadCommand_name[37:50],
6: _LoadCommand_name[50:63],
7: _LoadCommand_name[63:74],
8: _LoadCommand_name[74:82],
9: _LoadCommand_name[82:92],
10: _LoadCommand_name[92:102],
11: _LoadCommand_name[102:113],
12: _LoadCommand_name[113:126],
13: _LoadCommand_name[126:137],
14: _LoadCommand_name[137:153],
15: _LoadCommand_name[153:167],
16: _LoadCommand_name[167:184],
17: _LoadCommand_name[184:195],
18: _LoadCommand_name[195:211],
19: _LoadCommand_name[211:226],
20: _LoadCommand_name[226:239],
21: _LoadCommand_name[239:253],
22: _LoadCommand_name[253:270],
23: _LoadCommand_name[270:286],
25: _LoadCommand_name[286:299],
26: _LoadCommand_name[299:313],
27: _LoadCommand_name[313:320],
29: _LoadCommand_name[320:337],
30: _LoadCommand_name[337:358],
32: _LoadCommand_name[358:376],
33: _LoadCommand_name[376:394],
34: _LoadCommand_name[394:406],
36: _LoadCommand_name[406:427],
37: _LoadCommand_name[427:450],
38: _LoadCommand_name[450:468],
39: _LoadCommand_name[468:487],
41: _LoadCommand_name[487:502],
42: _LoadCommand_name[502:519],
43: _LoadCommand_name[519:541],
44: _LoadCommand_name[541:562],
45: _LoadCommand_name[562:578],
46: _LoadCommand_name[578:605],
47: _LoadCommand_name[605:624],
48: _LoadCommand_name[624:646],
2147483648: _LoadCommand_name[646:657],
2147483672: _LoadCommand_name[657:675],
2147483676: _LoadCommand_name[675:683],
2147483679: _LoadCommand_name[683:700],
2147483682: _LoadCommand_name[700:717],
2147483683: _LoadCommand_name[717:737],
2147483688: _LoadCommand_name[737:744],
}
func (i LoadCommand) String() string {
if str, ok := _LoadCommand_map[i]; ok {
return str
}
return fmt.Sprintf("LoadCommand(%d)", i)
}
const (
_SymbolType_name_0 = "N_UNDF"
_SymbolType_name_1 = "N_ABS"
_SymbolType_name_2 = "N_INDR"
_SymbolType_name_3 = "N_PBUD"
_SymbolType_name_4 = "N_SECT"
)
var (
_SymbolType_index_0 = [...]uint8{0, 6}
_SymbolType_index_1 = [...]uint8{0, 5}
_SymbolType_index_2 = [...]uint8{0, 6}
_SymbolType_index_3 = [...]uint8{0, 6}
_SymbolType_index_4 = [...]uint8{0, 6}
)
func (i SymbolType) String() string {
switch {
case i == 0:
return _SymbolType_name_0
case i == 2:
return _SymbolType_name_1
case i == 10:
return _SymbolType_name_2
case i == 12:
return _SymbolType_name_3
case i == 14:
return _SymbolType_name_4
default:
return fmt.Sprintf("SymbolType(%d)", i)
}
}
const _StabType_name = "N_GSYMN_FNAMEN_FUNN_STSYMN_LCSYMN_BNSYMN_ASTN_OPTN_RSYMN_SLINEN_ENSYMN_SSYMN_SON_OSON_LSYMN_BINCLN_SOLN_PARAMSN_VERSIONN_OLEVELN_PSYMN_EINCLN_ENTRYN_LBRACN_EXCLN_RBRACN_BCOMMN_ECOMMN_ECOMLN_LENG"
var _StabType_map = map[StabType]string{
32: _StabType_name[0:6],
34: _StabType_name[6:13],
36: _StabType_name[13:18],
38: _StabType_name[18:25],
40: _StabType_name[25:32],
46: _StabType_name[32:39],
50: _StabType_name[39:44],
60: _StabType_name[44:49],
64: _StabType_name[49:55],
68: _StabType_name[55:62],
78: _StabType_name[62:69],
96: _StabType_name[69:75],
100: _StabType_name[75:79],
102: _StabType_name[79:84],
128: _StabType_name[84:90],
130: _StabType_name[90:97],
132: _StabType_name[97:102],
134: _StabType_name[102:110],
136: _StabType_name[110:119],
138: _StabType_name[119:127],
160: _StabType_name[127:133],
162: _StabType_name[133:140],
164: _StabType_name[140:147],
192: _StabType_name[147:154],
194: _StabType_name[154:160],
224: _StabType_name[160:167],
226: _StabType_name[167:174],
228: _StabType_name[174:181],
232: _StabType_name[181:188],
254: _StabType_name[188:194],
}
func (i StabType) String() string {
if str, ok := _StabType_map[i]; ok {
return str
}
return fmt.Sprintf("StabType(%d)", i)
}
const _ReferenceType_name = "REFERENCE_FLAG_UNDEFINED_NON_LAZYREFERENCE_FLAG_UNDEFINED_LAZYREFERENCE_FLAG_DEFINEDREFERENCE_FLAG_PRIVATE_DEFINEDREFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZYREFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY"
var _ReferenceType_index = [...]uint8{0, 33, 62, 84, 114, 155, 192}
func (i ReferenceType) String() string {
if i >= ReferenceType(len(_ReferenceType_index)-1) {
return fmt.Sprintf("ReferenceType(%d)", i)
}
return _ReferenceType_name[_ReferenceType_index[i]:_ReferenceType_index[i+1]]
}
| {
return fmt.Sprintf("CpuSubtypeARM64(%d)", i)
} | conditional_block |
types_string.go | // Code generated by "stringer -type=CpuType,CpuSubtypeX86,CpuSubtypeX86_64,CpuSubtypePPC,CpuSubtypeARM,CpuSubtypeARM64,Magic,FileType,SectionType,LoadCommand,SymbolType,StabType,ReferenceType -output types_string.go"; DO NOT EDIT.
package macho_widgets
import "fmt"
const (
_CpuType_name_0 = "CPU_TYPE_VAX"
_CpuType_name_1 = "CPU_TYPE_MC680x0CPU_TYPE_X86"
_CpuType_name_2 = "CPU_TYPE_MC98000CPU_TYPE_HPPACPU_TYPE_ARMCPU_TYPE_MC88000CPU_TYPE_SPARCCPU_TYPE_I860"
_CpuType_name_3 = "CPU_TYPE_POWERPC"
_CpuType_name_4 = "CPU_TYPE_X86_64"
_CpuType_name_5 = "CPU_TYPE_ARM64"
_CpuType_name_6 = "CPU_TYPE_POWERPC64"
)
var (
_CpuType_index_0 = [...]uint8{0, 12}
_CpuType_index_1 = [...]uint8{0, 16, 28}
_CpuType_index_2 = [...]uint8{0, 16, 29, 41, 57, 71, 84}
_CpuType_index_3 = [...]uint8{0, 16}
_CpuType_index_4 = [...]uint8{0, 15}
_CpuType_index_5 = [...]uint8{0, 14}
_CpuType_index_6 = [...]uint8{0, 18}
)
func (i CpuType) String() string {
switch {
case i == 1:
return _CpuType_name_0
case 6 <= i && i <= 7:
i -= 6
return _CpuType_name_1[_CpuType_index_1[i]:_CpuType_index_1[i+1]]
case 10 <= i && i <= 15:
i -= 10
return _CpuType_name_2[_CpuType_index_2[i]:_CpuType_index_2[i+1]]
case i == 18:
return _CpuType_name_3
case i == 16777223:
return _CpuType_name_4
case i == 16777228:
return _CpuType_name_5
case i == 16777234:
return _CpuType_name_6
default:
return fmt.Sprintf("CpuType(%d)", i)
}
}
const _CpuSubtypeX86_name = "CPU_SUBTYPE_X86_ALLCPU_SUBTYPE_X86_ARCH1"
var _CpuSubtypeX86_index = [...]uint8{0, 19, 40}
func (i CpuSubtypeX86) String() string {
i -= 3
if i >= CpuSubtypeX86(len(_CpuSubtypeX86_index)-1) {
return fmt.Sprintf("CpuSubtypeX86(%d)", i+3)
}
return _CpuSubtypeX86_name[_CpuSubtypeX86_index[i]:_CpuSubtypeX86_index[i+1]]
}
const (
_CpuSubtypeX86_64_name_0 = "CPU_SUBTYPE_X86_64_ALL"
_CpuSubtypeX86_64_name_1 = "CPU_SUBTYPE_X86_64_H"
)
var (
_CpuSubtypeX86_64_index_0 = [...]uint8{0, 22}
_CpuSubtypeX86_64_index_1 = [...]uint8{0, 20}
)
func (i CpuSubtypeX86_64) String() string {
switch {
case i == 3:
return _CpuSubtypeX86_64_name_0
case i == 8:
return _CpuSubtypeX86_64_name_1
default:
return fmt.Sprintf("CpuSubtypeX86_64(%d)", i)
}
}
const (
_CpuSubtypePPC_name_0 = "CPU_SUBTYPE_POWERPC_ALLCPU_SUBTYPE_POWERPC_601CPU_SUBTYPE_POWERPC_602CPU_SUBTYPE_POWERPC_603CPU_SUBTYPE_POWERPC_603eCPU_SUBTYPE_POWERPC_603evCPU_SUBTYPE_POWERPC_604CPU_SUBTYPE_POWERPC_604eCPU_SUBTYPE_POWERPC_620CPU_SUBTYPE_POWERPC_750CPU_SUBTYPE_POWERPC_7400CPU_SUBTYPE_POWERPC_7450"
_CpuSubtypePPC_name_1 = "CPU_SUBTYPE_POWERPC_970"
)
var (
_CpuSubtypePPC_index_0 = [...]uint16{0, 23, 46, 69, 92, 116, 141, 164, 188, 211, 234, 258, 282}
_CpuSubtypePPC_index_1 = [...]uint8{0, 23}
)
func (i CpuSubtypePPC) String() string {
switch {
case 0 <= i && i <= 11:
return _CpuSubtypePPC_name_0[_CpuSubtypePPC_index_0[i]:_CpuSubtypePPC_index_0[i+1]]
case i == 100:
return _CpuSubtypePPC_name_1
default:
return fmt.Sprintf("CpuSubtypePPC(%d)", i)
}
}
const (
_CpuSubtypeARM_name_0 = "CPU_SUBTYPE_ARM_ALL"
_CpuSubtypeARM_name_1 = "CPU_SUBTYPE_ARM_V4TCPU_SUBTYPE_ARM_V6CPU_SUBTYPE_ARM_V5TEJCPU_SUBTYPE_ARM_XSCALECPU_SUBTYPE_ARM_V7CPU_SUBTYPE_ARM_V7FCPU_SUBTYPE_ARM_V7SCPU_SUBTYPE_ARM_V7KCPU_SUBTYPE_ARM_V8CPU_SUBTYPE_ARM_V6MCPU_SUBTYPE_ARM_V7MCPU_SUBTYPE_ARM_V7EM"
)
var (
_CpuSubtypeARM_index_0 = [...]uint8{0, 19}
_CpuSubtypeARM_index_1 = [...]uint8{0, 19, 37, 58, 80, 98, 117, 136, 155, 173, 192, 211, 231}
)
func (i CpuSubtypeARM) String() string {
switch {
case i == 0:
return _CpuSubtypeARM_name_0
case 5 <= i && i <= 16:
i -= 5
return _CpuSubtypeARM_name_1[_CpuSubtypeARM_index_1[i]:_CpuSubtypeARM_index_1[i+1]]
default:
return fmt.Sprintf("CpuSubtypeARM(%d)", i)
}
}
const _CpuSubtypeARM64_name = "CPU_SUBTYPE_ARM64_ALLCPU_SUBTYPE_ARM64_V8"
var _CpuSubtypeARM64_index = [...]uint8{0, 21, 41}
func (i CpuSubtypeARM64) String() string {
if i >= CpuSubtypeARM64(len(_CpuSubtypeARM64_index)-1) {
return fmt.Sprintf("CpuSubtypeARM64(%d)", i)
}
return _CpuSubtypeARM64_name[_CpuSubtypeARM64_index[i]:_CpuSubtypeARM64_index[i+1]]
}
const (
_Magic_name_0 = "FAT_CIGAM"
_Magic_name_1 = "FAT_CIGAM_64"
_Magic_name_2 = "FAT_MAGICFAT_MAGIC_64"
_Magic_name_3 = "MH_CIGAM"
_Magic_name_4 = "MH_CIGAM_64"
_Magic_name_5 = "MH_MAGICMH_MAGIC_64"
)
var (
_Magic_index_0 = [...]uint8{0, 9}
_Magic_index_1 = [...]uint8{0, 12}
_Magic_index_2 = [...]uint8{0, 9, 21}
_Magic_index_3 = [...]uint8{0, 8}
_Magic_index_4 = [...]uint8{0, 11}
_Magic_index_5 = [...]uint8{0, 8, 19}
)
func (i Magic) String() string {
switch {
case i == 3199925962:
return _Magic_name_0
case i == 3216703178:
return _Magic_name_1
case 3405691582 <= i && i <= 3405691583:
i -= 3405691582
return _Magic_name_2[_Magic_index_2[i]:_Magic_index_2[i+1]]
case i == 3472551422:
return _Magic_name_3
case i == 3489328638:
return _Magic_name_4
case 4277009102 <= i && i <= 4277009103:
i -= 4277009102
return _Magic_name_5[_Magic_index_5[i]:_Magic_index_5[i+1]]
default:
return fmt.Sprintf("Magic(%d)", i)
}
}
const _FileType_name = "MH_OBJECTMH_EXECUTEMH_FVMLIBMH_COREMH_PRELOADMH_DYLIBMH_DYLINKERMH_BUNDLEMH_DYLIB_STUBMH_DSYMMH_KEXT_BUNDLE"
var _FileType_index = [...]uint8{0, 9, 19, 28, 35, 45, 53, 64, 73, 86, 93, 107}
func (i FileType) String() string {
i -= 1
if i >= FileType(len(_FileType_index)-1) {
return fmt.Sprintf("FileType(%d)", i+1)
}
return _FileType_name[_FileType_index[i]:_FileType_index[i+1]]
}
const _SectionType_name = "S_REGULARS_ZEROFILLS_CSTRING_LITERALSS_4BYTE_LITERALSS_8BYTE_LITERALSS_LITERAL_POINTERSS_NON_LAZY_SYMBOL_POINTERSS_LAZY_SYMBOL_POINTERSS_SYMBOL_STUBSS_MOD_INIT_FUNC_POINTERSS_MOD_TERM_FUNC_POINTERSS_COALESCEDS_GB_ZEROFILLS_INTERPOSINGS_16BYTE_LITERALSS_DTRACE_DOFS_LAZY_DYLIB_SYMBOL_POINTERSS_THREAD_LOCAL_REGULARS_THREAD_LOCAL_ZEROFILLS_THREAD_LOCAL_VARIABLESS_THREAD_LOCAL_VARIABLE_POINTERSS_THREAD_LOCAL_INIT_FUNCTION_POINTERS"
var _SectionType_index = [...]uint16{0, 9, 19, 37, 53, 69, 87, 113, 135, 149, 173, 197, 208, 221, 234, 251, 263, 291, 313, 336, 360, 392, 429}
func (i SectionType) | () string {
if i >= SectionType(len(_SectionType_index)-1) {
return fmt.Sprintf("SectionType(%d)", i)
}
return _SectionType_name[_SectionType_index[i]:_SectionType_index[i+1]]
}
const _LoadCommand_name = "LC_SEGMENTLC_SYMTABLC_SYMSEGLC_THREADLC_UNIXTHREADLC_LOADFVMLIBLC_IDFVMLIBLC_IDENTLC_FVMFILELC_PREPAGELC_DYSYMTABLC_LOAD_DYLIBLC_ID_DYLIBLC_LOAD_DYLINKERLC_ID_DYLINKERLC_PREBOUND_DYLIBLC_ROUTINESLC_SUB_FRAMEWORKLC_SUB_UMBRELLALC_SUB_CLIENTLC_SUB_LIBRARYLC_TWOLEVEL_HINTSLC_PREBIND_CKSUMLC_SEGMENT_64LC_ROUTINES_64LC_UUIDLC_CODE_SIGNATURELC_SEGMENT_SPLIT_INFOLC_LAZY_LOAD_DYLIBLC_ENCRYPTION_INFOLC_DYLD_INFOLC_VERSION_MIN_MACOSXLC_VERSION_MIN_IPHONEOSLC_FUNCTION_STARTSLC_DYLD_ENVIRONMENTLC_DATA_IN_CODELC_SOURCE_VERSIONLC_DYLIB_CODE_SIGN_DRSLC_ENCRYPTION_INFO_64LC_LINKER_OPTIONLC_LINKER_OPTIMIZATION_HINTLC_VERSION_MIN_TVOSLC_VERSION_MIN_WATCHOSLC_REQ_DYLDLC_LOAD_WEAK_DYLIBLC_RPATHLC_REEXPORT_DYLIBLC_DYLD_INFO_ONLYLC_LOAD_UPWARD_DYLIBLC_MAIN"
var _LoadCommand_map = map[LoadCommand]string{
1: _LoadCommand_name[0:10],
2: _LoadCommand_name[10:19],
3: _LoadCommand_name[19:28],
4: _LoadCommand_name[28:37],
5: _LoadCommand_name[37:50],
6: _LoadCommand_name[50:63],
7: _LoadCommand_name[63:74],
8: _LoadCommand_name[74:82],
9: _LoadCommand_name[82:92],
10: _LoadCommand_name[92:102],
11: _LoadCommand_name[102:113],
12: _LoadCommand_name[113:126],
13: _LoadCommand_name[126:137],
14: _LoadCommand_name[137:153],
15: _LoadCommand_name[153:167],
16: _LoadCommand_name[167:184],
17: _LoadCommand_name[184:195],
18: _LoadCommand_name[195:211],
19: _LoadCommand_name[211:226],
20: _LoadCommand_name[226:239],
21: _LoadCommand_name[239:253],
22: _LoadCommand_name[253:270],
23: _LoadCommand_name[270:286],
25: _LoadCommand_name[286:299],
26: _LoadCommand_name[299:313],
27: _LoadCommand_name[313:320],
29: _LoadCommand_name[320:337],
30: _LoadCommand_name[337:358],
32: _LoadCommand_name[358:376],
33: _LoadCommand_name[376:394],
34: _LoadCommand_name[394:406],
36: _LoadCommand_name[406:427],
37: _LoadCommand_name[427:450],
38: _LoadCommand_name[450:468],
39: _LoadCommand_name[468:487],
41: _LoadCommand_name[487:502],
42: _LoadCommand_name[502:519],
43: _LoadCommand_name[519:541],
44: _LoadCommand_name[541:562],
45: _LoadCommand_name[562:578],
46: _LoadCommand_name[578:605],
47: _LoadCommand_name[605:624],
48: _LoadCommand_name[624:646],
2147483648: _LoadCommand_name[646:657],
2147483672: _LoadCommand_name[657:675],
2147483676: _LoadCommand_name[675:683],
2147483679: _LoadCommand_name[683:700],
2147483682: _LoadCommand_name[700:717],
2147483683: _LoadCommand_name[717:737],
2147483688: _LoadCommand_name[737:744],
}
func (i LoadCommand) String() string {
if str, ok := _LoadCommand_map[i]; ok {
return str
}
return fmt.Sprintf("LoadCommand(%d)", i)
}
const (
_SymbolType_name_0 = "N_UNDF"
_SymbolType_name_1 = "N_ABS"
_SymbolType_name_2 = "N_INDR"
_SymbolType_name_3 = "N_PBUD"
_SymbolType_name_4 = "N_SECT"
)
var (
_SymbolType_index_0 = [...]uint8{0, 6}
_SymbolType_index_1 = [...]uint8{0, 5}
_SymbolType_index_2 = [...]uint8{0, 6}
_SymbolType_index_3 = [...]uint8{0, 6}
_SymbolType_index_4 = [...]uint8{0, 6}
)
func (i SymbolType) String() string {
switch {
case i == 0:
return _SymbolType_name_0
case i == 2:
return _SymbolType_name_1
case i == 10:
return _SymbolType_name_2
case i == 12:
return _SymbolType_name_3
case i == 14:
return _SymbolType_name_4
default:
return fmt.Sprintf("SymbolType(%d)", i)
}
}
const _StabType_name = "N_GSYMN_FNAMEN_FUNN_STSYMN_LCSYMN_BNSYMN_ASTN_OPTN_RSYMN_SLINEN_ENSYMN_SSYMN_SON_OSON_LSYMN_BINCLN_SOLN_PARAMSN_VERSIONN_OLEVELN_PSYMN_EINCLN_ENTRYN_LBRACN_EXCLN_RBRACN_BCOMMN_ECOMMN_ECOMLN_LENG"
var _StabType_map = map[StabType]string{
32: _StabType_name[0:6],
34: _StabType_name[6:13],
36: _StabType_name[13:18],
38: _StabType_name[18:25],
40: _StabType_name[25:32],
46: _StabType_name[32:39],
50: _StabType_name[39:44],
60: _StabType_name[44:49],
64: _StabType_name[49:55],
68: _StabType_name[55:62],
78: _StabType_name[62:69],
96: _StabType_name[69:75],
100: _StabType_name[75:79],
102: _StabType_name[79:84],
128: _StabType_name[84:90],
130: _StabType_name[90:97],
132: _StabType_name[97:102],
134: _StabType_name[102:110],
136: _StabType_name[110:119],
138: _StabType_name[119:127],
160: _StabType_name[127:133],
162: _StabType_name[133:140],
164: _StabType_name[140:147],
192: _StabType_name[147:154],
194: _StabType_name[154:160],
224: _StabType_name[160:167],
226: _StabType_name[167:174],
228: _StabType_name[174:181],
232: _StabType_name[181:188],
254: _StabType_name[188:194],
}
func (i StabType) String() string {
if str, ok := _StabType_map[i]; ok {
return str
}
return fmt.Sprintf("StabType(%d)", i)
}
const _ReferenceType_name = "REFERENCE_FLAG_UNDEFINED_NON_LAZYREFERENCE_FLAG_UNDEFINED_LAZYREFERENCE_FLAG_DEFINEDREFERENCE_FLAG_PRIVATE_DEFINEDREFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZYREFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY"
var _ReferenceType_index = [...]uint8{0, 33, 62, 84, 114, 155, 192}
func (i ReferenceType) String() string {
if i >= ReferenceType(len(_ReferenceType_index)-1) {
return fmt.Sprintf("ReferenceType(%d)", i)
}
return _ReferenceType_name[_ReferenceType_index[i]:_ReferenceType_index[i+1]]
}
| String | identifier_name |
types_string.go | // Code generated by "stringer -type=CpuType,CpuSubtypeX86,CpuSubtypeX86_64,CpuSubtypePPC,CpuSubtypeARM,CpuSubtypeARM64,Magic,FileType,SectionType,LoadCommand,SymbolType,StabType,ReferenceType -output types_string.go"; DO NOT EDIT.
package macho_widgets
import "fmt"
const (
_CpuType_name_0 = "CPU_TYPE_VAX"
_CpuType_name_1 = "CPU_TYPE_MC680x0CPU_TYPE_X86"
_CpuType_name_2 = "CPU_TYPE_MC98000CPU_TYPE_HPPACPU_TYPE_ARMCPU_TYPE_MC88000CPU_TYPE_SPARCCPU_TYPE_I860"
_CpuType_name_3 = "CPU_TYPE_POWERPC"
_CpuType_name_4 = "CPU_TYPE_X86_64"
_CpuType_name_5 = "CPU_TYPE_ARM64"
_CpuType_name_6 = "CPU_TYPE_POWERPC64"
)
var (
_CpuType_index_0 = [...]uint8{0, 12}
_CpuType_index_1 = [...]uint8{0, 16, 28}
_CpuType_index_2 = [...]uint8{0, 16, 29, 41, 57, 71, 84}
_CpuType_index_3 = [...]uint8{0, 16}
_CpuType_index_4 = [...]uint8{0, 15}
_CpuType_index_5 = [...]uint8{0, 14}
_CpuType_index_6 = [...]uint8{0, 18}
)
func (i CpuType) String() string {
switch {
case i == 1:
return _CpuType_name_0
case 6 <= i && i <= 7:
i -= 6
return _CpuType_name_1[_CpuType_index_1[i]:_CpuType_index_1[i+1]]
case 10 <= i && i <= 15:
i -= 10
return _CpuType_name_2[_CpuType_index_2[i]:_CpuType_index_2[i+1]]
case i == 18:
return _CpuType_name_3 | return _CpuType_name_6
default:
return fmt.Sprintf("CpuType(%d)", i)
}
}
const _CpuSubtypeX86_name = "CPU_SUBTYPE_X86_ALLCPU_SUBTYPE_X86_ARCH1"
var _CpuSubtypeX86_index = [...]uint8{0, 19, 40}
func (i CpuSubtypeX86) String() string {
i -= 3
if i >= CpuSubtypeX86(len(_CpuSubtypeX86_index)-1) {
return fmt.Sprintf("CpuSubtypeX86(%d)", i+3)
}
return _CpuSubtypeX86_name[_CpuSubtypeX86_index[i]:_CpuSubtypeX86_index[i+1]]
}
const (
_CpuSubtypeX86_64_name_0 = "CPU_SUBTYPE_X86_64_ALL"
_CpuSubtypeX86_64_name_1 = "CPU_SUBTYPE_X86_64_H"
)
var (
_CpuSubtypeX86_64_index_0 = [...]uint8{0, 22}
_CpuSubtypeX86_64_index_1 = [...]uint8{0, 20}
)
func (i CpuSubtypeX86_64) String() string {
switch {
case i == 3:
return _CpuSubtypeX86_64_name_0
case i == 8:
return _CpuSubtypeX86_64_name_1
default:
return fmt.Sprintf("CpuSubtypeX86_64(%d)", i)
}
}
const (
_CpuSubtypePPC_name_0 = "CPU_SUBTYPE_POWERPC_ALLCPU_SUBTYPE_POWERPC_601CPU_SUBTYPE_POWERPC_602CPU_SUBTYPE_POWERPC_603CPU_SUBTYPE_POWERPC_603eCPU_SUBTYPE_POWERPC_603evCPU_SUBTYPE_POWERPC_604CPU_SUBTYPE_POWERPC_604eCPU_SUBTYPE_POWERPC_620CPU_SUBTYPE_POWERPC_750CPU_SUBTYPE_POWERPC_7400CPU_SUBTYPE_POWERPC_7450"
_CpuSubtypePPC_name_1 = "CPU_SUBTYPE_POWERPC_970"
)
var (
_CpuSubtypePPC_index_0 = [...]uint16{0, 23, 46, 69, 92, 116, 141, 164, 188, 211, 234, 258, 282}
_CpuSubtypePPC_index_1 = [...]uint8{0, 23}
)
func (i CpuSubtypePPC) String() string {
switch {
case 0 <= i && i <= 11:
return _CpuSubtypePPC_name_0[_CpuSubtypePPC_index_0[i]:_CpuSubtypePPC_index_0[i+1]]
case i == 100:
return _CpuSubtypePPC_name_1
default:
return fmt.Sprintf("CpuSubtypePPC(%d)", i)
}
}
const (
_CpuSubtypeARM_name_0 = "CPU_SUBTYPE_ARM_ALL"
_CpuSubtypeARM_name_1 = "CPU_SUBTYPE_ARM_V4TCPU_SUBTYPE_ARM_V6CPU_SUBTYPE_ARM_V5TEJCPU_SUBTYPE_ARM_XSCALECPU_SUBTYPE_ARM_V7CPU_SUBTYPE_ARM_V7FCPU_SUBTYPE_ARM_V7SCPU_SUBTYPE_ARM_V7KCPU_SUBTYPE_ARM_V8CPU_SUBTYPE_ARM_V6MCPU_SUBTYPE_ARM_V7MCPU_SUBTYPE_ARM_V7EM"
)
var (
_CpuSubtypeARM_index_0 = [...]uint8{0, 19}
_CpuSubtypeARM_index_1 = [...]uint8{0, 19, 37, 58, 80, 98, 117, 136, 155, 173, 192, 211, 231}
)
func (i CpuSubtypeARM) String() string {
switch {
case i == 0:
return _CpuSubtypeARM_name_0
case 5 <= i && i <= 16:
i -= 5
return _CpuSubtypeARM_name_1[_CpuSubtypeARM_index_1[i]:_CpuSubtypeARM_index_1[i+1]]
default:
return fmt.Sprintf("CpuSubtypeARM(%d)", i)
}
}
const _CpuSubtypeARM64_name = "CPU_SUBTYPE_ARM64_ALLCPU_SUBTYPE_ARM64_V8"
var _CpuSubtypeARM64_index = [...]uint8{0, 21, 41}
func (i CpuSubtypeARM64) String() string {
if i >= CpuSubtypeARM64(len(_CpuSubtypeARM64_index)-1) {
return fmt.Sprintf("CpuSubtypeARM64(%d)", i)
}
return _CpuSubtypeARM64_name[_CpuSubtypeARM64_index[i]:_CpuSubtypeARM64_index[i+1]]
}
const (
_Magic_name_0 = "FAT_CIGAM"
_Magic_name_1 = "FAT_CIGAM_64"
_Magic_name_2 = "FAT_MAGICFAT_MAGIC_64"
_Magic_name_3 = "MH_CIGAM"
_Magic_name_4 = "MH_CIGAM_64"
_Magic_name_5 = "MH_MAGICMH_MAGIC_64"
)
var (
_Magic_index_0 = [...]uint8{0, 9}
_Magic_index_1 = [...]uint8{0, 12}
_Magic_index_2 = [...]uint8{0, 9, 21}
_Magic_index_3 = [...]uint8{0, 8}
_Magic_index_4 = [...]uint8{0, 11}
_Magic_index_5 = [...]uint8{0, 8, 19}
)
func (i Magic) String() string {
switch {
case i == 3199925962:
return _Magic_name_0
case i == 3216703178:
return _Magic_name_1
case 3405691582 <= i && i <= 3405691583:
i -= 3405691582
return _Magic_name_2[_Magic_index_2[i]:_Magic_index_2[i+1]]
case i == 3472551422:
return _Magic_name_3
case i == 3489328638:
return _Magic_name_4
case 4277009102 <= i && i <= 4277009103:
i -= 4277009102
return _Magic_name_5[_Magic_index_5[i]:_Magic_index_5[i+1]]
default:
return fmt.Sprintf("Magic(%d)", i)
}
}
const _FileType_name = "MH_OBJECTMH_EXECUTEMH_FVMLIBMH_COREMH_PRELOADMH_DYLIBMH_DYLINKERMH_BUNDLEMH_DYLIB_STUBMH_DSYMMH_KEXT_BUNDLE"
var _FileType_index = [...]uint8{0, 9, 19, 28, 35, 45, 53, 64, 73, 86, 93, 107}
func (i FileType) String() string {
i -= 1
if i >= FileType(len(_FileType_index)-1) {
return fmt.Sprintf("FileType(%d)", i+1)
}
return _FileType_name[_FileType_index[i]:_FileType_index[i+1]]
}
const _SectionType_name = "S_REGULARS_ZEROFILLS_CSTRING_LITERALSS_4BYTE_LITERALSS_8BYTE_LITERALSS_LITERAL_POINTERSS_NON_LAZY_SYMBOL_POINTERSS_LAZY_SYMBOL_POINTERSS_SYMBOL_STUBSS_MOD_INIT_FUNC_POINTERSS_MOD_TERM_FUNC_POINTERSS_COALESCEDS_GB_ZEROFILLS_INTERPOSINGS_16BYTE_LITERALSS_DTRACE_DOFS_LAZY_DYLIB_SYMBOL_POINTERSS_THREAD_LOCAL_REGULARS_THREAD_LOCAL_ZEROFILLS_THREAD_LOCAL_VARIABLESS_THREAD_LOCAL_VARIABLE_POINTERSS_THREAD_LOCAL_INIT_FUNCTION_POINTERS"
var _SectionType_index = [...]uint16{0, 9, 19, 37, 53, 69, 87, 113, 135, 149, 173, 197, 208, 221, 234, 251, 263, 291, 313, 336, 360, 392, 429}
func (i SectionType) String() string {
if i >= SectionType(len(_SectionType_index)-1) {
return fmt.Sprintf("SectionType(%d)", i)
}
return _SectionType_name[_SectionType_index[i]:_SectionType_index[i+1]]
}
const _LoadCommand_name = "LC_SEGMENTLC_SYMTABLC_SYMSEGLC_THREADLC_UNIXTHREADLC_LOADFVMLIBLC_IDFVMLIBLC_IDENTLC_FVMFILELC_PREPAGELC_DYSYMTABLC_LOAD_DYLIBLC_ID_DYLIBLC_LOAD_DYLINKERLC_ID_DYLINKERLC_PREBOUND_DYLIBLC_ROUTINESLC_SUB_FRAMEWORKLC_SUB_UMBRELLALC_SUB_CLIENTLC_SUB_LIBRARYLC_TWOLEVEL_HINTSLC_PREBIND_CKSUMLC_SEGMENT_64LC_ROUTINES_64LC_UUIDLC_CODE_SIGNATURELC_SEGMENT_SPLIT_INFOLC_LAZY_LOAD_DYLIBLC_ENCRYPTION_INFOLC_DYLD_INFOLC_VERSION_MIN_MACOSXLC_VERSION_MIN_IPHONEOSLC_FUNCTION_STARTSLC_DYLD_ENVIRONMENTLC_DATA_IN_CODELC_SOURCE_VERSIONLC_DYLIB_CODE_SIGN_DRSLC_ENCRYPTION_INFO_64LC_LINKER_OPTIONLC_LINKER_OPTIMIZATION_HINTLC_VERSION_MIN_TVOSLC_VERSION_MIN_WATCHOSLC_REQ_DYLDLC_LOAD_WEAK_DYLIBLC_RPATHLC_REEXPORT_DYLIBLC_DYLD_INFO_ONLYLC_LOAD_UPWARD_DYLIBLC_MAIN"
var _LoadCommand_map = map[LoadCommand]string{
1: _LoadCommand_name[0:10],
2: _LoadCommand_name[10:19],
3: _LoadCommand_name[19:28],
4: _LoadCommand_name[28:37],
5: _LoadCommand_name[37:50],
6: _LoadCommand_name[50:63],
7: _LoadCommand_name[63:74],
8: _LoadCommand_name[74:82],
9: _LoadCommand_name[82:92],
10: _LoadCommand_name[92:102],
11: _LoadCommand_name[102:113],
12: _LoadCommand_name[113:126],
13: _LoadCommand_name[126:137],
14: _LoadCommand_name[137:153],
15: _LoadCommand_name[153:167],
16: _LoadCommand_name[167:184],
17: _LoadCommand_name[184:195],
18: _LoadCommand_name[195:211],
19: _LoadCommand_name[211:226],
20: _LoadCommand_name[226:239],
21: _LoadCommand_name[239:253],
22: _LoadCommand_name[253:270],
23: _LoadCommand_name[270:286],
25: _LoadCommand_name[286:299],
26: _LoadCommand_name[299:313],
27: _LoadCommand_name[313:320],
29: _LoadCommand_name[320:337],
30: _LoadCommand_name[337:358],
32: _LoadCommand_name[358:376],
33: _LoadCommand_name[376:394],
34: _LoadCommand_name[394:406],
36: _LoadCommand_name[406:427],
37: _LoadCommand_name[427:450],
38: _LoadCommand_name[450:468],
39: _LoadCommand_name[468:487],
41: _LoadCommand_name[487:502],
42: _LoadCommand_name[502:519],
43: _LoadCommand_name[519:541],
44: _LoadCommand_name[541:562],
45: _LoadCommand_name[562:578],
46: _LoadCommand_name[578:605],
47: _LoadCommand_name[605:624],
48: _LoadCommand_name[624:646],
2147483648: _LoadCommand_name[646:657],
2147483672: _LoadCommand_name[657:675],
2147483676: _LoadCommand_name[675:683],
2147483679: _LoadCommand_name[683:700],
2147483682: _LoadCommand_name[700:717],
2147483683: _LoadCommand_name[717:737],
2147483688: _LoadCommand_name[737:744],
}
func (i LoadCommand) String() string {
if str, ok := _LoadCommand_map[i]; ok {
return str
}
return fmt.Sprintf("LoadCommand(%d)", i)
}
const (
_SymbolType_name_0 = "N_UNDF"
_SymbolType_name_1 = "N_ABS"
_SymbolType_name_2 = "N_INDR"
_SymbolType_name_3 = "N_PBUD"
_SymbolType_name_4 = "N_SECT"
)
var (
_SymbolType_index_0 = [...]uint8{0, 6}
_SymbolType_index_1 = [...]uint8{0, 5}
_SymbolType_index_2 = [...]uint8{0, 6}
_SymbolType_index_3 = [...]uint8{0, 6}
_SymbolType_index_4 = [...]uint8{0, 6}
)
func (i SymbolType) String() string {
switch {
case i == 0:
return _SymbolType_name_0
case i == 2:
return _SymbolType_name_1
case i == 10:
return _SymbolType_name_2
case i == 12:
return _SymbolType_name_3
case i == 14:
return _SymbolType_name_4
default:
return fmt.Sprintf("SymbolType(%d)", i)
}
}
const _StabType_name = "N_GSYMN_FNAMEN_FUNN_STSYMN_LCSYMN_BNSYMN_ASTN_OPTN_RSYMN_SLINEN_ENSYMN_SSYMN_SON_OSON_LSYMN_BINCLN_SOLN_PARAMSN_VERSIONN_OLEVELN_PSYMN_EINCLN_ENTRYN_LBRACN_EXCLN_RBRACN_BCOMMN_ECOMMN_ECOMLN_LENG"
var _StabType_map = map[StabType]string{
32: _StabType_name[0:6],
34: _StabType_name[6:13],
36: _StabType_name[13:18],
38: _StabType_name[18:25],
40: _StabType_name[25:32],
46: _StabType_name[32:39],
50: _StabType_name[39:44],
60: _StabType_name[44:49],
64: _StabType_name[49:55],
68: _StabType_name[55:62],
78: _StabType_name[62:69],
96: _StabType_name[69:75],
100: _StabType_name[75:79],
102: _StabType_name[79:84],
128: _StabType_name[84:90],
130: _StabType_name[90:97],
132: _StabType_name[97:102],
134: _StabType_name[102:110],
136: _StabType_name[110:119],
138: _StabType_name[119:127],
160: _StabType_name[127:133],
162: _StabType_name[133:140],
164: _StabType_name[140:147],
192: _StabType_name[147:154],
194: _StabType_name[154:160],
224: _StabType_name[160:167],
226: _StabType_name[167:174],
228: _StabType_name[174:181],
232: _StabType_name[181:188],
254: _StabType_name[188:194],
}
func (i StabType) String() string {
if str, ok := _StabType_map[i]; ok {
return str
}
return fmt.Sprintf("StabType(%d)", i)
}
const _ReferenceType_name = "REFERENCE_FLAG_UNDEFINED_NON_LAZYREFERENCE_FLAG_UNDEFINED_LAZYREFERENCE_FLAG_DEFINEDREFERENCE_FLAG_PRIVATE_DEFINEDREFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZYREFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY"
var _ReferenceType_index = [...]uint8{0, 33, 62, 84, 114, 155, 192}
func (i ReferenceType) String() string {
if i >= ReferenceType(len(_ReferenceType_index)-1) {
return fmt.Sprintf("ReferenceType(%d)", i)
}
return _ReferenceType_name[_ReferenceType_index[i]:_ReferenceType_index[i+1]]
} | case i == 16777223:
return _CpuType_name_4
case i == 16777228:
return _CpuType_name_5
case i == 16777234: | random_line_split |
types_string.go | // Code generated by "stringer -type=CpuType,CpuSubtypeX86,CpuSubtypeX86_64,CpuSubtypePPC,CpuSubtypeARM,CpuSubtypeARM64,Magic,FileType,SectionType,LoadCommand,SymbolType,StabType,ReferenceType -output types_string.go"; DO NOT EDIT.
package macho_widgets
import "fmt"
const (
_CpuType_name_0 = "CPU_TYPE_VAX"
_CpuType_name_1 = "CPU_TYPE_MC680x0CPU_TYPE_X86"
_CpuType_name_2 = "CPU_TYPE_MC98000CPU_TYPE_HPPACPU_TYPE_ARMCPU_TYPE_MC88000CPU_TYPE_SPARCCPU_TYPE_I860"
_CpuType_name_3 = "CPU_TYPE_POWERPC"
_CpuType_name_4 = "CPU_TYPE_X86_64"
_CpuType_name_5 = "CPU_TYPE_ARM64"
_CpuType_name_6 = "CPU_TYPE_POWERPC64"
)
var (
_CpuType_index_0 = [...]uint8{0, 12}
_CpuType_index_1 = [...]uint8{0, 16, 28}
_CpuType_index_2 = [...]uint8{0, 16, 29, 41, 57, 71, 84}
_CpuType_index_3 = [...]uint8{0, 16}
_CpuType_index_4 = [...]uint8{0, 15}
_CpuType_index_5 = [...]uint8{0, 14}
_CpuType_index_6 = [...]uint8{0, 18}
)
func (i CpuType) String() string {
switch {
case i == 1:
return _CpuType_name_0
case 6 <= i && i <= 7:
i -= 6
return _CpuType_name_1[_CpuType_index_1[i]:_CpuType_index_1[i+1]]
case 10 <= i && i <= 15:
i -= 10
return _CpuType_name_2[_CpuType_index_2[i]:_CpuType_index_2[i+1]]
case i == 18:
return _CpuType_name_3
case i == 16777223:
return _CpuType_name_4
case i == 16777228:
return _CpuType_name_5
case i == 16777234:
return _CpuType_name_6
default:
return fmt.Sprintf("CpuType(%d)", i)
}
}
const _CpuSubtypeX86_name = "CPU_SUBTYPE_X86_ALLCPU_SUBTYPE_X86_ARCH1"
var _CpuSubtypeX86_index = [...]uint8{0, 19, 40}
func (i CpuSubtypeX86) String() string {
i -= 3
if i >= CpuSubtypeX86(len(_CpuSubtypeX86_index)-1) {
return fmt.Sprintf("CpuSubtypeX86(%d)", i+3)
}
return _CpuSubtypeX86_name[_CpuSubtypeX86_index[i]:_CpuSubtypeX86_index[i+1]]
}
const (
_CpuSubtypeX86_64_name_0 = "CPU_SUBTYPE_X86_64_ALL"
_CpuSubtypeX86_64_name_1 = "CPU_SUBTYPE_X86_64_H"
)
var (
_CpuSubtypeX86_64_index_0 = [...]uint8{0, 22}
_CpuSubtypeX86_64_index_1 = [...]uint8{0, 20}
)
func (i CpuSubtypeX86_64) String() string {
switch {
case i == 3:
return _CpuSubtypeX86_64_name_0
case i == 8:
return _CpuSubtypeX86_64_name_1
default:
return fmt.Sprintf("CpuSubtypeX86_64(%d)", i)
}
}
const (
_CpuSubtypePPC_name_0 = "CPU_SUBTYPE_POWERPC_ALLCPU_SUBTYPE_POWERPC_601CPU_SUBTYPE_POWERPC_602CPU_SUBTYPE_POWERPC_603CPU_SUBTYPE_POWERPC_603eCPU_SUBTYPE_POWERPC_603evCPU_SUBTYPE_POWERPC_604CPU_SUBTYPE_POWERPC_604eCPU_SUBTYPE_POWERPC_620CPU_SUBTYPE_POWERPC_750CPU_SUBTYPE_POWERPC_7400CPU_SUBTYPE_POWERPC_7450"
_CpuSubtypePPC_name_1 = "CPU_SUBTYPE_POWERPC_970"
)
var (
_CpuSubtypePPC_index_0 = [...]uint16{0, 23, 46, 69, 92, 116, 141, 164, 188, 211, 234, 258, 282}
_CpuSubtypePPC_index_1 = [...]uint8{0, 23}
)
func (i CpuSubtypePPC) String() string {
switch {
case 0 <= i && i <= 11:
return _CpuSubtypePPC_name_0[_CpuSubtypePPC_index_0[i]:_CpuSubtypePPC_index_0[i+1]]
case i == 100:
return _CpuSubtypePPC_name_1
default:
return fmt.Sprintf("CpuSubtypePPC(%d)", i)
}
}
const (
_CpuSubtypeARM_name_0 = "CPU_SUBTYPE_ARM_ALL"
_CpuSubtypeARM_name_1 = "CPU_SUBTYPE_ARM_V4TCPU_SUBTYPE_ARM_V6CPU_SUBTYPE_ARM_V5TEJCPU_SUBTYPE_ARM_XSCALECPU_SUBTYPE_ARM_V7CPU_SUBTYPE_ARM_V7FCPU_SUBTYPE_ARM_V7SCPU_SUBTYPE_ARM_V7KCPU_SUBTYPE_ARM_V8CPU_SUBTYPE_ARM_V6MCPU_SUBTYPE_ARM_V7MCPU_SUBTYPE_ARM_V7EM"
)
var (
_CpuSubtypeARM_index_0 = [...]uint8{0, 19}
_CpuSubtypeARM_index_1 = [...]uint8{0, 19, 37, 58, 80, 98, 117, 136, 155, 173, 192, 211, 231}
)
func (i CpuSubtypeARM) String() string {
switch {
case i == 0:
return _CpuSubtypeARM_name_0
case 5 <= i && i <= 16:
i -= 5
return _CpuSubtypeARM_name_1[_CpuSubtypeARM_index_1[i]:_CpuSubtypeARM_index_1[i+1]]
default:
return fmt.Sprintf("CpuSubtypeARM(%d)", i)
}
}
const _CpuSubtypeARM64_name = "CPU_SUBTYPE_ARM64_ALLCPU_SUBTYPE_ARM64_V8"
var _CpuSubtypeARM64_index = [...]uint8{0, 21, 41}
func (i CpuSubtypeARM64) String() string {
if i >= CpuSubtypeARM64(len(_CpuSubtypeARM64_index)-1) {
return fmt.Sprintf("CpuSubtypeARM64(%d)", i)
}
return _CpuSubtypeARM64_name[_CpuSubtypeARM64_index[i]:_CpuSubtypeARM64_index[i+1]]
}
const (
_Magic_name_0 = "FAT_CIGAM"
_Magic_name_1 = "FAT_CIGAM_64"
_Magic_name_2 = "FAT_MAGICFAT_MAGIC_64"
_Magic_name_3 = "MH_CIGAM"
_Magic_name_4 = "MH_CIGAM_64"
_Magic_name_5 = "MH_MAGICMH_MAGIC_64"
)
var (
_Magic_index_0 = [...]uint8{0, 9}
_Magic_index_1 = [...]uint8{0, 12}
_Magic_index_2 = [...]uint8{0, 9, 21}
_Magic_index_3 = [...]uint8{0, 8}
_Magic_index_4 = [...]uint8{0, 11}
_Magic_index_5 = [...]uint8{0, 8, 19}
)
func (i Magic) String() string {
switch {
case i == 3199925962:
return _Magic_name_0
case i == 3216703178:
return _Magic_name_1
case 3405691582 <= i && i <= 3405691583:
i -= 3405691582
return _Magic_name_2[_Magic_index_2[i]:_Magic_index_2[i+1]]
case i == 3472551422:
return _Magic_name_3
case i == 3489328638:
return _Magic_name_4
case 4277009102 <= i && i <= 4277009103:
i -= 4277009102
return _Magic_name_5[_Magic_index_5[i]:_Magic_index_5[i+1]]
default:
return fmt.Sprintf("Magic(%d)", i)
}
}
const _FileType_name = "MH_OBJECTMH_EXECUTEMH_FVMLIBMH_COREMH_PRELOADMH_DYLIBMH_DYLINKERMH_BUNDLEMH_DYLIB_STUBMH_DSYMMH_KEXT_BUNDLE"
var _FileType_index = [...]uint8{0, 9, 19, 28, 35, 45, 53, 64, 73, 86, 93, 107}
func (i FileType) String() string {
i -= 1
if i >= FileType(len(_FileType_index)-1) {
return fmt.Sprintf("FileType(%d)", i+1)
}
return _FileType_name[_FileType_index[i]:_FileType_index[i+1]]
}
const _SectionType_name = "S_REGULARS_ZEROFILLS_CSTRING_LITERALSS_4BYTE_LITERALSS_8BYTE_LITERALSS_LITERAL_POINTERSS_NON_LAZY_SYMBOL_POINTERSS_LAZY_SYMBOL_POINTERSS_SYMBOL_STUBSS_MOD_INIT_FUNC_POINTERSS_MOD_TERM_FUNC_POINTERSS_COALESCEDS_GB_ZEROFILLS_INTERPOSINGS_16BYTE_LITERALSS_DTRACE_DOFS_LAZY_DYLIB_SYMBOL_POINTERSS_THREAD_LOCAL_REGULARS_THREAD_LOCAL_ZEROFILLS_THREAD_LOCAL_VARIABLESS_THREAD_LOCAL_VARIABLE_POINTERSS_THREAD_LOCAL_INIT_FUNCTION_POINTERS"
var _SectionType_index = [...]uint16{0, 9, 19, 37, 53, 69, 87, 113, 135, 149, 173, 197, 208, 221, 234, 251, 263, 291, 313, 336, 360, 392, 429}
func (i SectionType) String() string {
if i >= SectionType(len(_SectionType_index)-1) {
return fmt.Sprintf("SectionType(%d)", i)
}
return _SectionType_name[_SectionType_index[i]:_SectionType_index[i+1]]
}
const _LoadCommand_name = "LC_SEGMENTLC_SYMTABLC_SYMSEGLC_THREADLC_UNIXTHREADLC_LOADFVMLIBLC_IDFVMLIBLC_IDENTLC_FVMFILELC_PREPAGELC_DYSYMTABLC_LOAD_DYLIBLC_ID_DYLIBLC_LOAD_DYLINKERLC_ID_DYLINKERLC_PREBOUND_DYLIBLC_ROUTINESLC_SUB_FRAMEWORKLC_SUB_UMBRELLALC_SUB_CLIENTLC_SUB_LIBRARYLC_TWOLEVEL_HINTSLC_PREBIND_CKSUMLC_SEGMENT_64LC_ROUTINES_64LC_UUIDLC_CODE_SIGNATURELC_SEGMENT_SPLIT_INFOLC_LAZY_LOAD_DYLIBLC_ENCRYPTION_INFOLC_DYLD_INFOLC_VERSION_MIN_MACOSXLC_VERSION_MIN_IPHONEOSLC_FUNCTION_STARTSLC_DYLD_ENVIRONMENTLC_DATA_IN_CODELC_SOURCE_VERSIONLC_DYLIB_CODE_SIGN_DRSLC_ENCRYPTION_INFO_64LC_LINKER_OPTIONLC_LINKER_OPTIMIZATION_HINTLC_VERSION_MIN_TVOSLC_VERSION_MIN_WATCHOSLC_REQ_DYLDLC_LOAD_WEAK_DYLIBLC_RPATHLC_REEXPORT_DYLIBLC_DYLD_INFO_ONLYLC_LOAD_UPWARD_DYLIBLC_MAIN"
var _LoadCommand_map = map[LoadCommand]string{
1: _LoadCommand_name[0:10],
2: _LoadCommand_name[10:19],
3: _LoadCommand_name[19:28],
4: _LoadCommand_name[28:37],
5: _LoadCommand_name[37:50],
6: _LoadCommand_name[50:63],
7: _LoadCommand_name[63:74],
8: _LoadCommand_name[74:82],
9: _LoadCommand_name[82:92],
10: _LoadCommand_name[92:102],
11: _LoadCommand_name[102:113],
12: _LoadCommand_name[113:126],
13: _LoadCommand_name[126:137],
14: _LoadCommand_name[137:153],
15: _LoadCommand_name[153:167],
16: _LoadCommand_name[167:184],
17: _LoadCommand_name[184:195],
18: _LoadCommand_name[195:211],
19: _LoadCommand_name[211:226],
20: _LoadCommand_name[226:239],
21: _LoadCommand_name[239:253],
22: _LoadCommand_name[253:270],
23: _LoadCommand_name[270:286],
25: _LoadCommand_name[286:299],
26: _LoadCommand_name[299:313],
27: _LoadCommand_name[313:320],
29: _LoadCommand_name[320:337],
30: _LoadCommand_name[337:358],
32: _LoadCommand_name[358:376],
33: _LoadCommand_name[376:394],
34: _LoadCommand_name[394:406],
36: _LoadCommand_name[406:427],
37: _LoadCommand_name[427:450],
38: _LoadCommand_name[450:468],
39: _LoadCommand_name[468:487],
41: _LoadCommand_name[487:502],
42: _LoadCommand_name[502:519],
43: _LoadCommand_name[519:541],
44: _LoadCommand_name[541:562],
45: _LoadCommand_name[562:578],
46: _LoadCommand_name[578:605],
47: _LoadCommand_name[605:624],
48: _LoadCommand_name[624:646],
2147483648: _LoadCommand_name[646:657],
2147483672: _LoadCommand_name[657:675],
2147483676: _LoadCommand_name[675:683],
2147483679: _LoadCommand_name[683:700],
2147483682: _LoadCommand_name[700:717],
2147483683: _LoadCommand_name[717:737],
2147483688: _LoadCommand_name[737:744],
}
func (i LoadCommand) String() string {
if str, ok := _LoadCommand_map[i]; ok {
return str
}
return fmt.Sprintf("LoadCommand(%d)", i)
}
const (
_SymbolType_name_0 = "N_UNDF"
_SymbolType_name_1 = "N_ABS"
_SymbolType_name_2 = "N_INDR"
_SymbolType_name_3 = "N_PBUD"
_SymbolType_name_4 = "N_SECT"
)
var (
_SymbolType_index_0 = [...]uint8{0, 6}
_SymbolType_index_1 = [...]uint8{0, 5}
_SymbolType_index_2 = [...]uint8{0, 6}
_SymbolType_index_3 = [...]uint8{0, 6}
_SymbolType_index_4 = [...]uint8{0, 6}
)
func (i SymbolType) String() string {
switch {
case i == 0:
return _SymbolType_name_0
case i == 2:
return _SymbolType_name_1
case i == 10:
return _SymbolType_name_2
case i == 12:
return _SymbolType_name_3
case i == 14:
return _SymbolType_name_4
default:
return fmt.Sprintf("SymbolType(%d)", i)
}
}
const _StabType_name = "N_GSYMN_FNAMEN_FUNN_STSYMN_LCSYMN_BNSYMN_ASTN_OPTN_RSYMN_SLINEN_ENSYMN_SSYMN_SON_OSON_LSYMN_BINCLN_SOLN_PARAMSN_VERSIONN_OLEVELN_PSYMN_EINCLN_ENTRYN_LBRACN_EXCLN_RBRACN_BCOMMN_ECOMMN_ECOMLN_LENG"
var _StabType_map = map[StabType]string{
32: _StabType_name[0:6],
34: _StabType_name[6:13],
36: _StabType_name[13:18],
38: _StabType_name[18:25],
40: _StabType_name[25:32],
46: _StabType_name[32:39],
50: _StabType_name[39:44],
60: _StabType_name[44:49],
64: _StabType_name[49:55],
68: _StabType_name[55:62],
78: _StabType_name[62:69],
96: _StabType_name[69:75],
100: _StabType_name[75:79],
102: _StabType_name[79:84],
128: _StabType_name[84:90],
130: _StabType_name[90:97],
132: _StabType_name[97:102],
134: _StabType_name[102:110],
136: _StabType_name[110:119],
138: _StabType_name[119:127],
160: _StabType_name[127:133],
162: _StabType_name[133:140],
164: _StabType_name[140:147],
192: _StabType_name[147:154],
194: _StabType_name[154:160],
224: _StabType_name[160:167],
226: _StabType_name[167:174],
228: _StabType_name[174:181],
232: _StabType_name[181:188],
254: _StabType_name[188:194],
}
func (i StabType) String() string {
if str, ok := _StabType_map[i]; ok {
return str
}
return fmt.Sprintf("StabType(%d)", i)
}
const _ReferenceType_name = "REFERENCE_FLAG_UNDEFINED_NON_LAZYREFERENCE_FLAG_UNDEFINED_LAZYREFERENCE_FLAG_DEFINEDREFERENCE_FLAG_PRIVATE_DEFINEDREFERENCE_FLAG_PRIVATE_UNDEFINED_NON_LAZYREFERENCE_FLAG_PRIVATE_UNDEFINED_LAZY"
var _ReferenceType_index = [...]uint8{0, 33, 62, 84, 114, 155, 192}
func (i ReferenceType) String() string | {
if i >= ReferenceType(len(_ReferenceType_index)-1) {
return fmt.Sprintf("ReferenceType(%d)", i)
}
return _ReferenceType_name[_ReferenceType_index[i]:_ReferenceType_index[i+1]]
} | identifier_body |
|
caffenet.py | import sys
import os
import numpy as np
from net import Net
class CaffeNet(Net):
def __init__(self, settings):
"""Initialize the caffe network.
Initializing the caffe network includes two steps:
(1) importing the caffe library. We are interested in
the modified version that provides deconvolution support.
(2) load the caffe model data.
Arguments:
            settings: The settings object to be used. CaffeNet will only
                use settings prefixed with "caffe". ULF[todo]: check this claim!
"""
super(CaffeNet, self).__init__(settings)
self._range_scale = 1.0 # not needed; image already in [0,255]
#ULF[todo]: explain, make this a setting
self._net_channel_swap = (2,1,0)
#self._net_channel_swap = None
if self._net_channel_swap:
self._net_channel_swap_inv = tuple([self._net_channel_swap.index(ii) for ii in range(len(self._net_channel_swap))])
else:
self._net_channel_swap_inv = None
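        # Note: the (2, 1, 0) swap reorders RGB input into the BGR channel order
        # that Caffe reference models are typically trained with; the inverse
        # permutation computed above undoes the swap for display.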
# (1) import caffe library
#
sys.path.insert(0, os.path.join(settings.caffevis_caffe_root, 'python'))
import caffe
print 'debug[caffe]: CaffeNet.__init__: using Caffe in', caffe.__file__
# Check if the imported caffe provides all required functions
self._check_caffe_version(caffe)
# Set the mode to CPU or GPU.
# Note: in the latest Caffe versions, there is one Caffe object
# *per thread*, so the mode must be set per thread!
# Here we set the mode for the main thread; it is also separately
# set in CaffeProcThread.
if settings.caffevis_mode_gpu:
caffe.set_mode_gpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): GPU'
else:
caffe.set_mode_cpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): CPU'
print 'debug[caffe]: CaffeNet.__init__: Loading the classifier (', settings.caffevis_deploy_prototxt, settings.caffevis_network_weights, ') ...'
# (2) load the caffe model
#
# ULF[hack]: make Caffe silent - there should be a better
# (i.e. official) way to do so. We only want to suppress
# the info (like network topology) while still seeing warnings
# and errors!
suppress_output = (hasattr(self.settings, 'caffe_init_silent')
and self.settings.caffe_init_silent)
if suppress_output:
# open 2 file descriptors
null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]
# save the current file descriptors to a tuple
original_fds = os.dup(1), os.dup(2)
# put /dev/null fds on stdout (1) and stderr (2)
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
self.net = caffe.Classifier(
settings.caffevis_deploy_prototxt,
settings.caffevis_network_weights,
mean = None, # Set to None for now, assign later # self._data_mean,
channel_swap = self._net_channel_swap,
raw_scale = self._range_scale,
)
if suppress_output:
# restore file original descriptors for stdout (1) and stderr (2)
os.dup2(original_fds[0], 1)
os.dup2(original_fds[1], 2)
# close the temporary file descriptors
os.close(null_fds[0])
os.close(null_fds[1])
print 'debug[caffe]: CaffeNet.__init__: ... loading completed.'
self._init_data_mean()
self._check_force_backward_true()
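    # Hypothetical usage sketch (settings values assumed, not defined here):
    #   net = CaffeNet(settings)
    #   net.preproc_forward(img, (227, 227))   # img: HxWx3 array in [0, 255]
    #   acts = net.get_layer_data('conv1')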
def _check_caffe_version(self, caffe):
"""Check if the caffe version provides all required functions.
        The deep visualization toolbox requires a modified version of
        Caffe that supports deconvolution. Without these functions,
        the toolbox can still run, but will not provide full functionality.
This method will issue a warning, if caffe does not provide the
required functions.
"""
if 'deconv_from_layer' in dir(caffe.classifier.Classifier):
print "debug[caffe]: caffe version provides all required functions. Good!"
else:
print "warning: Function 'deconv_from_layer' is missing in caffe. Probably you are using a wrong caffe version. Some functions will not be available!'"
def _init_data_mean(self):
"""Initialize the data mean.
        The data mean values are loaded from a separate file. Caffe can
        use these values to mean-center the input data during preprocessing.
"""
if isinstance(self.settings.caffevis_data_mean, basestring):
# If the mean is given as a filename, load the file
try:
data_mean = np.load(self.settings.caffevis_data_mean)
except IOError:
                print '\n\nCould not load mean file:', self.settings.caffevis_data_mean
print 'Ensure that the values in settings.py point to a valid model weights file, network'
print 'definition prototxt, and mean. To fetch a default model and mean file, use:\n'
print '$ cd models/caffenet-yos/'
print '$ ./fetch.sh\n\n'
raise
input_shape = self.get_input_data_shape() # e.g. 227x227
# Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256)
excess_h = data_mean.shape[1] - input_shape[0]
excess_w = data_mean.shape[2] - input_shape[1]
assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(input_shape)
data_mean = data_mean[:, (excess_h/2):(excess_h/2+input_shape[0]),
(excess_w/2):(excess_w/2+input_shape[1])]
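            # Worked example (sizes assumed): a 256x256 mean with a 227x227 input
            # gives excess_h = excess_w = 29; integer division keeps rows and
            # columns 14:241 (29/2 = 14, and 14 + 227 = 241).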
elif self.settings.caffevis_data_mean is None:
data_mean = None
else:
# The mean has been given as a value or a tuple of values
data_mean = np.array(self.settings.caffevis_data_mean)
# Promote to shape C,1,1
            while len(data_mean.shape) < 3:
data_mean = np.expand_dims(data_mean, -1)
#if not isinstance(data_mean, tuple):
# # If given as int/float: promote to tuple
# data_mean = tuple(data_mean)
if data_mean is not None:
self.net.transformer.set_mean(self.net.inputs[0], data_mean)
def _check_force_backward_true(self):
"""Check the force_backward flag is set in the caffe model definition.
Checks whether the given file contains a line with the
following text, ignoring whitespace:
force_backward: true
If this is not the case, a warning text will be issued.
ULF: This method should not be called from outside, but it is still
called from "optimize_image.py"
"""
prototxt_file = self.settings.caffevis_deploy_prototxt
found = False
with open(prototxt_file, 'r') as ff:
for line in ff:
fields = line.strip().split()
if len(fields) == 2 and fields[0] == 'force_backward:' and fields[1] == 'true':
found = True
break
if not found:
print '\n\nWARNING: the specified prototxt'
print '"%s"' % prototxt_file
print 'does not contain the line "force_backward: true". This may result in backprop'
print 'and deconv producing all zeros at the input layer. You may want to add this line'
print 'to your prototxt file before continuing to force backprop to compute derivatives'
print 'at the data layer as well.\n\n'
def get_layer_ids(self, include_input = True):
|
def get_input_id(self):
"""Get the identifier for the input layer.
        Result: The type of this identifier depends on the underlying
            network library. However, the identifier returned by this
            method is suitable as an argument for other methods in this
            class that expect a layer_id.
"""
return self.net.inputs[0]
    def get_layer_shape(self, layer_id):
        """Get the shape of the given layer.
        Returns a tuple describing the shape of the layer:
Fully connected layer: 1-tuple, the number of neurons,
example: (1000, )
Convolutional layer: n_filter x n_rows x n_columns,
example: (96, 55, 55)
"""
return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size
def get_layer_data(self, layer_id, unit = None, flatten = False):
"""Provide activation data for a given layer.
Result:
            An array of appropriate shape (see get_layer_shape()) containing
the layer activation values.
"""
data = self.net.blobs[layer_id].data
return data.flatten() if flatten else (data[0] if unit is None else data[0,unit])
def get_layer_diff(self, layer_id, flatten = False):
"""Provide diff data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer diff values.
ULF[todo]: find out what these diff-values actually are!
"""
diff = self.net.blobs[layer_id].diff
return diff.flatten() if flatten else diff[0]
def preproc_forward(self, img, data_hw):
"""Prepare image data for processing and do forward propagation.
Uses caffe.transformer.preprocess and caffe.net.forward
Arguments:
img:
data_hw
ULF[todo]: find out what this exactly does!
ULF: called by deepvis/proc_thread.py
ULF: app_helper.py: provides a function with similar name
"""
appropriate_shape = data_hw + (3,)
assert img.shape == appropriate_shape, 'img is wrong size (got %s but expected %s)' % (img.shape, appropriate_shape)
#resized = caffe.io.resize_image(img, self.net.image_dims) # e.g. (227, 227, 3)
data_blob = self.net.transformer.preprocess('data', img) # e.g. (3, 227, 227), mean subtracted and scaled to [0,255]
data_blob = data_blob[np.newaxis,:,:,:] # e.g. (1, 3, 227, 227)
output = self.net.forward(data=data_blob)
return output
def backward_from_layer(self, layer_id, diffs):
'''Compute backward gradients from layer.
Notice: this method relies on the method backward_from_layer(),
which is not part of the standard Caffe. You need the
deconv-deep-vis-toolbox branch of caffe to run this function.
ULF[fixme]: currently the interface freezes when the function
is not available - look for a better way to deal with this problem.
'''
#print '**** Doing backprop with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max())
try:
#ULF[old]:
self.net.backward_from_layer(layer_id, diffs, zero_higher = True)
except AttributeError:
print 'ERROR: required bindings (backward_from_layer) not found! Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox'
raise
def deconv_from_layer(self, layer_id, diffs):
'''Compute deconvolution from layer.
Notice: this method relies on the method deconv_from_layer(),
which is not part of the standard Caffe. You need the
deconv-deep-vis-toolbox branch of caffe to run this function.
ULF[fixme]: currently the interface freezes when the function
is not available - look for a better way to deal with this problem.
'''
#print '**** Doing deconv with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max())
try:
self.net.deconv_from_layer(layer_id, diffs, zero_higher = True)
except AttributeError:
print 'ERROR: required bindings (deconv_from_layer) not found! Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox'
raise
| """Get the layer identifiers of the network layers.
Arguments:
include_input:
a flag indicating if the input layer should be
included in the result.
Result: A list of identifiers (strings in the case of Caffe).
Notice that the type of these identifiers may depend on
the underlying network library. However, the identifiers
returned by this method should be suitable as arguments
for other methods in this class that expect a layer_id.
"""
layers = self.net.blobs.keys()
if not include_input:
layers = layers[1:]
return layers | identifier_body |
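The accessors above compose into a quick network summary. A minimal usage sketch, assuming a CaffeNet instance built from a settings module with valid caffevis_* paths; the settings import and the print format are placeholders, not part of this file:

# Hypothetical usage of the introspection API defined above.
from caffenet import CaffeNet
import settings  # placeholder: must define caffevis_caffe_root, prototxt, weights, mean

net = CaffeNet(settings)
for layer_id in net.get_layer_ids(include_input=False):
    shape = net.get_layer_shape(layer_id)              # e.g. (96, 55, 55) for a conv layer
    acts = net.get_layer_data(layer_id, flatten=True)  # zeros until a forward pass runs
    print '%-12s shape=%-14s mean activation=%.4f' % (layer_id, shape, acts.mean())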
caffenet.py | import sys
import os
import numpy as np
from net import Net
class CaffeNet(Net):
def __init__(self, settings): | (1) importing the caffe library. We are interested in
the modified version that provides deconvolution support.
(2) load the caffe model data.
Arguments:
settings: The settings object to be used. CaffeNet will only
use settings prefixed with "caffe". ULF[todo]: check this claim!
"""
super(CaffeNet, self).__init__(settings)
self._range_scale = 1.0 # not needed; image already in [0,255]
#ULF[todo]: explain, make this a setting
self._net_channel_swap = (2,1,0)
#self._net_channel_swap = None
if self._net_channel_swap:
self._net_channel_swap_inv = tuple([self._net_channel_swap.index(ii) for ii in range(len(self._net_channel_swap))])
else:
self._net_channel_swap_inv = None
# (1) import caffe library
#
sys.path.insert(0, os.path.join(settings.caffevis_caffe_root, 'python'))
import caffe
print 'debug[caffe]: CaffeNet.__init__: using Caffe in', caffe.__file__
# Check if the imported caffe provides all required functions
self._check_caffe_version(caffe)
# Set the mode to CPU or GPU.
# Note: in the latest Caffe versions, there is one Caffe object
# *per thread*, so the mode must be set per thread!
# Here we set the mode for the main thread; it is also separately
# set in CaffeProcThread.
if settings.caffevis_mode_gpu:
caffe.set_mode_gpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): GPU'
else:
caffe.set_mode_cpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): CPU'
print 'debug[caffe]: CaffeNet.__init__: Loading the classifier (', settings.caffevis_deploy_prototxt, settings.caffevis_network_weights, ') ...'
# (2) load the caffe model
#
# ULF[hack]: make Caffe silent - there should be a better
# (i.e. official) way to do so. We only want to suppress
# the info (like network topology) while still seeing warnings
# and errors!
suppress_output = (hasattr(self.settings, 'caffe_init_silent')
and self.settings.caffe_init_silent)
if suppress_output:
# open 2 file descriptors
null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]
# save the current file descriptors to a tuple
original_fds = os.dup(1), os.dup(2)
# put /dev/null fds on stdout (1) and stderr (2)
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
self.net = caffe.Classifier(
settings.caffevis_deploy_prototxt,
settings.caffevis_network_weights,
mean = None, # Set to None for now, assign later # self._data_mean,
channel_swap = self._net_channel_swap,
raw_scale = self._range_scale,
)
if suppress_output:
# restore file original descriptors for stdout (1) and stderr (2)
os.dup2(original_fds[0], 1)
os.dup2(original_fds[1], 2)
# close the temporary file descriptors
os.close(null_fds[0])
os.close(null_fds[1])
print 'debug[caffe]: CaffeNet.__init__: ... loading completed.'
self._init_data_mean()
self._check_force_backward_true()
def _check_caffe_version(self, caffe):
"""Check if the caffe version provides all required functions.
The deep visualization toolbox requires a modified version of
caffe that supports deconvolution. Without these functions,
the toolbox is able to run, but will not provide full functionality.
This method will issue a warning if caffe does not provide the
required functions.
"""
if 'deconv_from_layer' in dir(caffe.classifier.Classifier):
print "debug[caffe]: caffe version provides all required functions. Good!"
else:
print "warning: Function 'deconv_from_layer' is missing in caffe. Probably you are using a wrong caffe version. Some functions will not be available!'"
def _init_data_mean(self):
"""Initialize the data mean.
The data mean values are loaded from a separate file. Caffe can
use these values to preprocess the input data.
"""
if isinstance(self.settings.caffevis_data_mean, basestring):
# If the mean is given as a filename, load the file
try:
data_mean = np.load(self.settings.caffevis_data_mean)
except IOError:
print '\n\nCould not load mean file:', self.settings.caffevis_data_mean
print 'Ensure that the values in settings.py point to a valid model weights file, network'
print 'definition prototxt, and mean. To fetch a default model and mean file, use:\n'
print '$ cd models/caffenet-yos/'
print '$ ./fetch.sh\n\n'
raise
input_shape = self.get_input_data_shape() # e.g. 227x227
# Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256)
excess_h = data_mean.shape[1] - input_shape[0]
excess_w = data_mean.shape[2] - input_shape[1]
assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(input_shape)
data_mean = data_mean[:, (excess_h/2):(excess_h/2+input_shape[0]),
(excess_w/2):(excess_w/2+input_shape[1])]
elif self.settings.caffevis_data_mean is None:
data_mean = None
else:
# The mean has been given as a value or a tuple of values
data_mean = np.array(self.settings.caffevis_data_mean)
# Promote to shape C,1,1
while len(data_mean.shape) < 3:
data_mean = np.expand_dims(data_mean, -1)
#if not isinstance(data_mean, tuple):
# # If given as int/float: promote to tuple
# data_mean = tuple(data_mean)
if data_mean is not None:
self.net.transformer.set_mean(self.net.inputs[0], data_mean)
def _check_force_backward_true(self):
"""Check the force_backward flag is set in the caffe model definition.
Checks whether the given file contains a line with the
following text, ignoring whitespace:
force_backward: true
If this is not the case, a warning text will be issued.
ULF: This method should not be called from outside, but it is still
called from "optimize_image.py"
"""
prototxt_file = self.settings.caffevis_deploy_prototxt
found = False
with open(prototxt_file, 'r') as ff:
for line in ff:
fields = line.strip().split()
if len(fields) == 2 and fields[0] == 'force_backward:' and fields[1] == 'true':
found = True
break
if not found:
print '\n\nWARNING: the specified prototxt'
print '"%s"' % prototxt_file
print 'does not contain the line "force_backward: true". This may result in backprop'
print 'and deconv producing all zeros at the input layer. You may want to add this line'
print 'to your prototxt file before continuing to force backprop to compute derivatives'
print 'at the data layer as well.\n\n'
def get_layer_ids(self, include_input = True):
"""Get the layer identifiers of the network layers.
Arguments:
include_input:
a flag indicating if the input layer should be
included in the result.
Result: A list of identifiers (strings in the case of Caffe).
Notice that the type of these identifiers may depend on
the underlying network library. However, the identifiers
returned by this method should be suitable as arguments
for other methods in this class that expect a layer_id.
"""
layers = self.net.blobs.keys()
if not include_input:
layers = layers[1:]
return layers
def get_input_id(self):
"""Get the identifier for the input layer.
Result: The type of this identifier depends on the underlying
network library. However, the identifier returned by this
method is suitable as argument for other methods in this
class that expect a layer_id.
"""
return self.net.inputs[0]
def get_layer_shape(self,layer_id):
"""Get the shape of the given layer.
Returns a tuple describing the shape of the layer:
Fully connected layer: 1-tuple, the number of neurons,
example: (1000, )
Convolutional layer: n_filter x n_rows x n_columns,
example: (96, 55, 55)
"""
return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size
def get_layer_data(self, layer_id, unit = None, flatten = False):
"""Provide activation data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer activation values.
"""
data = self.net.blobs[layer_id].data
return data.flatten() if flatten else (data[0] if unit is None else data[0,unit])
def get_layer_diff(self, layer_id, flatten = False):
"""Provide diff data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer diff values.
ULF[todo]: find out what these diff-values actually are!
"""
diff = self.net.blobs[layer_id].diff
return diff.flatten() if flatten else diff[0]
def preproc_forward(self, img, data_hw):
"""Prepare image data for processing and do forward propagation.
Uses caffe.transformer.preprocess and caffe.net.forward
Arguments:
img:
data_hw
ULF[todo]: find out what this exactly does!
ULF: called by deepvis/proc_thread.py
ULF: app_helper.py: provides a function with similar name
"""
appropriate_shape = data_hw + (3,)
assert img.shape == appropriate_shape, 'img is wrong size (got %s but expected %s)' % (img.shape, appropriate_shape)
#resized = caffe.io.resize_image(img, self.net.image_dims) # e.g. (227, 227, 3)
data_blob = self.net.transformer.preprocess('data', img) # e.g. (3, 227, 227), mean subtracted and scaled to [0,255]
data_blob = data_blob[np.newaxis,:,:,:] # e.g. (1, 3, 227, 227)
output = self.net.forward(data=data_blob)
return output
def backward_from_layer(self, layer_id, diffs):
'''Compute backward gradients from layer.
Notice: this method relies on the method backward_from_layer(),
which is not part of the standard Caffe. You need the
deconv-deep-vis-toolbox branch of caffe to run this function.
ULF[fixme]: currently the interface freezes when the function
is not available - look for a better way to deal with this problem.
'''
#print '**** Doing backprop with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max())
try:
#ULF[old]:
self.net.backward_from_layer(layer_id, diffs, zero_higher = True)
except AttributeError:
print 'ERROR: required bindings (backward_from_layer) not found! Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox'
raise
def deconv_from_layer(self, layer_id, diffs):
'''Compute deconvolution from layer.
Notice: this method relies on the method deconv_from_layer(),
which is not part of the standard Caffe. You need the
deconv-deep-vis-toolbox branch of caffe to run this function.
ULF[fixme]: currently the interface freezes when the function
is not available - look for a better way to deal with this problem.
'''
#print '**** Doing deconv with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max())
try:
self.net.deconv_from_layer(layer_id, diffs, zero_higher = True)
except AttributeError:
print 'ERROR: required bindings (deconv_from_layer) not found! Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox'
raise | """Initialize the caffe network.
Initializing the caffe network includes two steps: | random_line_split |
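The descriptor juggling in __init__ silences output written by Caffe's C++ core, which a plain sys.stdout redirect cannot catch. A minimal sketch of the same pattern as a reusable context manager; this is an illustration of the technique, not part of this codebase:

import os
from contextlib import contextmanager

@contextmanager
def silence_fds():
    # Point OS-level stdout (1) and stderr (2) at /dev/null so that even
    # output from native extensions is suppressed.
    null_fds = [os.open(os.devnull, os.O_RDWR) for _ in range(2)]
    saved_fds = os.dup(1), os.dup(2)
    os.dup2(null_fds[0], 1)
    os.dup2(null_fds[1], 2)
    try:
        yield
    finally:
        # Restore the original descriptors and close the temporaries.
        os.dup2(saved_fds[0], 1)
        os.dup2(saved_fds[1], 2)
        for fd in null_fds + list(saved_fds):
            os.close(fd)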
caffenet.py | import sys
import os
import numpy as np
from net import Net
class CaffeNet(Net):
def __init__(self, settings):
"""Initialize the caffe network.
Initializing the caffe network includes two steps:
(1) importing the caffe library. We are interested in
the modified version that provides deconvolution support.
(2) load the caffe model data.
Arguments:
settings: The settings object to be used. CaffeNet will only
use settings prefixed with "caffe". ULF[todo]: check this claim!
"""
super(CaffeNet, self).__init__(settings)
self._range_scale = 1.0 # not needed; image already in [0,255]
#ULF[todo]: explain, make this a setting
self._net_channel_swap = (2,1,0)
#self._net_channel_swap = None
if self._net_channel_swap:
self._net_channel_swap_inv = tuple([self._net_channel_swap.index(ii) for ii in range(len(self._net_channel_swap))])
else:
self._net_channel_swap_inv = None
# (1) import caffe library
#
sys.path.insert(0, os.path.join(settings.caffevis_caffe_root, 'python'))
import caffe
print 'debug[caffe]: CaffeNet.__init__: using Caffe in', caffe.__file__
# Check if the imported caffe provides all required functions
self._check_caffe_version(caffe)
# Set the mode to CPU or GPU.
# Note: in the latest Caffe versions, there is one Caffe object
# *per thread*, so the mode must be set per thread!
# Here we set the mode for the main thread; it is also separately
# set in CaffeProcThread.
if settings.caffevis_mode_gpu:
caffe.set_mode_gpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): GPU'
else:
caffe.set_mode_cpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): CPU'
print 'debug[caffe]: CaffeNet.__init__: Loading the classifier (', settings.caffevis_deploy_prototxt, settings.caffevis_network_weights, ') ...'
# (2) load the caffe model
#
# ULF[hack]: make Caffe silent - there should be a better
# (i.e. official) way to do so. We only want to suppress
# the info (like network topology) while still seeing warnings
# and errors!
suppress_output = (hasattr(self.settings, 'caffe_init_silent')
and self.settings.caffe_init_silent)
if suppress_output:
# open 2 file descriptors
null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]
# save the current file descriptors to a tuple
original_fds = os.dup(1), os.dup(2)
# put /dev/null fds on stdout (1) and stderr (2)
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
self.net = caffe.Classifier(
settings.caffevis_deploy_prototxt,
settings.caffevis_network_weights,
mean = None, # Set to None for now, assign later # self._data_mean,
channel_swap = self._net_channel_swap,
raw_scale = self._range_scale,
)
if suppress_output:
# restore file original descriptors for stdout (1) and stderr (2)
os.dup2(original_fds[0], 1)
os.dup2(original_fds[1], 2)
# close the temporary file descriptors
os.close(null_fds[0])
os.close(null_fds[1])
print 'debug[caffe]: CaffeNet.__init__: ... loading completed.'
self._init_data_mean()
self._check_force_backward_true()
def _check_caffe_version(self, caffe):
"""Check if the caffe version provides all required functions.
The deep visualization toolbox requires a modified version of
caffe that supports deconvolution. Without these functions,
the toolbox is able to run, but will not provide full functionality.
This method will issue a warning if caffe does not provide the
required functions.
"""
if 'deconv_from_layer' in dir(caffe.classifier.Classifier):
print "debug[caffe]: caffe version provides all required functions. Good!"
else:
print "warning: Function 'deconv_from_layer' is missing in caffe. Probably you are using a wrong caffe version. Some functions will not be available!'"
def _init_data_mean(self):
"""Initialize the data mean.
The data mean values are loaded from a separate file. Caffe can
use these values to preprocess the input data.
"""
if isinstance(self.settings.caffevis_data_mean, basestring):
# If the mean is given as a filename, load the file
try:
data_mean = np.load(self.settings.caffevis_data_mean)
except IOError:
print '\n\nCould not load mean file:', self.settings.caffevis_data_mean
print 'Ensure that the values in settings.py point to a valid model weights file, network'
print 'definition prototxt, and mean. To fetch a default model and mean file, use:\n'
print '$ cd models/caffenet-yos/'
print '$ ./fetch.sh\n\n'
raise
input_shape = self.get_input_data_shape() # e.g. 227x227
# Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256)
excess_h = data_mean.shape[1] - input_shape[0]
excess_w = data_mean.shape[2] - input_shape[1]
assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(input_shape)
data_mean = data_mean[:, (excess_h/2):(excess_h/2+input_shape[0]),
(excess_w/2):(excess_w/2+input_shape[1])]
elif self.settings.caffevis_data_mean is None:
data_mean = None
else:
# The mean has been given as a value or a tuple of values
data_mean = np.array(self.settings.caffevis_data_mean)
# Promote to shape C,1,1
while len(data_mean.shape) < 3:
data_mean = np.expand_dims(data_mean, -1)
#if not isinstance(data_mean, tuple):
# # If given as int/float: promote to tuple
# data_mean = tuple(data_mean)
if data_mean is not None:
self.net.transformer.set_mean(self.net.inputs[0], data_mean)
def _check_force_backward_true(self):
"""Check the force_backward flag is set in the caffe model definition.
Checks whether the given file contains a line with the
following text, ignoring whitespace:
force_backward: true
If this is not the case, a warning text will be issued.
ULF: This method should not be called from outside, but it is still
called from "optimize_image.py"
"""
prototxt_file = self.settings.caffevis_deploy_prototxt
found = False
with open(prototxt_file, 'r') as ff:
for line in ff:
fields = line.strip().split()
if len(fields) == 2 and fields[0] == 'force_backward:' and fields[1] == 'true':
found = True
break
if not found:
print '\n\nWARNING: the specified prototxt'
print '"%s"' % prototxt_file
print 'does not contain the line "force_backward: true". This may result in backprop'
print 'and deconv producing all zeros at the input layer. You may want to add this line'
print 'to your prototxt file before continuing to force backprop to compute derivatives'
print 'at the data layer as well.\n\n'
def get_layer_ids(self, include_input = True):
"""Get the layer identifiers of the network layers.
Arguments:
include_input:
a flag indicating if the input layer should be
included in the result.
Result: A list of identifiers (strings in the case of Caffe).
Notice that the type of these identifiers may depend on
the underlying network library. However, the identifiers
returned by this method should be suitable as arguments
for other methods in this class that expect a layer_id.
"""
layers = self.net.blobs.keys()
if not include_input:
layers = layers[1:]
return layers
def get_input_id(self):
"""Get the identifier for the input layer.
Result: The type of this identifier depends on the underlying
network library. However, the identifier returned by this
method is suitable as argument for other methods in this
class that expect a layer_id.
"""
return self.net.inputs[0]
def | (self,layer_id):
"""Get the shape of the given layer.
Returns a tuple describing the shape of the layer:
Fully connected layer: 1-tuple, the number of neurons,
example: (1000, )
Convolutional layer: n_filter x n_rows x n_columns,
example: (96, 55, 55)
"""
return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size
def get_layer_data(self, layer_id, unit = None, flatten = False):
"""Provide activation data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer activation values.
"""
data = self.net.blobs[layer_id].data
return data.flatten() if flatten else (data[0] if unit is None else data[0,unit])
def get_layer_diff(self, layer_id, flatten = False):
"""Provide diff data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer diff values.
ULF[todo]: find out what these diff-values actually are!
"""
diff = self.net.blobs[layer_id].diff
return diff.flatten() if flatten else diff[0]
def preproc_forward(self, img, data_hw):
"""Prepare image data for processing and do forward propagation.
Uses caffe.transformer.preprocess and caffe.net.forward
Arguments:
img:
data_hw
ULF[todo]: find out what this exactly does!
ULF: called by deepvis/proc_thread.py
ULF: app_helper.py: provides a function with similar name
"""
appropriate_shape = data_hw + (3,)
assert img.shape == appropriate_shape, 'img is wrong size (got %s but expected %s)' % (img.shape, appropriate_shape)
#resized = caffe.io.resize_image(img, self.net.image_dims) # e.g. (227, 227, 3)
data_blob = self.net.transformer.preprocess('data', img) # e.g. (3, 227, 227), mean subtracted and scaled to [0,255]
data_blob = data_blob[np.newaxis,:,:,:] # e.g. (1, 3, 227, 227)
output = self.net.forward(data=data_blob)
return output
def backward_from_layer(self, layer_id, diffs):
'''Compute backward gradients from layer.
Notice: this method relies on the method backward_from_layer(),
which is not part of the standard Caffe. You need the
deconv-deep-vis-toolbox branch of caffe to run this function.
ULF[fixme]: currently the interface freezes when the function
is not available - look for a better way to deal with this problem.
'''
#print '**** Doing backprop with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max())
try:
#ULF[old]:
self.net.backward_from_layer(layer_id, diffs, zero_higher = True)
except AttributeError:
print 'ERROR: required bindings (backward_from_layer) not found! Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox'
raise
def deconv_from_layer(self, layer_id, diffs):
'''Compute deconvolution from layer.
Notice: this method relies on the method deconv_from_layer(),
which is not part of the standard Caffe. You need the
deconv-deep-vis-toolbox branch of caffe to run this function.
ULF[fixme]: currently the interface freezes when the function
is not available - look for a better way to deal with this problem.
'''
#print '**** Doing deconv with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max())
try:
self.net.deconv_from_layer(layer_id, diffs, zero_higher = True)
except AttributeError:
print 'ERROR: required bindings (deconv_from_layer) not found! Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox'
raise
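# A sketch of one deconv-visualization step built from the methods above;
# it assumes the deconv-deep-vis-toolbox branch of caffe, a constructed
# CaffeNet instance 'net', and placeholder layer/unit choices:
#   net.preproc_forward(img, (227, 227))                 # img: 227x227x3 array
#   diffs = np.zeros((1,) + net.get_layer_shape('conv5'))
#   diffs[0, 13] = net.get_layer_data('conv5', unit=13)  # seed one unit's activations
#   net.backward_from_layer('conv5', diffs)              # raises without the toolbox branch
#   input_grad = net.get_layer_diff(net.get_input_id())  # gradient at the data layer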
| get_layer_shape | identifier_name |
caffenet.py | import sys
import os
import numpy as np
from net import Net
class CaffeNet(Net):
def __init__(self, settings):
"""Initialize the caffe network.
Initializing the caffe network includes two steps:
(1) importing the caffe library. We are interested in
the modified version that provides deconvolution support.
(2) load the caffe model data.
Arguments:
settings: The settings object to be used. CaffeNet will only
use settings prefixed with "caffe". ULF[todo]: check this claim!
"""
super(CaffeNet, self).__init__(settings)
self._range_scale = 1.0 # not needed; image already in [0,255]
#ULF[todo]: explain, make this a setting
self._net_channel_swap = (2,1,0)
#self._net_channel_swap = None
if self._net_channel_swap:
self._net_channel_swap_inv = tuple([self._net_channel_swap.index(ii) for ii in range(len(self._net_channel_swap))])
else:
self._net_channel_swap_inv = None
# (1) import caffe library
#
sys.path.insert(0, os.path.join(settings.caffevis_caffe_root, 'python'))
import caffe
print 'debug[caffe]: CaffeNet.__init__: using Caffe in', caffe.__file__
# Check if the imported caffe provides all required functions
self._check_caffe_version(caffe)
# Set the mode to CPU or GPU.
# Note: in the latest Caffe versions, there is one Caffe object
# *per thread*, so the mode must be set per thread!
# Here we set the mode for the main thread; it is also separately
# set in CaffeProcThread.
if settings.caffevis_mode_gpu:
caffe.set_mode_gpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): GPU'
else:
caffe.set_mode_cpu()
print 'debug[caffe]: CaffeNet.__init__: CaffeVisApp mode (in main thread): CPU'
print 'debug[caffe]: CaffeNet.__init__: Loading the classifier (', settings.caffevis_deploy_prototxt, settings.caffevis_network_weights, ') ...'
# (2) load the caffe model
#
# ULF[hack]: make Caffe silent - there should be a better
# (i.e. official) way to do so. We only want to suppress
# the info (like network topology) while still seeing warnings
# and errors!
suppress_output = (hasattr(self.settings, 'caffe_init_silent')
and self.settings.caffe_init_silent)
if suppress_output:
# open 2 file descriptors
null_fds = [os.open(os.devnull, os.O_RDWR) for x in xrange(2)]
# save the current file descriptors to a tuple
original_fds = os.dup(1), os.dup(2)
# put /dev/null fds on stdout (1) and stderr (2)
os.dup2(null_fds[0], 1)
os.dup2(null_fds[1], 2)
self.net = caffe.Classifier(
settings.caffevis_deploy_prototxt,
settings.caffevis_network_weights,
mean = None, # Set to None for now, assign later # self._data_mean,
channel_swap = self._net_channel_swap,
raw_scale = self._range_scale,
)
if suppress_output:
# restore file original descriptors for stdout (1) and stderr (2)
os.dup2(original_fds[0], 1)
os.dup2(original_fds[1], 2)
# close the temporary file descriptors
os.close(null_fds[0])
os.close(null_fds[1])
print 'debug[caffe]: CaffeNet.__init__: ... loading completed.'
self._init_data_mean()
self._check_force_backward_true()
def _check_caffe_version(self, caffe):
"""Check if the caffe version provides all required functions.
The deep visualization toolbox requires a modified version of
caffe that supports deconvolution. Without these functions,
the toolbox is able to run, but will not provide full functionality.
This method will issue a warning if caffe does not provide the
required functions.
"""
if 'deconv_from_layer' in dir(caffe.classifier.Classifier):
print "debug[caffe]: caffe version provides all required functions. Good!"
else:
print "warning: Function 'deconv_from_layer' is missing in caffe. Probably you are using a wrong caffe version. Some functions will not be available!'"
def _init_data_mean(self):
"""Initialize the data mean.
The data mean values are loaded from a separate file. Caffe can
use these values to preprocess the input data.
"""
if isinstance(self.settings.caffevis_data_mean, basestring):
# If the mean is given as a filename, load the file
|
elif self.settings.caffevis_data_mean is None:
data_mean = None
else:
# The mean has been given as a value or a tuple of values
data_mean = np.array(self.settings.caffevis_data_mean)
# Promote to shape C,1,1
while len(data_mean.shape) < 3:
data_mean = np.expand_dims(data_mean, -1)
#if not isinstance(data_mean, tuple):
# # If given as int/float: promote to tuple
# data_mean = tuple(data_mean)
if data_mean is not None:
self.net.transformer.set_mean(self.net.inputs[0], data_mean)
def _check_force_backward_true(self):
"""Check the force_backward flag is set in the caffe model definition.
Checks whether the given file contains a line with the
following text, ignoring whitespace:
force_backward: true
If this is not the case, a warning text will be issued.
ULF: This method should not be called from outside, but it is still
called from "optimize_image.py"
"""
prototxt_file = self.settings.caffevis_deploy_prototxt
found = False
with open(prototxt_file, 'r') as ff:
for line in ff:
fields = line.strip().split()
if len(fields) == 2 and fields[0] == 'force_backward:' and fields[1] == 'true':
found = True
break
if not found:
print '\n\nWARNING: the specified prototxt'
print '"%s"' % prototxt_file
print 'does not contain the line "force_backward: true". This may result in backprop'
print 'and deconv producing all zeros at the input layer. You may want to add this line'
print 'to your prototxt file before continuing to force backprop to compute derivatives'
print 'at the data layer as well.\n\n'
def get_layer_ids(self, include_input = True):
"""Get the layer identifiers of the network layers.
Arguments:
include_input:
a flag indicating if the input layer should be
included in the result.
Result: A list of identifiers (strings in the case of Caffe).
Notice that the type of these identifiers may depend on
the underlying network library. However, the identifiers
returned by this method should be suitable as arguments
for other methods in this class that expect a layer_id.
"""
layers = self.net.blobs.keys()
if not include_input:
layers = layers[1:]
return layers
def get_input_id(self):
"""Get the identifier for the input layer.
Result: The type of this identifier depends on the underlying
network library. However, the identifier returned by this
method is suitable as argument for other methods in this
class that expect a layer_id.
"""
return self.net.inputs[0]
def get_layer_shape(self,layer_id):
"""Get the shape of the given layer.
Returns a tuple describing the shape of the layer:
Fully connected layer: 1-tuple, the number of neurons,
example: (1000, )
Convolutional layer: n_filter x n_rows x n_columns,
example: (96, 55, 55)
"""
return self.net.blobs[layer_id].data.shape[1:] # Chop off batch size
def get_layer_data(self, layer_id, unit = None, flatten = False):
"""Provide activation data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer activation values.
"""
data = self.net.blobs[layer_id].data
return data.flatten() if flatten else (data[0] if unit is None else data[0,unit])
def get_layer_diff(self, layer_id, flatten = False):
"""Provide diff data for a given layer.
Result:
An array of appropriate shape (see get_layer_shape()) containing
the layer diff values.
ULF[todo]: find out what these diff-values actually are!
"""
diff = self.net.blobs[layer_id].diff
return diff.flatten() if flatten else diff[0]
def preproc_forward(self, img, data_hw):
"""Prepare image data for processing and do forward propagation.
Uses caffe.transformer.preprocess and caffe.net.forward
Arguments:
img:
data_hw
ULF[todo]: find out what this exactly does!
ULF: called by deepvis/proc_thread.py
ULF: app_helper.py: provides a function with similar name
"""
appropriate_shape = data_hw + (3,)
assert img.shape == appropriate_shape, 'img is wrong size (got %s but expected %s)' % (img.shape, appropriate_shape)
#resized = caffe.io.resize_image(img, self.net.image_dims) # e.g. (227, 227, 3)
data_blob = self.net.transformer.preprocess('data', img) # e.g. (3, 227, 227), mean subtracted and scaled to [0,255]
data_blob = data_blob[np.newaxis,:,:,:] # e.g. (1, 3, 227, 227)
output = self.net.forward(data=data_blob)
return output
def backward_from_layer(self, layer_id, diffs):
'''Compute backward gradients from layer.
Notice: this method relies on the method backward_from_layer(),
which is not part of the standard Caffe. You need the
deconv-deep-vis-toolbox branch of caffe to run this function.
ULF[fixme]: currently the interface freezes when the function
is not available - look for a better way to deal with this problem.
'''
#print '**** Doing backprop with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max())
try:
#ULF[old]:
self.net.backward_from_layer(layer_id, diffs, zero_higher = True)
except AttributeError:
print 'ERROR: required bindings (backward_from_layer) not found! Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox'
raise
def deconv_from_layer(self, layer_id, diffs):
'''Compute deconvolution from layer.
Notice: this method relies on the method deconv_from_layer(),
which is not part of the standard Caffe. You need the
deconv-deep-vis-toolbox branch of caffe to run this function.
ULF[fixme]: currently the interface freezes when the function
is not available - look for a better way to deal with this problem.
'''
#print '**** Doing deconv with %s diffs in [%s,%s]' % (backprop_layer, diffs.min(), diffs.max())
try:
self.net.deconv_from_layer(layer_id, diffs, zero_higher = True)
except AttributeError:
print 'ERROR: required bindings (deconv_from_layer) not found! Try using the deconv-deep-vis-toolbox branch as described here: https://github.com/yosinski/deep-visualization-toolbox'
raise
| try:
data_mean = np.load(self.settings.caffevis_data_mean)
except IOError:
print '\n\nCould not load mean file:', self.settings.caffevis_data_mean
print 'Ensure that the values in settings.py point to a valid model weights file, network'
print 'definition prototxt, and mean. To fetch a default model and mean file, use:\n'
print '$ cd models/caffenet-yos/'
print '$ ./fetch.sh\n\n'
raise
input_shape = self.get_input_data_shape() # e.g. 227x227
# Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256)
excess_h = data_mean.shape[1] - input_shape[0]
excess_w = data_mean.shape[2] - input_shape[1]
assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(input_shape)
data_mean = data_mean[:, (excess_h/2):(excess_h/2+input_shape[0]),
(excess_w/2):(excess_w/2+input_shape[1])] | conditional_block |
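The cropping block above is easy to verify in isolation. A worked numpy example, assuming a 256x256 mean and a 227x227 input as in the comments; both sizes are illustrative:

import numpy as np

data_mean = np.zeros((3, 256, 256))  # stand-in for np.load(mean_file)
input_shape = (227, 227)             # what get_input_data_shape() would report

excess_h = data_mean.shape[1] - input_shape[0]  # 29
excess_w = data_mean.shape[2] - input_shape[1]  # 29
cropped = data_mean[:, excess_h // 2:excess_h // 2 + input_shape[0],
                       excess_w // 2:excess_w // 2 + input_shape[1]]
assert cropped.shape == (3, 227, 227)           # rows/cols 14..240 survive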
simulator.ts | /// <reference path="../../built/pxtsim.d.ts" />
/// <reference path="../../localtypings/pxtparts.d.ts" />
import * as core from "./core";
import U = pxt.U
interface SimulatorConfig {
highlightStatement(stmt: pxtc.LocationInfo): void;
restartSimulator(): void;
editor: string;
}
export const FAST_TRACE_INTERVAL = 100;
export const SLOW_TRACE_INTERVAL = 500;
export var driver: pxsim.SimulatorDriver;
let nextFrameId: number = 0;
const themes = ["blue", "red", "green", "yellow"];
let config: SimulatorConfig;
let lastCompileResult: pxtc.CompileResult;
let tutorialMode: boolean;
let displayedModals: pxt.Map<boolean> = {};
export let simTranslations: pxt.Map<string>;
let dirty = false;
let $debugger: JQuery;
export function setTranslations(translations: pxt.Map<string>) {
simTranslations = translations;
}
export function init(root: HTMLElement, cfg: SimulatorConfig) {
$(root).html(
`
<div id="simulators" class='simulator'>
</div>
<div id="debugger" class="ui item landscape only">
</div>
`
)
$debugger = $('#debugger')
let options: pxsim.SimulatorDriverOptions = {
revealElement: (el) => {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationEnter || 'fly right in',
duration: '0.5s',
})
},
removeElement: (el, completeHandler) => {
if (pxt.appTarget.simulator.headless) {
$(el).addClass('simHeadless');
completeHandler();
}
else {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationExit || 'fly right out',
duration: '0.5s',
onComplete: function () {
if (completeHandler) completeHandler();
$(el).remove();
}
}).on('error', () => {
// Problem with animation, still complete
if (completeHandler) completeHandler();
$(el).remove();
})
}
},
unhideElement: (el) => {
$(el).removeClass("simHeadless");
},
onDebuggerBreakpoint: function (brk) {
updateDebuggerButtons(brk)
let brkInfo = lastCompileResult.breakpoints[brk.breakpointId]
if (config) config.highlightStatement(brkInfo)
if (brk.exceptionMessage) {
core.errorNotification(lf("Program Error: {0}", brk.exceptionMessage))
}
postSimEditorEvent("stopped", brk.exceptionMessage);
},
onTraceMessage: function (msg) {
let brkInfo = lastCompileResult.breakpoints[msg.breakpointId]
if (config) config.highlightStatement(brkInfo)
},
onDebuggerWarning: function (wrn) {
for (let id of wrn.breakpointIds) {
let brkInfo = lastCompileResult.breakpoints[id]
if (brkInfo) {
if (!U.startsWith("pxt_modules/", brkInfo.fileName)) {
if (config) config.highlightStatement(brkInfo)
break
}
}
}
},
onDebuggerResume: function () {
postSimEditorEvent("resumed");
if (config) config.highlightStatement(null)
updateDebuggerButtons()
},
onStateChanged: function (state) {
if (state === pxsim.SimulatorState.Stopped) {
postSimEditorEvent("stopped");
}
updateDebuggerButtons()
},
onSimulatorCommand: (msg: pxsim.SimulatorCommandMessage): void => {
switch (msg.command) {
case "restart":
cfg.restartSimulator();
break;
case "modal":
stop();
if (!pxt.shell.isSandboxMode() && (!msg.displayOnceId || !displayedModals[msg.displayOnceId])) {
const modalOpts: core.ConfirmOptions = {
header: msg.header,
body: msg.body,
size: "large",
copyable: msg.copyable,
disagreeLbl: lf("Close"),
modalContext: msg.modalContext
};
const trustedSimUrls = pxt.appTarget.simulator.trustedUrls;
const hasTrustedLink = msg.linkButtonHref && trustedSimUrls && trustedSimUrls.indexOf(msg.linkButtonHref) !== -1;
if (hasTrustedLink) {
modalOpts.agreeLbl = msg.linkButtonLabel;
} else {
modalOpts.hideAgree = true;
}
displayedModals[msg.displayOnceId] = true;
core.confirmAsync(modalOpts)
.then((selection) => {
if (hasTrustedLink && selection == 1) {
window.open(msg.linkButtonHref, '_blank');
}
})
.done();
}
break;
}
},
onTopLevelCodeEnd: () => {
postSimEditorEvent("toplevelfinished");
},
stoppedClass: getStoppedClass()
};
driver = new pxsim.SimulatorDriver($('#simulators')[0], options);
config = cfg
updateDebuggerButtons();
}
function postSimEditorEvent(subtype: string, exception?: string) {
if (pxt.appTarget.appTheme.allowParentController && pxt.BrowserUtils.isIFrame()) {
pxt.editor.postHostMessageAsync({
type: "pxthost",
action: "simevent",
subtype: subtype as any,
exception: exception
} as pxt.editor.EditorSimulatorStoppedEvent);
}
}
export function setState(editor: string, tutMode?: boolean) {
if (config && config.editor != editor) {
config.editor = editor;
config.highlightStatement(null)
updateDebuggerButtons();
}
tutorialMode = tutMode;
}
export function makeDirty() { // running outdated code
pxsim.U.addClass(driver.container, getInvalidatedClass());
dirty = true;
}
export function isDirty(): boolean { // in need of a restart?
return dirty;
}
export function run(pkg: pxt.MainPackage, debug: boolean, res: pxtc.CompileResult, mute?: boolean, highContrast?: boolean) {
makeClean();
const js = res.outfiles[pxtc.BINARY_JS]
const boardDefinition = pxt.appTarget.simulator.boardDefinition;
const parts = pxtc.computeUsedParts(res, true);
const fnArgs = res.usedArguments;
lastCompileResult = res;
const opts: pxsim.SimulatorRunOptions = {
boardDefinition: boardDefinition,
mute,
parts,
debug,
fnArgs,
highContrast,
aspectRatio: parts.length ? pxt.appTarget.simulator.partsAspectRatio : pxt.appTarget.simulator.aspectRatio,
partDefinitions: pkg.computePartDefinitions(parts),
cdnUrl: pxt.webConfig.commitCdnUrl,
localizedStrings: simTranslations,
refCountingDebug: pxt.options.debug
}
postSimEditorEvent("started");
driver.run(js, opts);
}
export function mute(mute: boolean) {
driver.mute(mute);
$debugger.empty();
}
export function stop(unload?: boolean) {
if (!driver) return;
makeClean();
driver.stop(unload);
$debugger.empty();
}
export function hide(completeHandler?: () => void) {
if (!pxt.appTarget.simulator.headless) {
makeDirty();
}
driver.hide(completeHandler);
$debugger.empty();
}
export function unhide() |
export function setTraceInterval(intervalMs: number) {
driver.setTraceInterval(intervalMs);
}
export function proxy(message: pxsim.SimulatorCustomMessage) {
if (!driver) return;
driver.postMessage(message);
$debugger.empty();
}
function makeClean() {
pxsim.U.removeClass(driver.container, getInvalidatedClass());
dirty = false;
}
function getInvalidatedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.invalidatedClass) {
return pxt.appTarget.simulator.invalidatedClass;
}
return "sepia";
}
function getStoppedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.stoppedClass) {
return pxt.appTarget.simulator.stoppedClass;
}
return undefined;
}
function updateDebuggerButtons(brk: pxsim.DebuggerBreakpointMessage = null) {
function btn(icon: string, name: string, label: string, click: () => void) {
let b = $(`<button class="ui mini button teal" title="${Util.htmlEscape(label)}"></button>`)
if (icon) b.addClass("icon").append(`<i class="${icon} icon"></i>`)
if (name) b.append(Util.htmlEscape(name));
return b.click(click)
}
$debugger.empty();
if (!driver.runOptions.debug) return;
let advanced = config.editor == 'tsprj';
if (driver.state == pxsim.SimulatorState.Paused) {
let $resume = btn("play", lf("Resume"), lf("Resume execution"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Resume));
let $stepOver = btn("xicon stepover", lf("Step over"), lf("Step over next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepOver));
let $stepInto = btn("xicon stepinto", lf("Step into"), lf("Step into next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepInto));
$debugger.append($resume).append($stepOver)
if (advanced)
$debugger.append($stepInto);
} else if (driver.state == pxsim.SimulatorState.Running) {
let $pause = btn("pause", lf("Pause"), lf("Pause execution on the next instruction"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Pause));
$debugger.append($pause);
}
if (!brk || !advanced) return
function vars(hd: string, frame: pxsim.Variables) {
let frameView = $(`<div><h4>${U.htmlEscape(hd)}</h4></div>`)
for (let k of Object.keys(frame)) {
let v = frame[k]
let sv = ""
switch (typeof (v)) {
case "number": sv = v + ""; break;
case "string": sv = JSON.stringify(v); break;
case "object":
if (v == null) sv = "null";
else if (v.id !== undefined) sv = "(object)"
else if (v.text) sv = v.text;
else sv = "(unknown)"
break;
default: U.oops()
}
let n = k.replace(/___\d+$/, "")
frameView.append(`<div>${U.htmlEscape(n)}: ${U.htmlEscape(sv)}</div>`)
}
return frameView
}
let dbgView = $(`<div class="ui segment debuggerview"></div>`)
dbgView.append(vars(U.lf("globals"), brk.globals))
brk.stackframes.forEach(sf => {
let info = sf.funcInfo as pxtc.FunctionLocationInfo
dbgView.append(vars(info.functionName, sf.locals))
})
$('#debugger').append(dbgView)
}
| {
driver.unhide();
} | identifier_body |
simulator.ts | /// <reference path="../../built/pxtsim.d.ts" />
/// <reference path="../../localtypings/pxtparts.d.ts" />
import * as core from "./core";
import U = pxt.U
interface SimulatorConfig {
highlightStatement(stmt: pxtc.LocationInfo): void;
restartSimulator(): void;
editor: string;
}
export const FAST_TRACE_INTERVAL = 100;
export const SLOW_TRACE_INTERVAL = 500;
export var driver: pxsim.SimulatorDriver;
let nextFrameId: number = 0;
const themes = ["blue", "red", "green", "yellow"];
let config: SimulatorConfig;
let lastCompileResult: pxtc.CompileResult;
let tutorialMode: boolean;
let displayedModals: pxt.Map<boolean> = {};
export let simTranslations: pxt.Map<string>;
let dirty = false;
let $debugger: JQuery;
export function setTranslations(translations: pxt.Map<string>) {
simTranslations = translations;
}
export function init(root: HTMLElement, cfg: SimulatorConfig) {
$(root).html(
`
<div id="simulators" class='simulator'>
</div>
<div id="debugger" class="ui item landscape only">
</div>
`
)
$debugger = $('#debugger')
let options: pxsim.SimulatorDriverOptions = {
revealElement: (el) => {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationEnter || 'fly right in',
duration: '0.5s',
})
},
removeElement: (el, completeHandler) => {
if (pxt.appTarget.simulator.headless) {
$(el).addClass('simHeadless');
completeHandler();
}
else {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationExit || 'fly right out',
duration: '0.5s',
onComplete: function () {
if (completeHandler) completeHandler();
$(el).remove();
}
}).on('error', () => {
// Problem with animation, still complete
if (completeHandler) completeHandler();
$(el).remove();
})
}
},
unhideElement: (el) => {
$(el).removeClass("simHeadless");
},
onDebuggerBreakpoint: function (brk) {
updateDebuggerButtons(brk)
let brkInfo = lastCompileResult.breakpoints[brk.breakpointId]
if (config) config.highlightStatement(brkInfo)
if (brk.exceptionMessage) {
core.errorNotification(lf("Program Error: {0}", brk.exceptionMessage))
}
postSimEditorEvent("stopped", brk.exceptionMessage);
},
onTraceMessage: function (msg) {
let brkInfo = lastCompileResult.breakpoints[msg.breakpointId]
if (config) config.highlightStatement(brkInfo)
},
onDebuggerWarning: function (wrn) {
for (let id of wrn.breakpointIds) {
let brkInfo = lastCompileResult.breakpoints[id]
if (brkInfo) {
if (!U.startsWith("pxt_modules/", brkInfo.fileName)) {
if (config) config.highlightStatement(brkInfo)
break
}
}
}
},
onDebuggerResume: function () {
postSimEditorEvent("resumed");
if (config) config.highlightStatement(null)
updateDebuggerButtons()
},
onStateChanged: function (state) {
if (state === pxsim.SimulatorState.Stopped) {
postSimEditorEvent("stopped");
}
updateDebuggerButtons()
},
onSimulatorCommand: (msg: pxsim.SimulatorCommandMessage): void => {
switch (msg.command) {
case "restart":
cfg.restartSimulator();
break;
case "modal":
stop();
if (!pxt.shell.isSandboxMode() && (!msg.displayOnceId || !displayedModals[msg.displayOnceId])) {
const modalOpts: core.ConfirmOptions = {
header: msg.header,
body: msg.body,
size: "large",
copyable: msg.copyable,
disagreeLbl: lf("Close"),
modalContext: msg.modalContext
};
const trustedSimUrls = pxt.appTarget.simulator.trustedUrls;
const hasTrustedLink = msg.linkButtonHref && trustedSimUrls && trustedSimUrls.indexOf(msg.linkButtonHref) !== -1;
if (hasTrustedLink) {
modalOpts.agreeLbl = msg.linkButtonLabel;
} else {
modalOpts.hideAgree = true;
}
displayedModals[msg.displayOnceId] = true;
core.confirmAsync(modalOpts)
.then((selection) => {
if (hasTrustedLink && selection == 1) {
window.open(msg.linkButtonHref, '_blank');
}
})
.done();
}
break;
}
},
onTopLevelCodeEnd: () => {
postSimEditorEvent("toplevelfinished");
},
stoppedClass: getStoppedClass()
};
driver = new pxsim.SimulatorDriver($('#simulators')[0], options);
config = cfg
updateDebuggerButtons();
}
function postSimEditorEvent(subtype: string, exception?: string) {
if (pxt.appTarget.appTheme.allowParentController && pxt.BrowserUtils.isIFrame()) {
pxt.editor.postHostMessageAsync({
type: "pxthost",
action: "simevent",
subtype: subtype as any,
exception: exception
} as pxt.editor.EditorSimulatorStoppedEvent);
}
}
export function setState(editor: string, tutMode?: boolean) {
if (config && config.editor != editor) {
config.editor = editor;
config.highlightStatement(null)
updateDebuggerButtons();
}
tutorialMode = tutMode;
}
export function makeDirty() { // running outdated code
pxsim.U.addClass(driver.container, getInvalidatedClass());
dirty = true;
} |
export function isDirty(): boolean { // in need of a restart?
return dirty;
}
export function run(pkg: pxt.MainPackage, debug: boolean, res: pxtc.CompileResult, mute?: boolean, highContrast?: boolean) {
makeClean();
const js = res.outfiles[pxtc.BINARY_JS]
const boardDefinition = pxt.appTarget.simulator.boardDefinition;
const parts = pxtc.computeUsedParts(res, true);
const fnArgs = res.usedArguments;
lastCompileResult = res;
const opts: pxsim.SimulatorRunOptions = {
boardDefinition: boardDefinition,
mute,
parts,
debug,
fnArgs,
highContrast,
aspectRatio: parts.length ? pxt.appTarget.simulator.partsAspectRatio : pxt.appTarget.simulator.aspectRatio,
partDefinitions: pkg.computePartDefinitions(parts),
cdnUrl: pxt.webConfig.commitCdnUrl,
localizedStrings: simTranslations,
refCountingDebug: pxt.options.debug
}
postSimEditorEvent("started");
driver.run(js, opts);
}
export function mute(mute: boolean) {
driver.mute(mute);
$debugger.empty();
}
export function stop(unload?: boolean) {
if (!driver) return;
makeClean();
driver.stop(unload);
$debugger.empty();
}
export function hide(completeHandler?: () => void) {
if (!pxt.appTarget.simulator.headless) {
makeDirty();
}
driver.hide(completeHandler);
$debugger.empty();
}
export function unhide() {
driver.unhide();
}
export function setTraceInterval(intervalMs: number) {
driver.setTraceInterval(intervalMs);
}
export function proxy(message: pxsim.SimulatorCustomMessage) {
if (!driver) return;
driver.postMessage(message);
$debugger.empty();
}
function makeClean() {
pxsim.U.removeClass(driver.container, getInvalidatedClass());
dirty = false;
}
function getInvalidatedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.invalidatedClass) {
return pxt.appTarget.simulator.invalidatedClass;
}
return "sepia";
}
function getStoppedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.stoppedClass) {
return pxt.appTarget.simulator.stoppedClass;
}
return undefined;
}
function updateDebuggerButtons(brk: pxsim.DebuggerBreakpointMessage = null) {
function btn(icon: string, name: string, label: string, click: () => void) {
let b = $(`<button class="ui mini button teal" title="${Util.htmlEscape(label)}"></button>`)
if (icon) b.addClass("icon").append(`<i class="${icon} icon"></i>`)
if (name) b.append(Util.htmlEscape(name));
return b.click(click)
}
$debugger.empty();
if (!driver.runOptions.debug) return;
let advanced = config.editor == 'tsprj';
if (driver.state == pxsim.SimulatorState.Paused) {
let $resume = btn("play", lf("Resume"), lf("Resume execution"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Resume));
let $stepOver = btn("xicon stepover", lf("Step over"), lf("Step over next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepOver));
let $stepInto = btn("xicon stepinto", lf("Step into"), lf("Step into next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepInto));
$debugger.append($resume).append($stepOver)
if (advanced)
$debugger.append($stepInto);
} else if (driver.state == pxsim.SimulatorState.Running) {
let $pause = btn("pause", lf("Pause"), lf("Pause execution on the next instruction"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Pause));
$debugger.append($pause);
}
if (!brk || !advanced) return
function vars(hd: string, frame: pxsim.Variables) {
let frameView = $(`<div><h4>${U.htmlEscape(hd)}</h4></div>`)
for (let k of Object.keys(frame)) {
let v = frame[k]
let sv = ""
switch (typeof (v)) {
case "number": sv = v + ""; break;
case "string": sv = JSON.stringify(v); break;
case "object":
if (v == null) sv = "null";
else if (v.id !== undefined) sv = "(object)"
else if (v.text) sv = v.text;
else sv = "(unknown)"
break;
default: U.oops()
}
let n = k.replace(/___\d+$/, "")
frameView.append(`<div>${U.htmlEscape(n)}: ${U.htmlEscape(sv)}</div>`)
}
return frameView
}
let dbgView = $(`<div class="ui segment debuggerview"></div>`)
dbgView.append(vars(U.lf("globals"), brk.globals))
brk.stackframes.forEach(sf => {
let info = sf.funcInfo as pxtc.FunctionLocationInfo
dbgView.append(vars(info.functionName, sf.locals))
})
$('#debugger').append(dbgView)
} | random_line_split |
|
simulator.ts | /// <reference path="../../built/pxtsim.d.ts" />
/// <reference path="../../localtypings/pxtparts.d.ts" />
import * as core from "./core";
import U = pxt.U
interface SimulatorConfig {
highlightStatement(stmt: pxtc.LocationInfo): void;
restartSimulator(): void;
editor: string;
}
export const FAST_TRACE_INTERVAL = 100;
export const SLOW_TRACE_INTERVAL = 500;
export var driver: pxsim.SimulatorDriver;
let nextFrameId: number = 0;
const themes = ["blue", "red", "green", "yellow"];
let config: SimulatorConfig;
let lastCompileResult: pxtc.CompileResult;
let tutorialMode: boolean;
let displayedModals: pxt.Map<boolean> = {};
export let simTranslations: pxt.Map<string>;
let dirty = false;
let $debugger: JQuery;
export function setTranslations(translations: pxt.Map<string>) {
simTranslations = translations;
}
export function init(root: HTMLElement, cfg: SimulatorConfig) {
$(root).html(
`
<div id="simulators" class='simulator'>
</div>
<div id="debugger" class="ui item landscape only">
</div>
`
)
$debugger = $('#debugger')
let options: pxsim.SimulatorDriverOptions = {
revealElement: (el) => {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationEnter || 'fly right in',
duration: '0.5s',
})
},
removeElement: (el, completeHandler) => {
if (pxt.appTarget.simulator.headless) {
$(el).addClass('simHeadless');
completeHandler();
}
else {
($(el) as any).transition({
animation: pxt.appTarget.appTheme.simAnimationExit || 'fly right out',
duration: '0.5s',
onComplete: function () {
if (completeHandler) completeHandler();
$(el).remove();
}
}).on('error', () => {
// Problem with animation, still complete
if (completeHandler) completeHandler();
$(el).remove();
})
}
},
unhideElement: (el) => {
$(el).removeClass("simHeadless");
},
onDebuggerBreakpoint: function (brk) {
updateDebuggerButtons(brk)
let brkInfo = lastCompileResult.breakpoints[brk.breakpointId]
if (config) config.highlightStatement(brkInfo)
if (brk.exceptionMessage) {
core.errorNotification(lf("Program Error: {0}", brk.exceptionMessage))
}
postSimEditorEvent("stopped", brk.exceptionMessage);
},
onTraceMessage: function (msg) {
let brkInfo = lastCompileResult.breakpoints[msg.breakpointId]
if (config) config.highlightStatement(brkInfo)
},
onDebuggerWarning: function (wrn) {
for (let id of wrn.breakpointIds) {
let brkInfo = lastCompileResult.breakpoints[id]
if (brkInfo) {
if (!U.startsWith("pxt_modules/", brkInfo.fileName)) {
if (config) config.highlightStatement(brkInfo)
break
}
}
}
},
onDebuggerResume: function () {
postSimEditorEvent("resumed");
if (config) config.highlightStatement(null)
updateDebuggerButtons()
},
onStateChanged: function (state) {
if (state === pxsim.SimulatorState.Stopped) {
postSimEditorEvent("stopped");
}
updateDebuggerButtons()
},
onSimulatorCommand: (msg: pxsim.SimulatorCommandMessage): void => {
switch (msg.command) {
case "restart":
cfg.restartSimulator();
break;
case "modal":
stop();
if (!pxt.shell.isSandboxMode() && (!msg.displayOnceId || !displayedModals[msg.displayOnceId])) {
const modalOpts: core.ConfirmOptions = {
header: msg.header,
body: msg.body,
size: "large",
copyable: msg.copyable,
disagreeLbl: lf("Close"),
modalContext: msg.modalContext
};
const trustedSimUrls = pxt.appTarget.simulator.trustedUrls;
const hasTrustedLink = msg.linkButtonHref && trustedSimUrls && trustedSimUrls.indexOf(msg.linkButtonHref) !== -1;
if (hasTrustedLink) {
modalOpts.agreeLbl = msg.linkButtonLabel;
} else {
modalOpts.hideAgree = true;
}
displayedModals[msg.displayOnceId] = true;
core.confirmAsync(modalOpts)
.then((selection) => {
if (hasTrustedLink && selection == 1) {
window.open(msg.linkButtonHref, '_blank');
}
})
.done();
}
break;
}
},
onTopLevelCodeEnd: () => {
postSimEditorEvent("toplevelfinished");
},
stoppedClass: getStoppedClass()
};
driver = new pxsim.SimulatorDriver($('#simulators')[0], options);
config = cfg
updateDebuggerButtons();
}
function postSimEditorEvent(subtype: string, exception?: string) {
if (pxt.appTarget.appTheme.allowParentController && pxt.BrowserUtils.isIFrame()) {
pxt.editor.postHostMessageAsync({
type: "pxthost",
action: "simevent",
subtype: subtype as any,
exception: exception
} as pxt.editor.EditorSimulatorStoppedEvent);
}
}
export function setState(editor: string, tutMode?: boolean) {
if (config && config.editor != editor) {
config.editor = editor;
config.highlightStatement(null)
updateDebuggerButtons();
}
tutorialMode = tutMode;
}
export function makeDirty() { // running outdated code
pxsim.U.addClass(driver.container, getInvalidatedClass());
dirty = true;
}
export function isDirty(): boolean { // in need of a restart?
return dirty;
}
export function run(pkg: pxt.MainPackage, debug: boolean, res: pxtc.CompileResult, mute?: boolean, highContrast?: boolean) {
makeClean();
const js = res.outfiles[pxtc.BINARY_JS]
const boardDefinition = pxt.appTarget.simulator.boardDefinition;
const parts = pxtc.computeUsedParts(res, true);
const fnArgs = res.usedArguments;
lastCompileResult = res;
const opts: pxsim.SimulatorRunOptions = {
boardDefinition: boardDefinition,
mute,
parts,
debug,
fnArgs,
highContrast,
aspectRatio: parts.length ? pxt.appTarget.simulator.partsAspectRatio : pxt.appTarget.simulator.aspectRatio,
partDefinitions: pkg.computePartDefinitions(parts),
cdnUrl: pxt.webConfig.commitCdnUrl,
localizedStrings: simTranslations,
refCountingDebug: pxt.options.debug
}
postSimEditorEvent("started");
driver.run(js, opts);
}
export function mute(mute: boolean) {
driver.mute(mute);
$debugger.empty();
}
export function stop(unload?: boolean) {
if (!driver) return;
makeClean();
driver.stop(unload);
$debugger.empty();
}
export function hide(completeHandler?: () => void) {
if (!pxt.appTarget.simulator.headless) {
makeDirty();
}
driver.hide(completeHandler);
$debugger.empty();
}
export function unhide() {
driver.unhide();
}
export function setTraceInterval(intervalMs: number) {
driver.setTraceInterval(intervalMs);
}
export function proxy(message: pxsim.SimulatorCustomMessage) {
if (!driver) return;
driver.postMessage(message);
$debugger.empty();
}
function makeClean() {
pxsim.U.removeClass(driver.container, getInvalidatedClass());
dirty = false;
}
function getInvalidatedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.invalidatedClass) {
return pxt.appTarget.simulator.invalidatedClass;
}
return "sepia";
}
function getStoppedClass() {
if (pxt.appTarget.simulator && pxt.appTarget.simulator.stoppedClass) {
return pxt.appTarget.simulator.stoppedClass;
}
return undefined;
}
function updateDebuggerButtons(brk: pxsim.DebuggerBreakpointMessage = null) {
function btn(icon: string, name: string, label: string, click: () => void) {
let b = $(`<button class="ui mini button teal" title="${Util.htmlEscape(label)}"></button>`)
if (icon) b.addClass("icon").append(`<i class="${icon} icon"></i>`)
if (name) b.append(Util.htmlEscape(name));
return b.click(click)
}
$debugger.empty();
if (!driver.runOptions.debug) return;
let advanced = config.editor == 'tsprj';
if (driver.state == pxsim.SimulatorState.Paused) {
let $resume = btn("play", lf("Resume"), lf("Resume execution"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Resume));
let $stepOver = btn("xicon stepover", lf("Step over"), lf("Step over next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepOver));
let $stepInto = btn("xicon stepinto", lf("Step into"), lf("Step into next function call"), () => driver.resume(pxsim.SimulatorDebuggerCommand.StepInto));
$debugger.append($resume).append($stepOver)
if (advanced)
$debugger.append($stepInto);
} else if (driver.state == pxsim.SimulatorState.Running) {
let $pause = btn("pause", lf("Pause"), lf("Pause execution on the next instruction"), () => driver.resume(pxsim.SimulatorDebuggerCommand.Pause));
$debugger.append($pause);
}
if (!brk || !advanced) return
function vars(hd: string, frame: pxsim.Variables) {
let frameView = $(`<div><h4>${U.htmlEscape(hd)}</h4></div>`)
for (let k of Object.keys(frame)) {
let v = frame[k]
let sv = ""
switch (typeof (v)) {
case "number": sv = v + ""; break;
case "string": sv = JSON.stringify(v); break;
case "object":
if (v == null) sv = "null";
else if (v.id !== undefined) sv = "(object)"
else if (v.text) sv = v.text;
else sv = "(unknown)"
break;
default: U.oops()
}
let n = k.replace(/___\d+$/, "")
frameView.append(`<div>${U.htmlEscape(n)}: ${U.htmlEscape(sv)}</div>`)
}
return frameView
}
let dbgView = $(`<div class="ui segment debuggerview"></div>`)
dbgView.append(vars(U.lf("globals"), brk.globals))
brk.stackframes.forEach(sf => {
let info = sf.funcInfo as pxtc.FunctionLocationInfo
dbgView.append(vars(info.functionName, sf.locals))
})
$('#debugger').append(dbgView)
}
main.rs | //! A simple demonstration of how to construct and use Canvasses by splitting up the window.
#[macro_use] extern crate conrod;
#[macro_use] extern crate serde_derive;
extern crate chrono;
extern crate serde;
mod support;
fn main() {
feature::main();
}
mod feature {
const FILENAME : &str = "timetracker.json";
extern crate find_folder;
use std::fs::File;
use std::io::prelude::*;
use std::time::Duration;
use std::thread::sleep;
use conrod;
use conrod::backend::glium::glium;
use conrod::backend::glium::glium::Surface;
extern crate serde_json;
use support;
use chrono::prelude::*;
use chrono;
pub fn main() {
const WIDTH: u32 = 800;
const HEIGHT: u32 = 600;
const SLEEPTIME: Duration = Duration::from_millis(500);
// Build the window.
let mut events_loop = glium::glutin::EventsLoop::new();
let window = glium::glutin::WindowBuilder::new()
.with_title("Timetracker")
.with_dimensions(WIDTH, HEIGHT);
let context = glium::glutin::ContextBuilder::new()
.with_vsync(true)
.with_multisampling(4);
let display = glium::Display::new(window, context, &events_loop).unwrap();
// construct our `Ui`.
let mut ui = conrod::UiBuilder::new([WIDTH as f64, HEIGHT as f64]).build();
// Add a `Font` to the `Ui`'s `font::Map` from file.
let assets = find_folder::Search::KidsThenParents(3, 5).for_folder("assets").unwrap();
let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
ui.fonts.insert_from_file(font_path).unwrap();
// A type used for converting `conrod::render::Primitives` into `Command`s that can be used
// for drawing to the glium `Surface`.
let mut renderer = conrod::backend::glium::Renderer::new(&display).unwrap();
// The image map describing each of our widget->image mappings (in our case, none).
let image_map = conrod::image::Map::<glium::texture::Texture2d>::new();
// Instantiate the generated list of widget identifiers.
let ids = &mut Ids::new(ui.widget_id_generator());
let mut ids_list = Vec::new();
let mut curname = "Enter name".to_string();
// Poll events from the window.
let mut event_loop = support::EventLoop::new();
let mut timerstates : Vec<support::TimerState> = match File::open(FILENAME) {
Ok(mut a) => {
let mut s = String::new();
a.read_to_string(&mut s).expect("Failed to read config");
serde_json::from_str(&s).expect("Failed to convert from JSON")
},
Err(_e) => {
Vec::new()
}
};
'main: loop {
sleep(SLEEPTIME);
// Handle all events.
for event in event_loop.next(&mut events_loop) {
// Use the `winit` backend feature to convert the winit event to a conrod one.
if let Some(event) = conrod::backend::winit::convert_event(event.clone(), &display) {
ui.handle_event(event);
event_loop.needs_update();
}
match event {
glium::glutin::Event::WindowEvent { event, .. } => match event {
// Break from the loop upon `Escape`.
glium::glutin::WindowEvent::Closed |
glium::glutin::WindowEvent::KeyboardInput {
input: glium::glutin::KeyboardInput {
virtual_keycode: Some(glium::glutin::VirtualKeyCode::Escape),
..
},
..
} => {
let mut f = File::create(FILENAME).unwrap();
f.write_all(serde_json::to_string(&timerstates)
.unwrap()
.as_bytes()).unwrap();
break 'main
},
_ => (),
},
_ => (),
}
}
// Instantiate all widgets in the GUI.
set_widgets(ui.set_widgets(), ids, &mut ids_list, &mut timerstates, &mut curname);
// Render the `Ui` and then display it on the screen.
if let Some(primitives) = ui.draw_if_changed() {
renderer.fill(&display, primitives, &image_map);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
renderer.draw(&display, &mut target, &image_map).unwrap();
target.finish().unwrap();
}
}
}
// Draw the Ui.
fn set_widgets(ref mut ui: conrod::UiCell, ids: &mut Ids, ids_list: &mut Vec<ListItem>, timerstates : &mut Vec<support::TimerState> ,text : &mut String) {
use conrod::{color, widget, Colorable, Borderable, Positionable, Labelable, Sizeable, Widget};
let main_color = color::rgb(0.2,0.2,0.3);
let other_color = color::rgb(0.1,0.1,0.2);
let green_color = color::rgb(0.45,1.,0.12);
// Construct our main `Canvas` tree.
widget::Canvas::new().flow_down(&[
(ids.header, widget::Canvas::new().color(main_color).length(70.0)),
(ids.body, widget::Canvas::new().color(color::ORANGE).scroll_kids_vertically()),
]).set(ids.master, ui);
// A scrollbar for the `FOOTER` canvas.
widget::Scrollbar::y_axis(ids.body).auto_hide(false).set(ids.footer_scrollbar, ui);
widget::Text::new("Time tracker")
.color(color::LIGHT_ORANGE)
.font_size(28)
.mid_left_with_margin_on(ids.header,28.)
.left_justify()
.set(ids.title, ui);
// Here we make some canvas `Tabs` in the middle column.
widget::Tabs::new(&[(ids.tab_timers, "Timers")/*,(ids.tab_statistics, "Statistics")*/])
.wh_of(ids.body)
.color(other_color)
.border(0.)
.label_color(color::WHITE)
.middle_of(ids.body)
.set(ids.tabs, ui);
while ids_list.len() < timerstates.len() {
ids_list.push(ListItem::new(ui.widget_id_generator()));
}
let (mut items, _scrollbar) = widget::List::flow_down(timerstates.len())
.item_size(50.0)
.scrollbar_on_top()
.middle_of(ids.tab_timers)
.wh_of(ids.tab_timers)
.set(ids.timer_list, ui);
while let Some(item) = items.next(ui) {
let i = item.i;
let mut label;
let dummy = widget::Canvas::new().w_of(ids.timer_list);
item.set(dummy , ui);
widget::Canvas::new()
.wh_of(item.widget_id)
.middle_of(item.widget_id)
.set(ids_list[i].master, ui);
// Make the label for the toggle button
if timerstates[i].active {
let zero : u32 = 0;
let timesince : DateTime<Utc> = chrono::MIN_DATE.and_hms(zero,zero,zero).checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
let delta = format_time(timesince);
label = format!("{}", delta);
}
else {
label = format!("{}",format_time(timerstates[i].total));
}
for b in widget::Toggle::new(timerstates[i].active)
.h_of(ids_list[i].master)
.padded_w_of(ids_list[i].master,25.)
.label(&label)
.label_color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.mid_left_of(ids_list[i].master)
.color(if timerstates[i].active {green_color}else {other_color})
.set(ids_list[i].toggle, ui) {
if b {
timerstates[i].active_since = Utc::now();
}
else {
timerstates[i].total = timerstates[i].total.checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
}
timerstates[i].active = b;
}
widget::Text::new(timerstates[i].name.as_str())
.color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.font_size(28)
.bottom_left_with_margin_on(ids_list[i].toggle,14.)
.left_justify()
.set(ids_list[i].name, ui);
for _press in widget::Button::new()
.h_of(ids_list[i].master)
.w(50.)
.label("-")
.mid_right_of(ids_list[i].master)
.set(ids_list[i].remove, ui){
timerstates.remove(i);
ids_list.remove(i);
return;
}
}
for edit in widget::TextBox::new(text)
.color(color::WHITE)
.h(50.)
.padded_w_of(ids.tab_timers, 25.0)
.bottom_left_of(ids.tab_timers)
.center_justify()
.set(ids.add_name, ui)
{
use conrod::widget::text_box::Event::{Update,Enter};
match edit {
Update(txt) => {
*text = txt;
},
Enter => {
timerstates.push(support::TimerState::new(text.clone()));
},
}
}
for _press in widget::Button::new()
.h(50.)
.w(50.)
.label("+")
.bottom_right_of(ids.tab_timers)
.set(ids.plus_button, ui){
timerstates.push(support::TimerState::new(text.clone()));
}
}
fn format_time(t : chrono::DateTime<Utc>) -> String {
let dur = t.signed_duration_since(chrono::MIN_DATE.and_hms(0u32,0u32,0u32));
let ret = format!(
"{:02}:{:02}:{:02}",
dur.num_hours(),
dur.num_minutes()%60,
dur.num_seconds()%60
);
ret
}
fn duration_elapsed(t : chrono::DateTime<Utc>) -> chrono::Duration {
chrono::offset::Utc::now().signed_duration_since(t)
}
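// Example (sketch, not in the original file): composing the two helpers above.
// Assuming `start` was captured earlier with Utc::now(), this reproduces the
// HH:MM:SS toggle label used in set_widgets by anchoring the elapsed duration
// at chrono::MIN_DATE:
// let start = Utc::now();
// let label = format_time(chrono::MIN_DATE.and_hms(0u32, 0u32, 0u32)
//     .checked_add_signed(duration_elapsed(start))
//     .unwrap());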
// Generate a unique `WidgetId` for each widget.
widget_ids! {
struct Ids {
master,
header,
body,
timer_list,
plus_button,
add_name,
footer_scrollbar,
tabs,
tab_timers,
tab_statistics,
title,
subtitle,
}
}
widget_ids! {
struct ListItem {
master,
toggle,
remove,
name,
time,
session,
}
}
}
RBM_diagonalisation-V4.py | #!/usr/bin/env python3
"""
Created on Wed Feb 12 10:44:59 2020
@author: German Sinuco
Skeleton modified from
https://www.tensorflow.org/tutorials/customization/custom_training
https://www.tensorflow.org/tutorials/customization/custom_training_walkthrough
Training of an RBM parametrization of the unitary matrix that diagonalises a real,
symmetric Hamiltonian (the script below uses a 4x4 spin-3/2 example; set self.S = 2
for the 2x2 spin-1/2 case):
==================== IMPORTANT NOTE ========================
as V2, but using complex parameters, which I used for the first time in TensorFlow_Floquet.py
============================================================
"""
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
get_ipython().run_line_magic('tensorflow_version', '2.x')
except Exception:
pass
import tensorflow as tf
import numpy as np
import math as m
from model import FloquetHamiltonian
from scipy.stats import unitary_group
class Model(object):
def __init__(self,delta=0.0,Omega=0.1,phase=0.0):
self.spin = True
self.omega_0 = 1.00
# Hamiltonian parameters
self.delta = 0.00
self.omega = self.delta + self.omega_0
self.Omega = 1.00
self.phase = phase # the phase in cos(omega t + phase)
# Initialize the spin value and number of floquet channels
self.hidden_n = 4 # hidden neurons
self.hidden_ph = 4 # hidden neurons
self.S = 4 # spin 3/2. Hilbert space dimension
#self.S = 2 # spin 1/2. Hilbert space dimension
self.N = 0 # Number of positive Floquet manifolds
self.dim = self.S*(2*self.N+1) # Dimension of the extended floquet space
zero_ = tf.constant(0.0,dtype=tf.float64)
one_ = tf.constant(1.0,dtype=tf.float64)
j_ = tf.constant(tf.complex(zero_,one_),dtype=tf.complex128)
#uf_ = tf.random.stateless_uniform([self.dim,self.dim],seed=[2,1],dtype=tf.float32,minval=0.0,maxval=1.0)
#s,u,v = tf.linalg.svd(uf_, full_matrices=True)
#uf_ = u
# Declaring training variables
# Training parameters defining the norm
self.W_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_n = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
# Training parameters defining the phase
self.W_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_ph = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
UF_aux = tf.Variable(np.zeros((self.dim*self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
self.UF = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
# defining the labels of the input layer, which are the components of the UF matrix
self.x = tf.Variable([[0.0,0.0]],dtype=tf.float64)
counter = 0
self.count = counter
for i in range(1,self.dim+1):
for j in range(1,self.dim+1):
if(self.S==4):
y = [[i-2.5,j-2.5]]
if(self.S==2):
y = [[i-1.5,j-1.5]]
self.x = tf.concat([self.x, y], 0)
counter +=1
self.count = counter
# Building the marginal probability of the RBM from the training parameters and the labels of the input layer:
# P(x; b, c, W) = exp(b . x) * prod_{l=1}^{M} 2*cosh(c_l + W_l . x)
# 1. Amplitude (norm)
WX_n = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[0]),1)+self.c_n[0]]
for j in range(1,self.hidden_n):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[j]),1)+self.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_aux = tf.sqrt(tf.abs(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(
self.x[1:counter+1],self.b_n),1))))))
UF_n = tf.reshape(UF_aux,[self.dim,self.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[0]),1)+self.c_ph[0]]
for j in range(1,self.hidden_ph):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[j]),1)+self.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_aux = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_aux),[self.dim,self.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
# 1st of March 2020. Task: REVISE NORMALISATION AND GRAM-SCHMIDT PROCEDURE FOR COMPLEX VECTORS
# 5th of March 2020. Normalisation done by hand: OK. Now I am using the G-S algorithm
# reported in https://stackoverflow.com/questions/48119473/gram-schmidt-orthogonalization-in-pure-tensorflow-performance-for-iterative-sol.
# Task: incorporate a basis rotation in the training loop
UF = normalisation(UF)
UF = tf_gram_schmidt(UF)
self.UF = UF
if self.S == 2:
# spin 1/2
self.Identity = tf.constant([[1.0,0.0],[ 0.0, 1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0,1.0],[ 1.0, 0.0]],dtype = tf.complex128)
self.Sy = j_*0.5*tf.constant([[0.0,1.0],[-1.0, 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[1.0,0.0],[ 0.0,-1.0]],dtype = tf.complex128)
else:
if self.S == 4:
# spin 3/2
self.Identity = tf.constant([[1.0,0.0,0.0,0.0],
[0.0,1.0,0.0,0.0],
[0.0,0.0,1.0,0.0],
[0.0,0.0,0.0,1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0, np.sqrt(3.0),0.0, 0.0],
[np.sqrt(3.0),0.0, np.sqrt(4.0), 0.0],
[0.0, np.sqrt(4.0),0.0, np.sqrt(4.0)],
[0.0, 0.0, np.sqrt(3.0), 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[3.0,0.0, 0.0, 0.0],
[0.0,1.0, 0.0, 0.0],
[0.0,0.0,-1.0, 0.0],
[0.0,0.0, 0.0,-3.0]],dtype = tf.complex128)
self.Szero = tf.zeros([self.S,self.S],dtype=tf.complex128)
#else:
# if (self.S != 4 & self.S !=2):
# for j in range(0,self.S):
# H[j,j]
if self.N == 0:
self.H_TLS = tf.Variable(self.delta*self.Sz+0.5*self.Omega*self.Sx,shape=(self.dim,self.dim),dtype = tf.complex128,trainable = False) # ext. Hamiltonian
else:
self.H_TLS = FloquetHamiltonian(self) # ext. Hamiltonian
self.trainable_variables = [self.W_n,self.b_n,self.c_n,self.W_ph,self.b_ph,self.c_ph]
def getH(self):
return self.H_TLS
def __call__(trainable_variables):
return self.H_TLS
def normalisation(U_):
# U_ (in) original matrix
# (out) matrix with normalised vectors
normaU_ = tf.sqrt(tf.math.reduce_sum(tf.multiply(tf.math.conj(U_),U_,1),axis=0))
U_ = tf.math.truediv(U_,normaU_)
return U_
def tf_gram_schmidt(vectors):
# add batch dimension for matmul
basis = tf.expand_dims(vectors[:,0]/tf.norm(vectors[:,0]),0)
for i in range(1,vectors.shape[0]):#vectors.get_shape()[0].value):
v = vectors[:,i]
# add batch dimension for matmul
v = tf.expand_dims(v,0)
w = v - tf.matmul(tf.matmul(v, basis, adjoint_b=True), basis)
# I assume that my matrix is close to orthogonal
basis = tf.concat([basis, w/tf.norm(w)],axis=0)
return basis
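# Example (sketch, not part of the original script): a quick orthonormality
# check for tf_gram_schmidt. The returned basis stores vectors as rows, so
# basis @ basis^dagger should be close to the identity; the random input used
# here is purely illustrative.
# v = tf.complex(tf.random.normal([4, 4], dtype=tf.float64),
#                tf.random.normal([4, 4], dtype=tf.float64))
# basis = tf_gram_schmidt(v)
# print(tf.abs(basis @ tf.transpose(tf.math.conj(basis))))  # ~ 4x4 identity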
def Unitary_Matrix(model):
UF = tf.Variable(np.zeros((model.dim*model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((model.dim,model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((model.dim,model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
#dim = model.dim
counter = model.count
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
#P(x)(b,c,W) = exp(bji . x) Prod_l=1^M 2 x cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
WX_n = [tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_n[0]),1)+model.c_n[0]]
for j in range(1,model.hidden_n):
y = tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_n[j]),1)+model.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_n = tf.sqrt(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.transpose(tf.exp(tf.reduce_sum(
tf.multiply(model.x[1:counter+1],model.b_n),1)))))
UF_n = tf.reshape(UF_n,[model.dim,model.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_ph[0]),1)+model.c_ph[0]]
for j in range(1,model.hidden_ph):
y = tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_ph[j]),1)+model.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_ph = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.transpose(tf.exp(tf.reduce_sum(
tf.multiply(model.x[1:counter+1],model.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_ph),[model.dim,model.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
UF = tf_gram_schmidt(UF)
#s,u,v = tf.linalg.svd(UF, full_matrices=True)
#UF = u
return UF
def train(model,learning_rate):
with tf.GradientTape() as t:
current_loss = loss(model)
dU = t.gradient(current_loss, model.trainable_variables)
model.UF.assign_sub(learning_rate*dU)
# 3e. Loss function := Use U^dagger H U, sum over the columns, take the difference with the diagonal,
# the loss function is the sum of the squares of these differences.
def loss(model):
# define the loss function explicitly including the training variables: self.W, self.b
# model.UF is a function of self.W,self.b,self.c
#UF = tf.Variable(np.zeros((model.dim*model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF = tf.Variable(np.zeros((model.dim,model.dim),dtype=np.complex64))
a = np.zeros((model.dim,model.dim),dtype=np.float32)
counter = model.count
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
#P(x)(b,c,W) = exp(bji . x) Prod_l=1^M 2 x cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
UF = Unitary_Matrix(model)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@model.H_TLS@UF)
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1,)
residual = tf.math.reduce_sum(tf.abs((U_diag-dotProd)),0)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@UF)
#print(U_)
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1)
residual_unitary = tf.pow(tf.math.reduce_sum(dotProd,0) - model.dim,2.0)
#residual += 1.0*residual_unitary
return residual
# This is the gradient of the loss function. required for keras optimisers
def grad(model):
with tf.GradientTape() as tape:
loss_value = loss(model)
return loss_value, tape.gradient(loss_value, model.trainable_variables)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999,
epsilon=1e-07, amsgrad=False,name='Adam')
#optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)
model = Model()
loss_value = loss(model)
print("Initial UF guess: ", Unitary_Matrix(model))
print("Initial loss value: ",loss_value.numpy())
epochs = range(2048)
for i in epochs:
loss_value, grads = grad(model)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print("Final loss value: ",loss_value.numpy())
print("Final UF matrix:", Unitary_Matrix(model))
UF = Unitary_Matrix(model)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@(model.H_TLS@UF))
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1,)
residual = tf.math.reduce_sum(tf.pow((U_diag-dotProd),2),0)
print(residual)
print(tf.abs(UF))
print(U_)
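# Sanity check (sketch, not part of the original script): compare the learned
# diagonal with a direct eigendecomposition. tf.linalg.eigh returns the
# eigenvalues of the Hermitian H_TLS in ascending order; since U_ above is
# built with tf.abs, the comparison is only up to sign and ordering.
# e, _ = tf.linalg.eigh(model.H_TLS)
# print("exact |eigenvalues|: ", tf.sort(tf.abs(e)).numpy())
# print("learned diagonal:    ", tf.sort(U_diag).numpy())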
RBM_diagonalisation-V4.py | #!/usr/bin/env python3
"""
Created on Wed Feb 12 10:44:59 2020
@author: German Sinuco
Skeleton modified from
https://www.tensorflow.org/tutorials/customization/custom_training
https://www.tensorflow.org/tutorials/customization/custom_training_walkthrough
Training of an RBM parametrization of the unitary matrix that diagonalises the 2x2 real,
and symmetric HAMILTONIAN:
==================== IMPORTANT NOTE ========================
as V2, but using complex parameters, which I used for the first time in TensorFlow_Floquet.py
============================================================
"""
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
get_ipython().run_line_magic('tensorflow_version', '2.x')
except Exception:
pass
import tensorflow as tf
import numpy as np
import math as m
from model import FloquetHamiltonian
from scipy.stats import unitary_group
class Model(object):
def __init__(self,delta=0.0,Omega=0.1,phase=0.0):
self.spin = True
self.omega_0 = 1.00
# Hamiltonian parameters
self.delta = 0.00
self.omega = self.delta + self.omega_0
self.Omega = 1.00
self.phase = phase # the phase in cos(omega t + phase)
# Initialize the spin value and number of floquet channels
self.hidden_n = 4 # hidden neurons
self.hidden_ph = 4 # hidden neurons
self.S = 4 # spin 3.2. Hilbert space dimension
#self.S = 2 # spin 1/2. Hilbert space dimension
self.N = 0 # Number of positive Floquet manifolds
self.dim = self.S*(2*self.N+1) # Dimension of the extended floquet space
zero_ = tf.constant(0.0,dtype=tf.float64)
one_ = tf.constant(1.0,dtype=tf.float64)
j_ = tf.constant(tf.complex(zero_,one_),dtype=tf.complex128)
#uf_ = tf.random.stateless_uniform([self.dim,self.dim],seed=[2,1],dtype=tf.float32,minval=0.0,maxval=1.0)
#s,u,v = tf.linalg.svd(uf_, full_matrices=True)
#uf_ = u
# Declaring training variables
# Training parameters defining the norm
self.W_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_n = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
# Training parameters defining the phase
self.W_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_ph = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
UF_aux = tf.Variable(np.zeros((self.dim*self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
self.UF = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
# defining the labels of the input layer, which are the components of the UF matrix
self.x = tf.Variable([[0.0,0.0]],dtype=tf.float64)
counter = 0
self.count = counter
for i in range(1,self.dim+1):
|
self.count = counter
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
#P(x)(b,c,W) = exp(bji . x) Prod_l=1^M 2 x cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
WX_n = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[0]),1)+self.c_n[0]]
for j in range(1,self.hidden_n):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[j]),1)+self.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_aux = tf.sqrt(tf.abs(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(
self.x[1:counter+1],self.b_n),1))))))
UF_n = tf.reshape(UF_aux,[self.dim,self.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[0]),1)+self.c_ph[0]]
for j in range(1,self.hidden_ph):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[j]),1)+self.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_aux = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_aux),[self.dim,self.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
# 1st of March 2020. Task: REVISE NORMALISATION AND GRAM-SCHMIDT PROCEDURE FOR COMPLEX VECTORS
# 5th of March 2020. Normalisation done by hand: OK. Now I am using the G-S algorithm
# reported in https://stackoverflow.com/questions/48119473/gram-schmidt-orthogonalization-in-pure-tensorflow-performance-for-iterative-sol.
# Task: incorparate a basis rotation in the training loop
UF = normalisation(UF)
UF = tf_gram_schmidt(UF)
self.UF = UF
if self.S == 2:
# spin 1/2
self.Identity = tf.constant([[1.0,0.0],[ 0.0, 1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0,1.0],[ 1.0, 0.0]],dtype = tf.complex128)
self.Sy = j_*0.5*tf.constant([[0.0,1.0],[-1.0, 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[1.0,0.0],[ 0.0,-1.0]],dtype = tf.complex128)
else:
if self.S == 4:
# spin 3/2
self.Identity = tf.constant([[1.0,0.0,0.0,0.0],
[0.0,1.0,0.0,0.0],
[0.0,0.0,1.0,0.0],
[0.0,0.0,0.0,1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0, np.sqrt(3.0),0.0, 0.0],
[np.sqrt(3.0),0.0, np.sqrt(4.0), 0.0],
[0.0, np.sqrt(4.0),0.0, np.sqrt(4.0)],
[0.0, 0.0, np.sqrt(3.0), 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[3.0,0.0, 0.0, 0.0],
[0.0,1.0, 0.0, 0.0],
[0.0,0.0,-1.0, 0.0],
[0.0,0.0, 0.0,-3.0]],dtype = tf.complex128)
self.Szero = tf.zeros([self.S,self.S],dtype=tf.complex128)
#else:
# if (self.S != 4 & self.S !=2):
# for j in range(0,self.S):
# H[j,j]
if self.N == 0:
self.H_TLS = tf.Variable(self.delta*self.Sz+0.5*self.Omega*self.Sx,shape=(self.dim,self.dim),dtype = tf.complex128,trainable = False) # ext. Hamiltonian
else:
self.H_TLS = FloquetHamiltonian(self) # ext. Hamiltonian
self.trainable_variables = [self.W_n,self.b_n,self.c_n,self.W_ph,self.b_ph,self.c_ph]
def getH(self):
return self.H_TLS
def __call__(trainable_variables):
return self.H_TLS
def normalisation(U_):
# U_ (in) original matrix
# (out) matrix with normalised vectors
normaU_ = tf.sqrt(tf.math.reduce_sum(tf.multiply(tf.math.conj(U_),U_,1),axis=0))
U_ = tf.math.truediv(U_,normaU_)
return U_
def tf_gram_schmidt(vectors):
# add batch dimension for matmul
basis = tf.expand_dims(vectors[:,0]/tf.norm(vectors[:,0]),0)
for i in range(1,vectors.shape[0]):#vectors.get_shape()[0].value):
v = vectors[:,i]
# add batch dimension for matmul
v = tf.expand_dims(v,0)
w = v - tf.matmul(tf.matmul(v, basis, adjoint_b=True), basis)
# I assume that my matrix is close to orthogonal
basis = tf.concat([basis, w/tf.norm(w)],axis=0)
return basis
def Unitary_Matrix(model):
UF = tf.Variable(np.zeros((model.dim*model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((model.dim,model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((model.dim,model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
#dim = model.dim
counter = model.count
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
#P(x)(b,c,W) = exp(bji . x) Prod_l=1^M 2 x cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
WX_n = [tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_n[0]),1)+model.c_n[0]]
for j in range(1,model.hidden_n):
y = tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_n[j]),1)+model.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_n = tf.sqrt(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.transpose(tf.exp(tf.reduce_sum(
tf.multiply(model.x[1:counter+1],model.b_n),1)))))
UF_n = tf.reshape(UF_n,[model.dim,model.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_ph[0]),1)+model.c_ph[0]]
for j in range(1,model.hidden_ph):
y = tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_ph[j]),1)+model.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_ph = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.transpose(tf.exp(tf.reduce_sum(
tf.multiply(model.x[1:counter+1],model.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_ph),[model.dim,model.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
UF = tf_gram_schmidt(UF)
#s,u,v = tf.linalg.svd(UF, full_matrices=True)
#UF = u
return UF
def train(model,learning_rate):
with tf.GradientTape() as t:
current_loss = loss(model)
dU = t.gradient(current_loss, model.trainable_variables)
model.UF.assign_sub(learning_rate*dU)
# 3e. Loss function := Use U^dagger H U, sum over the columns, take the difference with the diagonal,
# the loss function is the summ of the square of these differences.
def loss(model):
# define the loss function explicitly including the training variables: self.W, self.b
# model.UF is a function of self.W,self.b,self.c
#UF = tf.Variable(np.zeros((model.dim*model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF = tf.Variable(np.zeros((model.dim,model.dim),dtype=np.complex64))
a = np.zeros((model.dim,model.dim),dtype=np.float32)
counter = model.count
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
    #P(x | b,c,W) = exp(b . x) * Prod_{l=1}^M 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
UF = Unitary_Matrix(model)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@model.H_TLS@UF)
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1,)
residual = tf.math.reduce_sum(tf.abs((U_diag-dotProd)),0)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@UF)
#print(U_)
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1)
residual_unitary = tf.pow(tf.math.reduce_sum(dotProd,0) - model.dim,2.0)
#residual += 1.0*residual_unitary
return residual
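# Cross-check sketch (illustrative, not part of the original script): for the
# exact eigenbasis of H_TLS the residual above vanishes, since U^dagger H U is
# then diagonal and each row sum equals the diagonal entry. Assuming a built model:
# evals, evecs = tf.linalg.eigh(model.H_TLS)
# D = tf.abs(tf.transpose(tf.math.conj(evecs)) @ model.H_TLS @ evecs)
# print(tf.math.reduce_sum(tf.abs(tf.linalg.tensor_diag_part(D) - tf.math.reduce_sum(D, axis=1))))  # ~0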
# This is the gradient of the loss function, required for Keras optimisers
def grad(model):
with tf.GradientTape() as tape:
loss_value = loss(model)
return loss_value, tape.gradient(loss_value, model.trainable_variables)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999,
epsilon=1e-07, amsgrad=False,name='Adam')
#optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)
model = Model()
loss_value = loss(model)
print("Initial UF guess: ", Unitary_Matrix(model))
print("Initial loss value: ",loss_value.numpy())
epochs = range(2048)
for i in epochs:
loss_value, grads = grad(model)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print("Final loss value: ",loss_value.numpy())
print("Final UF matrix:", Unitary_Matrix(model))
UF = Unitary_Matrix(model)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@(model.H_TLS@UF))
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1,)
residual = tf.math.reduce_sum(tf.pow((U_diag-dotProd),2),0)
print(residual)
print(tf.abs(UF))
print(U_)
| for j in range(1,self.dim+1):
if(self.S==4):
y = [[i-2.5,j-2.5]]
if(self.S==2):
y = [[i-1.5,j-1.5]]
self.x = tf.concat([self.x, y], 0)
counter +=1 | conditional_block |
RBM_diagonalisation-V4.py | #!/usr/bin/env python3
"""
Created on Wed Feb 12 10:44:59 2020
@author: German Sinuco
Skeleton modified from
https://www.tensorflow.org/tutorials/customization/custom_training
https://www.tensorflow.org/tutorials/customization/custom_training_walkthrough
Training of an RBM parametrization of the unitary matrix that diagonalises the 2x2 real
and symmetric HAMILTONIAN:
==================== IMPORTANT NOTE ========================
as V2, but using complex parameters, which I used for the first time in TensorFlow_Floquet.py
============================================================
"""
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
get_ipython().run_line_magic('tensorflow_version', '2.x')
except Exception:
pass
import tensorflow as tf
import numpy as np
import math as m
from model import FloquetHamiltonian
from scipy.stats import unitary_group
class Model(object):
def __init__(self,delta=0.0,Omega=0.1,phase=0.0):
self.spin = True
self.omega_0 = 1.00
# Hamiltonian parameters
self.delta = 0.00
self.omega = self.delta + self.omega_0
self.Omega = 1.00
self.phase = phase # the phase in cos(omega t + phase)
# Initialize the spin value and number of floquet channels
self.hidden_n = 4 # hidden neurons
self.hidden_ph = 4 # hidden neurons
        self.S = 4 # spin 3/2; Hilbert space dimension
#self.S = 2 # spin 1/2. Hilbert space dimension
self.N = 0 # Number of positive Floquet manifolds
self.dim = self.S*(2*self.N+1) # Dimension of the extended floquet space
zero_ = tf.constant(0.0,dtype=tf.float64)
one_ = tf.constant(1.0,dtype=tf.float64)
j_ = tf.constant(tf.complex(zero_,one_),dtype=tf.complex128)
#uf_ = tf.random.stateless_uniform([self.dim,self.dim],seed=[2,1],dtype=tf.float32,minval=0.0,maxval=1.0)
#s,u,v = tf.linalg.svd(uf_, full_matrices=True)
#uf_ = u
# Declaring training variables
# Training parameters defining the norm
self.W_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_n = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
# Training parameters defining the phase
self.W_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_ph = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
UF_aux = tf.Variable(np.zeros((self.dim*self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
self.UF = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
# defining the labels of the input layer, which are the components of the UF matrix
self.x = tf.Variable([[0.0,0.0]],dtype=tf.float64)
counter = 0
self.count = counter
for i in range(1,self.dim+1):
for j in range(1,self.dim+1):
if(self.S==4):
y = [[i-2.5,j-2.5]]
if(self.S==2):
y = [[i-1.5,j-1.5]]
self.x = tf.concat([self.x, y], 0)
counter +=1
self.count = counter
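        # For S = 4 (dim = 4 when N = 0) the loop above labels each UF entry (i, j)
        # with centred coordinates, e.g. (1,1) -> [-1.5, -1.5] and (4,4) -> [1.5, 1.5],
        # giving dim*dim = 16 two-component rows in self.x after the dummy first row.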
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
        #P(x | b,c,W) = exp(b . x) * Prod_{l=1}^M 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
WX_n = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[0]),1)+self.c_n[0]]
for j in range(1,self.hidden_n):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[j]),1)+self.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_aux = tf.sqrt(tf.abs(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(
self.x[1:counter+1],self.b_n),1))))))
UF_n = tf.reshape(UF_aux,[self.dim,self.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[0]),1)+self.c_ph[0]]
for j in range(1,self.hidden_ph):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[j]),1)+self.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_aux = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_aux),[self.dim,self.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
# 1st of March 2020. Task: REVISE NORMALISATION AND GRAM-SCHMIDT PROCEDURE FOR COMPLEX VECTORS
# 5th of March 2020. Normalisation done by hand: OK. Now I am using the G-S algorithm
# reported in https://stackoverflow.com/questions/48119473/gram-schmidt-orthogonalization-in-pure-tensorflow-performance-for-iterative-sol.
        # Task: incorporate a basis rotation in the training loop
UF = normalisation(UF)
UF = tf_gram_schmidt(UF)
self.UF = UF
if self.S == 2:
# spin 1/2
self.Identity = tf.constant([[1.0,0.0],[ 0.0, 1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0,1.0],[ 1.0, 0.0]],dtype = tf.complex128)
self.Sy = j_*0.5*tf.constant([[0.0,1.0],[-1.0, 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[1.0,0.0],[ 0.0,-1.0]],dtype = tf.complex128)
else:
if self.S == 4:
# spin 3/2
self.Identity = tf.constant([[1.0,0.0,0.0,0.0],
[0.0,1.0,0.0,0.0],
[0.0,0.0,1.0,0.0],
[0.0,0.0,0.0,1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0, np.sqrt(3.0),0.0, 0.0],
[np.sqrt(3.0),0.0, np.sqrt(4.0), 0.0],
                                   [0.0, np.sqrt(4.0),0.0, np.sqrt(3.0)],  # sqrt(3.0) keeps Sx Hermitian (spin-3/2 ladder coefficient)
[0.0, 0.0, np.sqrt(3.0), 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[3.0,0.0, 0.0, 0.0],
[0.0,1.0, 0.0, 0.0],
[0.0,0.0,-1.0, 0.0],
[0.0,0.0, 0.0,-3.0]],dtype = tf.complex128)
self.Szero = tf.zeros([self.S,self.S],dtype=tf.complex128)
#else:
# if (self.S != 4 & self.S !=2):
# for j in range(0,self.S):
# H[j,j]
if self.N == 0:
self.H_TLS = tf.Variable(self.delta*self.Sz+0.5*self.Omega*self.Sx,shape=(self.dim,self.dim),dtype = tf.complex128,trainable = False) # ext. Hamiltonian
else:
self.H_TLS = FloquetHamiltonian(self) # ext. Hamiltonian
self.trainable_variables = [self.W_n,self.b_n,self.c_n,self.W_ph,self.b_ph,self.c_ph]
def getH(self):
return self.H_TLS
    def __call__(self):
|
def normalisation(U_):
# U_ (in) original matrix
# (out) matrix with normalised vectors
    normaU_ = tf.sqrt(tf.math.reduce_sum(tf.multiply(tf.math.conj(U_),U_),axis=0))
U_ = tf.math.truediv(U_,normaU_)
return U_
def tf_gram_schmidt(vectors):
# add batch dimension for matmul
basis = tf.expand_dims(vectors[:,0]/tf.norm(vectors[:,0]),0)
for i in range(1,vectors.shape[0]):#vectors.get_shape()[0].value):
v = vectors[:,i]
# add batch dimension for matmul
v = tf.expand_dims(v,0)
w = v - tf.matmul(tf.matmul(v, basis, adjoint_b=True), basis)
# I assume that my matrix is close to orthogonal
basis = tf.concat([basis, w/tf.norm(w)],axis=0)
return basis
def Unitary_Matrix(model):
UF = tf.Variable(np.zeros((model.dim*model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((model.dim,model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((model.dim,model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
#dim = model.dim
counter = model.count
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
    #P(x | b,c,W) = exp(b . x) * Prod_{l=1}^M 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
WX_n = [tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_n[0]),1)+model.c_n[0]]
for j in range(1,model.hidden_n):
y = tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_n[j]),1)+model.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_n = tf.sqrt(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.transpose(tf.exp(tf.reduce_sum(
tf.multiply(model.x[1:counter+1],model.b_n),1)))))
UF_n = tf.reshape(UF_n,[model.dim,model.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_ph[0]),1)+model.c_ph[0]]
for j in range(1,model.hidden_ph):
y = tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_ph[j]),1)+model.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_ph = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.transpose(tf.exp(tf.reduce_sum(
tf.multiply(model.x[1:counter+1],model.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_ph),[model.dim,model.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
UF = tf_gram_schmidt(UF)
#s,u,v = tf.linalg.svd(UF, full_matrices=True)
#UF = u
return UF
def train(model,learning_rate):
with tf.GradientTape() as t:
current_loss = loss(model)
dU = t.gradient(current_loss, model.trainable_variables)
model.UF.assign_sub(learning_rate*dU)
# 3e. Loss function := Use U^dagger H U, sum over the columns, take the difference with the diagonal,
# the loss function is the sum of the squares of these differences.
def loss(model):
# define the loss function explicitly including the training variables: self.W, self.b
# model.UF is a function of self.W,self.b,self.c
#UF = tf.Variable(np.zeros((model.dim*model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF = tf.Variable(np.zeros((model.dim,model.dim),dtype=np.complex64))
a = np.zeros((model.dim,model.dim),dtype=np.float32)
counter = model.count
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
    #P(x | b,c,W) = exp(b . x) * Prod_{l=1}^M 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
UF = Unitary_Matrix(model)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@model.H_TLS@UF)
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1,)
residual = tf.math.reduce_sum(tf.abs((U_diag-dotProd)),0)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@UF)
#print(U_)
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1)
residual_unitary = tf.pow(tf.math.reduce_sum(dotProd,0) - model.dim,2.0)
#residual += 1.0*residual_unitary
return residual
# This is the gradient of the loss function, required for Keras optimisers
def grad(model):
with tf.GradientTape() as tape:
loss_value = loss(model)
return loss_value, tape.gradient(loss_value, model.trainable_variables)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999,
epsilon=1e-07, amsgrad=False,name='Adam')
#optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)
model = Model()
loss_value = loss(model)
print("Initial UF guess: ", Unitary_Matrix(model))
print("Initial loss value: ",loss_value.numpy())
epochs = range(2048)
for i in epochs:
loss_value, grads = grad(model)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print("Final loss value: ",loss_value.numpy())
print("Final UF matrix:", Unitary_Matrix(model))
UF = Unitary_Matrix(model)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@(model.H_TLS@UF))
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1,)
residual = tf.math.reduce_sum(tf.pow((U_diag-dotProd),2),0)
print(residual)
print(tf.abs(UF))
print(U_)
| return self.H_TLS | identifier_body |
RBM_diagonalisation-V4.py | #!/usr/bin/env python3
"""
Created on Wed Feb 12 10:44:59 2020
@author: German Sinuco
Skeleton modified from
https://www.tensorflow.org/tutorials/customization/custom_training
https://www.tensorflow.org/tutorials/customization/custom_training_walkthrough
Training of an RBM parametrization of the unitary matrix that diagonalises the 2x2 real
and symmetric HAMILTONIAN:
==================== IMPORTANT NOTE ========================
as V2, but using complex parameters, which I used for the first time in TensorFlow_Floquet.py
============================================================
"""
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
get_ipython().run_line_magic('tensorflow_version', '2.x')
except Exception:
pass
import tensorflow as tf
import numpy as np
import math as m
from model import FloquetHamiltonian
from scipy.stats import unitary_group
class Model(object):
def __init__(self,delta=0.0,Omega=0.1,phase=0.0):
self.spin = True
self.omega_0 = 1.00
# Hamiltonian parameters
self.delta = 0.00
self.omega = self.delta + self.omega_0
self.Omega = 1.00
self.phase = phase # the phase in cos(omega t + phase)
# Initialize the spin value and number of floquet channels
self.hidden_n = 4 # hidden neurons
self.hidden_ph = 4 # hidden neurons
        self.S = 4 # spin 3/2; Hilbert space dimension
#self.S = 2 # spin 1/2. Hilbert space dimension
self.N = 0 # Number of positive Floquet manifolds
self.dim = self.S*(2*self.N+1) # Dimension of the extended floquet space
zero_ = tf.constant(0.0,dtype=tf.float64)
one_ = tf.constant(1.0,dtype=tf.float64)
j_ = tf.constant(tf.complex(zero_,one_),dtype=tf.complex128)
#uf_ = tf.random.stateless_uniform([self.dim,self.dim],seed=[2,1],dtype=tf.float32,minval=0.0,maxval=1.0)
#s,u,v = tf.linalg.svd(uf_, full_matrices=True)
#uf_ = u
# Declaring training variables
# Training parameters defining the norm
self.W_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_n = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_n = tf.Variable(tf.random.stateless_uniform([self.hidden_n],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
# Training parameters defining the phase
self.W_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph,self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.b_ph = tf.Variable(tf.random.stateless_uniform([self.dim*self.dim,2],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
self.c_ph = tf.Variable(tf.random.stateless_uniform([self.hidden_ph],
seed=[1,1],dtype=tf.float64,
minval=-1.0,maxval=1.0),trainable=True)
UF_aux = tf.Variable(np.zeros((self.dim*self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
self.UF = tf.Variable(np.zeros((self.dim,self.dim), dtype=np.complex128),trainable = False) # ext. micromotion operator
# defining the labels of the input layer, which are the components of the UF matrix
self.x = tf.Variable([[0.0,0.0]],dtype=tf.float64)
counter = 0
self.count = counter
for i in range(1,self.dim+1):
for j in range(1,self.dim+1):
if(self.S==4):
y = [[i-2.5,j-2.5]]
if(self.S==2):
y = [[i-1.5,j-1.5]]
self.x = tf.concat([self.x, y], 0)
counter +=1
self.count = counter
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
        #P(x | b,c,W) = exp(b . x) * Prod_{l=1}^M 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
WX_n = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[0]),1)+self.c_n[0]]
for j in range(1,self.hidden_n):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_n[j]),1)+self.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_aux = tf.sqrt(tf.abs(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(
self.x[1:counter+1],self.b_n),1))))))
UF_n = tf.reshape(UF_aux,[self.dim,self.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[0]),1)+self.c_ph[0]]
for j in range(1,self.hidden_ph):
y = tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.W_ph[j]),1)+self.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_aux = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.exp(
tf.transpose(tf.reduce_sum(tf.multiply(self.x[1:counter+1],self.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_aux),[self.dim,self.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
# 1st of March 2020. Task: REVISE NORMALISATION AND GRAM-SCHMIDT PROCEDURE FOR COMPLEX VECTORS
# 5th of March 2020. Normalisation done by hand: OK. Now I am using the G-S algorithm
# reported in https://stackoverflow.com/questions/48119473/gram-schmidt-orthogonalization-in-pure-tensorflow-performance-for-iterative-sol.
        # Task: incorporate a basis rotation in the training loop
UF = normalisation(UF)
UF = tf_gram_schmidt(UF)
self.UF = UF
if self.S == 2:
# spin 1/2
self.Identity = tf.constant([[1.0,0.0],[ 0.0, 1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0,1.0],[ 1.0, 0.0]],dtype = tf.complex128)
self.Sy = j_*0.5*tf.constant([[0.0,1.0],[-1.0, 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[1.0,0.0],[ 0.0,-1.0]],dtype = tf.complex128)
else:
if self.S == 4:
# spin 3/2
self.Identity = tf.constant([[1.0,0.0,0.0,0.0],
[0.0,1.0,0.0,0.0],
[0.0,0.0,1.0,0.0],
[0.0,0.0,0.0,1.0]],dtype = tf.complex128)
self.Sx = 0.5*tf.constant([[0.0, np.sqrt(3.0),0.0, 0.0],
[np.sqrt(3.0),0.0, np.sqrt(4.0), 0.0],
                                   [0.0, np.sqrt(4.0),0.0, np.sqrt(3.0)],  # sqrt(3.0) keeps Sx Hermitian (spin-3/2 ladder coefficient)
[0.0, 0.0, np.sqrt(3.0), 0.0]],dtype = tf.complex128)
self.Sz = 0.5*tf.constant([[3.0,0.0, 0.0, 0.0],
[0.0,1.0, 0.0, 0.0],
[0.0,0.0,-1.0, 0.0],
[0.0,0.0, 0.0,-3.0]],dtype = tf.complex128)
self.Szero = tf.zeros([self.S,self.S],dtype=tf.complex128)
#else:
# if (self.S != 4 & self.S !=2):
# for j in range(0,self.S):
# H[j,j]
if self.N == 0:
self.H_TLS = tf.Variable(self.delta*self.Sz+0.5*self.Omega*self.Sx,shape=(self.dim,self.dim),dtype = tf.complex128,trainable = False) # ext. Hamiltonian
else:
self.H_TLS = FloquetHamiltonian(self) # ext. Hamiltonian
self.trainable_variables = [self.W_n,self.b_n,self.c_n,self.W_ph,self.b_ph,self.c_ph]
def getH(self):
return self.H_TLS
    def __call__(self):
return self.H_TLS
def normalisation(U_):
# U_ (in) original matrix
# (out) matrix with normalised vectors
    normaU_ = tf.sqrt(tf.math.reduce_sum(tf.multiply(tf.math.conj(U_),U_),axis=0))
U_ = tf.math.truediv(U_,normaU_)
return U_
def tf_gram_schmidt(vectors):
# add batch dimension for matmul
basis = tf.expand_dims(vectors[:,0]/tf.norm(vectors[:,0]),0)
for i in range(1,vectors.shape[0]):#vectors.get_shape()[0].value):
v = vectors[:,i]
# add batch dimension for matmul
v = tf.expand_dims(v,0)
w = v - tf.matmul(tf.matmul(v, basis, adjoint_b=True), basis)
# I assume that my matrix is close to orthogonal
basis = tf.concat([basis, w/tf.norm(w)],axis=0)
return basis
def Unitary_Matrix(model):
UF = tf.Variable(np.zeros((model.dim*model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF_n = tf.Variable(np.zeros((model.dim,model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF_ph = tf.Variable(np.zeros((model.dim,model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
#dim = model.dim
counter = model.count
| WX_n = [tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_n[0]),1)+model.c_n[0]]
for j in range(1,model.hidden_n):
y = tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_n[j]),1)+model.c_n[j]
WX_n = tf.concat([WX_n, [y]], 0)
UF_n = tf.sqrt(tf.multiply(tf.reduce_prod(tf.math.cosh(WX_n),0),tf.transpose(tf.exp(tf.reduce_sum(
tf.multiply(model.x[1:counter+1],model.b_n),1)))))
UF_n = tf.reshape(UF_n,[model.dim,model.dim])
# 2. Phase
WX_ph = [tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_ph[0]),1)+model.c_ph[0]]
for j in range(1,model.hidden_ph):
y = tf.reduce_sum(tf.multiply(model.x[1:counter+1],model.W_ph[j]),1)+model.c_ph[j]
WX_ph = tf.concat([WX_ph, [y]], 0)
UF_ph = tf.multiply(tf.reduce_prod(tf.math.cosh(WX_ph),0),tf.transpose(tf.exp(tf.reduce_sum(
tf.multiply(model.x[1:counter+1],model.b_ph),1))))
UF_ph = tf.reshape(tf.math.log(UF_ph),[model.dim,model.dim])
UF_cos = tf.cos(UF_ph/2.0)
UF_sin = tf.sin(UF_ph/2.0)
UF = tf.complex(UF_n*UF_cos,UF_n*UF_sin)
UF = tf_gram_schmidt(UF)
#s,u,v = tf.linalg.svd(UF, full_matrices=True)
#UF = u
return UF
def train(model,learning_rate):
with tf.GradientTape() as t:
current_loss = loss(model)
dU = t.gradient(current_loss, model.trainable_variables)
model.UF.assign_sub(learning_rate*dU)
# 3e. Loss function := Use U^dagger H U, sum over the columns, take the difference with the diagonal,
# the loss function is the sum of the squares of these differences.
def loss(model):
# define the loss function explicitly including the training variables: self.W, self.b
# model.UF is a function of self.W,self.b,self.c
#UF = tf.Variable(np.zeros((model.dim*model.dim), dtype=np.complex64),trainable = False) # ext. micromotion operator
UF = tf.Variable(np.zeros((model.dim,model.dim),dtype=np.complex64))
a = np.zeros((model.dim,model.dim),dtype=np.float32)
counter = model.count
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
    #P(x | b,c,W) = exp(b . x) * Prod_{l=1}^M 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm)
UF = Unitary_Matrix(model)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@model.H_TLS@UF)
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1,)
residual = tf.math.reduce_sum(tf.abs((U_diag-dotProd)),0)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@UF)
#print(U_)
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1)
residual_unitary = tf.pow(tf.math.reduce_sum(dotProd,0) - model.dim,2.0)
#residual += 1.0*residual_unitary
return residual
# This is the gradient of the loss function, required for Keras optimisers
def grad(model):
with tf.GradientTape() as tape:
loss_value = loss(model)
return loss_value, tape.gradient(loss_value, model.trainable_variables)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, beta_1=0.9, beta_2=0.999,
epsilon=1e-07, amsgrad=False,name='Adam')
#optimizer = tf.keras.optimizers.SGD(learning_rate=0.001)
model = Model()
loss_value = loss(model)
print("Initial UF guess: ", Unitary_Matrix(model))
print("Initial loss value: ",loss_value.numpy())
epochs = range(2048)
for i in epochs:
loss_value, grads = grad(model)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print("Final loss value: ",loss_value.numpy())
print("Final UF matrix:", Unitary_Matrix(model))
UF = Unitary_Matrix(model)
U_ = tf.abs(tf.transpose(tf.math.conj(UF))@(model.H_TLS@UF))
U_diag = tf.linalg.tensor_diag_part(U_)
dotProd = tf.math.reduce_sum(abs(U_),axis=1,)
residual = tf.math.reduce_sum(tf.pow((U_diag-dotProd),2),0)
print(residual)
print(tf.abs(UF))
print(U_) |
#Building of the marginal probability of the RBM using the training parameters and labels of the input layer
    #P(x | b,c,W) = exp(b . x) * Prod_{l=1}^M 2*cosh(c_l + W_{x,l} . x)
# 1. Amplitude (norm) | random_line_split |
tropcor_pyaps.py | #! /usr/bin/env python2
############################################################
# Program is part of PySAR v1.2 #
# Copyright(c) 2015, Heresh Fattahi, Zhang Yunjun #
# Author: Heresh Fattahi, Zhang Yunjun #
############################################################
import os
import sys
import argparse
import re
try:
import pyaps as pa
except Exception:
sys.exit('Cannot import pyaps into Python!')
import h5py
import numpy as np
import pysar._datetime as ptime
import pysar._pysar_utilities as ut
import pysar._readfile as readfile
import pysar._writefile as writefile
###############################################################
def get_delay(grib_file, atr, inps_dict):
'''Get delay matrix using PyAPS for one acquisition
Inputs:
        grib_file - string, grib file path
atr - dict, including the following attributes:
dem_file - string, DEM file path
grib_source - string, Weather re-analysis data source
delay_type - string, comb/dry/wet
ref_y/x - string, reference pixel row/col number
inc_angle - np.array, 0/1/2 D
Output:
phs - 2D np.array, absolute tropospheric phase delay relative to ref_y/x
'''
if 'X_FIRST' in atr.keys():
aps = pa.PyAPS_geo(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
else:
aps = pa.PyAPS_rdr(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
phs = np.zeros((aps.ny, aps.nx), dtype=np.float32)
aps.getdelay(phs, inc=0.0)
# Get relative phase delay in space
yref = int(atr['ref_y'])
xref = int(atr['ref_x'])
phs -= phs[yref, xref]
# project into LOS direction
phs /= np.cos(inps_dict['inc_angle'])
# reverse the sign for consistency between different phase correction steps/methods
phs *= -1
return phs
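# Illustrative call (hypothetical paths and values, not from the original script):
# phs = get_delay('./WEATHER/ECMWF/ERA-Int_20100101_06.grb',
#                 {'ref_y': '100', 'ref_x': '200'},
#                 {'dem_file': 'radar_4rlks.hgt', 'grib_source': 'ECMWF',
#                  'delay_type': 'comb', 'inc_angle': 23.0*np.pi/180.0})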
def date_list2grib_file(date_list, hour, grib_source, grib_dir):
grib_file_list = []
for d in date_list:
grib_file = grib_dir+'/'
if grib_source == 'ECMWF' : grib_file += 'ERA-Int_%s_%s.grb' % (d, hour)
elif grib_source == 'ERA' : grib_file += 'ERA_%s_%s.grb' % (d, hour)
elif grib_source == 'NARR' : grib_file += 'narr-a_221_%s_%s00_000.grb' % (d, hour)
elif grib_source == 'MERRA' : grib_file += 'merra-%s-%s.nc4' % (d, hour)
elif grib_source == 'MERRA1': |
grib_file_list.append(grib_file)
return grib_file_list
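# Example (assumed inputs, matching the format strings above):
# date_list2grib_file(['20100101'], '06', 'ECMWF', './WEATHER/ECMWF')
# returns ['./WEATHER/ECMWF/ERA-Int_20100101_06.grb']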
def dload_grib(date_list, hour, grib_source='ECMWF', weather_dir='./'):
'''Download weather re-analysis grib files using PyAPS
Inputs:
date_list : list of string in YYYYMMDD format
hour : string in HH:MM or HH format
grib_source : string,
weather_dir : string,
Output:
grib_file_list : list of string
'''
## Grib data directory
weather_dir = os.path.abspath(weather_dir)
grib_dir = weather_dir+'/'+grib_source
if not os.path.isdir(grib_dir):
print 'making directory: '+grib_dir
os.makedirs(grib_dir)
## Date list to grib file list
grib_file_list = date_list2grib_file(date_list, hour, grib_source, grib_dir)
## Get date list to download (skip already downloaded files)
grib_file_existed = ut.get_file_list(grib_file_list)
if grib_file_existed:
grib_filesize_digit = ut.mode([len(str(os.path.getsize(i))) for i in grib_file_existed])
grib_filesize_max2 = ut.mode([str(os.path.getsize(i))[0:2] for i in grib_file_existed])
grib_file_corrupted = [i for i in grib_file_existed if (len(str(os.path.getsize(i))) != grib_filesize_digit or\
str(os.path.getsize(i))[0:2] != grib_filesize_max2)]
print 'file size mode: %se%d bytes' % (grib_filesize_max2, grib_filesize_digit-2)
print 'number of grib files existed : %d' % len(grib_file_existed)
if grib_file_corrupted:
print '------------------------------------------------------------------------------'
print 'corrupted grib files detected! Delete them and re-download...'
print 'number of grib files corrupted : %d' % len(grib_file_corrupted)
for i in grib_file_corrupted:
rmCmd = 'rm '+i
print rmCmd
os.system(rmCmd)
grib_file_existed.remove(i)
print '------------------------------------------------------------------------------'
grib_file2download = sorted(list(set(grib_file_list) - set(grib_file_existed)))
    date_list2download = [str(re.findall(r'\d{8}', i)[0]) for i in grib_file2download]
print 'number of grib files to download: %d' % len(date_list2download)
print '------------------------------------------------------------------------------\n'
## Download grib file using PyAPS
if grib_source == 'ECMWF' : pa.ECMWFdload( date_list2download, hour, grib_dir)
elif grib_source == 'ERA' : pa.ERAdload( date_list2download, hour, grib_dir)
elif grib_source == 'NARR' : pa.NARRdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA' : pa.MERRAdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA1': pa.MERRA1dload(date_list2download, hour, grib_dir)
return grib_file_existed
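# Illustrative usage (assumed dates and hour, not from the original script):
# grib_files = dload_grib(['20100101', '20100213'], '06',
#                         grib_source='ECMWF', weather_dir='./WEATHER')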
###############################################################
EXAMPLE='''example:
tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5
tropcor_pyaps.py timeseries.h5 -d geometryGeo.h5 -i geometryGeo.h5 --weather-dir /famelung/data/WEATHER
tropcor_pyaps.py -d srtm1.dem -i 30 --hour 00 --ref-yx 2000 2500 --date-list date_list.txt
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s NARR
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s MERRA --delay dry -i 23
tropcor_pyaps.py timeseries_LODcor.h5 -d demRadar.h5
tropcor_pyaps.py -s ECMWF --hour 18 --date-list date_list.txt --download
tropcor_pyaps.py -s ECMWF --hour 18 --date-list bl_list.txt --download
'''
REFERENCE='''reference:
Jolivet, R., R. Grandin, C. Lasserre, M.-P. Doin and G. Peltzer (2011), Systematic InSAR tropospheric
phase delay corrections from global meteorological reanalysis data, Geophys. Res. Lett., 38, L17311,
doi:10.1029/2011GL048757
'''
TEMPLATE='''
## 7. Tropospheric Delay Correction (optional and recommended)
## correct tropospheric delay using the following methods:
## a. pyaps - use weather re-analysis data (Jolivet et al., 2011, GRL, need to install PyAPS; Dee et al., 2011)
## b. height_correlation - correct stratified tropospheric delay (Doin et al., 2009, J Applied Geop)
## c. base_trop_cor - (not recommended) baseline error and stratified tropo simultaneously (Jo et al., 2010, Geo J)
pysar.troposphericDelay.method = auto #[pyaps / height_correlation / base_trop_cor / no], auto for pyaps
pysar.troposphericDelay.weatherModel = auto #[ECMWF / MERRA / NARR], auto for ECMWF, for pyaps method
pysar.troposphericDelay.polyOrder = auto #[1 / 2 / 3], auto for 1, for height_correlation method
pysar.troposphericDelay.looks = auto #[1-inf], auto for 8, Number of looks to be applied to interferogram
'''
DATA_INFO='''
re-analysis_dataset coverage temporal_resolution spatial_resolution latency analysis
------------------------------------------------------------------------------------------------------------
ERA-Interim (by ECMWF) Global 00/06/12/18 UTC 0.75 deg (~83 km) 2-month 4D-var
MERRA2 (by NASA Goddard) Global 00/06/12/18 UTC 0.5 * 0.625 (~50 km) 2-3 weeks 3D-var
To download MERRA2, you need an Earthdata account, and pre-authorize the "NASA GESDISC DATA ARCHIVE" application, following https://disc.gsfc.nasa.gov/earthdata-login.
'''
def cmdLineParse():
parser = argparse.ArgumentParser(description='Tropospheric correction using weather models\n'+\
' PyAPS is used to download and calculate the delay for each time-series epoch.',\
formatter_class=argparse.RawTextHelpFormatter,\
epilog=REFERENCE+'\n'+DATA_INFO+'\n'+EXAMPLE)
parser.add_argument(dest='timeseries_file', nargs='?', help='timeseries HDF5 file, i.e. timeseries.h5')
parser.add_argument('-d','--dem', dest='dem_file',\
help='DEM file, i.e. radar_4rlks.hgt, srtm1.dem')
parser.add_argument('-i', dest='inc_angle', default='30',\
                        help='a file containing all incidence angles, or a single number used for the whole image.')
parser.add_argument('--weather-dir', dest='weather_dir', \
help='directory to put downloaded weather data, i.e. ./../WEATHER\n'+\
'use directory of input timeseries_file if not specified.')
parser.add_argument('--delay', dest='delay_type', default='comb', choices={'comb','dry','wet'},\
help='Delay type to calculate, comb contains both wet and dry delays')
parser.add_argument('--download', action='store_true', help='Download weather data only.')
parser.add_argument('--date-list', dest='date_list_file',\
help='Read the first column of text file as list of date to download data\n'+\
'in YYYYMMDD or YYMMDD format')
parser.add_argument('--ref-yx', dest='ref_yx', type=int, nargs=2, help='reference pixel in y/x')
parser.add_argument('-s', dest='weather_model',\
default='ECMWF', choices={'ECMWF','ERA-Interim','ERA','MERRA','MERRA1','NARR'},\
help='source of the atmospheric data.\n'+\
                        'As of 2018-Mar-06, the ERA and ECMWF download links are working.\n'+\
'NARR is working for 1979-Jan to 2014-Oct.\n'+\
'MERRA(2) is not working.')
parser.add_argument('--hour', help='time of data in HH, e.g. 12, 06')
parser.add_argument('--template', dest='template_file',\
help='template file with input options below:\n'+TEMPLATE)
    parser.add_argument('-o', dest='out_file', help='Output file name for tropospheric-corrected timeseries.')
inps = parser.parse_args()
    # Calculate DELAY or DOWNLOAD DATA ONLY; one of the two is required
if not inps.download and not inps.dem_file and ( not inps.timeseries_file or not inps.date_list_file ):
parser.print_help()
sys.exit(1)
return inps
###############################################################
def main(argv):
inps = cmdLineParse()
k = None
atr = dict()
if inps.timeseries_file:
inps.timeseries_file = ut.get_file_list([inps.timeseries_file])[0]
atr = readfile.read_attribute(inps.timeseries_file)
k = atr['FILE_TYPE']
elif inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
atr = readfile.read_attribute(inps.dem_file)
if 'ref_y' not in atr.keys() and inps.ref_yx:
print 'No reference info found in input file, use input ref_yx: '+str(inps.ref_yx)
atr['ref_y'] = inps.ref_yx[0]
atr['ref_x'] = inps.ref_yx[1]
##Read Incidence angle: to map the zenith delay to the slant delay
if os.path.isfile(inps.inc_angle):
inps.inc_angle = readfile.read(inps.inc_angle, epoch='incidenceAngle')[0]
else:
inps.inc_angle = float(inps.inc_angle)
print 'incidence angle: '+str(inps.inc_angle)
inps.inc_angle = inps.inc_angle*np.pi/180.0
##Prepare DEM file in ROI_PAC format for PyAPS to read
if inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
if os.path.splitext(inps.dem_file)[1] in ['.h5']:
print 'convert DEM file to ROIPAC format'
dem, atr_dem = readfile.read(inps.dem_file, epoch='height')
if 'Y_FIRST' in atr.keys():
atr_dem['FILE_TYPE'] = '.dem'
else:
atr_dem['FILE_TYPE'] = '.hgt'
outname = os.path.splitext(inps.dem_file)[0]+'4pyaps'+atr_dem['FILE_TYPE']
inps.dem_file = writefile.write(dem, atr_dem, outname)
print '*******************************************************************************'
print 'Downloading weather model data ...'
## Get Grib Source
if inps.weather_model in ['ECMWF','ERA-Interim']: inps.grib_source = 'ECMWF'
elif inps.weather_model == 'ERA' : inps.grib_source = 'ERA'
elif inps.weather_model == 'MERRA': inps.grib_source = 'MERRA'
elif inps.weather_model == 'NARR' : inps.grib_source = 'NARR'
    else: raise Exception('Unrecognized weather model: '+inps.weather_model)
print 'grib source: '+inps.grib_source
# Get weather directory
if not inps.weather_dir:
if inps.timeseries_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.timeseries_file))+'/../WEATHER'
elif inps.dem_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.dem_file))+'/../WEATHER'
else:
inps.weather_dir = os.path.abspath(os.getcwd())
print 'Store weather data into directory: '+inps.weather_dir
# Get date list to download
if not inps.date_list_file:
print 'read date list info from: '+inps.timeseries_file
h5 = h5py.File(inps.timeseries_file, 'r')
if 'timeseries' in h5.keys():
date_list = sorted(h5[k].keys())
elif k in ['interferograms','coherence','wrapped']:
ifgram_list = sorted(h5[k].keys())
date12_list = ptime.list_ifgram2date12(ifgram_list)
m_dates = [i.split('-')[0] for i in date12_list]
s_dates = [i.split('-')[1] for i in date12_list]
date_list = ptime.yyyymmdd(sorted(list(set(m_dates + s_dates))))
else:
            raise ValueError('Unsupported input file type: '+k)
h5.close()
else:
date_list = ptime.yyyymmdd(np.loadtxt(inps.date_list_file, dtype=str, usecols=(0,)).tolist())
print 'read date list info from: '+inps.date_list_file
# Get Acquisition time - hour
if not inps.hour:
inps.hour = ptime.closest_weather_product_time(atr['CENTER_LINE_UTC'], inps.grib_source)
        print 'Time of closest available product: '+inps.hour
## Download data using PyAPS
inps.grib_file_list = dload_grib(date_list, inps.hour, inps.weather_model, inps.weather_dir)
if inps.download:
print 'Download completed, exit as planned.'
return
print '*******************************************************************************'
    print 'Calculating delay for each epoch.'
## Calculate tropo delay using pyaps
length = int(atr['FILE_LENGTH'])
width = int(atr['WIDTH'])
date_num = len(date_list)
trop_ts = np.zeros((date_num, length, width), np.float32)
for i in range(date_num):
grib_file = inps.grib_file_list[i]
date = date_list[i]
print 'calculate phase delay on %s from file %s' % (date, os.path.basename(grib_file))
trop_ts[i] = get_delay(grib_file, atr, vars(inps))
## Convert relative phase delay on reference date
    try: ref_date = atr['ref_date']
    except KeyError: ref_date = date_list[0]
print 'convert to relative phase delay with reference date: '+ref_date
ref_idx = date_list.index(ref_date)
trop_ts -= np.tile(trop_ts[ref_idx,:,:], (date_num, 1, 1))
## Write tropospheric delay to HDF5
tropFile = inps.grib_source+'.h5'
print 'writing >>> %s' % (tropFile)
h5trop = h5py.File(tropFile, 'w')
group_trop = h5trop.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
group_trop.create_dataset(date, data=trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
# Write Attributes
for key,value in atr.iteritems():
group_trop.attrs[key] = value
h5trop.close()
## Write corrected Time series to HDF5
if k == 'timeseries':
if not inps.out_file:
inps.out_file = os.path.splitext(inps.timeseries_file)[0]+'_'+inps.grib_source+'.h5'
print 'writing >>> %s' % (inps.out_file)
h5ts = h5py.File(inps.timeseries_file, 'r')
h5tsCor = h5py.File(inps.out_file, 'w')
group_tsCor = h5tsCor.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
ts = h5ts['timeseries'].get(date)[:]
group_tsCor.create_dataset(date, data=ts-trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
h5ts.close()
# Write Attributes
for key,value in atr.iteritems():
group_tsCor.attrs[key] = value
h5tsCor.close()
# Delete temporary DEM file in ROI_PAC format
if '4pyaps' in inps.dem_file:
rmCmd = 'rm %s %s.rsc' % (inps.dem_file, inps.dem_file)
print rmCmd
os.system(rmCmd)
print 'Done.'
return inps.out_file
###############################################################
if __name__ == '__main__':
main(sys.argv[1:])
| grib_file += 'merra-%s-%s.hdf' % (d, hour) | conditional_block |
tropcor_pyaps.py | #! /usr/bin/env python2
############################################################
# Program is part of PySAR v1.2 #
# Copyright(c) 2015, Heresh Fattahi, Zhang Yunjun #
# Author: Heresh Fattahi, Zhang Yunjun #
############################################################
import os
import sys
import argparse
import re
try:
import pyaps as pa
except Exception:
sys.exit('Cannot import pyaps into Python!')
import h5py
import numpy as np
import pysar._datetime as ptime
import pysar._pysar_utilities as ut
import pysar._readfile as readfile
import pysar._writefile as writefile
###############################################################
def get_delay(grib_file, atr, inps_dict):
'''Get delay matrix using PyAPS for one acquisition
Inputs:
        grib_file - string, grib file path
atr - dict, including the following attributes:
dem_file - string, DEM file path
grib_source - string, Weather re-analysis data source
delay_type - string, comb/dry/wet
ref_y/x - string, reference pixel row/col number
inc_angle - np.array, 0/1/2 D
Output:
phs - 2D np.array, absolute tropospheric phase delay relative to ref_y/x
'''
if 'X_FIRST' in atr.keys():
aps = pa.PyAPS_geo(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
else:
aps = pa.PyAPS_rdr(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
phs = np.zeros((aps.ny, aps.nx), dtype=np.float32)
aps.getdelay(phs, inc=0.0)
# Get relative phase delay in space
yref = int(atr['ref_y'])
xref = int(atr['ref_x'])
phs -= phs[yref, xref]
# project into LOS direction
phs /= np.cos(inps_dict['inc_angle'])
# reverse the sign for consistency between different phase correction steps/methods
phs *= -1
return phs
def date_list2grib_file(date_list, hour, grib_source, grib_dir):
|
def dload_grib(date_list, hour, grib_source='ECMWF', weather_dir='./'):
'''Download weather re-analysis grib files using PyAPS
Inputs:
date_list : list of string in YYYYMMDD format
hour : string in HH:MM or HH format
grib_source : string,
weather_dir : string,
Output:
grib_file_list : list of string
'''
## Grib data directory
weather_dir = os.path.abspath(weather_dir)
grib_dir = weather_dir+'/'+grib_source
if not os.path.isdir(grib_dir):
print 'making directory: '+grib_dir
os.makedirs(grib_dir)
## Date list to grib file list
grib_file_list = date_list2grib_file(date_list, hour, grib_source, grib_dir)
## Get date list to download (skip already downloaded files)
grib_file_existed = ut.get_file_list(grib_file_list)
if grib_file_existed:
grib_filesize_digit = ut.mode([len(str(os.path.getsize(i))) for i in grib_file_existed])
grib_filesize_max2 = ut.mode([str(os.path.getsize(i))[0:2] for i in grib_file_existed])
grib_file_corrupted = [i for i in grib_file_existed if (len(str(os.path.getsize(i))) != grib_filesize_digit or\
str(os.path.getsize(i))[0:2] != grib_filesize_max2)]
print 'file size mode: %se%d bytes' % (grib_filesize_max2, grib_filesize_digit-2)
print 'number of grib files existed : %d' % len(grib_file_existed)
if grib_file_corrupted:
print '------------------------------------------------------------------------------'
print 'corrupted grib files detected! Delete them and re-download...'
print 'number of grib files corrupted : %d' % len(grib_file_corrupted)
for i in grib_file_corrupted:
rmCmd = 'rm '+i
print rmCmd
os.system(rmCmd)
grib_file_existed.remove(i)
print '------------------------------------------------------------------------------'
grib_file2download = sorted(list(set(grib_file_list) - set(grib_file_existed)))
    date_list2download = [str(re.findall(r'\d{8}', i)[0]) for i in grib_file2download]
print 'number of grib files to download: %d' % len(date_list2download)
print '------------------------------------------------------------------------------\n'
## Download grib file using PyAPS
if grib_source == 'ECMWF' : pa.ECMWFdload( date_list2download, hour, grib_dir)
elif grib_source == 'ERA' : pa.ERAdload( date_list2download, hour, grib_dir)
elif grib_source == 'NARR' : pa.NARRdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA' : pa.MERRAdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA1': pa.MERRA1dload(date_list2download, hour, grib_dir)
return grib_file_existed
###############################################################
EXAMPLE='''example:
tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5
tropcor_pyaps.py timeseries.h5 -d geometryGeo.h5 -i geometryGeo.h5 --weather-dir /famelung/data/WEATHER
tropcor_pyaps.py -d srtm1.dem -i 30 --hour 00 --ref-yx 2000 2500 --date-list date_list.txt
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s NARR
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s MERRA --delay dry -i 23
tropcor_pyaps.py timeseries_LODcor.h5 -d demRadar.h5
tropcor_pyaps.py -s ECMWF --hour 18 --date-list date_list.txt --download
tropcor_pyaps.py -s ECMWF --hour 18 --date-list bl_list.txt --download
'''
REFERENCE='''reference:
Jolivet, R., R. Grandin, C. Lasserre, M.-P. Doin and G. Peltzer (2011), Systematic InSAR tropospheric
phase delay corrections from global meteorological reanalysis data, Geophys. Res. Lett., 38, L17311,
doi:10.1029/2011GL048757
'''
TEMPLATE='''
## 7. Tropospheric Delay Correction (optional and recommended)
## correct tropospheric delay using the following methods:
## a. pyaps - use weather re-analysis data (Jolivet et al., 2011, GRL, need to install PyAPS; Dee et al., 2011)
## b. height_correlation - correct stratified tropospheric delay (Doin et al., 2009, J Applied Geop)
## c. base_trop_cor - (not recommended) baseline error and stratified tropo simultaneously (Jo et al., 2010, Geo J)
pysar.troposphericDelay.method = auto #[pyaps / height_correlation / base_trop_cor / no], auto for pyaps
pysar.troposphericDelay.weatherModel = auto #[ECMWF / MERRA / NARR], auto for ECMWF, for pyaps method
pysar.troposphericDelay.polyOrder = auto #[1 / 2 / 3], auto for 1, for height_correlation method
pysar.troposphericDelay.looks = auto #[1-inf], auto for 8, Number of looks to be applied to interferogram
'''
DATA_INFO='''
re-analysis_dataset coverage temporal_resolution spatial_resolution latency analysis
------------------------------------------------------------------------------------------------------------
ERA-Interim (by ECMWF) Global 00/06/12/18 UTC 0.75 deg (~83 km) 2-month 4D-var
MERRA2 (by NASA Goddard) Global 00/06/12/18 UTC 0.5 * 0.625 (~50 km) 2-3 weeks 3D-var
To download MERRA2, you need an Earthdata account, and pre-authorize the "NASA GESDISC DATA ARCHIVE" application, following https://disc.gsfc.nasa.gov/earthdata-login.
'''
def cmdLineParse():
parser = argparse.ArgumentParser(description='Tropospheric correction using weather models\n'+\
' PyAPS is used to download and calculate the delay for each time-series epoch.',\
formatter_class=argparse.RawTextHelpFormatter,\
epilog=REFERENCE+'\n'+DATA_INFO+'\n'+EXAMPLE)
parser.add_argument(dest='timeseries_file', nargs='?', help='timeseries HDF5 file, i.e. timeseries.h5')
parser.add_argument('-d','--dem', dest='dem_file',\
help='DEM file, i.e. radar_4rlks.hgt, srtm1.dem')
parser.add_argument('-i', dest='inc_angle', default='30',\
                        help='a file containing all incidence angles, or a single number used for the whole image.')
parser.add_argument('--weather-dir', dest='weather_dir', \
help='directory to put downloaded weather data, i.e. ./../WEATHER\n'+\
'use directory of input timeseries_file if not specified.')
parser.add_argument('--delay', dest='delay_type', default='comb', choices={'comb','dry','wet'},\
help='Delay type to calculate, comb contains both wet and dry delays')
parser.add_argument('--download', action='store_true', help='Download weather data only.')
parser.add_argument('--date-list', dest='date_list_file',\
help='Read the first column of text file as list of date to download data\n'+\
'in YYYYMMDD or YYMMDD format')
parser.add_argument('--ref-yx', dest='ref_yx', type=int, nargs=2, help='reference pixel in y/x')
parser.add_argument('-s', dest='weather_model',\
default='ECMWF', choices={'ECMWF','ERA-Interim','ERA','MERRA','MERRA1','NARR'},\
help='source of the atmospheric data.\n'+\
                        'As of 2018-Mar-06, the ERA and ECMWF download links are working.\n'+\
'NARR is working for 1979-Jan to 2014-Oct.\n'+\
'MERRA(2) is not working.')
parser.add_argument('--hour', help='time of data in HH, e.g. 12, 06')
parser.add_argument('--template', dest='template_file',\
help='template file with input options below:\n'+TEMPLATE)
    parser.add_argument('-o', dest='out_file', help='Output file name for tropospheric-corrected timeseries.')
inps = parser.parse_args()
    # Calculate DELAY or DOWNLOAD DATA ONLY; one of the two is required
if not inps.download and not inps.dem_file and ( not inps.timeseries_file or not inps.date_list_file ):
parser.print_help()
sys.exit(1)
return inps
###############################################################
def main(argv):
inps = cmdLineParse()
k = None
atr = dict()
if inps.timeseries_file:
inps.timeseries_file = ut.get_file_list([inps.timeseries_file])[0]
atr = readfile.read_attribute(inps.timeseries_file)
k = atr['FILE_TYPE']
elif inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
atr = readfile.read_attribute(inps.dem_file)
if 'ref_y' not in atr.keys() and inps.ref_yx:
print 'No reference info found in input file, use input ref_yx: '+str(inps.ref_yx)
atr['ref_y'] = inps.ref_yx[0]
atr['ref_x'] = inps.ref_yx[1]
##Read Incidence angle: to map the zenith delay to the slant delay
if os.path.isfile(inps.inc_angle):
inps.inc_angle = readfile.read(inps.inc_angle, epoch='incidenceAngle')[0]
else:
inps.inc_angle = float(inps.inc_angle)
print 'incidence angle: '+str(inps.inc_angle)
inps.inc_angle = inps.inc_angle*np.pi/180.0
##Prepare DEM file in ROI_PAC format for PyAPS to read
if inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
if os.path.splitext(inps.dem_file)[1] in ['.h5']:
print 'convert DEM file to ROIPAC format'
dem, atr_dem = readfile.read(inps.dem_file, epoch='height')
if 'Y_FIRST' in atr.keys():
atr_dem['FILE_TYPE'] = '.dem'
else:
atr_dem['FILE_TYPE'] = '.hgt'
outname = os.path.splitext(inps.dem_file)[0]+'4pyaps'+atr_dem['FILE_TYPE']
inps.dem_file = writefile.write(dem, atr_dem, outname)
print '*******************************************************************************'
print 'Downloading weather model data ...'
## Get Grib Source
if inps.weather_model in ['ECMWF','ERA-Interim']: inps.grib_source = 'ECMWF'
elif inps.weather_model == 'ERA' : inps.grib_source = 'ERA'
elif inps.weather_model == 'MERRA': inps.grib_source = 'MERRA'
elif inps.weather_model == 'NARR' : inps.grib_source = 'NARR'
    else: raise Exception('Unrecognized weather model: '+inps.weather_model)
print 'grib source: '+inps.grib_source
# Get weather directory
if not inps.weather_dir:
if inps.timeseries_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.timeseries_file))+'/../WEATHER'
elif inps.dem_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.dem_file))+'/../WEATHER'
else:
inps.weather_dir = os.path.abspath(os.getcwd())
print 'Store weather data into directory: '+inps.weather_dir
# Get date list to download
if not inps.date_list_file:
print 'read date list info from: '+inps.timeseries_file
h5 = h5py.File(inps.timeseries_file, 'r')
if 'timeseries' in h5.keys():
date_list = sorted(h5[k].keys())
elif k in ['interferograms','coherence','wrapped']:
ifgram_list = sorted(h5[k].keys())
date12_list = ptime.list_ifgram2date12(ifgram_list)
m_dates = [i.split('-')[0] for i in date12_list]
s_dates = [i.split('-')[1] for i in date12_list]
date_list = ptime.yyyymmdd(sorted(list(set(m_dates + s_dates))))
else:
            raise ValueError('Unsupported input file type: '+k)
h5.close()
else:
date_list = ptime.yyyymmdd(np.loadtxt(inps.date_list_file, dtype=str, usecols=(0,)).tolist())
print 'read date list info from: '+inps.date_list_file
# Get Acquisition time - hour
if not inps.hour:
inps.hour = ptime.closest_weather_product_time(atr['CENTER_LINE_UTC'], inps.grib_source)
        print 'Time of closest available product: '+inps.hour
## Download data using PyAPS
inps.grib_file_list = dload_grib(date_list, inps.hour, inps.weather_model, inps.weather_dir)
if inps.download:
print 'Download completed, exit as planned.'
return
print '*******************************************************************************'
    print 'Calculating delay for each epoch.'
## Calculate tropo delay using pyaps
length = int(atr['FILE_LENGTH'])
width = int(atr['WIDTH'])
date_num = len(date_list)
trop_ts = np.zeros((date_num, length, width), np.float32)
for i in range(date_num):
grib_file = inps.grib_file_list[i]
date = date_list[i]
print 'calculate phase delay on %s from file %s' % (date, os.path.basename(grib_file))
trop_ts[i] = get_delay(grib_file, atr, vars(inps))
## Convert relative phase delay on reference date
    try: ref_date = atr['ref_date']
    except KeyError: ref_date = date_list[0]
print 'convert to relative phase delay with reference date: '+ref_date
ref_idx = date_list.index(ref_date)
trop_ts -= np.tile(trop_ts[ref_idx,:,:], (date_num, 1, 1))
## Write tropospheric delay to HDF5
tropFile = inps.grib_source+'.h5'
print 'writing >>> %s' % (tropFile)
h5trop = h5py.File(tropFile, 'w')
group_trop = h5trop.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
group_trop.create_dataset(date, data=trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
# Write Attributes
for key,value in atr.iteritems():
group_trop.attrs[key] = value
h5trop.close()
## Write corrected Time series to HDF5
if k == 'timeseries':
if not inps.out_file:
inps.out_file = os.path.splitext(inps.timeseries_file)[0]+'_'+inps.grib_source+'.h5'
print 'writing >>> %s' % (inps.out_file)
h5ts = h5py.File(inps.timeseries_file, 'r')
h5tsCor = h5py.File(inps.out_file, 'w')
group_tsCor = h5tsCor.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
ts = h5ts['timeseries'].get(date)[:]
group_tsCor.create_dataset(date, data=ts-trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
h5ts.close()
# Write Attributes
for key,value in atr.iteritems():
group_tsCor.attrs[key] = value
h5tsCor.close()
# Delete temporary DEM file in ROI_PAC format
if '4pyaps' in inps.dem_file:
rmCmd = 'rm %s %s.rsc' % (inps.dem_file, inps.dem_file)
print rmCmd
os.system(rmCmd)
print 'Done.'
return inps.out_file
###############################################################
if __name__ == '__main__':
main(sys.argv[1:])
| grib_file_list = []
for d in date_list:
grib_file = grib_dir+'/'
if grib_source == 'ECMWF' : grib_file += 'ERA-Int_%s_%s.grb' % (d, hour)
elif grib_source == 'ERA' : grib_file += 'ERA_%s_%s.grb' % (d, hour)
elif grib_source == 'NARR' : grib_file += 'narr-a_221_%s_%s00_000.grb' % (d, hour)
elif grib_source == 'MERRA' : grib_file += 'merra-%s-%s.nc4' % (d, hour)
elif grib_source == 'MERRA1': grib_file += 'merra-%s-%s.hdf' % (d, hour)
grib_file_list.append(grib_file)
return grib_file_list | identifier_body |
tropcor_pyaps.py | #! /usr/bin/env python2
############################################################
# Program is part of PySAR v1.2 #
# Copyright(c) 2015, Heresh Fattahi, Zhang Yunjun #
# Author: Heresh Fattahi, Zhang Yunjun #
############################################################
import os
import sys
import argparse
import re
try:
import pyaps as pa
except Exception:
sys.exit('Cannot import pyaps into Python!')
import h5py
import numpy as np
import pysar._datetime as ptime
import pysar._pysar_utilities as ut
import pysar._readfile as readfile
import pysar._writefile as writefile
###############################################################
def get_delay(grib_file, atr, inps_dict):
'''Get delay matrix using PyAPS for one acquisition
Inputs:
        grib_file - string, grib file path
atr - dict, including the following attributes:
dem_file - string, DEM file path
grib_source - string, Weather re-analysis data source
delay_type - string, comb/dry/wet
ref_y/x - string, reference pixel row/col number
inc_angle - np.array, 0/1/2 D
Output:
phs - 2D np.array, absolute tropospheric phase delay relative to ref_y/x
'''
if 'X_FIRST' in atr.keys():
aps = pa.PyAPS_geo(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
else:
aps = pa.PyAPS_rdr(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
phs = np.zeros((aps.ny, aps.nx), dtype=np.float32)
aps.getdelay(phs, inc=0.0)
# Get relative phase delay in space
yref = int(atr['ref_y'])
xref = int(atr['ref_x'])
phs -= phs[yref, xref]
# project into LOS direction
phs /= np.cos(inps_dict['inc_angle'])
# reverse the sign for consistency between different phase correction steps/methods
phs *= -1
return phs
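# Editorial note (not part of the original script): the division by cos(inc_angle)
# above maps the zenith delay onto the radar line of sight. With made-up numbers,
# a 0.024 m zenith delay at a 30 deg incidence angle becomes
# 0.024 / cos(30*pi/180) ~= 0.0277 m of slant delay, and the final sign flip
# keeps the convention consistent with the other phase-correction steps.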
def date_list2grib_file(date_list, hour, grib_source, grib_dir):
grib_file_list = []
for d in date_list:
grib_file = grib_dir+'/'
if grib_source == 'ECMWF' : grib_file += 'ERA-Int_%s_%s.grb' % (d, hour)
elif grib_source == 'ERA' : grib_file += 'ERA_%s_%s.grb' % (d, hour)
elif grib_source == 'NARR' : grib_file += 'narr-a_221_%s_%s00_000.grb' % (d, hour)
elif grib_source == 'MERRA' : grib_file += 'merra-%s-%s.nc4' % (d, hour)
elif grib_source == 'MERRA1': grib_file += 'merra-%s-%s.hdf' % (d, hour)
grib_file_list.append(grib_file)
return grib_file_list
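# Editorial example (hypothetical values): with d='20150403', hour='12',
# grib_source='ECMWF' and grib_dir='./WEATHER/ECMWF', the appended entry is
# './WEATHER/ECMWF/ERA-Int_20150403_12.grb'.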
def | (date_list, hour, grib_source='ECMWF', weather_dir='./'):
'''Download weather re-analysis grib files using PyAPS
Inputs:
date_list : list of string in YYYYMMDD format
hour : string in HH:MM or HH format
grib_source : string,
weather_dir : string,
Output:
grib_file_list : list of string
'''
## Grib data directory
weather_dir = os.path.abspath(weather_dir)
grib_dir = weather_dir+'/'+grib_source
if not os.path.isdir(grib_dir):
print 'making directory: '+grib_dir
os.makedirs(grib_dir)
## Date list to grib file list
grib_file_list = date_list2grib_file(date_list, hour, grib_source, grib_dir)
## Get date list to download (skip already downloaded files)
grib_file_existed = ut.get_file_list(grib_file_list)
if grib_file_existed:
grib_filesize_digit = ut.mode([len(str(os.path.getsize(i))) for i in grib_file_existed])
grib_filesize_max2 = ut.mode([str(os.path.getsize(i))[0:2] for i in grib_file_existed])
grib_file_corrupted = [i for i in grib_file_existed if (len(str(os.path.getsize(i))) != grib_filesize_digit or\
str(os.path.getsize(i))[0:2] != grib_filesize_max2)]
print 'file size mode: %se%d bytes' % (grib_filesize_max2, grib_filesize_digit-2)
print 'number of grib files existed : %d' % len(grib_file_existed)
if grib_file_corrupted:
print '------------------------------------------------------------------------------'
print 'corrupted grib files detected! Deleting them for re-download...'
print 'number of grib files corrupted : %d' % len(grib_file_corrupted)
for i in grib_file_corrupted:
rmCmd = 'rm '+i
print rmCmd
os.system(rmCmd)
grib_file_existed.remove(i)
print '------------------------------------------------------------------------------'
grib_file2download = sorted(list(set(grib_file_list) - set(grib_file_existed)))
date_list2download = [str(re.findall(r'\d{8}', i)[0]) for i in grib_file2download]
print 'number of grib files to download: %d' % len(date_list2download)
print '------------------------------------------------------------------------------\n'
## Download grib file using PyAPS
if grib_source == 'ECMWF' : pa.ECMWFdload( date_list2download, hour, grib_dir)
elif grib_source == 'ERA' : pa.ERAdload( date_list2download, hour, grib_dir)
elif grib_source == 'NARR' : pa.NARRdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA' : pa.MERRAdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA1': pa.MERRA1dload(date_list2download, hour, grib_dir)
return grib_file_existed
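# Editorial sketch of the corruption check above (made-up sizes): for existing
# files of 24320000, 24321000 and 812 bytes, the mode of the digit counts is 8
# and the mode of the two leading digits is '24'; the 812-byte file fails the
# size test, so it is deleted and its date is queued for re-download.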
###############################################################
EXAMPLE='''example:
tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5
tropcor_pyaps.py timeseries.h5 -d geometryGeo.h5 -i geometryGeo.h5 --weather-dir /famelung/data/WEATHER
tropcor_pyaps.py -d srtm1.dem -i 30 --hour 00 --ref-yx 2000 2500 --date-list date_list.txt
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s NARR
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s MERRA --delay dry -i 23
tropcor_pyaps.py timeseries_LODcor.h5 -d demRadar.h5
tropcor_pyaps.py -s ECMWF --hour 18 --date-list date_list.txt --download
tropcor_pyaps.py -s ECMWF --hour 18 --date-list bl_list.txt --download
'''
REFERENCE='''reference:
Jolivet, R., R. Grandin, C. Lasserre, M.-P. Doin and G. Peltzer (2011), Systematic InSAR tropospheric
phase delay corrections from global meteorological reanalysis data, Geophys. Res. Lett., 38, L17311,
doi:10.1029/2011GL048757
'''
TEMPLATE='''
## 7. Tropospheric Delay Correction (optional and recommended)
## correct tropospheric delay using the following methods:
## a. pyaps - use weather re-analysis data (Jolivet et al., 2011, GRL, need to install PyAPS; Dee et al., 2011)
## b. height_correlation - correct stratified tropospheric delay (Doin et al., 2009, J Applied Geop)
## c. base_trop_cor - (not recommend) baseline error and stratified tropo simultaneously (Jo et al., 2010, Geo J)
pysar.troposphericDelay.method = auto #[pyaps / height_correlation / base_trop_cor / no], auto for pyaps
pysar.troposphericDelay.weatherModel = auto #[ECMWF / MERRA / NARR], auto for ECMWF, for pyaps method
pysar.troposphericDelay.polyOrder = auto #[1 / 2 / 3], auto for 1, for height_correlation method
pysar.troposphericDelay.looks = auto #[1-inf], auto for 8, Number of looks to be applied to interferogram
'''
DATA_INFO='''
re-analysis_dataset coverage temporal_resolution spatial_resolution latency analysis
------------------------------------------------------------------------------------------------------------
ERA-Interim (by ECMWF) Global 00/06/12/18 UTC 0.75 deg (~83 km) 2-month 4D-var
MERRA2 (by NASA Goddard) Global 00/06/12/18 UTC 0.5 * 0.625 (~50 km) 2-3 weeks 3D-var
To download MERRA2, you need an Earthdata account, and pre-authorize the "NASA GESDISC DATA ARCHIVE" application, following https://disc.gsfc.nasa.gov/earthdata-login.
'''
def cmdLineParse():
parser = argparse.ArgumentParser(description='Tropospheric correction using weather models\n'+\
' PyAPS is used to download and calculate the delay for each time-series epoch.',\
formatter_class=argparse.RawTextHelpFormatter,\
epilog=REFERENCE+'\n'+DATA_INFO+'\n'+EXAMPLE)
parser.add_argument(dest='timeseries_file', nargs='?', help='timeseries HDF5 file, i.e. timeseries.h5')
parser.add_argument('-d','--dem', dest='dem_file',\
help='DEM file, i.e. radar_4rlks.hgt, srtm1.dem')
parser.add_argument('-i', dest='inc_angle', default='30',\
help='a file containing all incidence angles, or a single number applied to the whole image.')
parser.add_argument('--weather-dir', dest='weather_dir', \
help='directory to put downloaded weather data, i.e. ./../WEATHER\n'+\
'use directory of input timeseries_file if not specified.')
parser.add_argument('--delay', dest='delay_type', default='comb', choices={'comb','dry','wet'},\
help='Delay type to calculate, comb contains both wet and dry delays')
parser.add_argument('--download', action='store_true', help='Download weather data only.')
parser.add_argument('--date-list', dest='date_list_file',\
help='Read the first column of the text file as the list of dates to download data\n'+\
'in YYYYMMDD or YYMMDD format')
parser.add_argument('--ref-yx', dest='ref_yx', type=int, nargs=2, help='reference pixel in y/x')
parser.add_argument('-s', dest='weather_model',\
default='ECMWF', choices={'ECMWF','ERA-Interim','ERA','MERRA','MERRA1','NARR'},\
help='source of the atmospheric data.\n'+\
'As of 2018-Mar-06, the ERA and ECMWF data download links are working.\n'+\
'NARR is working for 1979-Jan to 2014-Oct.\n'+\
'MERRA(2) is not working.')
parser.add_argument('--hour', help='time of data in HH, e.g. 12, 06')
parser.add_argument('--template', dest='template_file',\
help='template file with input options below:\n'+TEMPLATE)
parser.add_argument('-o', dest='out_file', help='Output file name for the troposphere-corrected timeseries.')
inps = parser.parse_args()
# Calculate DELAY or DOWNLOAD DATA ONLY; one of the two modes is required
if not inps.download and not inps.dem_file and ( not inps.timeseries_file or not inps.date_list_file ):
parser.print_help()
sys.exit(1)
return inps
###############################################################
def main(argv):
inps = cmdLineParse()
k = None
atr = dict()
if inps.timeseries_file:
inps.timeseries_file = ut.get_file_list([inps.timeseries_file])[0]
atr = readfile.read_attribute(inps.timeseries_file)
k = atr['FILE_TYPE']
elif inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
atr = readfile.read_attribute(inps.dem_file)
if 'ref_y' not in atr.keys() and inps.ref_yx:
print 'No reference info found in input file, using input ref_yx: '+str(inps.ref_yx)
atr['ref_y'] = inps.ref_yx[0]
atr['ref_x'] = inps.ref_yx[1]
##Read Incidence angle: to map the zenith delay to the slant delay
if os.path.isfile(inps.inc_angle):
inps.inc_angle = readfile.read(inps.inc_angle, epoch='incidenceAngle')[0]
else:
inps.inc_angle = float(inps.inc_angle)
print 'incidence angle: '+str(inps.inc_angle)
inps.inc_angle = inps.inc_angle*np.pi/180.0
##Prepare DEM file in ROI_PAC format for PyAPS to read
if inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
if os.path.splitext(inps.dem_file)[1] in ['.h5']:
print 'convert DEM file to ROIPAC format'
dem, atr_dem = readfile.read(inps.dem_file, epoch='height')
if 'Y_FIRST' in atr.keys():
atr_dem['FILE_TYPE'] = '.dem'
else:
atr_dem['FILE_TYPE'] = '.hgt'
outname = os.path.splitext(inps.dem_file)[0]+'4pyaps'+atr_dem['FILE_TYPE']
inps.dem_file = writefile.write(dem, atr_dem, outname)
print '*******************************************************************************'
print 'Downloading weather model data ...'
## Get Grib Source
if inps.weather_model in ['ECMWF','ERA-Interim']: inps.grib_source = 'ECMWF'
elif inps.weather_model == 'ERA' : inps.grib_source = 'ERA'
elif inps.weather_model == 'MERRA': inps.grib_source = 'MERRA'
elif inps.weather_model == 'NARR' : inps.grib_source = 'NARR'
else: raise Exception('Unrecognized weather model: '+inps.weather_model)
print 'grib source: '+inps.grib_source
# Get weather directory
if not inps.weather_dir:
if inps.timeseries_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.timeseries_file))+'/../WEATHER'
elif inps.dem_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.dem_file))+'/../WEATHER'
else:
inps.weather_dir = os.path.abspath(os.getcwd())
print 'Store weather data into directory: '+inps.weather_dir
# Get date list to download
if not inps.date_list_file:
print 'read date list info from: '+inps.timeseries_file
h5 = h5py.File(inps.timeseries_file, 'r')
if 'timeseries' in h5.keys():
date_list = sorted(h5[k].keys())
elif k in ['interferograms','coherence','wrapped']:
ifgram_list = sorted(h5[k].keys())
date12_list = ptime.list_ifgram2date12(ifgram_list)
m_dates = [i.split('-')[0] for i in date12_list]
s_dates = [i.split('-')[1] for i in date12_list]
date_list = ptime.yyyymmdd(sorted(list(set(m_dates + s_dates))))
else:
raise ValueError('Unsupported input file type: '+k)
h5.close()
else:
date_list = ptime.yyyymmdd(np.loadtxt(inps.date_list_file, dtype=str, usecols=(0,)).tolist())
print 'read date list info from: '+inps.date_list_file
# Get Acquisition time - hour
if not inps.hour:
inps.hour = ptime.closest_weather_product_time(atr['CENTER_LINE_UTC'], inps.grib_source)
print 'Time of closest available product: '+inps.hour
## Download data using PyAPS
inps.grib_file_list = dload_grib(date_list, inps.hour, inps.weather_model, inps.weather_dir)
if inps.download:
print 'Download completed, exit as planned.'
return
print '*******************************************************************************'
print 'Calculating delay for each epoch.'
## Calculate tropo delay using pyaps
length = int(atr['FILE_LENGTH'])
width = int(atr['WIDTH'])
date_num = len(date_list)
trop_ts = np.zeros((date_num, length, width), np.float32)
for i in range(date_num):
grib_file = inps.grib_file_list[i]
date = date_list[i]
print 'calculate phase delay on %s from file %s' % (date, os.path.basename(grib_file))
trop_ts[i] = get_delay(grib_file, atr, vars(inps))
## Convert relative phase delay on reference date
try: ref_date = atr['ref_date']
except: ref_date = date_list[0]
print 'convert to relative phase delay with reference date: '+ref_date
ref_idx = date_list.index(ref_date)
trop_ts -= np.tile(trop_ts[ref_idx,:,:], (date_num, 1, 1))
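# Editorial note: np.tile() expands the reference epoch to the full
# (date_num, length, width) cube; plain broadcasting, trop_ts -= trop_ts[ref_idx],
# would be an equivalent, copy-free alternative.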
## Write tropospheric delay to HDF5
tropFile = inps.grib_source+'.h5'
print 'writing >>> %s' % (tropFile)
h5trop = h5py.File(tropFile, 'w')
group_trop = h5trop.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
group_trop.create_dataset(date, data=trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
# Write Attributes
for key,value in atr.iteritems():
group_trop.attrs[key] = value
h5trop.close()
## Write corrected Time series to HDF5
if k == 'timeseries':
if not inps.out_file:
inps.out_file = os.path.splitext(inps.timeseries_file)[0]+'_'+inps.grib_source+'.h5'
print 'writing >>> %s' % (inps.out_file)
h5ts = h5py.File(inps.timeseries_file, 'r')
h5tsCor = h5py.File(inps.out_file, 'w')
group_tsCor = h5tsCor.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
ts = h5ts['timeseries'].get(date)[:]
group_tsCor.create_dataset(date, data=ts-trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
h5ts.close()
# Write Attributes
for key,value in atr.iteritems():
group_tsCor.attrs[key] = value
h5tsCor.close()
# Delete temporary DEM file in ROI_PAC format
if '4pyaps' in inps.dem_file:
rmCmd = 'rm %s %s.rsc' % (inps.dem_file, inps.dem_file)
print rmCmd
os.system(rmCmd)
print 'Done.'
return inps.out_file
###############################################################
if __name__ == '__main__':
main(sys.argv[1:])
| dload_grib | identifier_name |
tropcor_pyaps.py | #! /usr/bin/env python2
############################################################
# Program is part of PySAR v1.2 #
# Copyright(c) 2015, Heresh Fattahi, Zhang Yunjun #
# Author: Heresh Fattahi, Zhang Yunjun #
############################################################
import os
import sys
import argparse
import re
try:
import pyaps as pa
except:
sys.exit('Cannot import pyaps into Python!')
import h5py
import numpy as np
import pysar._datetime as ptime
import pysar._pysar_utilities as ut
import pysar._readfile as readfile
import pysar._writefile as writefile
###############################################################
def get_delay(grib_file, atr, inps_dict):
'''Get delay matrix using PyAPS for one acquisition
Inputs:
grib_file - string, grib file path
atr - dict, including the following attributes:
dem_file - string, DEM file path
grib_source - string, Weather re-analysis data source
delay_type - string, comb/dry/wet
ref_y/x - string, reference pixel row/col number
inc_angle - np.array, 0/1/2 D
Output:
phs - 2D np.array, absolute tropospheric phase delay relative to ref_y/x
'''
if 'X_FIRST' in atr.keys():
aps = pa.PyAPS_geo(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
else:
aps = pa.PyAPS_rdr(grib_file, inps_dict['dem_file'], grib=inps_dict['grib_source'],\
verb=True, Del=inps_dict['delay_type'])
phs = np.zeros((aps.ny, aps.nx), dtype=np.float32)
aps.getdelay(phs, inc=0.0)
# Get relative phase delay in space
yref = int(atr['ref_y'])
xref = int(atr['ref_x'])
phs -= phs[yref, xref]
# project into LOS direction
phs /= np.cos(inps_dict['inc_angle'])
# reverse the sign for consistency between different phase correction steps/methods
phs *= -1
return phs
def date_list2grib_file(date_list, hour, grib_source, grib_dir):
grib_file_list = []
for d in date_list:
grib_file = grib_dir+'/'
if grib_source == 'ECMWF' : grib_file += 'ERA-Int_%s_%s.grb' % (d, hour)
elif grib_source == 'ERA' : grib_file += 'ERA_%s_%s.grb' % (d, hour)
elif grib_source == 'NARR' : grib_file += 'narr-a_221_%s_%s00_000.grb' % (d, hour)
elif grib_source == 'MERRA' : grib_file += 'merra-%s-%s.nc4' % (d, hour)
elif grib_source == 'MERRA1': grib_file += 'merra-%s-%s.hdf' % (d, hour)
grib_file_list.append(grib_file)
return grib_file_list
def dload_grib(date_list, hour, grib_source='ECMWF', weather_dir='./'):
'''Download weather re-analysis grib files using PyAPS
Inputs:
date_list : list of string in YYYYMMDD format
hour : string in HH:MM or HH format
grib_source : string,
weather_dir : string,
Output:
grib_file_list : list of string
'''
## Grib data directory
weather_dir = os.path.abspath(weather_dir)
grib_dir = weather_dir+'/'+grib_source
if not os.path.isdir(grib_dir):
print 'making directory: '+grib_dir
os.makedirs(grib_dir)
## Date list to grib file list
grib_file_list = date_list2grib_file(date_list, hour, grib_source, grib_dir)
## Get date list to download (skip already downloaded files)
grib_file_existed = ut.get_file_list(grib_file_list)
if grib_file_existed:
grib_filesize_digit = ut.mode([len(str(os.path.getsize(i))) for i in grib_file_existed])
grib_filesize_max2 = ut.mode([str(os.path.getsize(i))[0:2] for i in grib_file_existed])
grib_file_corrupted = [i for i in grib_file_existed if (len(str(os.path.getsize(i))) != grib_filesize_digit or\
str(os.path.getsize(i))[0:2] != grib_filesize_max2)]
print 'file size mode: %se%d bytes' % (grib_filesize_max2, grib_filesize_digit-2)
print 'number of grib files existed : %d' % len(grib_file_existed)
if grib_file_corrupted:
print '------------------------------------------------------------------------------'
print 'corrupted grib files detected! Deleting them for re-download...'
print 'number of grib files corrupted : %d' % len(grib_file_corrupted)
for i in grib_file_corrupted:
rmCmd = 'rm '+i
print rmCmd
os.system(rmCmd)
grib_file_existed.remove(i)
print '------------------------------------------------------------------------------'
grib_file2download = sorted(list(set(grib_file_list) - set(grib_file_existed)))
date_list2download = [str(re.findall(r'\d{8}', i)[0]) for i in grib_file2download]
print 'number of grib files to download: %d' % len(date_list2download)
print '------------------------------------------------------------------------------\n'
## Download grib file using PyAPS
if grib_source == 'ECMWF' : pa.ECMWFdload( date_list2download, hour, grib_dir)
elif grib_source == 'ERA' : pa.ERAdload( date_list2download, hour, grib_dir)
elif grib_source == 'NARR' : pa.NARRdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA' : pa.MERRAdload( date_list2download, hour, grib_dir)
elif grib_source == 'MERRA1': pa.MERRA1dload(date_list2download, hour, grib_dir)
return grib_file_existed
###############################################################
EXAMPLE='''example:
tropcor_pyaps.py timeseries.h5 -d geometryRadar.h5 -i geometryRadar.h5
tropcor_pyaps.py timeseries.h5 -d geometryGeo.h5 -i geometryGeo.h5 --weather-dir /famelung/data/WEATHER
tropcor_pyaps.py -d srtm1.dem -i 30 --hour 00 --ref-yx 2000 2500 --date-list date_list.txt
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s NARR
tropcor_pyaps.py timeseries.h5 -d demRadar.h5 -s MERRA --delay dry -i 23
tropcor_pyaps.py timeseries_LODcor.h5 -d demRadar.h5
tropcor_pyaps.py -s ECMWF --hour 18 --date-list date_list.txt --download
tropcor_pyaps.py -s ECMWF --hour 18 --date-list bl_list.txt --download
'''
REFERENCE='''reference:
Jolivet, R., R. Grandin, C. Lasserre, M.-P. Doin and G. Peltzer (2011), Systematic InSAR tropospheric
phase delay corrections from global meteorological reanalysis data, Geophys. Res. Lett., 38, L17311,
doi:10.1029/2011GL048757
'''
TEMPLATE='''
## 7. Tropospheric Delay Correction (optional and recommended)
## correct tropospheric delay using the following methods:
## a. pyaps - use weather re-analysis data (Jolivet et al., 2011, GRL, need to install PyAPS; Dee et al., 2011)
## b. height_correlation - correct stratified tropospheric delay (Doin et al., 2009, J Applied Geop)
## c. base_trop_cor - (not recommend) baseline error and stratified tropo simultaneously (Jo et al., 2010, Geo J)
pysar.troposphericDelay.method = auto #[pyaps / height_correlation / base_trop_cor / no], auto for pyaps
pysar.troposphericDelay.weatherModel = auto #[ECMWF / MERRA / NARR], auto for ECMWF, for pyaps method
pysar.troposphericDelay.polyOrder = auto #[1 / 2 / 3], auto for 1, for height_correlation method
pysar.troposphericDelay.looks = auto #[1-inf], auto for 8, Number of looks to be applied to interferogram
'''
DATA_INFO='''
re-analysis_dataset coverage temporal_resolution spatial_resolution latency analysis
------------------------------------------------------------------------------------------------------------
ERA-Interim (by ECMWF) Global 00/06/12/18 UTC 0.75 deg (~83 km) 2-month 4D-var
MERRA2 (by NASA Goddard) Global 00/06/12/18 UTC 0.5 * 0.625 (~50 km) 2-3 weeks 3D-var
To download MERRA2, you need an Earthdata account, and pre-authorize the "NASA GESDISC DATA ARCHIVE" application, following https://disc.gsfc.nasa.gov/earthdata-login.
'''
def cmdLineParse():
parser = argparse.ArgumentParser(description='Tropospheric correction using weather models\n'+\
' PyAPS is used to download and calculate the delay for each time-series epoch.',\
formatter_class=argparse.RawTextHelpFormatter,\
epilog=REFERENCE+'\n'+DATA_INFO+'\n'+EXAMPLE)
parser.add_argument(dest='timeseries_file', nargs='?', help='timeseries HDF5 file, i.e. timeseries.h5')
parser.add_argument('-d','--dem', dest='dem_file',\
help='DEM file, i.e. radar_4rlks.hgt, srtm1.dem')
parser.add_argument('-i', dest='inc_angle', default='30',\
help='a file containing all incidence angles, or a single number applied to the whole image.')
parser.add_argument('--weather-dir', dest='weather_dir', \
help='directory to put downloaded weather data, i.e. ./../WEATHER\n'+\
'use directory of input timeseries_file if not specified.')
parser.add_argument('--delay', dest='delay_type', default='comb', choices={'comb','dry','wet'},\
help='Delay type to calculate, comb contains both wet and dry delays')
parser.add_argument('--download', action='store_true', help='Download weather data only.')
parser.add_argument('--date-list', dest='date_list_file',\
help='Read the first column of the text file as the list of dates to download data\n'+\
'in YYYYMMDD or YYMMDD format')
parser.add_argument('--ref-yx', dest='ref_yx', type=int, nargs=2, help='reference pixel in y/x')
parser.add_argument('-s', dest='weather_model',\
default='ECMWF', choices={'ECMWF','ERA-Interim','ERA','MERRA','MERRA1','NARR'},\
help='source of the atmospheric data.\n'+\
'As of 2018-Mar-06, the ERA and ECMWF data download links are working.\n'+\
'NARR is working for 1979-Jan to 2014-Oct.\n'+\
'MERRA(2) is not working.')
parser.add_argument('--hour', help='time of data in HH, e.g. 12, 06')
parser.add_argument('--template', dest='template_file',\
help='template file with input options below:\n'+TEMPLATE)
parser.add_argument('-o', dest='out_file', help='Output file name for the troposphere-corrected timeseries.')
inps = parser.parse_args()
# Calculate DELAY or DOWNLOAD DATA ONLY; one of the two modes is required
if not inps.download and not inps.dem_file and ( not inps.timeseries_file or not inps.date_list_file ):
parser.print_help()
sys.exit(1)
return inps
###############################################################
def main(argv):
inps = cmdLineParse()
k = None
atr = dict()
if inps.timeseries_file:
inps.timeseries_file = ut.get_file_list([inps.timeseries_file])[0]
atr = readfile.read_attribute(inps.timeseries_file)
k = atr['FILE_TYPE']
elif inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
atr = readfile.read_attribute(inps.dem_file)
if 'ref_y' not in atr.keys() and inps.ref_yx:
print 'No reference info found in input file, using input ref_yx: '+str(inps.ref_yx)
atr['ref_y'] = inps.ref_yx[0]
atr['ref_x'] = inps.ref_yx[1]
##Read Incidence angle: to map the zenith delay to the slant delay
if os.path.isfile(inps.inc_angle):
inps.inc_angle = readfile.read(inps.inc_angle, epoch='incidenceAngle')[0]
else:
inps.inc_angle = float(inps.inc_angle)
print 'incidence angle: '+str(inps.inc_angle)
inps.inc_angle = inps.inc_angle*np.pi/180.0
##Prepare DEM file in ROI_PAC format for PyAPS to read
if inps.dem_file:
inps.dem_file = ut.get_file_list([inps.dem_file])[0]
if os.path.splitext(inps.dem_file)[1] in ['.h5']:
print 'convert DEM file to ROIPAC format'
dem, atr_dem = readfile.read(inps.dem_file, epoch='height')
if 'Y_FIRST' in atr.keys():
atr_dem['FILE_TYPE'] = '.dem'
else:
atr_dem['FILE_TYPE'] = '.hgt'
outname = os.path.splitext(inps.dem_file)[0]+'4pyaps'+atr_dem['FILE_TYPE']
inps.dem_file = writefile.write(dem, atr_dem, outname)
print '*******************************************************************************'
print 'Downloading weather model data ...'
## Get Grib Source
if inps.weather_model in ['ECMWF','ERA-Interim']: inps.grib_source = 'ECMWF'
elif inps.weather_model == 'ERA' : inps.grib_source = 'ERA'
elif inps.weather_model == 'MERRA': inps.grib_source = 'MERRA'
elif inps.weather_model == 'NARR' : inps.grib_source = 'NARR'
else: raise Exception('Unrecognized weather model: '+inps.weather_model)
print 'grib source: '+inps.grib_source
# Get weather directory
if not inps.weather_dir:
if inps.timeseries_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.timeseries_file))+'/../WEATHER'
elif inps.dem_file:
inps.weather_dir = os.path.dirname(os.path.abspath(inps.dem_file))+'/../WEATHER'
else:
inps.weather_dir = os.path.abspath(os.getcwd())
print 'Store weather data into directory: '+inps.weather_dir
# Get date list to download
if not inps.date_list_file:
print 'read date list info from: '+inps.timeseries_file
h5 = h5py.File(inps.timeseries_file, 'r')
if 'timeseries' in h5.keys():
date_list = sorted(h5[k].keys())
elif k in ['interferograms','coherence','wrapped']:
ifgram_list = sorted(h5[k].keys())
date12_list = ptime.list_ifgram2date12(ifgram_list)
m_dates = [i.split('-')[0] for i in date12_list]
s_dates = [i.split('-')[1] for i in date12_list]
date_list = ptime.yyyymmdd(sorted(list(set(m_dates + s_dates))))
else:
raise ValueError('Unsupported input file type: '+k)
h5.close()
else:
date_list = ptime.yyyymmdd(np.loadtxt(inps.date_list_file, dtype=str, usecols=(0,)).tolist())
print 'read date list info from: '+inps.date_list_file
# Get Acquisition time - hour
if not inps.hour:
inps.hour = ptime.closest_weather_product_time(atr['CENTER_LINE_UTC'], inps.grib_source)
print 'Time of closest available product: '+inps.hour
## Download data using PyAPS
inps.grib_file_list = dload_grib(date_list, inps.hour, inps.weather_model, inps.weather_dir)
if inps.download:
print 'Download completed, exit as planned.'
return
print '*******************************************************************************'
print 'Calculating delay for each epoch.'
## Calculate tropo delay using pyaps
length = int(atr['FILE_LENGTH'])
width = int(atr['WIDTH'])
date_num = len(date_list)
trop_ts = np.zeros((date_num, length, width), np.float32)
for i in range(date_num):
grib_file = inps.grib_file_list[i]
date = date_list[i]
print 'calculate phase delay on %s from file %s' % (date, os.path.basename(grib_file))
trop_ts[i] = get_delay(grib_file, atr, vars(inps))
## Convert relative phase delay on reference date
try: ref_date = atr['ref_date']
except: ref_date = date_list[0]
print 'convert to relative phase delay with reference date: '+ref_date
ref_idx = date_list.index(ref_date)
trop_ts -= np.tile(trop_ts[ref_idx,:,:], (date_num, 1, 1))
## Write tropospheric delay to HDF5
tropFile = inps.grib_source+'.h5'
print 'writing >>> %s' % (tropFile)
h5trop = h5py.File(tropFile, 'w')
group_trop = h5trop.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
group_trop.create_dataset(date, data=trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
# Write Attributes
for key,value in atr.iteritems():
group_trop.attrs[key] = value
h5trop.close()
## Write corrected Time series to HDF5
if k == 'timeseries':
if not inps.out_file:
inps.out_file = os.path.splitext(inps.timeseries_file)[0]+'_'+inps.grib_source+'.h5'
print 'writing >>> %s' % (inps.out_file)
h5ts = h5py.File(inps.timeseries_file, 'r')
h5tsCor = h5py.File(inps.out_file, 'w')
group_tsCor = h5tsCor.create_group('timeseries')
print 'number of acquisitions: '+str(date_num)
prog_bar = ptime.progress_bar(maxValue=date_num)
for i in range(date_num):
date = date_list[i]
ts = h5ts['timeseries'].get(date)[:]
group_tsCor.create_dataset(date, data=ts-trop_ts[i], compression='gzip')
prog_bar.update(i+1, suffix=date)
prog_bar.close()
h5ts.close()
# Write Attributes
for key,value in atr.iteritems():
group_tsCor.attrs[key] = value
h5tsCor.close()
# Delete temporary DEM file in ROI_PAC format
if '4pyaps' in inps.dem_file:
rmCmd = 'rm %s %s.rsc' % (inps.dem_file, inps.dem_file)
print rmCmd
os.system(rmCmd)
print 'Done.'
return inps.out_file | main(sys.argv[1:]) |
###############################################################
if __name__ == '__main__': | random_line_split |
function_system.rs | use crate::{
archetype::{ArchetypeComponentId, ArchetypeGeneration, ArchetypeId},
component::{ComponentId, Tick},
prelude::FromWorld,
query::{Access, FilteredAccessSet},
system::{check_system_change_tick, ReadOnlySystemParam, System, SystemParam, SystemParamItem},
world::{unsafe_world_cell::UnsafeWorldCell, World, WorldId},
};
use bevy_utils::all_tuples;
use std::{any::TypeId, borrow::Cow, marker::PhantomData};
use super::{In, IntoSystem, ReadOnlySystem};
/// The metadata of a [`System`].
#[derive(Clone)]
pub struct SystemMeta {
pub(crate) name: Cow<'static, str>,
pub(crate) component_access_set: FilteredAccessSet<ComponentId>,
pub(crate) archetype_component_access: Access<ArchetypeComponentId>,
// NOTE: this must be kept private. making a SystemMeta non-send is irreversible to prevent
// SystemParams from overriding each other
is_send: bool,
pub(crate) last_run: Tick,
}
impl SystemMeta {
pub(crate) fn new<T>() -> Self {
Self {
name: std::any::type_name::<T>().into(),
archetype_component_access: Access::default(),
component_access_set: FilteredAccessSet::default(),
is_send: true,
last_run: Tick::new(0),
}
}
/// Returns the system's name
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns true if the system is [`Send`].
#[inline]
pub fn is_send(&self) -> bool {
self.is_send
}
/// Sets the system to be not [`Send`].
///
/// This is irreversible.
#[inline]
pub fn set_non_send(&mut self) {
self.is_send = false;
}
}
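// Editorial sketch (not part of the original): a custom `SystemParam` that wraps
// main-thread-only data would typically flag this while building its state, e.g.
//
// fn init_state(world: &mut World, system_meta: &mut SystemMeta) -> Self::State {
//     system_meta.set_non_send(); // the owning system is now pinned to the main thread
//     /* ... */
// }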
// TODO: Actually use this in FunctionSystem. We should probably only do this once Systems are constructed using a World reference
// (to avoid the need for unwrapping to retrieve SystemMeta)
/// Holds on to persistent state required to drive [`SystemParam`] for a [`System`].
///
/// This is a very powerful and convenient tool for working with exclusive world access,
/// allowing you to fetch data from the [`World`] as if you were running a [`System`].
///
/// Borrow-checking is handled for you, allowing you to mutably access multiple compatible system parameters at once,
/// and arbitrary system parameters (like [`EventWriter`](crate::event::EventWriter)) can be conveniently fetched.
///
/// For an alternative approach to split mutable access to the world, see [`World::resource_scope`].
///
/// # Warning
///
/// [`SystemState`] values created can be cached to improve performance,
/// and *must* be cached and reused in order for system parameters that rely on local state to work correctly.
/// These include:
/// - [`Added`](crate::query::Added) and [`Changed`](crate::query::Changed) query filters
/// - [`Local`](crate::system::Local) variables that hold state
/// - [`EventReader`](crate::event::EventReader) system parameters, which rely on a [`Local`](crate::system::Local) to track which events have been seen
///
/// # Example
///
/// Basic usage:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// # #[derive(Resource)]
/// # struct MyResource(u32);
/// #
/// # #[derive(Component)]
/// # struct MyComponent;
/// #
/// // Work directly on the `World`
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
///
/// // Construct a `SystemState` struct, passing in a tuple of `SystemParam`
/// // as if you were writing an ordinary system.
/// let mut system_state: SystemState<(
/// EventWriter<MyEvent>,
/// Option<ResMut<MyResource>>,
/// Query<&MyComponent>,
/// )> = SystemState::new(&mut world);
///
/// // Use system_state.get_mut(&mut world) and unpack your system parameters into variables!
/// // system_state.get(&world) provides read-only versions of your system parameters instead.
/// let (event_writer, maybe_resource, query) = system_state.get_mut(&mut world);
///
/// // If you are using `Commands`, you can choose when you want to apply them to the world.
/// // You need to manually call `.apply(world)` on the `SystemState` to apply them.
/// ```
/// Caching:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// #[derive(Resource)]
/// struct CachedSystemState {
/// event_state: SystemState<EventReader<'static, 'static, MyEvent>>,
/// }
///
/// // Create and store a system state once
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
/// let initial_state: SystemState<EventReader<MyEvent>> = SystemState::new(&mut world);
///
/// // The system state is cached in a resource
/// world.insert_resource(CachedSystemState {
/// event_state: initial_state,
/// });
///
/// // Later, fetch the cached system state, saving on overhead
/// world.resource_scope(|world, mut cached_state: Mut<CachedSystemState>| {
/// let mut event_reader = cached_state.event_state.get_mut(world);
///
/// for event in event_reader.iter() {
/// println!("Hello World!");
/// }
/// });
/// ```
pub struct SystemState<Param: SystemParam + 'static> {
meta: SystemMeta,
param_state: Param::State,
world_id: WorldId,
archetype_generation: ArchetypeGeneration,
}
impl<Param: SystemParam> SystemState<Param> {
/// Creates a new [`SystemState`] with default state.
///
/// ## Note
/// For users of [`SystemState::get_manual`] or [`get_manual_mut`](SystemState::get_manual_mut):
///
/// `new` does not cache any of the world's archetypes, so you must call [`SystemState::update_archetypes`]
/// manually before calling `get_manual{_mut}`.
pub fn new(world: &mut World) -> Self {
let mut meta = SystemMeta::new::<Param>();
meta.last_run = world.change_tick().relative_to(Tick::MAX);
let param_state = Param::init_state(world, &mut meta);
Self {
meta,
param_state,
world_id: world.id(),
archetype_generation: ArchetypeGeneration::initial(),
}
}
/// Gets the metadata for this instance.
#[inline]
pub fn meta(&self) -> &SystemMeta {
&self.meta
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
#[inline]
pub fn get<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell_readonly()) }
}
/// Retrieve the mutable [`SystemParam`] values.
#[inline]
pub fn get_mut<'w, 's>(&'s mut self, world: &'w mut World) -> SystemParamItem<'w, 's, Param> {
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell()) }
}
/// Applies all state queued up for [`SystemParam`] values. For example, this will apply commands queued up
/// by a [`Commands`](`super::Commands`) parameter to the given [`World`].
/// This function should be called manually after the values returned by [`SystemState::get`] and [`SystemState::get_mut`]
/// are finished being used.
pub fn apply(&mut self, world: &mut World) {
Param::apply(&mut self.param_state, &self.meta, world);
}
/// Returns `true` if `world_id` matches the [`World`] that was used to call [`SystemState::new`].
/// Otherwise, this returns false.
#[inline]
pub fn matches_world(&self, world_id: WorldId) -> bool {
self.world_id == world_id
}
/// Asserts that the [`SystemState`] matches the provided world.
#[inline]
fn validate_world(&self, world_id: WorldId) {
assert!(self.matches_world(world_id), "Encountered a mismatched World. A SystemState cannot be used with Worlds other than the one it was created with.");
}
/// Updates the state's internal view of the [`World`]'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
#[inline]
pub fn update_archetypes(&mut self, world: &World) {
self.update_archetypes_unsafe_world_cell(world.as_unsafe_world_cell_readonly());
}
/// Updates the state's internal view of the `world`'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
///
/// # Note
///
/// This method only accesses world metadata.
#[inline]
pub fn update_archetypes_unsafe_world_cell(&mut self, world: UnsafeWorldCell) {
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
Param::new_archetype(
&mut self.param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.meta,
);
}
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
/// This will not update the state's view of the world's archetypes automatically nor increment the
/// world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get`] over this function.
#[inline]
pub fn get_manual<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
let change_tick = world.read_change_tick();
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell_readonly(), change_tick) }
}
/// Retrieve the mutable [`SystemParam`] values. This will not update the state's view of the world's archetypes
/// automatically nor increment the world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get_mut`] over this function.
#[inline]
pub fn get_manual_mut<'w, 's>(
&'s mut self,
world: &'w mut World,
) -> SystemParamItem<'w, 's, Param> {
self.validate_world(world.id());
let change_tick = world.change_tick();
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell(), change_tick) }
}
/// Retrieve the [`SystemParam`] values. This will not update archetypes automatically.
///
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
pub unsafe fn get_unchecked_manual<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
) -> SystemParamItem<'w, 's, Param> {
let change_tick = world.increment_change_tick();
self.fetch(world, change_tick)
}
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
unsafe fn fetch<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
change_tick: Tick,
) -> SystemParamItem<'w, 's, Param> {
let param = Param::get_param(&mut self.param_state, &self.meta, world, change_tick);
self.meta.last_run = change_tick;
param
}
}
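// Editorial sketch of the manual flow described above (hypothetical component type):
//
// let mut state: SystemState<Query<&MyComponent>> = SystemState::new(&mut world);
// state.update_archetypes(&world);      // required before the *_manual getters
// let query = state.get_manual(&world); // fetches without bumping the change tick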
impl<Param: SystemParam> FromWorld for SystemState<Param> {
fn from_world(world: &mut World) -> Self {
Self::new(world)
}
}
/// The [`System`] counter part of an ordinary function.
///
/// You get this by calling [`IntoSystem::into_system`] on a function that only accepts
/// [`SystemParam`]s. The output of the system becomes the functions return type, while the input
/// becomes the functions [`In`] tagged parameter or `()` if no such parameter exists.
///
/// A [`FunctionSystem`] must be `.initialized` before it can be run.
///
/// The [`Clone`] implementation for [`FunctionSystem`] returns a new instance which
/// is NOT initialized. The cloned system must also be `.initialized` before it can be run.
pub struct FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
func: F,
param_state: Option<<F::Param as SystemParam>::State>,
system_meta: SystemMeta,
world_id: Option<WorldId>,
archetype_generation: ArchetypeGeneration,
// NOTE: PhantomData<fn() -> T> gives this type safe Send/Sync impls
marker: PhantomData<fn() -> Marker>,
}
// De-initializes the cloned system.
impl<Marker, F> Clone for FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker> + Clone,
{
fn clone(&self) -> Self {
Self {
func: self.func.clone(),
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
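// Editorial sketch: because `clone` drops `param_state`, a cloned system must be
// re-initialized before it is run (hypothetical values):
//
// let mut copy = my_system.clone();
// copy.initialize(&mut world); // otherwise running panics with PARAM_MESSAGE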
/// A marker type used to distinguish regular function systems from exclusive function systems.
#[doc(hidden)]
pub struct IsFunctionSystem;
impl<Marker, F> IntoSystem<F::In, F::Out, (IsFunctionSystem, Marker)> for F
where
Marker: 'static,
F: SystemParamFunction<Marker>,
{
type System = FunctionSystem<Marker, F>;
fn into_system(func: Self) -> Self::System {
FunctionSystem {
func,
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
impl<Marker, F> FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
/// Message shown when a system isn't initialized
// When lines get too long, rustfmt can sometimes refuse to format them.
// Work around this by storing the message separately.
const PARAM_MESSAGE: &'static str = "System's param_state was not found. Did you forget to initialize this system before running it?";
}
impl<Marker, F> System for FunctionSystem<Marker, F>
where
Marker: 'static,
F: SystemParamFunction<Marker>,
{
type In = F::In;
type Out = F::Out;
#[inline]
fn name(&self) -> Cow<'static, str> {
self.system_meta.name.clone()
}
#[inline]
fn type_id(&self) -> TypeId {
TypeId::of::<F>()
}
#[inline]
fn component_access(&self) -> &Access<ComponentId> {
self.system_meta.component_access_set.combined_access()
}
#[inline]
fn archetype_component_access(&self) -> &Access<ArchetypeComponentId> {
&self.system_meta.archetype_component_access
}
#[inline]
fn is_send(&self) -> bool |
#[inline]
fn is_exclusive(&self) -> bool {
false
}
#[inline]
unsafe fn run_unsafe(&mut self, input: Self::In, world: UnsafeWorldCell) -> Self::Out {
let change_tick = world.increment_change_tick();
// SAFETY:
// - The caller has invoked `update_archetype_component_access`, which will panic
// if the world does not match.
// - All world accesses used by `F::Param` have been registered, so the caller
// will ensure that there are no data access conflicts.
let params = F::Param::get_param(
self.param_state.as_mut().expect(Self::PARAM_MESSAGE),
&self.system_meta,
world,
change_tick,
);
let out = self.func.run(input, params);
self.system_meta.last_run = change_tick;
out
}
fn get_last_run(&self) -> Tick {
self.system_meta.last_run
}
fn set_last_run(&mut self, last_run: Tick) {
self.system_meta.last_run = last_run;
}
#[inline]
fn apply_deferred(&mut self, world: &mut World) {
let param_state = self.param_state.as_mut().expect(Self::PARAM_MESSAGE);
F::Param::apply(param_state, &self.system_meta, world);
}
#[inline]
fn initialize(&mut self, world: &mut World) {
self.world_id = Some(world.id());
self.system_meta.last_run = world.change_tick().relative_to(Tick::MAX);
self.param_state = Some(F::Param::init_state(world, &mut self.system_meta));
}
fn update_archetype_component_access(&mut self, world: UnsafeWorldCell) {
assert!(self.world_id == Some(world.id()), "Encountered a mismatched World. A System cannot be used with Worlds other than the one it was initialized with.");
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
let param_state = self.param_state.as_mut().unwrap();
F::Param::new_archetype(
param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.system_meta,
);
}
}
#[inline]
fn check_change_tick(&mut self, change_tick: Tick) {
check_system_change_tick(
&mut self.system_meta.last_run,
change_tick,
self.system_meta.name.as_ref(),
);
}
fn default_system_sets(&self) -> Vec<Box<dyn crate::schedule::SystemSet>> {
let set = crate::schedule::SystemTypeSet::<F>::new();
vec![Box::new(set)]
}
}
/// SAFETY: `F`'s param is [`ReadOnlySystemParam`], so this system will only read from the world.
unsafe impl<Marker, F> ReadOnlySystem for FunctionSystem<Marker, F>
where
Marker: 'static,
F: SystemParamFunction<Marker>,
F::Param: ReadOnlySystemParam,
{
}
/// A trait implemented for all functions that can be used as [`System`]s.
///
/// This trait can be useful for making your own systems which accept other systems,
/// sometimes called higher order systems.
///
/// This should be used in combination with [`ParamSet`] when calling other systems
/// within your system.
/// Using [`ParamSet`] in this case avoids [`SystemParam`] collisions.
///
/// # Example
///
/// To create something like [`PipeSystem`], but in entirely safe code.
///
/// ```rust
/// use std::num::ParseIntError;
///
/// use bevy_ecs::prelude::*;
///
/// /// Pipe creates a new system which calls `a`, then calls `b` with the output of `a`
/// pub fn pipe<A, B, AMarker, BMarker>(
/// mut a: A,
/// mut b: B,
/// ) -> impl FnMut(In<A::In>, ParamSet<(A::Param, B::Param)>) -> B::Out
/// where
/// // We need A and B to be systems, add those bounds
/// A: SystemParamFunction<AMarker>,
/// B: SystemParamFunction<BMarker, In = A::Out>,
/// {
/// // The type of `params` is inferred based on the return of this function above
/// move |In(a_in), mut params| {
/// let shared = a.run(a_in, params.p0());
/// b.run(shared, params.p1())
/// }
/// }
///
/// // Usage example for `pipe`:
/// fn main() {
/// let mut world = World::default();
/// world.insert_resource(Message("42".to_string()));
///
/// // pipe the `parse_message_system`'s output into the `filter_system`s input
/// let mut piped_system = IntoSystem::into_system(pipe(parse_message, filter));
/// piped_system.initialize(&mut world);
/// assert_eq!(piped_system.run((), &mut world), Some(42));
/// }
///
/// #[derive(Resource)]
/// struct Message(String);
///
/// fn parse_message(message: Res<Message>) -> Result<usize, ParseIntError> {
/// message.0.parse::<usize>()
/// }
///
/// fn filter(In(result): In<Result<usize, ParseIntError>>) -> Option<usize> {
/// result.ok().filter(|&n| n < 100)
/// }
/// ```
/// [`PipeSystem`]: crate::system::PipeSystem
/// [`ParamSet`]: crate::system::ParamSet
pub trait SystemParamFunction<Marker>: Send + Sync + 'static {
/// The input type to this system. See [`System::In`].
type In;
/// The return type of this system. See [`System::Out`].
type Out;
/// The [`SystemParam`]/s used by this system to access the [`World`].
type Param: SystemParam;
/// Executes this system once. See [`System::run`] or [`System::run_unsafe`].
fn run(&mut self, input: Self::In, param_value: SystemParamItem<Self::Param>) -> Self::Out;
}
macro_rules! impl_system_function {
($($param: ident),*) => {
#[allow(non_snake_case)]
impl<Out, Func: Send + Sync + 'static, $($param: SystemParam),*> SystemParamFunction<fn($($param,)*) -> Out> for Func
where
for <'a> &'a mut Func:
FnMut($($param),*) -> Out +
FnMut($(SystemParamItem<$param>),*) -> Out, Out: 'static
{
type In = ();
type Out = Out;
type Param = ($($param,)*);
#[inline]
fn run(&mut self, _input: (), param_value: SystemParamItem< ($($param,)*)>) -> Out {
// Yes, this is strange, but `rustc` fails to compile this impl
// without using this function. It fails to recognize that `func`
// is a function, potentially because of the multiple impls of `FnMut`
#[allow(clippy::too_many_arguments)]
fn call_inner<Out, $($param,)*>(
mut f: impl FnMut($($param,)*)->Out,
$($param: $param,)*
)->Out{
f($($param,)*)
}
let ($($param,)*) = param_value;
call_inner(self, $($param),*)
}
}
#[allow(non_snake_case)]
impl<Input, Out, Func: Send + Sync + 'static, $($param: SystemParam),*> SystemParamFunction<fn(In<Input>, $($param,)*) -> Out> for Func
where
for <'a> &'a mut Func:
FnMut(In<Input>, $($param),*) -> Out +
FnMut(In<Input>, $(SystemParamItem<$param>),*) -> Out, Out: 'static
{
type In = Input;
type Out = Out;
type Param = ($($param,)*);
#[inline]
fn run(&mut self, input: Input, param_value: SystemParamItem< ($($param,)*)>) -> Out {
#[allow(clippy::too_many_arguments)]
fn call_inner<Input, Out, $($param,)*>(
mut f: impl FnMut(In<Input>, $($param,)*)->Out,
input: In<Input>,
$($param: $param,)*
)->Out{
f(input, $($param,)*)
}
let ($($param,)*) = param_value;
call_inner(self, In(input), $($param),*)
}
}
};
}
// Note that we rely on the highest impl to be <= the highest order of the tuple impls
// of `SystemParam` created.
all_tuples!(impl_system_function, 0, 16, F);
| {
self.system_meta.is_send
} | identifier_body |
function_system.rs | use crate::{
archetype::{ArchetypeComponentId, ArchetypeGeneration, ArchetypeId},
component::{ComponentId, Tick},
prelude::FromWorld,
query::{Access, FilteredAccessSet},
system::{check_system_change_tick, ReadOnlySystemParam, System, SystemParam, SystemParamItem},
world::{unsafe_world_cell::UnsafeWorldCell, World, WorldId},
};
use bevy_utils::all_tuples;
use std::{any::TypeId, borrow::Cow, marker::PhantomData};
use super::{In, IntoSystem, ReadOnlySystem};
/// The metadata of a [`System`].
#[derive(Clone)]
pub struct SystemMeta {
pub(crate) name: Cow<'static, str>,
pub(crate) component_access_set: FilteredAccessSet<ComponentId>,
pub(crate) archetype_component_access: Access<ArchetypeComponentId>,
// NOTE: this must be kept private. making a SystemMeta non-send is irreversible to prevent
// SystemParams from overriding each other
is_send: bool,
pub(crate) last_run: Tick,
}
impl SystemMeta {
pub(crate) fn new<T>() -> Self {
Self {
name: std::any::type_name::<T>().into(),
archetype_component_access: Access::default(),
component_access_set: FilteredAccessSet::default(),
is_send: true,
last_run: Tick::new(0),
}
}
/// Returns the system's name
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns true if the system is [`Send`].
#[inline]
pub fn is_send(&self) -> bool {
self.is_send
}
/// Sets the system to be not [`Send`].
///
/// This is irreversible.
#[inline]
pub fn set_non_send(&mut self) {
self.is_send = false;
}
}
// TODO: Actually use this in FunctionSystem. We should probably only do this once Systems are constructed using a World reference
// (to avoid the need for unwrapping to retrieve SystemMeta)
/// Holds on to persistent state required to drive [`SystemParam`] for a [`System`].
///
/// This is a very powerful and convenient tool for working with exclusive world access,
/// allowing you to fetch data from the [`World`] as if you were running a [`System`].
///
/// Borrow-checking is handled for you, allowing you to mutably access multiple compatible system parameters at once,
/// and arbitrary system parameters (like [`EventWriter`](crate::event::EventWriter)) can be conveniently fetched.
///
/// For an alternative approach to split mutable access to the world, see [`World::resource_scope`].
///
/// # Warning
///
/// [`SystemState`] values created can be cached to improve performance,
/// and *must* be cached and reused in order for system parameters that rely on local state to work correctly.
/// These include:
/// - [`Added`](crate::query::Added) and [`Changed`](crate::query::Changed) query filters
/// - [`Local`](crate::system::Local) variables that hold state
/// - [`EventReader`](crate::event::EventReader) system parameters, which rely on a [`Local`](crate::system::Local) to track which events have been seen
///
/// # Example
///
/// Basic usage:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// # #[derive(Resource)]
/// # struct MyResource(u32);
/// #
/// # #[derive(Component)]
/// # struct MyComponent;
/// #
/// // Work directly on the `World`
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
///
/// // Construct a `SystemState` struct, passing in a tuple of `SystemParam`
/// // as if you were writing an ordinary system.
/// let mut system_state: SystemState<(
/// EventWriter<MyEvent>,
/// Option<ResMut<MyResource>>,
/// Query<&MyComponent>,
/// )> = SystemState::new(&mut world);
///
/// // Use system_state.get_mut(&mut world) and unpack your system parameters into variables!
/// // system_state.get(&world) provides read-only versions of your system parameters instead.
/// let (event_writer, maybe_resource, query) = system_state.get_mut(&mut world);
///
/// // If you are using `Commands`, you can choose when you want to apply them to the world.
/// // You need to manually call `.apply(world)` on the `SystemState` to apply them.
/// ```
/// Caching:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// #[derive(Resource)]
/// struct CachedSystemState {
/// event_state: SystemState<EventReader<'static, 'static, MyEvent>>,
/// }
///
/// // Create and store a system state once
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
/// let initial_state: SystemState<EventReader<MyEvent>> = SystemState::new(&mut world);
///
/// // The system state is cached in a resource
/// world.insert_resource(CachedSystemState {
/// event_state: initial_state,
/// });
///
/// // Later, fetch the cached system state, saving on overhead
/// world.resource_scope(|world, mut cached_state: Mut<CachedSystemState>| {
/// let mut event_reader = cached_state.event_state.get_mut(world);
///
/// for event in event_reader.iter() {
/// println!("Hello World!");
/// }
/// });
/// ```
pub struct SystemState<Param: SystemParam + 'static> {
meta: SystemMeta,
param_state: Param::State, |
impl<Param: SystemParam> SystemState<Param> {
/// Creates a new [`SystemState`] with default state.
///
/// ## Note
/// For users of [`SystemState::get_manual`] or [`get_manual_mut`](SystemState::get_manual_mut):
///
/// `new` does not cache any of the world's archetypes, so you must call [`SystemState::update_archetypes`]
/// manually before calling `get_manual{_mut}`.
pub fn new(world: &mut World) -> Self {
let mut meta = SystemMeta::new::<Param>();
meta.last_run = world.change_tick().relative_to(Tick::MAX);
let param_state = Param::init_state(world, &mut meta);
Self {
meta,
param_state,
world_id: world.id(),
archetype_generation: ArchetypeGeneration::initial(),
}
}
/// Gets the metadata for this instance.
#[inline]
pub fn meta(&self) -> &SystemMeta {
&self.meta
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
#[inline]
pub fn get<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell_readonly()) }
}
/// Retrieve the mutable [`SystemParam`] values.
#[inline]
pub fn get_mut<'w, 's>(&'s mut self, world: &'w mut World) -> SystemParamItem<'w, 's, Param> {
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell()) }
}
/// Applies all state queued up for [`SystemParam`] values. For example, this will apply commands queued up
/// by a [`Commands`](`super::Commands`) parameter to the given [`World`].
/// This function should be called manually after the values returned by [`SystemState::get`] and [`SystemState::get_mut`]
/// are finished being used.
pub fn apply(&mut self, world: &mut World) {
Param::apply(&mut self.param_state, &self.meta, world);
}
/// Returns `true` if `world_id` matches the [`World`] that was used to call [`SystemState::new`].
/// Otherwise, this returns false.
#[inline]
pub fn matches_world(&self, world_id: WorldId) -> bool {
self.world_id == world_id
}
/// Asserts that the [`SystemState`] matches the provided world.
#[inline]
fn validate_world(&self, world_id: WorldId) {
assert!(self.matches_world(world_id), "Encountered a mismatched World. A SystemState cannot be used with Worlds other than the one it was created with.");
}
/// Updates the state's internal view of the [`World`]'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
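///
/// # Example
///
/// A minimal sketch of the manual flow; `MyComponent` is an illustrative type, not part of this API:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # #[derive(Component)]
/// # struct MyComponent;
/// let mut world = World::new();
/// let mut state: SystemState<Query<&MyComponent>> = SystemState::new(&mut world);
/// // Spawning can introduce a new archetype, which `state` does not know about yet.
/// world.spawn(MyComponent);
/// // Refresh the cached archetype view before using the manual getters.
/// state.update_archetypes(&world);
/// let query = state.get_manual(&world);
/// # let _ = query;
/// ```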
#[inline]
pub fn update_archetypes(&mut self, world: &World) {
self.update_archetypes_unsafe_world_cell(world.as_unsafe_world_cell_readonly());
}
/// Updates the state's internal view of the `world`'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
///
/// # Note
///
/// This method only accesses world metadata.
#[inline]
pub fn update_archetypes_unsafe_world_cell(&mut self, world: UnsafeWorldCell) {
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
Param::new_archetype(
&mut self.param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.meta,
);
}
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
/// This will not update the state's view of the world's archetypes automatically nor increment the
/// world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get`] over this function.
#[inline]
pub fn get_manual<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
let change_tick = world.read_change_tick();
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell_readonly(), change_tick) }
}
/// Retrieve the mutable [`SystemParam`] values. This will not update the state's view of the world's archetypes
/// automatically nor increment the world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get_mut`] over this function.
#[inline]
pub fn get_manual_mut<'w, 's>(
&'s mut self,
world: &'w mut World,
) -> SystemParamItem<'w, 's, Param> {
self.validate_world(world.id());
let change_tick = world.change_tick();
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell(), change_tick) }
}
/// Retrieve the [`SystemParam`] values. This will not update archetypes automatically.
///
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
pub unsafe fn get_unchecked_manual<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
) -> SystemParamItem<'w, 's, Param> {
let change_tick = world.increment_change_tick();
self.fetch(world, change_tick)
}
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
unsafe fn fetch<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
change_tick: Tick,
) -> SystemParamItem<'w, 's, Param> {
let param = Param::get_param(&mut self.param_state, &self.meta, world, change_tick);
self.meta.last_run = change_tick;
param
}
}
impl<Param: SystemParam> FromWorld for SystemState<Param> {
fn from_world(world: &mut World) -> Self {
Self::new(world)
}
}
/// The [`System`] counterpart of an ordinary function.
///
/// You get this by calling [`IntoSystem::into_system`] on a function that only accepts
/// [`SystemParam`]s. The output of the system becomes the function's return type, while the input
/// becomes the function's [`In`] tagged parameter or `()` if no such parameter exists.
///
/// A [`FunctionSystem`] must be `.initialized` before it can be run.
///
/// The [`Clone`] implementation for [`FunctionSystem`] returns a new instance which
/// is NOT initialized. The cloned system must also be `.initialized` before it can be run.
pub struct FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
func: F,
param_state: Option<<F::Param as SystemParam>::State>,
system_meta: SystemMeta,
world_id: Option<WorldId>,
archetype_generation: ArchetypeGeneration,
    // NOTE: PhantomData<fn() -> T> gives this type safe Send/Sync impls
marker: PhantomData<fn() -> Marker>,
}
// De-initializes the cloned system.
impl<Marker, F> Clone for FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker> + Clone,
{
fn clone(&self) -> Self {
Self {
func: self.func.clone(),
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
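// A minimal sketch of re-initializing a clone (`my_system` and `world` are illustrative):
//
//     let sys = IntoSystem::into_system(my_system);
//     let mut copy = sys.clone();  // `param_state` is reset to `None` by this impl,
//     copy.initialize(&mut world); // so the clone must be initialized before running.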
/// A marker type used to distinguish regular function systems from exclusive function systems.
#[doc(hidden)]
pub struct IsFunctionSystem;
impl<Marker, F> IntoSystem<F::In, F::Out, (IsFunctionSystem, Marker)> for F
where
Marker: 'static,
F: SystemParamFunction<Marker>,
{
type System = FunctionSystem<Marker, F>;
fn into_system(func: Self) -> Self::System {
FunctionSystem {
func,
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
impl<Marker, F> FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
    /// Message shown when a system isn't initialized
// When lines get too long, rustfmt can sometimes refuse to format them.
// Work around this by storing the message separately.
const PARAM_MESSAGE: &'static str = "System's param_state was not found. Did you forget to initialize this system before running it?";
}
impl<Marker, F> System for FunctionSystem<Marker, F>
where
Marker: 'static,
F: SystemParamFunction<Marker>,
{
type In = F::In;
type Out = F::Out;
#[inline]
fn name(&self) -> Cow<'static, str> {
self.system_meta.name.clone()
}
#[inline]
fn type_id(&self) -> TypeId {
TypeId::of::<F>()
}
#[inline]
fn component_access(&self) -> &Access<ComponentId> {
self.system_meta.component_access_set.combined_access()
}
#[inline]
fn archetype_component_access(&self) -> &Access<ArchetypeComponentId> {
&self.system_meta.archetype_component_access
}
#[inline]
fn is_send(&self) -> bool {
self.system_meta.is_send
}
#[inline]
fn is_exclusive(&self) -> bool {
false
}
#[inline]
unsafe fn run_unsafe(&mut self, input: Self::In, world: UnsafeWorldCell) -> Self::Out {
let change_tick = world.increment_change_tick();
// SAFETY:
// - The caller has invoked `update_archetype_component_access`, which will panic
// if the world does not match.
// - All world accesses used by `F::Param` have been registered, so the caller
// will ensure that there are no data access conflicts.
let params = F::Param::get_param(
self.param_state.as_mut().expect(Self::PARAM_MESSAGE),
&self.system_meta,
world,
change_tick,
);
let out = self.func.run(input, params);
self.system_meta.last_run = change_tick;
out
}
fn get_last_run(&self) -> Tick {
self.system_meta.last_run
}
fn set_last_run(&mut self, last_run: Tick) {
self.system_meta.last_run = last_run;
}
#[inline]
fn apply_deferred(&mut self, world: &mut World) {
let param_state = self.param_state.as_mut().expect(Self::PARAM_MESSAGE);
F::Param::apply(param_state, &self.system_meta, world);
}
#[inline]
fn initialize(&mut self, world: &mut World) {
self.world_id = Some(world.id());
self.system_meta.last_run = world.change_tick().relative_to(Tick::MAX);
self.param_state = Some(F::Param::init_state(world, &mut self.system_meta));
}
fn update_archetype_component_access(&mut self, world: UnsafeWorldCell) {
assert!(self.world_id == Some(world.id()), "Encountered a mismatched World. A System cannot be used with Worlds other than the one it was initialized with.");
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
let param_state = self.param_state.as_mut().unwrap();
F::Param::new_archetype(
param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.system_meta,
);
}
}
#[inline]
fn check_change_tick(&mut self, change_tick: Tick) {
check_system_change_tick(
&mut self.system_meta.last_run,
change_tick,
self.system_meta.name.as_ref(),
);
}
fn default_system_sets(&self) -> Vec<Box<dyn crate::schedule::SystemSet>> {
let set = crate::schedule::SystemTypeSet::<F>::new();
vec![Box::new(set)]
}
}
/// SAFETY: `F`'s param is [`ReadOnlySystemParam`], so this system will only read from the world.
unsafe impl<Marker, F> ReadOnlySystem for FunctionSystem<Marker, F>
where
Marker: 'static,
F: SystemParamFunction<Marker>,
F::Param: ReadOnlySystemParam,
{
}
/// A trait implemented for all functions that can be used as [`System`]s.
///
/// This trait can be useful for making your own systems which accept other systems,
/// sometimes called higher order systems.
///
/// This should be used in combination with [`ParamSet`] when calling other systems
/// within your system.
/// Using [`ParamSet`] in this case avoids [`SystemParam`] collisions.
///
/// # Example
///
/// To create something like [`PipeSystem`], but in entirely safe code.
///
/// ```rust
/// use std::num::ParseIntError;
///
/// use bevy_ecs::prelude::*;
///
/// /// Pipe creates a new system which calls `a`, then calls `b` with the output of `a`
/// pub fn pipe<A, B, AMarker, BMarker>(
/// mut a: A,
/// mut b: B,
/// ) -> impl FnMut(In<A::In>, ParamSet<(A::Param, B::Param)>) -> B::Out
/// where
/// // We need A and B to be systems, add those bounds
/// A: SystemParamFunction<AMarker>,
/// B: SystemParamFunction<BMarker, In = A::Out>,
/// {
/// // The type of `params` is inferred based on the return of this function above
/// move |In(a_in), mut params| {
/// let shared = a.run(a_in, params.p0());
/// b.run(shared, params.p1())
/// }
/// }
///
/// // Usage example for `pipe`:
/// fn main() {
/// let mut world = World::default();
/// world.insert_resource(Message("42".to_string()));
///
///     // pipe the `parse_message` system's output into the `filter` system's input
/// let mut piped_system = IntoSystem::into_system(pipe(parse_message, filter));
/// piped_system.initialize(&mut world);
/// assert_eq!(piped_system.run((), &mut world), Some(42));
/// }
///
/// #[derive(Resource)]
/// struct Message(String);
///
/// fn parse_message(message: Res<Message>) -> Result<usize, ParseIntError> {
/// message.0.parse::<usize>()
/// }
///
/// fn filter(In(result): In<Result<usize, ParseIntError>>) -> Option<usize> {
/// result.ok().filter(|&n| n < 100)
/// }
/// ```
/// [`PipeSystem`]: crate::system::PipeSystem
/// [`ParamSet`]: crate::system::ParamSet
pub trait SystemParamFunction<Marker>: Send + Sync + 'static {
/// The input type to this system. See [`System::In`].
type In;
/// The return type of this system. See [`System::Out`].
type Out;
    /// The [`SystemParam`]s used by this system to access the [`World`].
type Param: SystemParam;
/// Executes this system once. See [`System::run`] or [`System::run_unsafe`].
fn run(&mut self, input: Self::In, param_value: SystemParamItem<Self::Param>) -> Self::Out;
}
macro_rules! impl_system_function {
($($param: ident),*) => {
#[allow(non_snake_case)]
impl<Out, Func: Send + Sync + 'static, $($param: SystemParam),*> SystemParamFunction<fn($($param,)*) -> Out> for Func
where
for <'a> &'a mut Func:
FnMut($($param),*) -> Out +
FnMut($(SystemParamItem<$param>),*) -> Out, Out: 'static
{
type In = ();
type Out = Out;
type Param = ($($param,)*);
#[inline]
fn run(&mut self, _input: (), param_value: SystemParamItem< ($($param,)*)>) -> Out {
// Yes, this is strange, but `rustc` fails to compile this impl
// without using this function. It fails to recognize that `func`
// is a function, potentially because of the multiple impls of `FnMut`
#[allow(clippy::too_many_arguments)]
fn call_inner<Out, $($param,)*>(
mut f: impl FnMut($($param,)*)->Out,
$($param: $param,)*
)->Out{
f($($param,)*)
}
let ($($param,)*) = param_value;
call_inner(self, $($param),*)
}
}
#[allow(non_snake_case)]
impl<Input, Out, Func: Send + Sync + 'static, $($param: SystemParam),*> SystemParamFunction<fn(In<Input>, $($param,)*) -> Out> for Func
where
for <'a> &'a mut Func:
FnMut(In<Input>, $($param),*) -> Out +
FnMut(In<Input>, $(SystemParamItem<$param>),*) -> Out, Out: 'static
{
type In = Input;
type Out = Out;
type Param = ($($param,)*);
#[inline]
fn run(&mut self, input: Input, param_value: SystemParamItem< ($($param,)*)>) -> Out {
#[allow(clippy::too_many_arguments)]
fn call_inner<Input, Out, $($param,)*>(
mut f: impl FnMut(In<Input>, $($param,)*)->Out,
input: In<Input>,
$($param: $param,)*
)->Out{
f(input, $($param,)*)
}
let ($($param,)*) = param_value;
call_inner(self, In(input), $($param),*)
}
}
};
}
// Note that we rely on the highest arity implemented here being <= the highest arity
// of the tuple impls of `SystemParam` created.
all_tuples!(impl_system_function, 0, 16, F); | world_id: WorldId,
archetype_generation: ArchetypeGeneration,
} | random_line_split |
function_system.rs | use crate::{
archetype::{ArchetypeComponentId, ArchetypeGeneration, ArchetypeId},
component::{ComponentId, Tick},
prelude::FromWorld,
query::{Access, FilteredAccessSet},
system::{check_system_change_tick, ReadOnlySystemParam, System, SystemParam, SystemParamItem},
world::{unsafe_world_cell::UnsafeWorldCell, World, WorldId},
};
use bevy_utils::all_tuples;
use std::{any::TypeId, borrow::Cow, marker::PhantomData};
use super::{In, IntoSystem, ReadOnlySystem};
/// The metadata of a [`System`].
#[derive(Clone)]
pub struct SystemMeta {
pub(crate) name: Cow<'static, str>,
pub(crate) component_access_set: FilteredAccessSet<ComponentId>,
pub(crate) archetype_component_access: Access<ArchetypeComponentId>,
// NOTE: this must be kept private. making a SystemMeta non-send is irreversible to prevent
// SystemParams from overriding each other
is_send: bool,
pub(crate) last_run: Tick,
}
impl SystemMeta {
pub(crate) fn new<T>() -> Self {
Self {
name: std::any::type_name::<T>().into(),
archetype_component_access: Access::default(),
component_access_set: FilteredAccessSet::default(),
is_send: true,
last_run: Tick::new(0),
}
}
/// Returns the system's name
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns true if the system is [`Send`].
#[inline]
pub fn is_send(&self) -> bool {
self.is_send
}
/// Sets the system to be not [`Send`].
///
/// This is irreversible.
#[inline]
pub fn set_non_send(&mut self) {
self.is_send = false;
}
}
// TODO: Actually use this in FunctionSystem. We should probably only do this once Systems are constructed using a World reference
// (to avoid the need for unwrapping to retrieve SystemMeta)
/// Holds on to persistent state required to drive [`SystemParam`] for a [`System`].
///
/// This is a very powerful and convenient tool for working with exclusive world access,
/// allowing you to fetch data from the [`World`] as if you were running a [`System`].
///
/// Borrow-checking is handled for you, allowing you to mutably access multiple compatible system parameters at once,
/// and arbitrary system parameters (like [`EventWriter`](crate::event::EventWriter)) can be conveniently fetched.
///
/// For an alternative approach to split mutable access to the world, see [`World::resource_scope`].
///
/// # Warning
///
/// [`SystemState`] values created can be cached to improve performance,
/// and *must* be cached and reused in order for system parameters that rely on local state to work correctly.
/// These include:
/// - [`Added`](crate::query::Added) and [`Changed`](crate::query::Changed) query filters
/// - [`Local`](crate::system::Local) variables that hold state
/// - [`EventReader`](crate::event::EventReader) system parameters, which rely on a [`Local`](crate::system::Local) to track which events have been seen
///
/// # Example
///
/// Basic usage:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// # #[derive(Resource)]
/// # struct MyResource(u32);
/// #
/// # #[derive(Component)]
/// # struct MyComponent;
/// #
/// // Work directly on the `World`
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
///
/// // Construct a `SystemState` struct, passing in a tuple of `SystemParam`
/// // as if you were writing an ordinary system.
/// let mut system_state: SystemState<(
/// EventWriter<MyEvent>,
/// Option<ResMut<MyResource>>,
/// Query<&MyComponent>,
/// )> = SystemState::new(&mut world);
///
/// // Use system_state.get_mut(&mut world) and unpack your system parameters into variables!
/// // system_state.get(&world) provides read-only versions of your system parameters instead.
/// let (event_writer, maybe_resource, query) = system_state.get_mut(&mut world);
///
/// // If you are using `Commands`, you can choose when you want to apply them to the world.
/// // You need to manually call `.apply(world)` on the `SystemState` to apply them.
/// ```
/// Caching:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// #[derive(Resource)]
/// struct CachedSystemState {
/// event_state: SystemState<EventReader<'static, 'static, MyEvent>>,
/// }
///
/// // Create and store a system state once
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
/// let initial_state: SystemState<EventReader<MyEvent>> = SystemState::new(&mut world);
///
/// // The system state is cached in a resource
/// world.insert_resource(CachedSystemState {
/// event_state: initial_state,
/// });
///
/// // Later, fetch the cached system state, saving on overhead
/// world.resource_scope(|world, mut cached_state: Mut<CachedSystemState>| {
/// let mut event_reader = cached_state.event_state.get_mut(world);
///
/// for event in event_reader.iter() {
/// println!("Hello World!");
/// }
/// });
/// ```
pub struct SystemState<Param: SystemParam + 'static> {
meta: SystemMeta,
param_state: Param::State,
world_id: WorldId,
archetype_generation: ArchetypeGeneration,
}
impl<Param: SystemParam> SystemState<Param> {
/// Creates a new [`SystemState`] with default state.
///
/// ## Note
/// For users of [`SystemState::get_manual`] or [`get_manual_mut`](SystemState::get_manual_mut):
///
/// `new` does not cache any of the world's archetypes, so you must call [`SystemState::update_archetypes`]
/// manually before calling `get_manual{_mut}`.
pub fn new(world: &mut World) -> Self {
let mut meta = SystemMeta::new::<Param>();
meta.last_run = world.change_tick().relative_to(Tick::MAX);
let param_state = Param::init_state(world, &mut meta);
Self {
meta,
param_state,
world_id: world.id(),
archetype_generation: ArchetypeGeneration::initial(),
}
}
/// Gets the metadata for this instance.
#[inline]
pub fn meta(&self) -> &SystemMeta {
&self.meta
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
#[inline]
pub fn get<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell_readonly()) }
}
/// Retrieve the mutable [`SystemParam`] values.
#[inline]
pub fn get_mut<'w, 's>(&'s mut self, world: &'w mut World) -> SystemParamItem<'w, 's, Param> {
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell()) }
}
/// Applies all state queued up for [`SystemParam`] values. For example, this will apply commands queued up
/// by a [`Commands`](`super::Commands`) parameter to the given [`World`].
/// This function should be called manually after the values returned by [`SystemState::get`] and [`SystemState::get_mut`]
/// are finished being used.
pub fn apply(&mut self, world: &mut World) {
Param::apply(&mut self.param_state, &self.meta, world);
}
/// Returns `true` if `world_id` matches the [`World`] that was used to call [`SystemState::new`].
/// Otherwise, this returns false.
#[inline]
pub fn matches_world(&self, world_id: WorldId) -> bool {
self.world_id == world_id
}
/// Asserts that the [`SystemState`] matches the provided world.
#[inline]
fn | (&self, world_id: WorldId) {
assert!(self.matches_world(world_id), "Encountered a mismatched World. A SystemState cannot be used with Worlds other than the one it was created with.");
}
/// Updates the state's internal view of the [`World`]'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
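///
/// # Example
///
/// A minimal sketch of the manual flow; `MyComponent` is an illustrative type, not part of this API:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # #[derive(Component)]
/// # struct MyComponent;
/// let mut world = World::new();
/// let mut state: SystemState<Query<&MyComponent>> = SystemState::new(&mut world);
/// // Spawning can introduce a new archetype, which `state` does not know about yet.
/// world.spawn(MyComponent);
/// // Refresh the cached archetype view before using the manual getters.
/// state.update_archetypes(&world);
/// let query = state.get_manual(&world);
/// # let _ = query;
/// ```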
#[inline]
pub fn update_archetypes(&mut self, world: &World) {
self.update_archetypes_unsafe_world_cell(world.as_unsafe_world_cell_readonly());
}
/// Updates the state's internal view of the `world`'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
///
/// # Note
///
/// This method only accesses world metadata.
#[inline]
pub fn update_archetypes_unsafe_world_cell(&mut self, world: UnsafeWorldCell) {
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
Param::new_archetype(
&mut self.param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.meta,
);
}
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
/// This will not update the state's view of the world's archetypes automatically nor increment the
/// world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get`] over this function.
#[inline]
pub fn get_manual<'w, 's>(&'s mut self, world: &'w World) -> SystemParamItem<'w, 's, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
let change_tick = world.read_change_tick();
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell_readonly(), change_tick) }
}
/// Retrieve the mutable [`SystemParam`] values. This will not update the state's view of the world's archetypes
/// automatically nor increment the world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get_mut`] over this function.
#[inline]
pub fn get_manual_mut<'w, 's>(
&'s mut self,
world: &'w mut World,
) -> SystemParamItem<'w, 's, Param> {
self.validate_world(world.id());
let change_tick = world.change_tick();
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell(), change_tick) }
}
/// Retrieve the [`SystemParam`] values. This will not update archetypes automatically.
///
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
pub unsafe fn get_unchecked_manual<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
) -> SystemParamItem<'w, 's, Param> {
let change_tick = world.increment_change_tick();
self.fetch(world, change_tick)
}
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
unsafe fn fetch<'w, 's>(
&'s mut self,
world: UnsafeWorldCell<'w>,
change_tick: Tick,
) -> SystemParamItem<'w, 's, Param> {
let param = Param::get_param(&mut self.param_state, &self.meta, world, change_tick);
self.meta.last_run = change_tick;
param
}
}
impl<Param: SystemParam> FromWorld for SystemState<Param> {
fn from_world(world: &mut World) -> Self {
Self::new(world)
}
}
/// The [`System`] counterpart of an ordinary function.
///
/// You get this by calling [`IntoSystem::into_system`] on a function that only accepts
/// [`SystemParam`]s. The output of the system becomes the function's return type, while the input
/// becomes the function's [`In`] tagged parameter or `()` if no such parameter exists.
///
/// A [`FunctionSystem`] must be `.initialized` before it can be run.
///
/// The [`Clone`] implementation for [`FunctionSystem`] returns a new instance which
/// is NOT initialized. The cloned system must also be `.initialized` before it can be run.
pub struct FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
func: F,
param_state: Option<<F::Param as SystemParam>::State>,
system_meta: SystemMeta,
world_id: Option<WorldId>,
archetype_generation: ArchetypeGeneration,
    // NOTE: PhantomData<fn() -> T> gives this type safe Send/Sync impls
marker: PhantomData<fn() -> Marker>,
}
// De-initializes the cloned system.
impl<Marker, F> Clone for FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker> + Clone,
{
fn clone(&self) -> Self {
Self {
func: self.func.clone(),
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
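// A minimal sketch of re-initializing a clone (`my_system` and `world` are illustrative):
//
//     let sys = IntoSystem::into_system(my_system);
//     let mut copy = sys.clone();  // `param_state` is reset to `None` by this impl,
//     copy.initialize(&mut world); // so the clone must be initialized before running.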
/// A marker type used to distinguish regular function systems from exclusive function systems.
#[doc(hidden)]
pub struct IsFunctionSystem;
impl<Marker, F> IntoSystem<F::In, F::Out, (IsFunctionSystem, Marker)> for F
where
Marker: 'static,
F: SystemParamFunction<Marker>,
{
type System = FunctionSystem<Marker, F>;
fn into_system(func: Self) -> Self::System {
FunctionSystem {
func,
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
impl<Marker, F> FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
    /// Message shown when a system isn't initialized
// When lines get too long, rustfmt can sometimes refuse to format them.
// Work around this by storing the message separately.
const PARAM_MESSAGE: &'static str = "System's param_state was not found. Did you forget to initialize this system before running it?";
}
impl<Marker, F> System for FunctionSystem<Marker, F>
where
Marker: 'static,
F: SystemParamFunction<Marker>,
{
type In = F::In;
type Out = F::Out;
#[inline]
fn name(&self) -> Cow<'static, str> {
self.system_meta.name.clone()
}
#[inline]
fn type_id(&self) -> TypeId {
TypeId::of::<F>()
}
#[inline]
fn component_access(&self) -> &Access<ComponentId> {
self.system_meta.component_access_set.combined_access()
}
#[inline]
fn archetype_component_access(&self) -> &Access<ArchetypeComponentId> {
&self.system_meta.archetype_component_access
}
#[inline]
fn is_send(&self) -> bool {
self.system_meta.is_send
}
#[inline]
fn is_exclusive(&self) -> bool {
false
}
#[inline]
unsafe fn run_unsafe(&mut self, input: Self::In, world: UnsafeWorldCell) -> Self::Out {
let change_tick = world.increment_change_tick();
// SAFETY:
// - The caller has invoked `update_archetype_component_access`, which will panic
// if the world does not match.
// - All world accesses used by `F::Param` have been registered, so the caller
// will ensure that there are no data access conflicts.
let params = F::Param::get_param(
self.param_state.as_mut().expect(Self::PARAM_MESSAGE),
&self.system_meta,
world,
change_tick,
);
let out = self.func.run(input, params);
self.system_meta.last_run = change_tick;
out
}
fn get_last_run(&self) -> Tick {
self.system_meta.last_run
}
fn set_last_run(&mut self, last_run: Tick) {
self.system_meta.last_run = last_run;
}
#[inline]
fn apply_deferred(&mut self, world: &mut World) {
let param_state = self.param_state.as_mut().expect(Self::PARAM_MESSAGE);
F::Param::apply(param_state, &self.system_meta, world);
}
#[inline]
fn initialize(&mut self, world: &mut World) {
self.world_id = Some(world.id());
self.system_meta.last_run = world.change_tick().relative_to(Tick::MAX);
self.param_state = Some(F::Param::init_state(world, &mut self.system_meta));
}
fn update_archetype_component_access(&mut self, world: UnsafeWorldCell) {
assert!(self.world_id == Some(world.id()), "Encountered a mismatched World. A System cannot be used with Worlds other than the one it was initialized with.");
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
let param_state = self.param_state.as_mut().unwrap();
F::Param::new_archetype(
param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.system_meta,
);
}
}
#[inline]
fn check_change_tick(&mut self, change_tick: Tick) {
check_system_change_tick(
&mut self.system_meta.last_run,
change_tick,
self.system_meta.name.as_ref(),
);
}
fn default_system_sets(&self) -> Vec<Box<dyn crate::schedule::SystemSet>> {
let set = crate::schedule::SystemTypeSet::<F>::new();
vec![Box::new(set)]
}
}
/// SAFETY: `F`'s param is [`ReadOnlySystemParam`], so this system will only read from the world.
unsafe impl<Marker, F> ReadOnlySystem for FunctionSystem<Marker, F>
where
Marker: 'static,
F: SystemParamFunction<Marker>,
F::Param: ReadOnlySystemParam,
{
}
/// A trait implemented for all functions that can be used as [`System`]s.
///
/// This trait can be useful for making your own systems which accept other systems,
/// sometimes called higher order systems.
///
/// This should be used in combination with [`ParamSet`] when calling other systems
/// within your system.
/// Using [`ParamSet`] in this case avoids [`SystemParam`] collisions.
///
/// # Example
///
/// To create something like [`PipeSystem`], but in entirely safe code.
///
/// ```rust
/// use std::num::ParseIntError;
///
/// use bevy_ecs::prelude::*;
///
/// /// Pipe creates a new system which calls `a`, then calls `b` with the output of `a`
/// pub fn pipe<A, B, AMarker, BMarker>(
/// mut a: A,
/// mut b: B,
/// ) -> impl FnMut(In<A::In>, ParamSet<(A::Param, B::Param)>) -> B::Out
/// where
/// // We need A and B to be systems, add those bounds
/// A: SystemParamFunction<AMarker>,
/// B: SystemParamFunction<BMarker, In = A::Out>,
/// {
/// // The type of `params` is inferred based on the return of this function above
/// move |In(a_in), mut params| {
/// let shared = a.run(a_in, params.p0());
/// b.run(shared, params.p1())
/// }
/// }
///
/// // Usage example for `pipe`:
/// fn main() {
/// let mut world = World::default();
/// world.insert_resource(Message("42".to_string()));
///
///     // pipe the `parse_message` system's output into the `filter` system's input
/// let mut piped_system = IntoSystem::into_system(pipe(parse_message, filter));
/// piped_system.initialize(&mut world);
/// assert_eq!(piped_system.run((), &mut world), Some(42));
/// }
///
/// #[derive(Resource)]
/// struct Message(String);
///
/// fn parse_message(message: Res<Message>) -> Result<usize, ParseIntError> {
/// message.0.parse::<usize>()
/// }
///
/// fn filter(In(result): In<Result<usize, ParseIntError>>) -> Option<usize> {
/// result.ok().filter(|&n| n < 100)
/// }
/// ```
/// [`PipeSystem`]: crate::system::PipeSystem
/// [`ParamSet`]: crate::system::ParamSet
pub trait SystemParamFunction<Marker>: Send + Sync + 'static {
/// The input type to this system. See [`System::In`].
type In;
/// The return type of this system. See [`System::Out`].
type Out;
    /// The [`SystemParam`]s used by this system to access the [`World`].
type Param: SystemParam;
/// Executes this system once. See [`System::run`] or [`System::run_unsafe`].
fn run(&mut self, input: Self::In, param_value: SystemParamItem<Self::Param>) -> Self::Out;
}
macro_rules! impl_system_function {
($($param: ident),*) => {
#[allow(non_snake_case)]
impl<Out, Func: Send + Sync + 'static, $($param: SystemParam),*> SystemParamFunction<fn($($param,)*) -> Out> for Func
where
for <'a> &'a mut Func:
FnMut($($param),*) -> Out +
FnMut($(SystemParamItem<$param>),*) -> Out, Out: 'static
{
type In = ();
type Out = Out;
type Param = ($($param,)*);
#[inline]
fn run(&mut self, _input: (), param_value: SystemParamItem< ($($param,)*)>) -> Out {
// Yes, this is strange, but `rustc` fails to compile this impl
// without using this function. It fails to recognize that `func`
// is a function, potentially because of the multiple impls of `FnMut`
#[allow(clippy::too_many_arguments)]
fn call_inner<Out, $($param,)*>(
mut f: impl FnMut($($param,)*)->Out,
$($param: $param,)*
)->Out{
f($($param,)*)
}
let ($($param,)*) = param_value;
call_inner(self, $($param),*)
}
}
#[allow(non_snake_case)]
impl<Input, Out, Func: Send + Sync + 'static, $($param: SystemParam),*> SystemParamFunction<fn(In<Input>, $($param,)*) -> Out> for Func
where
for <'a> &'a mut Func:
FnMut(In<Input>, $($param),*) -> Out +
FnMut(In<Input>, $(SystemParamItem<$param>),*) -> Out, Out: 'static
{
type In = Input;
type Out = Out;
type Param = ($($param,)*);
#[inline]
fn run(&mut self, input: Input, param_value: SystemParamItem< ($($param,)*)>) -> Out {
#[allow(clippy::too_many_arguments)]
fn call_inner<Input, Out, $($param,)*>(
mut f: impl FnMut(In<Input>, $($param,)*)->Out,
input: In<Input>,
$($param: $param,)*
)->Out{
f(input, $($param,)*)
}
let ($($param,)*) = param_value;
call_inner(self, In(input), $($param),*)
}
}
};
}
// Note that we rely on the highest arity implemented here being <= the highest arity
// of the tuple impls of `SystemParam` created.
all_tuples!(impl_system_function, 0, 16, F);
| validate_world | identifier_name |
de.communardo.confluence.plugins.subspace-subspace-search-resource.js | AJS.toInit( function() {
    initSubspacesQuickSearch();
initSubspacesSearchCheckboxToggle();
});
function | () {
jQuery(".subspaces-quicksearch .subspaces-quick-search-query").each(function(){
var quickSearchQuery = jQuery(this);
// here we do the little placeholder stuff
quickSearchQuery.focus(function () {
if (jQuery(this).hasClass('placeholded')) {
jQuery(this).val("");
jQuery(this).removeClass("placeholded");
}
});
quickSearchQuery.change(function () {
if (jQuery(this).val() == "") {
jQuery(this).val(jQuery(this).attr('placeholder'));
jQuery(this).addClass("placeholded");
}
});
/**
* function to add a tooltip showing the space name to each drop down list item
*/
        AJS.subspacequicksearch = AJS.subspacequicksearch || {};
AJS.subspacequicksearch.dropdownPostprocess = function (list) {
jQuery("a span", list).each(function () {
var a = jQuery(this);
// get the hidden space name property from the span
var spaceName = AJS.dropDown.getAdditionalPropertyValue(a, "spaceName") || "";
                // we need to go through html node creation so that all encoded symbols (like &gt;) are displayed correctly
if (spaceName) {
spaceName = " (" + AJS("i").html(spaceName).text() + ")";
}
a.attr("title", a.text() + spaceName);
});
};
/**
* Append the drop down to the form element with the class quick-nav-drop-down
*/
var subspacequickNavPlacement = function (input, dropDown) {
input.closest("form").find(".quick-nav-drop-down").append(dropDown);
};
var subspacesSpacekey = quickSearchQuery.parent().children('.subspacesSpaceKey').val();
var includeSubspaces = quickSearchQuery.parent().children('.includeSubspaces').val();
quickSearchQuery.subspacesquicksearch("/communardo_plugins/quicksearch/subspacesQuickSearch.action"+
"?spaceKey="+subspacesSpacekey+
"&includeSubspaces="+includeSubspaces, null, {
dropdownPostprocess : AJS.subspacequicksearch.dropdownPostprocess,
dropdownPlacement : subspacequickNavPlacement
});
});
}
function initSubspacesSearchCheckboxToggle() {
var topLevelSpaceCheckboxes = jQuery('#topspaces_holder .checkbox_topLevelSpaces');
topLevelSpaceCheckboxes.click(function() {
//now the checkboxes can be used like radiobuttons
if(jQuery(this).is(':checked')) {
topLevelSpaceCheckboxes.attr('checked', false);
jQuery(this).attr('checked', true);
}
enableDisableSubspacesSearchElements();
});
enableDisableSubspacesSearchElements();
}
function enableDisableSubspacesSearchElements() {
//disable/enable the include subspaces and spaces input element
if(jQuery('#topspaces_holder .checkbox_topLevelSpaces').is(':checked')) {
jQuery('#search-filter-by-space').attr("disabled", true);
jQuery('#checkbox_include_subspaces').attr("disabled", true);
}
else {
jQuery('#search-filter-by-space').attr("disabled", false);
jQuery('#checkbox_include_subspaces').attr("disabled", false);
}
}
(function($){
/**
* Options are:
* dropdownPostprocess - a function that will be supplied with the list created by the dropDown and can manipulate or modify it
* as necessary.
* dropdownPlacement - a function that will be called with the drop down and which should place it in the correct place on the page.
* The supplied arguments are 1) the input that issued the search, 2) the dropDown to be placed.
* ajsDropDownOptions - any options the underlying dropDown component can handle expects
*/
$.fn.subspacesquicksearch = function(path, onShow, options) {
options = options || {};
var attr = {
cache_size: 30,
max_length: 1,
effect: "appear"
};
var dd,
cache = {},
cache_stack = [],
timer;
if (typeof path == "function") {
var oldPath = path();
var getPath = function () {
var newPath = path();
if (newPath != oldPath) {
// reset the cache if the path has changed
cache = {};
cache_stack = [];
oldPath = newPath;
}
return newPath;
};
} else {
var getPath = function () {
return path;
};
}
var searchBox = $(this);
var jsonparser = function (json, resultStatus) {
var hasErrors = json.statusMessage ? true : false; // right now, we are overloading the existence of a status message to imply an error
var matches = hasErrors ? [[{html: json.statusMessage, className: "error"}]] : json.contentNameMatches;
if (!hasErrors) {
var query = json.query;
if (!cache[query] && resultStatus == "success") {
cache[query] = json;
cache_stack.push(query);
if (cache_stack.length > attr.cache_size) {
delete cache[cache_stack.shift()];
}
}
}
// do not update drop down for earlier requests. We are only interested in displaying the results for the most current search
if (json.query != searchBox.val()) {
return;
}
var old_dd = dd;
// Standard dropDown handling of JSON object is to extract name, href, etc and then store the rest of the properties
// as a jQuery "data" property on the name span called properties.
dd = AJS.dropDown(matches, options.ajsDropDownOptions)[0];
dd.jsonResult = json;
// place the created drop down using the configured dropdownPlacement function
// if there is none then use a default behaviour
if (options.dropdownPlacement) {
options.dropdownPlacement(searchBox, dd.$);
} else {
searchBox.closest("form").find(".quick-nav-drop-down").append(dd.$);
}
dd.onhide = function (causer) {
if (causer == "escape") {
searchBox.focus();
}
};
var spans = $("span", dd.$);
for (var i = 0, ii = spans.length - 1; i < ii; i++) {
(function () {
var $this = $(this),
html = $this.html();
// highlight matching tokens
html = html.replace(new RegExp("(" + json.queryTokens.join("|") + ")", "gi"), "<strong>$1</strong>");
$this.html(html);
}).call(spans[i]);
}
if (options.dropdownPostprocess) {
options.dropdownPostprocess(dd.$);
dd.hider = function () {
options.dropdownPostprocess(dd.$);
};
}
/**
* Check that all items in the drop down can be displayed - show ellipses at the end of any that
* are too long. Also remove any unused properties that the dropDown may have stored for each
* item in the list.
*/
$("a span", dd.$).each(function () {
var $a = $(this),
elpss = AJS("var", "…"),
elwidth = elpss[0].offsetWidth,
width = this.parentNode.parentNode.parentNode.parentNode.offsetWidth,
isLong = false,
rightPadding = 20; // add some padding so the ellipsis doesn't run over the edge of the box
AJS.dropDown.removeAllAdditionalProperties($a);
$a.wrapInner($("<em>"));
$a.append(elpss);
this.elpss = elpss;
$("em", $a).each(function () {
var $label = $(this);
$label.show();
if (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding) {
var childNodes = this.childNodes;
var success = false;
for (var j = childNodes.length - 1; j >= 0; j--) {
var childNode = childNodes[j];
var truncatedChars = 1;
var valueAttr = (childNode.nodeType == 3) ? "nodeValue" : "innerHTML";
var nodeText = childNode[valueAttr];
do {
if (truncatedChars <= nodeText.length) {
childNode[valueAttr] = nodeText.substr(0, nodeText.length - truncatedChars++);
} else { // if we cannot fit even one character of the next word, then try truncating the node just previous to this
break;
}
} while (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding);
if (truncatedChars <= nodeText.length) {
// we've managed truncate part of the word and fit it in
success = true;
break;
}
}
if (success) {
isLong = true;
} else {
$label.hide();
}
}
});
if (!isLong) {
elpss.hide();
}
});
if (old_dd) {
dd.show();
dd.method = attr.effect;
old_dd.$.remove();
} else {
dd.show(attr.effect);
}
if(typeof onShow == "function") {
onShow.apply(dd);
}
};
searchBox.oldval = searchBox.val();
searchBox.keyup(function (e) {
// Don't open the search box on <enter> or <tab>
if (e.which == 13 || e.which == 9) {
return;
}
var val = searchBox.val();
if (val != searchBox.oldval) {
searchBox.oldval = val;
if (!searchBox.hasClass("placeholded")) {
clearTimeout(timer);
if (AJS.params.quickNavEnabled && (/[\S]{2,}/).test(val)) {
if (cache[val]) {
jsonparser(cache[val]);
} else {
var contextPath = jQuery('#confluence-context-path').attr('content');
timer = setTimeout(function () { // delay sending a request to give the user a chance to finish typing their search term(s)
return AJS.$.ajax({
type: "GET",
url: contextPath + getPath() ,
data: {"query": AJS.escape(val)},
success: jsonparser,
dataType: "json",
global: false,
timeout: 5000,
error: function ( xml, status, e ) { // ajax error handler
if (status == "timeout") {
jsonparser({statusMessage: "Timeout", query: val}, status);
}
}
});
}, 200);
}
} else {
dd && dd.hide();
}
}
}
});
return this;
};
})(jQuery);
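// A minimal usage sketch of the plugin above (the selector and query string are
// illustrative, not part of the plugin itself):
//
//     jQuery(".subspaces-quick-search-query").subspacesquicksearch(
//         "/communardo_plugins/quicksearch/subspacesQuickSearch.action?spaceKey=DOC&includeSubspaces=true",
//         null,
//         { dropdownPostprocess: AJS.subspacequicksearch.dropdownPostprocess }
//     );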
| initSubspacesQuickSearch | identifier_name |
de.communardo.confluence.plugins.subspace-subspace-search-resource.js | AJS.toInit( function() {
    initSubspacesQuickSearch();
initSubspacesSearchCheckboxToggle();
});
function initSubspacesQuickSearch() {
jQuery(".subspaces-quicksearch .subspaces-quick-search-query").each(function(){
var quickSearchQuery = jQuery(this);
// here we do the little placeholder stuff
quickSearchQuery.focus(function () {
if (jQuery(this).hasClass('placeholded')) {
jQuery(this).val("");
jQuery(this).removeClass("placeholded");
}
});
quickSearchQuery.change(function () {
if (jQuery(this).val() == "") {
jQuery(this).val(jQuery(this).attr('placeholder'));
jQuery(this).addClass("placeholded");
}
});
/**
* function to add a tooltip showing the space name to each drop down list item
*/
        AJS.subspacequicksearch = AJS.subspacequicksearch || {};
AJS.subspacequicksearch.dropdownPostprocess = function (list) {
jQuery("a span", list).each(function () {
var a = jQuery(this);
// get the hidden space name property from the span
var spaceName = AJS.dropDown.getAdditionalPropertyValue(a, "spaceName") || "";
                // we need to go through html node creation so that all encoded symbols (like &gt;) are displayed correctly
if (spaceName) {
spaceName = " (" + AJS("i").html(spaceName).text() + ")";
}
a.attr("title", a.text() + spaceName);
});
};
/**
* Append the drop down to the form element with the class quick-nav-drop-down
*/
var subspacequickNavPlacement = function (input, dropDown) {
input.closest("form").find(".quick-nav-drop-down").append(dropDown);
};
var subspacesSpacekey = quickSearchQuery.parent().children('.subspacesSpaceKey').val();
var includeSubspaces = quickSearchQuery.parent().children('.includeSubspaces').val();
quickSearchQuery.subspacesquicksearch("/communardo_plugins/quicksearch/subspacesQuickSearch.action"+
"?spaceKey="+subspacesSpacekey+
"&includeSubspaces="+includeSubspaces, null, {
dropdownPostprocess : AJS.subspacequicksearch.dropdownPostprocess,
dropdownPlacement : subspacequickNavPlacement
});
});
}
function initSubspacesSearchCheckboxToggle() {
var topLevelSpaceCheckboxes = jQuery('#topspaces_holder .checkbox_topLevelSpaces');
topLevelSpaceCheckboxes.click(function() {
//now the checkboxes can be used like radiobuttons
if(jQuery(this).is(':checked')) {
topLevelSpaceCheckboxes.attr('checked', false);
jQuery(this).attr('checked', true);
}
enableDisableSubspacesSearchElements();
});
enableDisableSubspacesSearchElements();
}
function enableDisableSubspacesSearchElements() {
//disable/enable the include subspaces and spaces input element
if(jQuery('#topspaces_holder .checkbox_topLevelSpaces').is(':checked')) {
jQuery('#search-filter-by-space').attr("disabled", true);
jQuery('#checkbox_include_subspaces').attr("disabled", true);
}
else {
jQuery('#search-filter-by-space').attr("disabled", false);
jQuery('#checkbox_include_subspaces').attr("disabled", false);
}
}
(function($){
/**
* Options are:
* dropdownPostprocess - a function that will be supplied with the list created by the dropDown and can manipulate or modify it
* as necessary.
* dropdownPlacement - a function that will be called with the drop down and which should place it in the correct place on the page.
* The supplied arguments are 1) the input that issued the search, 2) the dropDown to be placed.
* ajsDropDownOptions - any options the underlying dropDown component can handle expects
*/
$.fn.subspacesquicksearch = function(path, onShow, options) {
options = options || {};
var attr = {
cache_size: 30,
max_length: 1,
effect: "appear"
};
var dd,
cache = {},
cache_stack = [],
timer;
if (typeof path == "function") {
var oldPath = path();
var getPath = function () {
var newPath = path();
if (newPath != oldPath) {
// reset the cache if the path has changed
cache = {};
cache_stack = [];
oldPath = newPath;
}
return newPath;
};
} else {
var getPath = function () {
return path;
};
}
var searchBox = $(this);
var jsonparser = function (json, resultStatus) {
var hasErrors = json.statusMessage ? true : false; // right now, we are overloading the existence of a status message to imply an error
var matches = hasErrors ? [[{html: json.statusMessage, className: "error"}]] : json.contentNameMatches;
if (!hasErrors) {
var query = json.query;
if (!cache[query] && resultStatus == "success") {
cache[query] = json;
cache_stack.push(query);
if (cache_stack.length > attr.cache_size) {
delete cache[cache_stack.shift()];
}
}
}
// do not update drop down for earlier requests. We are only interested in displaying the results for the most current search
if (json.query != searchBox.val()) {
return;
}
var old_dd = dd;
// Standard dropDown handling of JSON object is to extract name, href, etc and then store the rest of the properties
// as a jQuery "data" property on the name span called properties.
dd = AJS.dropDown(matches, options.ajsDropDownOptions)[0];
dd.jsonResult = json;
// place the created drop down using the configured dropdownPlacement function
// if there is none then use a default behaviour
if (options.dropdownPlacement) {
options.dropdownPlacement(searchBox, dd.$);
} else {
searchBox.closest("form").find(".quick-nav-drop-down").append(dd.$);
}
dd.onhide = function (causer) {
if (causer == "escape") |
};
var spans = $("span", dd.$);
for (var i = 0, ii = spans.length - 1; i < ii; i++) {
(function () {
var $this = $(this),
html = $this.html();
// highlight matching tokens
html = html.replace(new RegExp("(" + json.queryTokens.join("|") + ")", "gi"), "<strong>$1</strong>");
$this.html(html);
}).call(spans[i]);
}
if (options.dropdownPostprocess) {
options.dropdownPostprocess(dd.$);
dd.hider = function () {
options.dropdownPostprocess(dd.$);
};
}
/**
* Check that all items in the drop down can be displayed - show ellipses at the end of any that
* are too long. Also remove any unused properties that the dropDown may have stored for each
* item in the list.
*/
$("a span", dd.$).each(function () {
var $a = $(this),
elpss = AJS("var", "…"),
elwidth = elpss[0].offsetWidth,
width = this.parentNode.parentNode.parentNode.parentNode.offsetWidth,
isLong = false,
rightPadding = 20; // add some padding so the ellipsis doesn't run over the edge of the box
AJS.dropDown.removeAllAdditionalProperties($a);
$a.wrapInner($("<em>"));
$a.append(elpss);
this.elpss = elpss;
$("em", $a).each(function () {
var $label = $(this);
$label.show();
if (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding) {
var childNodes = this.childNodes;
var success = false;
for (var j = childNodes.length - 1; j >= 0; j--) {
var childNode = childNodes[j];
var truncatedChars = 1;
var valueAttr = (childNode.nodeType == 3) ? "nodeValue" : "innerHTML";
var nodeText = childNode[valueAttr];
do {
if (truncatedChars <= nodeText.length) {
childNode[valueAttr] = nodeText.substr(0, nodeText.length - truncatedChars++);
} else { // if we cannot fit even one character of the next word, then try truncating the node just previous to this
break;
}
} while (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding);
if (truncatedChars <= nodeText.length) {
// we've managed truncate part of the word and fit it in
success = true;
break;
}
}
if (success) {
isLong = true;
} else {
$label.hide();
}
}
});
if (!isLong) {
elpss.hide();
}
});
if (old_dd) {
dd.show();
dd.method = attr.effect;
old_dd.$.remove();
} else {
dd.show(attr.effect);
}
if(typeof onShow == "function") {
onShow.apply(dd);
}
};
searchBox.oldval = searchBox.val();
searchBox.keyup(function (e) {
// Don't open the search box on <enter> or <tab>
if (e.which == 13 || e.which == 9) {
return;
}
var val = searchBox.val();
if (val != searchBox.oldval) {
searchBox.oldval = val;
if (!searchBox.hasClass("placeholded")) {
clearTimeout(timer);
if (AJS.params.quickNavEnabled && (/[\S]{2,}/).test(val)) {
if (cache[val]) {
jsonparser(cache[val]);
} else {
var contextPath = jQuery('#confluence-context-path').attr('content');
timer = setTimeout(function () { // delay sending a request to give the user a chance to finish typing their search term(s)
return AJS.$.ajax({
type: "GET",
url: contextPath + getPath() ,
data: {"query": AJS.escape(val)},
success: jsonparser,
dataType: "json",
global: false,
timeout: 5000,
error: function ( xml, status, e ) { // ajax error handler
if (status == "timeout") {
jsonparser({statusMessage: "Timeout", query: val}, status);
}
}
});
}, 200);
}
} else {
dd && dd.hide();
}
}
}
});
return this;
};
})(jQuery);
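// A minimal usage sketch of the plugin above (the selector and query string are
// illustrative, not part of the plugin itself):
//
//     jQuery(".subspaces-quick-search-query").subspacesquicksearch(
//         "/communardo_plugins/quicksearch/subspacesQuickSearch.action?spaceKey=DOC&includeSubspaces=true",
//         null,
//         { dropdownPostprocess: AJS.subspacequicksearch.dropdownPostprocess }
//     );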
| {
searchBox.focus();
} | conditional_block |
de.communardo.confluence.plugins.subspace-subspace-search-resource.js | AJS.toInit( function() {
    initSubspacesQuickSearch();
initSubspacesSearchCheckboxToggle();
});
function initSubspacesQuickSearch() {
jQuery(".subspaces-quicksearch .subspaces-quick-search-query").each(function(){
var quickSearchQuery = jQuery(this);
// here we do the little placeholder stuff
quickSearchQuery.focus(function () {
if (jQuery(this).hasClass('placeholded')) {
jQuery(this).val("");
jQuery(this).removeClass("placeholded");
}
});
quickSearchQuery.change(function () {
if (jQuery(this).val() == "") {
jQuery(this).val(jQuery(this).attr('placeholder'));
jQuery(this).addClass("placeholded");
}
});
/**
* function to add a tooltip showing the space name to each drop down list item
*/
        AJS.subspacequicksearch = AJS.subspacequicksearch || {};
AJS.subspacequicksearch.dropdownPostprocess = function (list) {
jQuery("a span", list).each(function () {
var a = jQuery(this);
// get the hidden space name property from the span
var spaceName = AJS.dropDown.getAdditionalPropertyValue(a, "spaceName") || "";
                // we need to go through html node creation so that all encoded symbols (like &gt;) are displayed correctly
if (spaceName) {
spaceName = " (" + AJS("i").html(spaceName).text() + ")";
}
a.attr("title", a.text() + spaceName);
});
};
/**
* Append the drop down to the form element with the class quick-nav-drop-down
*/
var subspacequickNavPlacement = function (input, dropDown) {
input.closest("form").find(".quick-nav-drop-down").append(dropDown);
};
var subspacesSpacekey = quickSearchQuery.parent().children('.subspacesSpaceKey').val();
var includeSubspaces = quickSearchQuery.parent().children('.includeSubspaces').val();
quickSearchQuery.subspacesquicksearch("/communardo_plugins/quicksearch/subspacesQuickSearch.action"+
"?spaceKey="+subspacesSpacekey+
"&includeSubspaces="+includeSubspaces, null, {
dropdownPostprocess : AJS.subspacequicksearch.dropdownPostprocess,
dropdownPlacement : subspacequickNavPlacement
});
});
}
function initSubspacesSearchCheckboxToggle() |
function enableDisableSubspacesSearchElements() {
//disable/enable the include subspaces and spaces input element
if(jQuery('#topspaces_holder .checkbox_topLevelSpaces').is(':checked')) {
jQuery('#search-filter-by-space').attr("disabled", true);
jQuery('#checkbox_include_subspaces').attr("disabled", true);
}
else {
jQuery('#search-filter-by-space').attr("disabled", false);
jQuery('#checkbox_include_subspaces').attr("disabled", false);
}
}
(function($){
/**
* Options are:
* dropdownPostprocess - a function that will be supplied with the list created by the dropDown and can manipulate or modify it
* as necessary.
* dropdownPlacement - a function that will be called with the drop down and which should place it in the correct place on the page.
* The supplied arguments are 1) the input that issued the search, 2) the dropDown to be placed.
* ajsDropDownOptions - any options the underlying dropDown component expects
*/
$.fn.subspacesquicksearch = function(path, onShow, options) {
options = options || {};
var attr = {
cache_size: 30,
max_length: 1,
effect: "appear"
};
var dd,
cache = {},
cache_stack = [],
timer;
if (typeof path == "function") {
var oldPath = path();
var getPath = function () {
var newPath = path();
if (newPath != oldPath) {
// reset the cache if the path has changed
cache = {};
cache_stack = [];
oldPath = newPath;
}
return newPath;
};
} else {
var getPath = function () {
return path;
};
}
var searchBox = $(this);
var jsonparser = function (json, resultStatus) {
var hasErrors = json.statusMessage ? true : false; // right now, we are overloading the existence of a status message to imply an error
var matches = hasErrors ? [[{html: json.statusMessage, className: "error"}]] : json.contentNameMatches;
if (!hasErrors) {
var query = json.query;
if (!cache[query] && resultStatus == "success") {
cache[query] = json;
cache_stack.push(query);
if (cache_stack.length > attr.cache_size) {
delete cache[cache_stack.shift()];
}
}
}
// do not update drop down for earlier requests. We are only interested in displaying the results for the most current search
if (json.query != searchBox.val()) {
return;
}
var old_dd = dd;
// Standard dropDown handling of JSON object is to extract name, href, etc and then store the rest of the properties
// as a jQuery "data" property on the name span called properties.
dd = AJS.dropDown(matches, options.ajsDropDownOptions)[0];
dd.jsonResult = json;
// place the created drop down using the configured dropdownPlacement function
// if there is none then use a default behaviour
if (options.dropdownPlacement) {
options.dropdownPlacement(searchBox, dd.$);
} else {
searchBox.closest("form").find(".quick-nav-drop-down").append(dd.$);
}
dd.onhide = function (causer) {
if (causer == "escape") {
searchBox.focus();
}
};
var spans = $("span", dd.$);
for (var i = 0, ii = spans.length - 1; i < ii; i++) {
(function () {
var $this = $(this),
html = $this.html();
// highlight matching tokens
html = html.replace(new RegExp("(" + json.queryTokens.join("|") + ")", "gi"), "<strong>$1</strong>");
$this.html(html);
}).call(spans[i]);
}
if (options.dropdownPostprocess) {
options.dropdownPostprocess(dd.$);
dd.hider = function () {
options.dropdownPostprocess(dd.$);
};
}
/**
* Check that all items in the drop down can be displayed - show ellipses at the end of any that
* are too long. Also remove any unused properties that the dropDown may have stored for each
* item in the list.
*/
$("a span", dd.$).each(function () {
var $a = $(this),
elpss = AJS("var", "…"),
elwidth = elpss[0].offsetWidth,
width = this.parentNode.parentNode.parentNode.parentNode.offsetWidth,
isLong = false,
rightPadding = 20; // add some padding so the ellipsis doesn't run over the edge of the box
AJS.dropDown.removeAllAdditionalProperties($a);
$a.wrapInner($("<em>"));
$a.append(elpss);
this.elpss = elpss;
$("em", $a).each(function () {
var $label = $(this);
$label.show();
if (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding) {
var childNodes = this.childNodes;
var success = false;
for (var j = childNodes.length - 1; j >= 0; j--) {
var childNode = childNodes[j];
var truncatedChars = 1;
var valueAttr = (childNode.nodeType == 3) ? "nodeValue" : "innerHTML";
var nodeText = childNode[valueAttr];
do {
if (truncatedChars <= nodeText.length) {
childNode[valueAttr] = nodeText.substr(0, nodeText.length - truncatedChars++);
} else { // if we cannot fit even one character of the next word, then try truncating the node just previous to this
break;
}
} while (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding);
if (truncatedChars <= nodeText.length) {
// we've managed to truncate part of the word and fit it in
success = true;
break;
}
}
if (success) {
isLong = true;
} else {
$label.hide();
}
}
});
if (!isLong) {
elpss.hide();
}
});
if (old_dd) {
dd.show();
dd.method = attr.effect;
old_dd.$.remove();
} else {
dd.show(attr.effect);
}
if(typeof onShow == "function") {
onShow.apply(dd);
}
};
searchBox.oldval = searchBox.val();
searchBox.keyup(function (e) {
// Don't open the search box on <enter> or <tab>
if (e.which == 13 || e.which == 9) {
return;
}
var val = searchBox.val();
if (val != searchBox.oldval) {
searchBox.oldval = val;
if (!searchBox.hasClass("placeholded")) {
clearTimeout(timer);
if (AJS.params.quickNavEnabled && (/[\S]{2,}/).test(val)) {
if (cache[val]) {
jsonparser(cache[val]);
} else {
var contextPath = jQuery('#confluence-context-path').attr('content');
timer = setTimeout(function () { // delay sending a request to give the user a chance to finish typing their search term(s)
return AJS.$.ajax({
type: "GET",
url: contextPath + getPath() ,
data: {"query": AJS.escape(val)},
success: jsonparser,
dataType: "json",
global: false,
timeout: 5000,
error: function ( xml, status, e ) { // ajax error handler
if (status == "timeout") {
jsonparser({statusMessage: "Timeout", query: val}, status);
}
}
});
}, 200);
}
} else {
dd && dd.hide();
}
}
}
});
return this;
};
})(jQuery);
| {
var topLevelSpaceCheckboxes = jQuery('#topspaces_holder .checkbox_topLevelSpaces');
topLevelSpaceCheckboxes.click(function() {
//now the checkboxes can be used like radiobuttons
if(jQuery(this).is(':checked')) {
topLevelSpaceCheckboxes.attr('checked', false);
jQuery(this).attr('checked', true);
}
enableDisableSubspacesSearchElements();
});
enableDisableSubspacesSearchElements();
} | identifier_body |
de.communardo.confluence.plugins.subspace-subspace-search-resource.js | AJS.toInit( function() {
initSubspacesQuickSearch();
initSubspacesSearchCheckboxToggle();
});
function initSubspacesQuickSearch() {
jQuery(".subspaces-quicksearch .subspaces-quick-search-query").each(function(){
var quickSearchQuery = jQuery(this);
// here we do the little placeholder stuff
quickSearchQuery.focus(function () {
if (jQuery(this).hasClass('placeholded')) {
jQuery(this).val("");
jQuery(this).removeClass("placeholded");
}
});
quickSearchQuery.change(function () {
if (jQuery(this).val() == "") {
jQuery(this).val(jQuery(this).attr('placeholder'));
jQuery(this).addClass("placeholded");
}
});
/**
* function to add a tooltip showing the space name to each drop down list item
*/
AJS.subspacequicksearch = AJS.quicksearch || {};
AJS.subspacequicksearch.dropdownPostprocess = function (list) {
jQuery("a span", list).each(function () {
var a = jQuery(this);
// get the hidden space name property from the span
var spaceName = AJS.dropDown.getAdditionalPropertyValue(a, "spaceName") || "";
// we need to go through html node creation so that all encoded symbols(like >) are displayed correctly
if (spaceName) {
spaceName = " (" + AJS("i").html(spaceName).text() + ")";
}
a.attr("title", a.text() + spaceName);
});
};
/**
* Append the drop down to the form element with the class quick-nav-drop-down
*/
var subspacequickNavPlacement = function (input, dropDown) {
input.closest("form").find(".quick-nav-drop-down").append(dropDown);
};
var subspacesSpacekey = quickSearchQuery.parent().children('.subspacesSpaceKey').val();
var includeSubspaces = quickSearchQuery.parent().children('.includeSubspaces').val();
quickSearchQuery.subspacesquicksearch("/communardo_plugins/quicksearch/subspacesQuickSearch.action"+
"?spaceKey="+subspacesSpacekey+
"&includeSubspaces="+includeSubspaces, null, {
dropdownPostprocess : AJS.subspacequicksearch.dropdownPostprocess,
dropdownPlacement : subspacequickNavPlacement
});
});
}
function initSubspacesSearchCheckboxToggle() {
var topLevelSpaceCheckboxes = jQuery('#topspaces_holder .checkbox_topLevelSpaces');
topLevelSpaceCheckboxes.click(function() {
//now the checkboxes can be used like radiobuttons
if(jQuery(this).is(':checked')) {
topLevelSpaceCheckboxes.attr('checked', false);
jQuery(this).attr('checked', true);
}
enableDisableSubspacesSearchElements();
});
enableDisableSubspacesSearchElements();
}
function enableDisableSubspacesSearchElements() {
//disable/enable the include subspaces and spaces input element
if(jQuery('#topspaces_holder .checkbox_topLevelSpaces').is(':checked')) {
jQuery('#search-filter-by-space').attr("disabled", true);
jQuery('#checkbox_include_subspaces').attr("disabled", true);
}
else {
jQuery('#search-filter-by-space').attr("disabled", false);
jQuery('#checkbox_include_subspaces').attr("disabled", false);
}
}
(function($){
/**
* Options are:
* dropdownPostprocess - a function that will be supplied with the list created by the dropDown and can manipulate or modify it
* as necessary.
* dropdownPlacement - a function that will be called with the drop down and which should place it in the correct place on the page.
* The supplied arguments are 1) the input that issued the search, 2) the dropDown to be placed.
* ajsDropDownOptions - any options the underlying dropDown component expects
*/
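// Illustrative call (hypothetical input selector and query values; the real
// wiring, including the subspaces action URL, happens in
// initSubspacesQuickSearch() above):
//
//   jQuery("#subspaces-search-input").subspacesquicksearch(
//       "/communardo_plugins/quicksearch/subspacesQuickSearch.action?spaceKey=DEV",
//       null,
//       {
//           dropdownPostprocess: AJS.subspacequicksearch.dropdownPostprocess,
//           dropdownPlacement: function (input, dropDown) {
//               input.closest("form").find(".quick-nav-drop-down").append(dropDown);
//           }
//       });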
$.fn.subspacesquicksearch = function(path, onShow, options) {
options = options || {};
var attr = {
cache_size: 30,
max_length: 1,
effect: "appear"
};
var dd,
cache = {},
cache_stack = [],
timer;
if (typeof path == "function") {
var oldPath = path();
var getPath = function () {
var newPath = path();
if (newPath != oldPath) {
// reset the cache if the path has changed
cache = {};
cache_stack = [];
oldPath = newPath;
}
return newPath;
};
} else {
var getPath = function () {
return path;
};
}
var searchBox = $(this);
var jsonparser = function (json, resultStatus) {
var hasErrors = json.statusMessage ? true : false; // right now, we are overloading the existence of a status message to imply an error
var matches = hasErrors ? [[{html: json.statusMessage, className: "error"}]] : json.contentNameMatches;
if (!hasErrors) {
var query = json.query;
if (!cache[query] && resultStatus == "success") {
cache[query] = json;
cache_stack.push(query);
if (cache_stack.length > attr.cache_size) {
delete cache[cache_stack.shift()];
}
}
}
// do not update drop down for earlier requests. We are only interested in displaying the results for the most current search
if (json.query != searchBox.val()) {
return;
}
var old_dd = dd;
// Standard dropDown handling of JSON object is to extract name, href, etc and then store the rest of the properties
// as a jQuery "data" property on the name span called properties.
dd = AJS.dropDown(matches, options.ajsDropDownOptions)[0];
dd.jsonResult = json;
// place the created drop down using the configured dropdownPlacement function
// if there is none then use a default behaviour
if (options.dropdownPlacement) {
options.dropdownPlacement(searchBox, dd.$);
} else {
searchBox.closest("form").find(".quick-nav-drop-down").append(dd.$);
}
dd.onhide = function (causer) {
if (causer == "escape") {
searchBox.focus();
}
};
var spans = $("span", dd.$);
for (var i = 0, ii = spans.length - 1; i < ii; i++) {
(function () {
var $this = $(this),
html = $this.html();
// highlight matching tokens
html = html.replace(new RegExp("(" + json.queryTokens.join("|") + ")", "gi"), "<strong>$1</strong>");
$this.html(html);
}).call(spans[i]);
}
if (options.dropdownPostprocess) {
options.dropdownPostprocess(dd.$);
dd.hider = function () {
options.dropdownPostprocess(dd.$);
};
}
/**
* Check that all items in the drop down can be displayed - show ellipses at the end of any that
* are too long. Also remove any unused properties that the dropDown may have stored for each
* item in the list.
*/
$("a span", dd.$).each(function () {
var $a = $(this),
elpss = AJS("var", "…"),
elwidth = elpss[0].offsetWidth,
width = this.parentNode.parentNode.parentNode.parentNode.offsetWidth,
isLong = false,
rightPadding = 20; // add some padding so the ellipsis doesn't run over the edge of the box
AJS.dropDown.removeAllAdditionalProperties($a);
$a.wrapInner($("<em>"));
$a.append(elpss);
this.elpss = elpss;
$("em", $a).each(function () {
|
var childNodes = this.childNodes;
var success = false;
for (var j = childNodes.length - 1; j >= 0; j--) {
var childNode = childNodes[j];
var truncatedChars = 1;
var valueAttr = (childNode.nodeType == 3) ? "nodeValue" : "innerHTML";
var nodeText = childNode[valueAttr];
do {
if (truncatedChars <= nodeText.length) {
childNode[valueAttr] = nodeText.substr(0, nodeText.length - truncatedChars++);
} else { // if we cannot fit even one character of the next word, then try truncating the node just previous to this
break;
}
} while (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding);
if (truncatedChars <= nodeText.length) {
// we've managed to truncate part of the word and fit it in
success = true;
break;
}
}
if (success) {
isLong = true;
} else {
$label.hide();
}
}
});
if (!isLong) {
elpss.hide();
}
});
if (old_dd) {
dd.show();
dd.method = attr.effect;
old_dd.$.remove();
} else {
dd.show(attr.effect);
}
if(typeof onShow == "function") {
onShow.apply(dd);
}
};
searchBox.oldval = searchBox.val();
searchBox.keyup(function (e) {
// Don't open the search box on <enter> or <tab>
if (e.which == 13 || e.which == 9) {
return;
}
var val = searchBox.val();
if (val != searchBox.oldval) {
searchBox.oldval = val;
if (!searchBox.hasClass("placeholded")) {
clearTimeout(timer);
if (AJS.params.quickNavEnabled && (/[\S]{2,}/).test(val)) {
if (cache[val]) {
jsonparser(cache[val]);
} else {
var contextPath = jQuery('#confluence-context-path').attr('content');
timer = setTimeout(function () { // delay sending a request to give the user a chance to finish typing their search term(s)
return AJS.$.ajax({
type: "GET",
url: contextPath + getPath() ,
data: {"query": AJS.escape(val)},
success: jsonparser,
dataType: "json",
global: false,
timeout: 5000,
error: function ( xml, status, e ) { // ajax error handler
if (status == "timeout") {
jsonparser({statusMessage: "Timeout", query: val}, status);
}
}
});
}, 200);
}
} else {
dd && dd.hide();
}
}
}
});
return this;
};
})(jQuery); | var $label = $(this);
$label.show();
if (this.offsetLeft + this.offsetWidth + elwidth > width - rightPadding) {
| random_line_split |
cellgrid.rs | use ggez::graphics;
use ggez::GameResult;
use ggez::nalgebra as na;
use crate::simulation::{SimGrid, Automaton};
use crate::commons::cells::BinaryCell;
use crate::commons::grids::CellGrid;
use crate::gameoflife::GameOfLife;
/// Implementation of the Automaton trait for GameOfLife with a CellGrid grid.
impl Automaton for GameOfLife<CellGrid<BinaryCell>> {
/// Defines the type of grid for the automaton.
type Grid = CellGrid<BinaryCell>;
/// A constructor method that creates a null automaton
/// and sets the initial state and cell size parameters.
fn new(initialstate: &str, cellsize: f32) -> Self {
Self {
grid: Self::Grid::new(cellsize),
initialstate: initialstate.to_string(),
cellsize,
generation: 0,
alive: 0,
dead: 0,
}
}
/// A method that initializes the automaton for the given dimensions.
fn initialize(&mut self, dimensions: graphics::Rect) {
// Create a new dimensions object for the grid of cells (60 px removed for the banner)
let griddimensions = graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h - 60.0);
// Set the grid dimensions to the grid
self.grid.setdimensions(griddimensions);
// Check the value of the initial state field
match self.initialstate.as_str() {
// Default initial state (random-balanced)
"default" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Balanced Random initial state
"random-balanced" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Invalid initial state
_ => {
// Print an error and exit
eprintln!("[error] invalid initial state for 'gameoflife'");
std::process::exit(0);
}
}
}
/// A method that advances the game of life to the next generation.
fn advance(&mut self) {
// Declare counter variables for the number of alive and dead cells
let mut alive: u32 = 0;
let mut dead: u32 = 0;
// Check if the cell grid exists
if self.grid.vector.is_some() {
// Create a clone of the cell grid
let mut newgrid = self.grid.vector.clone().unwrap();
// Iterate over the grid
for (x, y, cell) in self.grid.clone() {
// Check the vicinity of the cell
let cell = match (cell, self.scan_vicinity(x, y)) {
// If a cell is alive, and there are either too many live
// neighbors or not enough live neighbors, kill it.
(BinaryCell::Active, n) if n < 2 || n > 3 => BinaryCell::Passive,
// If a cell is alive and has either 2
// or 3 live neighbors, keep it alive
(BinaryCell::Active, n) if n == 3 || n == 2 => BinaryCell::Active,
// If a cell is dead and has exactly 3 live neighbors, revive it
(BinaryCell::Passive, 3) => BinaryCell::Active,
// Otherwise, keep the cell state
(c, _) => c,
};
// Add the new cell to the new grid
newgrid[x][y] = cell.clone();
// Increment the alive or dead counter
match cell {
BinaryCell::Passive => dead += 1, | }
// Assign the new grid to the grid struct
self.grid.setgrid(newgrid);
}
// Update the alive and dead cell value in the grid struct
self.alive = alive;
self.dead = dead;
// Increment the generation value in the grid struct
self.generation += 1;
}
/// A method that returns the state of the automaton as a string.
/// Format: "Generation: {} | Alive: {} | Dead: {}"
fn state(&self) -> String {
format!("Generation: {} | Alive: {} | Dead: {}", self.generation, self.alive, self.dead)
}
/// A method that returns the name of the automaton as a string.
/// Format: "Conway's Game of Life"
fn name(&self) -> String {
"Conway's Game of Life".to_string()
}
/// A method that returns the name of the automaton as a string
/// along with its initial state and grid type.
/// Format: "Conway's Game of Life | Grid | {}"
fn fullname(&self) -> String {
format!("Conway's Game of Life | Grid | {}", self.initialstate)
}
}
// Implementation of helper methods for GameOfLife with a CellGrid grid.
impl GameOfLife<CellGrid<BinaryCell>> {
// A function that retrieves the number of alive cells in
// the neighbouring vicinity of a given cell (x, y)
fn scan_vicinity(&mut self, x: usize, y: usize) -> i32 {
// Declare a counter
let mut count = 0;
// Check if the cell grid exists
if let Some(grid) = &self.grid.vector {
// Iterate over the cells in the vicinity of the cell at (x, y).
// The [-1,0,1] vectors represent the vicinity offsets for the x and y axis each.
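// For example, an interior cell at (x, y) = (5, 5) has its eight
// neighbours (4,4) (4,5) (4,6) (5,4) (5,6) (6,4) (6,5) (6,6) visited;
// the (0,0) offset pair is skipped further down so the cell itself
// is never counted.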
for x_off in vec![-1, 0, 1] {
for y_off in vec![-1, 0, 1] {
// Create the position of the cell in the
// grid based on the vicinity offsets
let nx = x as i32 + x_off;
let ny = y as i32 + y_off;
// Check if position is out of grid bounds (x axis)
if nx < 0 || nx >= grid.len() as i32 {
continue;
}
// Check if position is out of grid bounds (y axis)
if ny < 0 || ny >= grid[nx as usize].len() as i32 {
continue;
}
// Check if position points to the cell itself i.e (0,0) offsets
if nx == x as i32 && ny == y as i32 {
continue;
}
// Check if the cell is alive
match grid[nx as usize][ny as usize].clone() {
// Increment the counter if the cell is alive
BinaryCell::Active => count = count+1,
_ => continue,
}
}
}
}
// Return the counter value
return count
}
}
// Implementation of the Drawable trait for GameOfLife with a CellGrid grid.
impl graphics::Drawable for GameOfLife<CellGrid<BinaryCell>> {
// A method that returns the dimensions of the automaton
fn dimensions(&self, _ctx: &mut ggez::Context) -> Option<graphics::Rect> {
// Get the grid dimensions and add the banner height
if let Some(dimensions) = &self.grid.dimensions {
Some(graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h + 60.0))
} else {None}
}
// A method that returns the graphics blending mode of the automaton grid
fn blend_mode(&self) -> Option<graphics::BlendMode> {
Some(graphics::BlendMode::Add)
}
// A method that sets the graphics blend mode of the automaton grid (currently does nothing)
fn set_blend_mode(&mut self, _: Option<graphics::BlendMode>) {}
// A method that renders the automaton grid and state and returns a GameResult
fn draw(&self, ctx: &mut ggez::Context, param: graphics::DrawParam) -> GameResult<()> {
// Create a new graphic mesh builder
let mut mb = graphics::MeshBuilder::new();
// Iterate through each cell in the grid
for (x, y, cell) in self.grid.clone() {
// Create the bounds of the cell
let cellbounds = graphics::Rect::new(
(x as f32) * self.cellsize,
(y as f32) * self.cellsize,
self.cellsize,
self.cellsize,
);
// Add the cell fill to the mesh builder
mb.rectangle(
graphics::DrawMode::Fill(graphics::FillOptions::default()),
cellbounds,
// Set the cell color based on cell state
match cell {
BinaryCell::Passive => [0.0, 0.0, 0.0, 1.0].into(),
BinaryCell::Active => [1.0, 1.0, 1.0, 1.0].into(),
},
)
// Add the cell boundary to the mesh builder
.rectangle(
graphics::DrawMode::Stroke(graphics::StrokeOptions::default()),
cellbounds,
[1.0, 1.0, 1.0, 0.25].into(),
);
}
// Build and Draw the mesh
mb.build(ctx)?.draw(ctx, param)?;
// Declare a variable for the font size
let font_size = 18.0;
// Create the text graphics for the banner
let mut name_text = graphics::Text::new(self.fullname());
let mut state_text = graphics::Text::new(self.state());
// Set the font styling for the text graphics
state_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
name_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
// Check the grid dimensions
if let Some(dimensions) = &self.grid.dimensions {
// Calculate the spacing between banner elements.
// Assumes 2 units of spacing above the name text and below the state text
// and 1 unit of spacing between the name and state text.
let spacing = (60.0 - (font_size * 2.0)) / 5.0;
// Calculate the position of the name text
let name_offset = dimensions.h + (spacing * 2.0);
// Calculate the position of the state text
let state_offset = dimensions.h + (spacing * 3.0) + font_size;
// Draw the banner text graphics
name_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + name_offset),).into())?;
state_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + state_offset),).into())?;
}
// Return GameResult::Ok
Ok(())
}
} | BinaryCell::Active => alive += 1
} | random_line_split |
cellgrid.rs | use ggez::graphics;
use ggez::GameResult;
use ggez::nalgebra as na;
use crate::simulation::{SimGrid, Automaton};
use crate::commons::cells::BinaryCell;
use crate::commons::grids::CellGrid;
use crate::gameoflife::GameOfLife;
/// Implementation of the Automaton trait for GameOfLife with a CellGrid grid.
impl Automaton for GameOfLife<CellGrid<BinaryCell>> {
/// Defines the type of grid for the automaton.
type Grid = CellGrid<BinaryCell>;
/// A constructor method that creates a null automaton
/// and sets the initial state and cell size parameters.
fn new(initialstate: &str, cellsize: f32) -> Self {
Self {
grid: Self::Grid::new(cellsize),
initialstate: initialstate.to_string(),
cellsize,
generation: 0,
alive: 0,
dead: 0,
}
}
/// A method that initializes the automaton for the given dimensions.
fn initialize(&mut self, dimensions: graphics::Rect) {
// Create a new dimensions object for the grid of cells (60 px removed for the banner)
let griddimensions = graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h - 60.0);
// Set the grid dimensions to the grid
self.grid.setdimensions(griddimensions);
// Check the value of the initial state field
match self.initialstate.as_str() {
// Default initial state (random-balanced)
"default" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Balanced Random initial state
"random-balanced" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Invalid initial state
_ => {
// Print an error and exit
eprintln!("[error] invalid initial state for 'gameoflife'");
std::process::exit(0);
}
}
}
/// A method that advances the game of life to the next generation.
fn advance(&mut self) {
// Declare counter variables for the number of alive and dead cells
let mut alive: u32 = 0;
let mut dead: u32 = 0;
// Check if the cell grid exists
if self.grid.vector.is_some() {
// Create a clone of the cell grid
let mut newgrid = self.grid.vector.clone().unwrap();
// Iterate over the grid
for (x, y, cell) in self.grid.clone() {
// Check the vicinity of the cell
let cell = match (cell, self.scan_vicinity(x, y)) {
// If a cell is alive, and there are either too many live
// neighbors or not enough live neighbors, kill it.
(BinaryCell::Active, n) if n < 2 || n > 3 => BinaryCell::Passive,
// If a cell is alive and has either 2
// or 3 live neighbors, keep it alive
(BinaryCell::Active, n) if n == 3 || n == 2 => BinaryCell::Active,
// If a cell is dead and has exactly 3 live neighbors, revive it
(BinaryCell::Passive, 3) => BinaryCell::Active,
// Otherwise, keep the cell state
(c, _) => c,
};
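// Taken together, these arms implement Conway's classic B3/S23 rule:
// a dead cell with exactly three live neighbours is born, and a live
// cell survives only with two or three live neighbours.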
// Add the new cell to the new grid
newgrid[x][y] = cell.clone();
// Increment the alive or dead counter
match cell {
BinaryCell::Passive => dead += 1,
BinaryCell::Active => alive += 1
}
}
// Assign the new grid to the grid struct
self.grid.setgrid(newgrid);
}
// Update the alive and dead cell value in the grid struct
self.alive = alive;
self.dead = dead;
// Increment the generation value in the grid struct
self.generation += 1;
}
/// A method that returns the state of the automaton as a string.
/// Format: "Generation: {} | Alive: {} | Dead: {}"
fn state(&self) -> String {
format!("Generation: {} | Alive: {} | Dead: {}", self.generation, self.alive, self.dead)
}
/// A method that returns the name of the automaton as a string.
/// Format: "Conway's Game of Life"
fn name(&self) -> String {
"Conway's Game of Life".to_string()
}
/// A method that returns the name of the automaton as a string
/// along with its initial state and grid type.
/// Format: "Conway's Game of Life | Grid | {}"
fn fullname(&self) -> String {
format!("Conway's Game of Life | Grid | {}", self.initialstate)
}
}
// Implementation of helper methods for GameOfLife with a CellGrid grid.
impl GameOfLife<CellGrid<BinaryCell>> {
// A function that retrieves the number of alive cells in
// the neighbouring vicinity of a given cell (x, y)
fn scan_vicinity(&mut self, x: usize, y: usize) -> i32 {
// Declare a counter
let mut count = 0;
// Check if the cell grid exists
if let Some(grid) = &self.grid.vector {
// Iterate over the cells in the vicinity of the cell at (x, y).
// The [-1,0,1] vectors represent the vicinity offsets for the x and y axis each.
for x_off in vec![-1, 0, 1] {
for y_off in vec![-1, 0, 1] {
// Create the position of the cell in the
// grid based on the vicinity offsets
let nx = x as i32 + x_off;
let ny = y as i32 + y_off;
// Check if position is out of grid bounds (x axis)
if nx < 0 || nx >= grid.len() as i32 {
continue;
}
// Check if position is out of grid bounds (y axis)
if ny < 0 || ny >= grid[nx as usize].len() as i32 {
continue;
}
// Check if position points to the cell itself i.e (0,0) offsets
if nx == x as i32 && ny == y as i32 {
continue;
}
// Check if the cell is alive
match grid[nx as usize][ny as usize].clone() {
// Increment the counter if the cell is alive
BinaryCell::Active => count = count+1,
_ => continue,
}
}
}
}
// Return the counter value
return count
}
}
// Implementation of the Drawable trait for GameOfLife with a CellGrid grid.
impl graphics::Drawable for GameOfLife<CellGrid<BinaryCell>> {
// A method that returns the dimensions of the automaton
fn dimensions(&self, _ctx: &mut ggez::Context) -> Option<graphics::Rect> |
// A method that returns the graphics blending mode of the automaton grid
fn blend_mode(&self) -> Option<graphics::BlendMode> {
Some(graphics::BlendMode::Add)
}
// A method that sets the graphics blend mode of the automaton grid (currently does nothing)
fn set_blend_mode(&mut self, _: Option<graphics::BlendMode>) {}
// A method that renders the automaton grid and state and returns a GameResult
fn draw(&self, ctx: &mut ggez::Context, param: graphics::DrawParam) -> GameResult<()> {
// Create a new graphic mesh builder
let mut mb = graphics::MeshBuilder::new();
// Iterate through each cell in the grid
for (x, y, cell) in self.grid.clone() {
// Create the bounds of the cell
let cellbounds = graphics::Rect::new(
(x as f32) * self.cellsize,
(y as f32) * self.cellsize,
self.cellsize,
self.cellsize,
);
// Add the cell fill to the mesh builder
mb.rectangle(
graphics::DrawMode::Fill(graphics::FillOptions::default()),
cellbounds,
// Set the cell color based on cell state
match cell {
BinaryCell::Passive => [0.0, 0.0, 0.0, 1.0].into(),
BinaryCell::Active => [1.0, 1.0, 1.0, 1.0].into(),
},
)
// Add the cell boundary to the mesh builder
.rectangle(
graphics::DrawMode::Stroke(graphics::StrokeOptions::default()),
cellbounds,
[1.0, 1.0, 1.0, 0.25].into(),
);
}
// Build and Draw the mesh
mb.build(ctx)?.draw(ctx, param)?;
// Declare a variable for the font size
let font_size = 18.0;
// Create the text graphics for the banner
let mut name_text = graphics::Text::new(self.fullname());
let mut state_text = graphics::Text::new(self.state());
// Set the font styling for the text graphics
state_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
name_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
// Check the grid dimensions
if let Some(dimensions) = &self.grid.dimensions {
// Calculate the spacing between banner elements.
// Assumes 2 units of spacing above the name text and below the state text
// and 1 unit of spacing between the name and state text.
let spacing = (60.0 - (font_size * 2.0)) / 5.0;
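// Worked example: with font_size = 18.0 the banner is 60 px tall, so
// spacing = (60 - 36) / 5 = 4.8 px; name_offset is then h + 9.6 px and
// state_offset is h + 14.4 + 18 = h + 32.4 px below the grid.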
// Calculate the position of the name text
let name_offset = dimensions.h + (spacing * 2.0);
// Calculate the position of the state text
let state_offset = dimensions.h + (spacing * 3.0) + font_size;
// Draw the banner text graphics
name_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + name_offset),).into())?;
state_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + state_offset),).into())?;
}
// Return GameResult::Ok
Ok(())
}
} | {
// Get the grid dimensions and add the banner height
if let Some(dimensions) = &self.grid.dimensions {
Some(graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h + 60.0))
} else {None}
} | identifier_body |
cellgrid.rs | use ggez::graphics;
use ggez::GameResult;
use ggez::nalgebra as na;
use crate::simulation::{SimGrid, Automaton};
use crate::commons::cells::BinaryCell;
use crate::commons::grids::CellGrid;
use crate::gameoflife::GameOfLife;
/// Implementation of the Automaton trait for GameOfLife with a CellGrid grid.
impl Automaton for GameOfLife<CellGrid<BinaryCell>> {
/// Defines the type of grid for the automaton.
type Grid = CellGrid<BinaryCell>;
/// A constructor method that creates a null automaton
/// and sets the initial state and cell size parameters.
fn new(initialstate: &str, cellsize: f32) -> Self {
Self {
grid: Self::Grid::new(cellsize),
initialstate: initialstate.to_string(),
cellsize,
generation: 0,
alive: 0,
dead: 0,
}
}
/// A method that initializes the automaton for the given dimensions.
fn initialize(&mut self, dimensions: graphics::Rect) {
// Create a new dimensions object for the grid of cells (60 px removed for the banner)
let griddimensions = graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h - 60.0);
// Set the grid dimensions to the grid
self.grid.setdimensions(griddimensions);
// Check the value of the initial state field
match self.initialstate.as_str() {
// Default initial state (random-balanced)
"default" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Balanced Random initial state
"random-balanced" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Invalid initial state
_ => {
// Print an error and exit
eprintln!("[error] invalid initial state for 'gameoflife'");
std::process::exit(0);
}
}
}
/// A method that advances the game of life to the next generation.
fn advance(&mut self) {
// Declare counter variables for the number of alive and dead cells
let mut alive: u32 = 0;
let mut dead: u32 = 0;
// Check if the cell grid exists
if self.grid.vector.is_some() {
// Create a clone of the cell grid
let mut newgrid = self.grid.vector.clone().unwrap();
// Iterate over the grid
for (x, y, cell) in self.grid.clone() {
// Check the vicinity of the cell
let cell = match (cell, self.scan_vicinity(x, y)) {
// If a cell is alive, and there are either too many live
// neighbors or not enough live neighbors, kill it.
(BinaryCell::Active, n) if n < 2 || n > 3 => BinaryCell::Passive,
// If a cell is alive and has either 2
// or 3 live neighbors, keep it alive
(BinaryCell::Active, n) if n == 3 || n == 2 => BinaryCell::Active,
// If a cell is dead and has exactly 3 live neighbors, revive it
(BinaryCell::Passive, 3) => BinaryCell::Active,
// Otherwise, keep the cell state
(c, _) => c,
};
// Add the new cell to the new grid
newgrid[x][y] = cell.clone();
// Increment the alive or dead counter
match cell {
BinaryCell::Passive => dead += 1,
BinaryCell::Active => alive += 1
}
}
// Assign the new grid to the grid struct
self.grid.setgrid(newgrid);
}
// Update the alive and dead cell value in the grid struct
self.alive = alive;
self.dead = dead;
// Increment the generation value in the grid struct
self.generation += 1;
}
/// A method that returns the state of the automaton as a string.
/// Format: "Generation: {} | Alive: {} | Dead: {}"
fn state(&self) -> String {
format!("Generation: {} | Alive: {} | Dead: {}", self.generation, self.alive, self.dead)
}
/// A method that returns the name of the automaton as a string.
/// Format: "Conway's Game of Life"
fn name(&self) -> String {
"Conway's Game of Life".to_string()
}
/// A method that returns the name of the automaton as a string
/// along with its initial state and grid type.
/// Format: "Conway's Game of Life | Grid | {}"
fn | (&self) -> String {
format!("Conway's Game of Life | Grid | {}", self.initialstate)
}
}
// Implementation of helper methods for GameOfLife with a CellGrid grid.
impl GameOfLife<CellGrid<BinaryCell>> {
// A function that retrieves the number of alive cells in
// the neighbouring vicinity of a given cell (x, y)
fn scan_vicinity(&mut self, x: usize, y: usize) -> i32 {
// Declare a counter
let mut count = 0;
// Check if the cell grid exists
if let Some(grid) = &self.grid.vector {
// Iterate over the cells in the vicinity of the cell at (x, y).
// The [-1,0,1] vectors represent the vicinity offsets for the x and y axis each.
for x_off in vec![-1, 0, 1] {
for y_off in vec![-1, 0, 1] {
// Create the position of the cell in the
// grid based on the vicinity offsets
let nx = x as i32 + x_off;
let ny = y as i32 + y_off;
// Check if position is out of grid bounds (x axis)
if nx < 0 || nx >= grid.len() as i32 {
continue;
}
// Check if position is out of grid bounds (y axis)
if ny < 0 || ny >= grid[nx as usize].len() as i32 {
continue;
}
// Check if position points to the cell itself i.e (0,0) offsets
if nx == x as i32 && ny == y as i32 {
continue;
}
// Check if the cell is alive
match grid[nx as usize][ny as usize].clone() {
// Increment the counter if the cell is alive
BinaryCell::Active => count = count+1,
_ => continue,
}
}
}
}
// Return the counter value
return count
}
}
// Implementation of the Drawable trait for GameOfLife with a CellGrid grid.
impl graphics::Drawable for GameOfLife<CellGrid<BinaryCell>> {
// A method that returns the dimensions of the automaton
fn dimensions(&self, _ctx: &mut ggez::Context) -> Option<graphics::Rect> {
// Get the grid dimensions and add the banner height
if let Some(dimensions) = &self.grid.dimensions {
Some(graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h + 60.0))
} else {None}
}
// A method that returns the graphics blending mode of the automaton grid
fn blend_mode(&self) -> Option<graphics::BlendMode> {
Some(graphics::BlendMode::Add)
}
// A method that sets the graphics blend mode of the automaton grid (currently does nothing)
fn set_blend_mode(&mut self, _: Option<graphics::BlendMode>) {}
// A method that renders the automaton grid and state and returns a GameResult
fn draw(&self, ctx: &mut ggez::Context, param: graphics::DrawParam) -> GameResult<()> {
// Create a new graphic mesh builder
let mut mb = graphics::MeshBuilder::new();
// Iterate through each cell in the grid
for (x, y, cell) in self.grid.clone() {
// Create the bounds of the cell
let cellbounds = graphics::Rect::new(
(x as f32) * self.cellsize,
(y as f32) * self.cellsize,
self.cellsize,
self.cellsize,
);
// Add the cell fill to the mesh builder
mb.rectangle(
graphics::DrawMode::Fill(graphics::FillOptions::default()),
cellbounds,
// Set the cell color based on cell state
match cell {
BinaryCell::Passive => [0.0, 0.0, 0.0, 1.0].into(),
BinaryCell::Active => [1.0, 1.0, 1.0, 1.0].into(),
},
)
// Add the cell boundary to the mesh builder
.rectangle(
graphics::DrawMode::Stroke(graphics::StrokeOptions::default()),
cellbounds,
[1.0, 1.0, 1.0, 0.25].into(),
);
}
// Build and Draw the mesh
mb.build(ctx)?.draw(ctx, param)?;
// Declare a variable for the font size
let font_size = 18.0;
// Create the text graphics for the banner
let mut name_text = graphics::Text::new(self.fullname());
let mut state_text = graphics::Text::new(self.state());
// Set the font styling for the text graphics
state_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
name_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
// Check the grid dimensions
if let Some(dimensions) = &self.grid.dimensions {
// Calculate the spacing between banner elements.
// Assumes 2 units of spacing above the name text and below the state text
// and 1 unit of spacing between the name and state text.
let spacing = (60.0 - (font_size * 2.0)) / 5.0;
// Calculate the position of the name text
let name_offset = dimensions.h + (spacing * 2.0);
// Calculate the position of the state text
let state_offset = dimensions.h + (spacing * 3.0) + font_size;
// Draw the banner text graphics
name_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + name_offset),).into())?;
state_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + state_offset),).into())?;
}
// Return GameResult::Ok
Ok(())
}
} | fullname | identifier_name |
instance.rs | mod siginfo_ext;
pub mod signals;
pub use crate::instance::signals::{signal_handler_none, SignalBehavior, SignalHandler};
use crate::alloc::Alloc;
use crate::context::Context;
use crate::embed_ctx::CtxMap;
use crate::error::Error;
use crate::instance::siginfo_ext::SiginfoExt;
use crate::module::{self, Global, Module};
use crate::trapcode::{TrapCode, TrapCodeType};
use crate::val::{UntypedRetVal, Val};
use crate::WASM_PAGE_SIZE;
use libc::{c_void, siginfo_t, uintptr_t, SIGBUS, SIGSEGV};
use std::any::Any;
use std::cell::{RefCell, UnsafeCell};
use std::ffi::{CStr, CString};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::ptr::{self, NonNull};
use std::sync::Arc;
pub const LUCET_INSTANCE_MAGIC: u64 = 746932922;
pub const INSTANCE_PADDING: usize = 2328;
thread_local! {
/// The host context.
///
/// Control returns here implicitly due to the setup in `Context::init()` when guest functions
/// return normally. Control can return here explicitly from signal handlers when the guest
/// program needs to be terminated.
///
/// This is an `UnsafeCell` due to nested borrows. The context must be borrowed mutably when
/// swapping to the guest context, which means that borrow exists for the entire time the guest
/// function runs even though the mutation to the host context is done only at the beginning of
/// the swap. Meanwhile, the signal handler can run at any point during the guest function, and
/// so it also must be able to immutably borrow the host context if it needs to swap back. The
/// runtime borrowing constraints for a `RefCell` are therefore too strict for this variable.
pub(crate) static HOST_CTX: UnsafeCell<Context> = UnsafeCell::new(Context::new());
/// The currently-running `Instance`, if one exists.
pub(crate) static CURRENT_INSTANCE: RefCell<Option<NonNull<Instance>>> = RefCell::new(None);
}
/// A smart pointer to an [`Instance`](struct.Instance.html) that properly manages cleanup when dropped.
///
/// Instances are always stored in memory backed by a `Region`; we never want to create one directly
/// with the Rust allocator. This type allows us to abide by that rule while also having an owned
/// type that cleans up the instance when we are done with it.
///
/// Since this type implements `Deref` and `DerefMut` to `Instance`, it can usually be treated as
/// though it were a `&mut Instance`.
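///
/// A minimal sketch of typical use (hypothetical handle; in practice one is
/// obtained from a `Region`):
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let handle: InstanceHandle = unimplemented!();
/// // deref coercion lets `Instance` methods be called on the handle directly
/// let heap_len = handle.heap().len();
/// ```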
pub struct InstanceHandle {
inst: NonNull<Instance>,
}
/// Create a new `InstanceHandle`.
///
/// This is not meant for public consumption, but rather is used to make implementations of
/// `Region`.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
pub fn new_instance_handle(
instance: *mut Instance,
module: Arc<dyn Module>,
alloc: Alloc,
embed_ctx: CtxMap,
) -> Result<InstanceHandle, Error> {
let inst = NonNull::new(instance)
.ok_or(lucet_format_err!("instance pointer is null; this is a bug"))?;
// do this check first so we don't run `InstanceHandle::drop()` for a failure
lucet_ensure!(
unsafe { inst.as_ref().magic } != LUCET_INSTANCE_MAGIC,
"created a new instance handle in memory with existing instance magic; this is a bug"
);
let mut handle = InstanceHandle { inst };
let inst = Instance::new(alloc, module, embed_ctx);
unsafe {
// this is wildly unsafe! you must be very careful to not let the drop impls run on the
// uninitialized fields; see
// <https://doc.rust-lang.org/std/mem/fn.forget.html#use-case-1>
// write the whole struct into place over the uninitialized page
ptr::write(&mut *handle, inst);
};
handle.reset()?;
Ok(handle)
}
pub fn instance_handle_to_raw(inst: InstanceHandle) -> *mut Instance {
let ptr = inst.inst.as_ptr();
std::mem::forget(inst);
ptr
}
pub unsafe fn instance_handle_from_raw(ptr: *mut Instance) -> InstanceHandle {
InstanceHandle {
inst: NonNull::new_unchecked(ptr),
}
}
// Safety argument for these deref impls: the instance's `Alloc` field contains an `Arc` to the
// region that backs this memory, keeping the page containing the `Instance` alive as long as the
// region exists
impl Deref for InstanceHandle {
type Target = Instance;
fn deref(&self) -> &Self::Target {
unsafe { self.inst.as_ref() }
}
}
impl DerefMut for InstanceHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { self.inst.as_mut() }
}
}
impl Drop for InstanceHandle {
fn drop(&mut self) {
// eprintln!("InstanceHandle::drop()");
// zero out magic, then run the destructor by taking and dropping the inner `Instance`
self.magic = 0;
unsafe {
mem::replace(self.inst.as_mut(), mem::uninitialized());
}
}
}
/// A Lucet program, together with its dedicated memory and signal handlers.
///
/// This is the primary interface for running programs, examining return values, and accessing the
/// WebAssembly heap.
///
/// `Instance`s are never created by runtime users directly, but rather are acquired from
/// [`Region`](trait.Region.html)s and often accessed through
/// [`InstanceHandle`](struct.InstanceHandle.html) smart pointers. This guarantees that instances
/// and their fields are never moved in memory, otherwise raw pointers in the metadata could be
/// unsafely invalidated.
#[repr(C)]
pub struct Instance {
/// Used to catch bugs in pointer math used to find the address of the instance
magic: u64,
/// The embedding context is a map containing embedder-specific values that are used to
/// implement hostcalls
pub(crate) embed_ctx: CtxMap,
/// The program (WebAssembly module) that is the entrypoint for the instance.
module: Arc<dyn Module>,
/// The `Context` in which the guest program runs
ctx: Context,
/// Instance state and error information
pub(crate) state: State,
/// The memory allocated for this instance
alloc: Alloc,
/// Handler run for signals that do not arise from a known WebAssembly trap, or that involve
/// memory outside of the current instance.
fatal_handler: fn(&Instance) -> !,
/// A fatal handler set from C
c_fatal_handler: Option<unsafe extern "C" fn(*mut Instance)>,
/// Handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the instance thread.
signal_handler: Box<
dyn Fn(
&Instance,
&TrapCode,
libc::c_int,
*const siginfo_t,
*const c_void,
) -> SignalBehavior,
>,
/// Pointer to the function used as the entrypoint (for use in backtraces)
entrypoint: *const extern "C" fn(),
/// Padding to ensure the pointer to the globals sits at the end of the page occupied by the `Instance`
_reserved: [u8; INSTANCE_PADDING],
/// Pointer to the globals
///
/// This is accessed through the `vmctx` pointer, which points to the heap that begins
/// immediately after this struct, so it has to come at the very end.
globals_ptr: *const i64,
}
/// APIs that are internal, but useful to implementors of extension modules; you probably don't want
/// this trait!
///
/// This is a trait rather than inherent `impl`s in order to keep the `lucet-runtime` API clean and
/// safe.
pub trait InstanceInternal {
fn alloc(&self) -> &Alloc;
fn alloc_mut(&mut self) -> &mut Alloc;
fn module(&self) -> &dyn Module;
fn state(&self) -> &State;
fn valid_magic(&self) -> bool;
}
impl InstanceInternal for Instance {
/// Get a reference to the instance's `Alloc`.
fn alloc(&self) -> &Alloc {
&self.alloc
}
/// Get a mutable reference to the instance's `Alloc`.
fn alloc_mut(&mut self) -> &mut Alloc {
&mut self.alloc
}
/// Get a reference to the instance's `Module`.
fn module(&self) -> &dyn Module {
self.module.deref()
}
/// Get a reference to the instance's `State`.
fn state(&self) -> &State {
&self.state
}
/// Check whether the instance magic is valid.
fn valid_magic(&self) -> bool {
self.magic == LUCET_INSTANCE_MAGIC
}
}
// Public API
impl Instance {
/// Run a function with arguments in the guest context at the given entrypoint.
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let mut instance: InstanceHandle = unimplemented!();
/// // regular execution yields `Ok(UntypedRetVal)`
/// let retval = instance.run(b"factorial", &[5u64.into()]).unwrap();
/// assert_eq!(u64::from(retval), 120u64);
///
/// // runtime faults yield `Err(Error)`
/// let result = instance.run(b"faulting_function", &[]);
/// assert!(result.is_err());
/// ```
///
/// # Safety
///
/// This is unsafe in two ways:
///
/// - The type of the entrypoint might not be correct. It might take a different number or
/// different types of arguments than are provided to `args`. It might not even point to a
/// function! We will likely add type information to `lucetc` output so we can dynamically check
/// the type in the future.
///
/// - The entrypoint is foreign code. While we may be convinced that WebAssembly compiled to
/// native code by `lucetc` is safe, we do not have the same guarantee for the hostcalls that a
/// guest may invoke. They might be implemented in an unsafe language, so we must treat this
/// call as unsafe, just like any other FFI call.
///
/// For the moment, we do not mark this as `unsafe` in the Rust type system, but that may change
/// in the future.
pub fn run(&mut self, entrypoint: &[u8], args: &[Val]) -> Result<UntypedRetVal, Error> {
let func = self.module.get_export_func(entrypoint)?;
self.run_func(func, &args)
}
/// Run a function with arguments in the guest context from the [WebAssembly function
/// table](https://webassembly.github.io/spec/core/syntax/modules.html#tables).
///
/// The same safety caveats of [`Instance::run()`](struct.Instance.html#method.run) apply.
pub fn run_func_idx(
&mut self,
table_idx: u32,
func_idx: u32,
args: &[Val],
) -> Result<UntypedRetVal, Error> {
let func = self.module.get_func_from_idx(table_idx, func_idx)?;
self.run_func(func, &args)
}
/// Reset the instance's heap and global variables to their initial state.
///
/// The WebAssembly `start` section will also be run, if one exists.
///
/// The embedder contexts present at instance creation or added with
/// [`Instance::insert_embed_ctx()`](struct.Instance.html#method.insert_embed_ctx) are not
/// modified by this call; it is the embedder's responsibility to clear or reset their state if
/// necessary.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
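///
/// A minimal sketch (hypothetical entrypoint name; assumes an instance handle):
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let mut instance: InstanceHandle = unimplemented!();
/// instance.run(b"scribble_on_heap", &[]).unwrap();
/// // restore the heap and globals to their initial contents
/// instance.reset().unwrap();
/// ```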
pub fn reset(&mut self) -> Result<(), Error> {
self.alloc.reset_heap(self.module.as_ref())?;
let globals = unsafe { self.alloc.globals_mut() };
let mod_globals = self.module.globals();
for (i, v) in mod_globals.iter().enumerate() {
globals[i] = match v.global() {
Global::Import { .. } => {
return Err(Error::Unsupported(format!(
"global imports are unsupported; found: {:?}",
i
)));
}
Global::Def { def } => def.init_val(),
};
}
self.state = State::Ready {
retval: UntypedRetVal::default(),
};
self.run_start()?;
Ok(())
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
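///
/// A minimal sketch (assumes an instance handle):
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let mut instance: InstanceHandle = unimplemented!();
/// // grow the heap by one WebAssembly page (64 KiB)
/// let pages_before = instance.grow_memory(1).unwrap();
/// ```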
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
let orig_len = self
.alloc
.expand_heap(additional_pages * WASM_PAGE_SIZE, self.module.as_ref())?;
Ok(orig_len / WASM_PAGE_SIZE)
}
/// Return the WebAssembly heap as a slice of bytes.
pub fn heap(&self) -> &[u8] {
unsafe { self.alloc.heap() }
}
/// Return the WebAssembly heap as a mutable slice of bytes.
pub fn heap_mut(&mut self) -> &mut [u8] {
unsafe { self.alloc.heap_mut() }
}
/// Return the WebAssembly heap as a slice of `u32`s.
pub fn heap_u32(&self) -> &[u32] {
unsafe { self.alloc.heap_u32() }
}
/// Return the WebAssembly heap as a mutable slice of `u32`s.
pub fn heap_u32_mut(&mut self) -> &mut [u32] {
unsafe { self.alloc.heap_u32_mut() }
}
/// Return the WebAssembly globals as a slice of `i64`s.
pub fn globals(&self) -> &[i64] {
unsafe { self.alloc.globals() }
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
pub fn globals_mut(&mut self) -> &mut [i64] {
unsafe { self.alloc.globals_mut() }
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.alloc.mem_in_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.embed_ctx.contains::<T>()
}
/// Get a reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx<T: Any>(&self) -> Option<&T> {
self.embed_ctx.get::<T>()
}
/// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To add a context for an instance's entire lifetime,
/// including the execution of its `start` section, see
/// [`Region::new_instance_builder()`](trait.Region.html#method.new_instance_builder).
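///
/// A minimal sketch (hypothetical embedder-defined type; assumes an instance handle):
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let mut instance: InstanceHandle = unimplemented!();
/// struct RequestId(u64);
/// instance.insert_embed_ctx(RequestId(42));
/// assert!(instance.contains_embed_ctx::<RequestId>());
/// ```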
pub fn insert_embed_ctx<T: Any>(&mut self, x: T) -> Option<T> {
self.embed_ctx.insert(x)
}
/// Remove a context value of a particular type, returning it if it exists.
pub fn remove_embed_ctx<T: Any>(&mut self) -> Option<T> {
self.embed_ctx.remove::<T>()
}
/// Set the handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the
/// instance thread.
///
/// In most cases, these signals are unrecoverable for the instance that raised them, but do not
/// affect the rest of the process.
///
/// The default signal handler returns
/// [`SignalBehavior::Default`](enum.SignalBehavior.html#variant.Default), which yields a
/// runtime fault error.
///
/// The signal handler must be
/// [signal-safe](http://man7.org/linux/man-pages/man7/signal-safety.7.html).
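///
/// A minimal sketch (assumes an instance handle; the handler shown simply
/// defers every caught signal to the default behavior):
///
/// ```no_run
/// # use lucet_runtime_internals::instance::{InstanceHandle, SignalBehavior};
/// # let mut instance: InstanceHandle = unimplemented!();
/// instance.set_signal_handler(|_inst, _trapcode, _signum, _siginfo, _ucontext| {
///     SignalBehavior::Default
/// });
/// ```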
pub fn set_signal_handler<H>(&mut self, handler: H)
where
H: 'static
+ Fn(&Instance, &TrapCode, libc::c_int, *const siginfo_t, *const c_void) -> SignalBehavior,
{
self.signal_handler = Box::new(handler) as Box<SignalHandler>;
}
/// Set the handler run for signals that do not arise from a known WebAssembly trap, or that
/// involve memory outside of the current instance.
///
/// Fatal signals are not only unrecoverable for the instance that raised them, but may
/// compromise the correctness of the rest of the process if unhandled.
///
/// The default fatal handler calls `panic!()`.
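///
/// A minimal sketch (assumes an instance handle):
///
/// ```no_run
/// # use lucet_runtime_internals::instance::{Instance, InstanceHandle};
/// # let mut instance: InstanceHandle = unimplemented!();
/// fn log_and_abort(_inst: &Instance) -> ! {
///     eprintln!("fatal runtime fault in guest");
///     std::process::abort()
/// }
/// instance.set_fatal_handler(log_and_abort);
/// ```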
pub fn set_fatal_handler(&mut self, handler: fn(&Instance) -> !) {
self.fatal_handler = handler;
}
/// Set the fatal handler to a C-compatible function.
///
/// This is a separate interface, because C functions can't return the `!` type. Like the
/// regular `fatal_handler`, it is not expected to return, but we cannot enforce that through
/// types.
///
/// When a fatal error occurs, this handler is run first, and then the regular `fatal_handler`
/// runs in case it returns.
pub fn set_c_fatal_handler(&mut self, handler: unsafe extern "C" fn(*mut Instance)) {
self.c_fatal_handler = Some(handler);
}
}
// Private API
impl Instance {
fn new(alloc: Alloc, module: Arc<dyn Module>, embed_ctx: CtxMap) -> Self {
let globals_ptr = alloc.slot().globals as *mut i64;
Instance {
magic: LUCET_INSTANCE_MAGIC,
embed_ctx,
module,
ctx: Context::new(),
state: State::Ready {
retval: UntypedRetVal::default(),
},
alloc,
fatal_handler: default_fatal_handler,
c_fatal_handler: None,
signal_handler: Box::new(signal_handler_none) as Box<SignalHandler>,
entrypoint: ptr::null(),
_reserved: [0; INSTANCE_PADDING],
globals_ptr,
}
}
/// Run a function in guest context at the given entrypoint.
fn run_func(
&mut self,
func: *const extern "C" fn(),
args: &[Val],
) -> Result<UntypedRetVal, Error> {
lucet_ensure!(
self.state.is_ready(),
"instance must be ready; this is a bug"
);
if func.is_null() {
return Err(Error::InvalidArgument(
"entrypoint function cannot be null; this is probably a malformed module",
));
}
self.entrypoint = func;
let mut args_with_vmctx = vec![Val::from(self.alloc.slot().heap)];
args_with_vmctx.extend_from_slice(args);
HOST_CTX.with(|host_ctx| {
Context::init(
unsafe { self.alloc.stack_u64_mut() },
unsafe { &mut *host_ctx.get() },
&mut self.ctx,
func,
&args_with_vmctx,
)
})?;
self.state = State::Running;
// there should never be another instance running on this thread when we enter this function
CURRENT_INSTANCE.with(|current_instance| {
let mut current_instance = current_instance.borrow_mut();
assert!(
current_instance.is_none(),
"no other instance is running on this thread"
);
// safety: `self` is not null if we are in this function
*current_instance = Some(unsafe { NonNull::new_unchecked(self) });
});
self.with_signals_on(|i| {
HOST_CTX.with(|host_ctx| {
// Save the current context into `host_ctx`, and jump to the guest context. The
// lucet context is linked to host_ctx, so it will return here after it finishes,
// successfully or otherwise.
unsafe { Context::swap(&mut *host_ctx.get(), &mut i.ctx) };
Ok(())
})
})?;
CURRENT_INSTANCE.with(|current_instance| {
*current_instance.borrow_mut() = None;
});
// Sandbox has jumped back to the host process, indicating it has either:
//
// * trapped, or called hostcall_error: state tag changed to something other than `Running`
// * function body returned: set state back to `Ready` with return value
match &self.state {
State::Running => {
let retval = self.ctx.get_untyped_retval();
self.state = State::Ready { retval };
Ok(retval)
}
State::Terminated { details, .. } => Err(Error::RuntimeTerminated(details.clone())),
State::Fault { .. } => {
// Sandbox is no longer runnable. It's unsafe to determine all error details in the signal
// handler, so we fill in extra details here.
self.populate_fault_detail()?;
if let State::Fault { ref details, .. } = self.state {
if details.fatal {
// Some errors indicate that the guest is not functioning correctly or that
// the loaded code violated some assumption, so bail out via the fatal
// handler.
// Run the C-style fatal handler, if it exists.
self.c_fatal_handler
.map(|h| unsafe { h(self as *mut Instance) });
// If there is no C-style fatal handler, or if it (erroneously) returns,
// call the Rust handler that we know will not return
(self.fatal_handler)(self)
} else {
// leave the full fault details in the instance state, and return the
// higher-level info to the user
Err(Error::RuntimeFault(details.clone()))
}
} else {
panic!("state remains Fault after populate_fault_detail()")
}
}
State::Ready { .. } => {
panic!("instance in Ready state after returning from guest context")
}
}
}
fn run_start(&mut self) -> Result<(), Error> {
if let Some(start) = self.module.get_start_func()? {
self.run_func(start, &[])?;
}
Ok(())
}
fn populate_fault_detail(&mut self) -> Result<(), Error> {
if let State::Fault {
details:
FaultDetails {
rip_addr,
trapcode,
ref mut fatal,
ref mut rip_addr_details,
..
},
siginfo,
..
} = self.state
{
// We do this after returning from the signal handler because it requires `dladdr`
// calls, which are not signal safe
*rip_addr_details = self.module.addr_details(rip_addr as *const c_void)?.clone();
// If the trap table lookup returned unknown, it is a fatal error
let unknown_fault = trapcode.ty == TrapCodeType::Unknown;
// If the trap was a segv or bus fault and the addressed memory was outside the
// guard pages, it is also a fatal error
let outside_guard = (siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS)
&& !self.alloc.addr_in_heap_guard(siginfo.si_addr());
*fatal = unknown_fault || outside_guard;
}
Ok(())
}
}
pub enum State {
Ready {
retval: UntypedRetVal,
},
Running,
Fault {
details: FaultDetails,
siginfo: libc::siginfo_t,
context: libc::ucontext_t,
},
Terminated {
details: TerminationDetails,
},
}
/// Information about a runtime fault.
///
/// Runtime faults are raised implicitly by signal handlers that return `SignalBehavior::Default` in
/// response to signals arising while a guest is running.
#[derive(Clone, Debug)]
pub struct | {
/// If true, the instance's `fatal_handler` will be called.
pub fatal: bool,
/// Information about the type of fault that occurred.
pub trapcode: TrapCode,
/// The instruction pointer where the fault occurred.
pub rip_addr: uintptr_t,
/// Extra information about the instruction pointer's location, if available.
pub rip_addr_details: Option<module::AddrDetails>,
}
impl std::fmt::Display for FaultDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.fatal {
write!(f, "fault FATAL ")?;
} else {
write!(f, "fault ")?;
}
self.trapcode.fmt(f)?;
write!(f, "code at address {:p}", self.rip_addr as *const c_void)?;
if let Some(ref addr_details) = self.rip_addr_details {
if let Some(ref fname) = addr_details.file_name {
let sname = addr_details
.sym_name
.as_ref()
.map(String::as_str)
.unwrap_or("<unknown>");
write!(f, " (symbol {}:{})", fname, sname)?;
}
if addr_details.in_module_code {
write!(f, " (inside module code)")
} else {
write!(f, " (not inside module code)")
}
} else {
write!(f, " (unknown whether in module)")
}
}
}
/// Information about a terminated guest.
///
/// Guests are terminated either explicitly by `Vmctx::terminate()`, or implicitly by signal
/// handlers that return `SignalBehavior::Terminate`. It usually indicates that an unrecoverable
/// error has occurred in a hostcall, rather than in WebAssembly code.
#[derive(Clone)]
pub enum TerminationDetails {
Signal,
GetEmbedCtx,
/// Calls to `Vmctx::terminate()` may attach an arbitrary pointer for extra debugging
/// information.
Provided(Arc<dyn Any>),
}
impl TerminationDetails {
pub fn provide<A: Any>(details: A) -> Self {
TerminationDetails::Provided(Arc::new(details))
}
pub fn provided_details(&self) -> Option<&dyn Any> {
match self {
TerminationDetails::Provided(a) => Some(a.as_ref()),
_ => None,
}
}
}
// Because of deref coercions, the code above was tricky to get right;
// test that a string makes it through.
#[test]
fn termination_details_any_typing() {
let hello = "hello, world".to_owned();
let details = TerminationDetails::provide(hello.clone());
let provided = details.provided_details().expect("got Provided");
assert_eq!(
provided.downcast_ref::<String>().expect("right type"),
&hello
);
}
impl std::fmt::Debug for TerminationDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"TerminationDetails::{}",
match self {
TerminationDetails::Signal => "Signal",
TerminationDetails::GetEmbedCtx => "GetEmbedCtx",
TerminationDetails::Provided(_) => "Provided(Any)",
}
)
}
}
unsafe impl Send for TerminationDetails {}
unsafe impl Sync for TerminationDetails {}
impl std::fmt::Display for State {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
State::Ready { .. } => write!(f, "ready"),
State::Running => write!(f, "running"),
State::Fault {
details, siginfo, ..
} => {
write!(f, "{}", details)?;
write!(
f,
" triggered by {}: ",
strsignal_wrapper(siginfo.si_signo)
.into_string()
.expect("strsignal returns valid UTF-8")
)?;
if siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS {
// We know this is inside the heap guard, because by the time we get here,
// `lucet_error_verify_trap_safety` will have run and validated it.
write!(
f,
" accessed memory at {:p} (inside heap guard)",
siginfo.si_addr()
)?;
}
Ok(())
}
State::Terminated { .. } => write!(f, "terminated"),
}
}
}
impl State {
pub fn is_ready(&self) -> bool {
if let State::Ready { .. } = self {
true
} else {
false
}
}
pub fn is_running(&self) -> bool {
if let State::Running = self {
true
} else {
false
}
}
pub fn is_fault(&self) -> bool {
if let State::Fault { .. } = self {
true
} else {
false
}
}
pub fn is_fatal(&self) -> bool {
if let State::Fault {
details: FaultDetails { fatal, .. },
..
} = self
{
*fatal
} else {
false
}
}
pub fn is_terminated(&self) -> bool {
if let State::Terminated { .. } = self {
true
} else {
false
}
}
}
fn default_fatal_handler(inst: &Instance) -> ! {
panic!("> instance {:p} had fatal error: {}", inst, inst.state);
}
// TODO: PR into `libc`
extern "C" {
#[no_mangle]
fn strsignal(sig: libc::c_int) -> *mut libc::c_char;
}
// TODO: PR into `nix`
fn strsignal_wrapper(sig: libc::c_int) -> CString {
unsafe { CStr::from_ptr(strsignal(sig)).to_owned() }
}
#[cfg(test)]
mod tests {
use super::*;
use memoffset::offset_of;
#[test]
fn instance_size_correct() {
assert_eq!(mem::size_of::<Instance>(), 4096);
}
#[test]
fn instance_globals_offset_correct() {
let offset = offset_of!(Instance, globals_ptr) as isize;
if offset != 4096 - 8 {
let diff = 4096 - 8 - offset;
let new_padding = INSTANCE_PADDING as isize + diff;
panic!("new padding should be: {:?}", new_padding);
}
assert_eq!(offset_of!(Instance, globals_ptr), 4096 - 8);
}
}
| FaultDetails | identifier_name |
instance.rs | mod siginfo_ext;
pub mod signals;
pub use crate::instance::signals::{signal_handler_none, SignalBehavior, SignalHandler};
use crate::alloc::Alloc;
use crate::context::Context;
use crate::embed_ctx::CtxMap;
use crate::error::Error;
use crate::instance::siginfo_ext::SiginfoExt;
use crate::module::{self, Global, Module};
use crate::trapcode::{TrapCode, TrapCodeType};
use crate::val::{UntypedRetVal, Val};
use crate::WASM_PAGE_SIZE;
use libc::{c_void, siginfo_t, uintptr_t, SIGBUS, SIGSEGV};
use std::any::Any;
use std::cell::{RefCell, UnsafeCell};
use std::ffi::{CStr, CString};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::ptr::{self, NonNull};
use std::sync::Arc;
pub const LUCET_INSTANCE_MAGIC: u64 = 746932922;
pub const INSTANCE_PADDING: usize = 2328;
thread_local! {
/// The host context.
///
/// Control returns here implicitly due to the setup in `Context::init()` when guest functions
/// return normally. Control can return here explicitly from signal handlers when the guest
/// program needs to be terminated.
///
/// This is an `UnsafeCell` due to nested borrows. The context must be borrowed mutably when
/// swapping to the guest context, which means that borrow exists for the entire time the guest
/// function runs even though the mutation to the host context is done only at the beginning of
/// the swap. Meanwhile, the signal handler can run at any point during the guest function, and
/// so it also must be able to immutably borrow the host context if it needs to swap back. The
/// runtime borrowing constraints for a `RefCell` are therefore too strict for this variable.
pub(crate) static HOST_CTX: UnsafeCell<Context> = UnsafeCell::new(Context::new());
/// The currently-running `Instance`, if one exists.
pub(crate) static CURRENT_INSTANCE: RefCell<Option<NonNull<Instance>>> = RefCell::new(None);
}
/// A smart pointer to an [`Instance`](struct.Instance.html) that properly manages cleanup when dropped.
///
/// Instances are always stored in memory backed by a `Region`; we never want to create one directly
/// with the Rust allocator. This type allows us to abide by that rule while also having an owned
/// type that cleans up the instance when we are done with it.
///
/// Since this type implements `Deref` and `DerefMut` to `Instance`, it can usually be treated as
/// though it were a `&mut Instance`.
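///
/// For example, a sketch (obtaining a real handle requires a `Region` implementation):
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let handle: InstanceHandle = unimplemented!();
/// // `Instance` methods can be called directly on the handle via deref coercion
/// let _globals_len = handle.globals().len();
/// ```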
pub struct InstanceHandle {
inst: NonNull<Instance>,
}
/// Create a new `InstanceHandle`.
///
/// This is not meant for public consumption, but rather is used to make implementations of
/// `Region`.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
pub fn new_instance_handle(
instance: *mut Instance,
module: Arc<dyn Module>,
alloc: Alloc,
embed_ctx: CtxMap,
) -> Result<InstanceHandle, Error> {
let inst = NonNull::new(instance)
.ok_or(lucet_format_err!("instance pointer is null; this is a bug"))?;
// do this check first so we don't run `InstanceHandle::drop()` for a failure
lucet_ensure!(
unsafe { inst.as_ref().magic } != LUCET_INSTANCE_MAGIC,
"created a new instance handle in memory with existing instance magic; this is a bug"
);
let mut handle = InstanceHandle { inst };
let inst = Instance::new(alloc, module, embed_ctx);
unsafe {
// this is wildly unsafe! you must be very careful to not let the drop impls run on the
// uninitialized fields; see
// <https://doc.rust-lang.org/std/mem/fn.forget.html#use-case-1>
// write the whole struct into place over the uninitialized page
ptr::write(&mut *handle, inst);
};
handle.reset()?;
Ok(handle)
}
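/// Convert an `InstanceHandle` into a raw pointer to its `Instance`.
///
/// The handle's destructor is suppressed with `mem::forget()`, so the instance stays alive; the
/// caller becomes responsible for eventually reconstituting the handle with
/// [`instance_handle_from_raw()`](fn.instance_handle_from_raw.html) and dropping it.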
pub fn instance_handle_to_raw(inst: InstanceHandle) -> *mut Instance {
let ptr = inst.inst.as_ptr();
std::mem::forget(inst);
ptr
}
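/// Reconstitute an `InstanceHandle` from a raw pointer, such as one produced by
/// [`instance_handle_to_raw()`](fn.instance_handle_to_raw.html).
///
/// # Safety
///
/// `ptr` must be a non-null pointer to a live `Instance`, and the resulting handle must be the
/// only one for that instance, since dropping a handle tears down the instance it points to.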
pub unsafe fn instance_handle_from_raw(ptr: *mut Instance) -> InstanceHandle {
InstanceHandle {
inst: NonNull::new_unchecked(ptr),
}
}
// Safety argument for these deref impls: the instance's `Alloc` field contains an `Arc` to the
// region that backs this memory, keeping the page containing the `Instance` alive as long as the
// region exists
impl Deref for InstanceHandle {
type Target = Instance;
fn deref(&self) -> &Self::Target {
unsafe { self.inst.as_ref() }
}
}
impl DerefMut for InstanceHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { self.inst.as_mut() }
}
}
impl Drop for InstanceHandle {
fn drop(&mut self) {
// eprintln!("InstanceHandle::drop()");
// zero out magic, then run the destructor by taking and dropping the inner `Instance`
self.magic = 0;
unsafe {
mem::replace(self.inst.as_mut(), mem::uninitialized());
}
}
}
/// A Lucet program, together with its dedicated memory and signal handlers.
///
/// This is the primary interface for running programs, examining return values, and accessing the
/// WebAssembly heap.
///
/// `Instance`s are never created by runtime users directly, but rather are acquired from
/// [`Region`](trait.Region.html)s and often accessed through
/// [`InstanceHandle`](struct.InstanceHandle.html) smart pointers. This guarantees that instances
/// and their fields are never moved in memory, otherwise raw pointers in the metadata could be
/// unsafely invalidated.
#[repr(C)]
pub struct Instance {
/// Used to catch bugs in pointer math used to find the address of the instance
magic: u64,
/// The embedding context is a map containing embedder-specific values that are used to
/// implement hostcalls
pub(crate) embed_ctx: CtxMap,
/// The program (WebAssembly module) that is the entrypoint for the instance.
module: Arc<dyn Module>,
/// The `Context` in which the guest program runs
ctx: Context,
/// Instance state and error information
pub(crate) state: State,
/// The memory allocated for this instance
alloc: Alloc,
/// Handler run for signals that do not arise from a known WebAssembly trap, or that involve
/// memory outside of the current instance.
fatal_handler: fn(&Instance) -> !,
/// A fatal handler set from C
c_fatal_handler: Option<unsafe extern "C" fn(*mut Instance)>,
/// Handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the instance thread.
signal_handler: Box<
dyn Fn(
&Instance,
&TrapCode,
libc::c_int,
*const siginfo_t,
*const c_void,
) -> SignalBehavior,
>,
/// Pointer to the function used as the entrypoint (for use in backtraces)
entrypoint: *const extern "C" fn(),
/// Padding to ensure the pointer to globals sits at the end of the page occupied by the `Instance`
_reserved: [u8; INSTANCE_PADDING],
/// Pointer to the globals
///
/// This is accessed through the `vmctx` pointer, which points to the heap that begins
/// immediately after this struct, so it has to come at the very end.
globals_ptr: *const i64,
}
/// APIs that are internal, but useful to implementors of extension modules; you probably don't want
/// this trait!
///
/// This is a trait rather than inherent `impl`s in order to keep the `lucet-runtime` API clean and
/// safe.
pub trait InstanceInternal {
fn alloc(&self) -> &Alloc;
fn alloc_mut(&mut self) -> &mut Alloc;
fn module(&self) -> &dyn Module;
fn state(&self) -> &State;
fn valid_magic(&self) -> bool;
}
impl InstanceInternal for Instance {
/// Get a reference to the instance's `Alloc`.
fn alloc(&self) -> &Alloc {
&self.alloc
}
/// Get a mutable reference to the instance's `Alloc`.
fn alloc_mut(&mut self) -> &mut Alloc {
&mut self.alloc
}
/// Get a reference to the instance's `Module`.
fn module(&self) -> &dyn Module {
self.module.deref()
}
/// Get a reference to the instance's `State`.
fn state(&self) -> &State {
&self.state
}
/// Check whether the instance magic is valid.
fn valid_magic(&self) -> bool {
self.magic == LUCET_INSTANCE_MAGIC
}
}
// Public API
impl Instance {
/// Run a function with arguments in the guest context at the given entrypoint.
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let mut instance: InstanceHandle = unimplemented!();
/// // regular execution yields `Ok(UntypedRetVal)`
/// let retval = instance.run(b"factorial", &[5u64.into()]).unwrap();
/// assert_eq!(u64::from(retval), 120u64);
///
/// // runtime faults yield `Err(Error)`
/// let result = instance.run(b"faulting_function", &[]);
/// assert!(result.is_err());
/// ```
///
/// # Safety
///
/// This is unsafe in two ways:
///
/// - The type of the entrypoint might not be correct. It might take a different number or
/// different types of arguments than are provided to `args`. It might not even point to a
/// function! We will likely add type information to `lucetc` output so we can dynamically check
/// the type in the future.
///
/// - The entrypoint is foreign code. While we may be convinced that WebAssembly compiled to
/// native code by `lucetc` is safe, we do not have the same guarantee for the hostcalls that a
/// guest may invoke. They might be implemented in an unsafe language, so we must treat this
/// call as unsafe, just like any other FFI call.
///
/// For the moment, we do not mark this as `unsafe` in the Rust type system, but that may change
/// in the future.
pub fn run(&mut self, entrypoint: &[u8], args: &[Val]) -> Result<UntypedRetVal, Error> {
let func = self.module.get_export_func(entrypoint)?;
self.run_func(func, &args)
}
/// Run a function with arguments in the guest context from the [WebAssembly function
/// table](https://webassembly.github.io/spec/core/syntax/modules.html#tables).
///
/// The same safety caveats of [`Instance::run()`](struct.Instance.html#method.run) apply.
pub fn run_func_idx(
&mut self,
table_idx: u32,
func_idx: u32,
args: &[Val],
) -> Result<UntypedRetVal, Error> {
let func = self.module.get_func_from_idx(table_idx, func_idx)?;
self.run_func(func, &args)
}
/// Reset the instance's heap and global variables to their initial state.
///
/// The WebAssembly `start` section will also be run, if one exists.
///
/// The embedder contexts present at instance creation or added with
/// [`Instance::insert_embed_ctx()`](struct.Instance.html#method.insert_embed_ctx) are not
/// modified by this call; it is the embedder's responsibility to clear or reset their state if
/// necessary.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
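///
/// # Example
///
/// A sketch of reusing one instance across runs; the `b"main"` entrypoint is illustrative:
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let mut instance: InstanceHandle = unimplemented!();
/// let _ = instance.run(b"main", &[]);
/// // restore the heap and globals to their initial contents before the next run
/// instance.reset().expect("instance resets");
/// let _ = instance.run(b"main", &[]);
/// ```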
pub fn reset(&mut self) -> Result<(), Error> {
self.alloc.reset_heap(self.module.as_ref())?;
let globals = unsafe { self.alloc.globals_mut() };
let mod_globals = self.module.globals();
for (i, v) in mod_globals.iter().enumerate() {
globals[i] = match v.global() {
Global::Import { .. } => {
return Err(Error::Unsupported(format!(
"global imports are unsupported; found: {:?}",
i
)));
}
Global::Def { def } => def.init_val(),
};
}
self.state = State::Ready {
retval: UntypedRetVal::default(),
};
self.run_start()?;
Ok(())
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
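///
/// # Example
///
/// A minimal sketch:
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let mut instance: InstanceHandle = unimplemented!();
/// // grow by one WebAssembly page (64 KiB) and recover the new total
/// let orig_pages = instance.grow_memory(1).expect("the heap can grow");
/// let new_pages = orig_pages + 1;
/// # let _ = new_pages;
/// ```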
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
let orig_len = self
.alloc
.expand_heap(additional_pages * WASM_PAGE_SIZE, self.module.as_ref())?;
Ok(orig_len / WASM_PAGE_SIZE)
}
/// Return the WebAssembly heap as a slice of bytes.
pub fn heap(&self) -> &[u8] {
unsafe { self.alloc.heap() }
}
/// Return the WebAssembly heap as a mutable slice of bytes.
pub fn heap_mut(&mut self) -> &mut [u8] {
unsafe { self.alloc.heap_mut() }
}
/// Return the WebAssembly heap as a slice of `u32`s.
pub fn heap_u32(&self) -> &[u32] {
unsafe { self.alloc.heap_u32() }
}
/// Return the WebAssembly heap as a mutable slice of `u32`s.
pub fn heap_u32_mut(&mut self) -> &mut [u32] {
unsafe { self.alloc.heap_u32_mut() }
}
/// Return the WebAssembly globals as a slice of `i64`s. | pub fn globals(&self) -> &[i64] {
unsafe { self.alloc.globals() }
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
pub fn globals_mut(&mut self) -> &mut [i64] {
unsafe { self.alloc.globals_mut() }
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
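///
/// # Example
///
/// A minimal sketch:
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let instance: InstanceHandle = unimplemented!();
/// // the first heap byte is backed by instance memory; a host-side local is not
/// assert!(instance.check_heap(instance.heap().as_ptr(), 1));
/// let host_local = 0u8;
/// assert!(!instance.check_heap(&host_local as *const u8, 1));
/// ```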
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.alloc.mem_in_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.embed_ctx.contains::<T>()
}
/// Get a reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx<T: Any>(&self) -> Option<&T> {
self.embed_ctx.get::<T>()
}
/// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To add a context for an instance's entire lifetime,
/// including the execution of its `start` section, see
/// [`Region::new_instance_builder()`](trait.Region.html#method.new_instance_builder).
pub fn insert_embed_ctx<T: Any>(&mut self, x: T) -> Option<T> {
self.embed_ctx.insert(x)
}
/// Remove a context value of a particular type, returning it if it exists.
pub fn remove_embed_ctx<T: Any>(&mut self) -> Option<T> {
self.embed_ctx.remove::<T>()
}
/// Set the handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the
/// instance thread.
///
/// In most cases, these signals are unrecoverable for the instance that raised them, but do not
/// affect the rest of the process.
///
/// The default signal handler returns
/// [`SignalBehavior::Default`](enum.SignalBehavior.html#variant.Default), which yields a
/// runtime fault error.
///
/// The signal handler must be
/// [signal-safe](http://man7.org/linux/man-pages/man7/signal-safety.7.html).
pub fn set_signal_handler<H>(&mut self, handler: H)
where
H: 'static
+ Fn(&Instance, &TrapCode, libc::c_int, *const siginfo_t, *const c_void) -> SignalBehavior,
{
self.signal_handler = Box::new(handler) as Box<SignalHandler>;
}
/// Set the handler run for signals that do not arise from a known WebAssembly trap, or that
/// involve memory outside of the current instance.
///
/// Fatal signals are not only unrecoverable for the instance that raised them, but may
/// compromise the correctness of the rest of the process if unhandled.
///
/// The default fatal handler calls `panic!()`.
pub fn set_fatal_handler(&mut self, handler: fn(&Instance) -> !) {
self.fatal_handler = handler;
}
/// Set the fatal handler to a C-compatible function.
///
/// This is a separate interface, because C functions can't return the `!` type. Like the
/// regular `fatal_handler`, it is not expected to return, but we cannot enforce that through
/// types.
///
/// When a fatal error occurs, this handler is run first, and then the regular `fatal_handler`
/// runs in case it returns.
pub fn set_c_fatal_handler(&mut self, handler: unsafe extern "C" fn(*mut Instance)) {
self.c_fatal_handler = Some(handler);
}
}
// Private API
impl Instance {
fn new(alloc: Alloc, module: Arc<dyn Module>, embed_ctx: CtxMap) -> Self {
let globals_ptr = alloc.slot().globals as *mut i64;
Instance {
magic: LUCET_INSTANCE_MAGIC,
embed_ctx,
module,
ctx: Context::new(),
state: State::Ready {
retval: UntypedRetVal::default(),
},
alloc,
fatal_handler: default_fatal_handler,
c_fatal_handler: None,
signal_handler: Box::new(signal_handler_none) as Box<SignalHandler>,
entrypoint: ptr::null(),
_reserved: [0; INSTANCE_PADDING],
globals_ptr,
}
}
/// Run a function in guest context at the given entrypoint.
fn run_func(
&mut self,
func: *const extern "C" fn(),
args: &[Val],
) -> Result<UntypedRetVal, Error> {
lucet_ensure!(
self.state.is_ready(),
"instance must be ready; this is a bug"
);
if func.is_null() {
return Err(Error::InvalidArgument(
"entrypoint function cannot be null; this is probably a malformed module",
));
}
self.entrypoint = func;
let mut args_with_vmctx = vec![Val::from(self.alloc.slot().heap)];
args_with_vmctx.extend_from_slice(args);
HOST_CTX.with(|host_ctx| {
Context::init(
unsafe { self.alloc.stack_u64_mut() },
unsafe { &mut *host_ctx.get() },
&mut self.ctx,
func,
&args_with_vmctx,
)
})?;
self.state = State::Running;
// there should never be another instance running on this thread when we enter this function
CURRENT_INSTANCE.with(|current_instance| {
let mut current_instance = current_instance.borrow_mut();
assert!(
current_instance.is_none(),
"no other instance is running on this thread"
);
// safety: `self` is not null if we are in this function
*current_instance = Some(unsafe { NonNull::new_unchecked(self) });
});
self.with_signals_on(|i| {
HOST_CTX.with(|host_ctx| {
// Save the current context into `host_ctx`, and jump to the guest context. The
// lucet context is linked to host_ctx, so it will return here after it finishes,
// successfully or otherwise.
unsafe { Context::swap(&mut *host_ctx.get(), &mut i.ctx) };
Ok(())
})
})?;
CURRENT_INSTANCE.with(|current_instance| {
*current_instance.borrow_mut() = None;
});
// Sandbox has jumped back to the host process, indicating it has either:
//
// * trapped, or called hostcall_error: state tag changed to something other than `Running`
// * function body returned: set state back to `Ready` with return value
match &self.state {
State::Running => {
let retval = self.ctx.get_untyped_retval();
self.state = State::Ready { retval };
Ok(retval)
}
State::Terminated { details, .. } => Err(Error::RuntimeTerminated(details.clone())),
State::Fault { .. } => {
// Sandbox is no longer runnable. It's unsafe to determine all error details in the signal
// handler, so we fill in extra details here.
self.populate_fault_detail()?;
if let State::Fault { ref details, .. } = self.state {
if details.fatal {
// Some errors indicate that the guest is not functioning correctly or that
// the loaded code violated some assumption, so bail out via the fatal
// handler.
// Run the C-style fatal handler, if it exists.
self.c_fatal_handler
.map(|h| unsafe { h(self as *mut Instance) });
// If there is no C-style fatal handler, or if it (erroneously) returns,
// call the Rust handler that we know will not return
(self.fatal_handler)(self)
} else {
// leave the full fault details in the instance state, and return the
// higher-level info to the user
Err(Error::RuntimeFault(details.clone()))
}
} else {
panic!("state remains Fault after populate_fault_detail()")
}
}
State::Ready { .. } => {
panic!("instance in Ready state after returning from guest context")
}
}
}
fn run_start(&mut self) -> Result<(), Error> {
if let Some(start) = self.module.get_start_func()? {
self.run_func(start, &[])?;
}
Ok(())
}
fn populate_fault_detail(&mut self) -> Result<(), Error> {
if let State::Fault {
details:
FaultDetails {
rip_addr,
trapcode,
ref mut fatal,
ref mut rip_addr_details,
..
},
siginfo,
..
} = self.state
{
// We do this after returning from the signal handler because it requires `dladdr`
// calls, which are not signal safe
*rip_addr_details = self.module.addr_details(rip_addr as *const c_void)?.clone();
// If the trap table lookup returned unknown, it is a fatal error
let unknown_fault = trapcode.ty == TrapCodeType::Unknown;
// If the trap was a segv or bus fault and the addressed memory was outside the
// guard pages, it is also a fatal error
let outside_guard = (siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS)
&& !self.alloc.addr_in_heap_guard(siginfo.si_addr());
*fatal = unknown_fault || outside_guard;
}
Ok(())
}
}
pub enum State {
Ready {
retval: UntypedRetVal,
},
Running,
Fault {
details: FaultDetails,
siginfo: libc::siginfo_t,
context: libc::ucontext_t,
},
Terminated {
details: TerminationDetails,
},
}
/// Information about a runtime fault.
///
/// Runtime faults are raised implicitly by signal handlers that return `SignalBehavior::Default` in
/// response to signals arising while a guest is running.
#[derive(Clone, Debug)]
pub struct FaultDetails {
/// If true, the instance's `fatal_handler` will be called.
pub fatal: bool,
/// Information about the type of fault that occurred.
pub trapcode: TrapCode,
/// The instruction pointer where the fault occurred.
pub rip_addr: uintptr_t,
/// Extra information about the instruction pointer's location, if available.
pub rip_addr_details: Option<module::AddrDetails>,
}
impl std::fmt::Display for FaultDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.fatal {
write!(f, "fault FATAL ")?;
} else {
write!(f, "fault ")?;
}
self.trapcode.fmt(f)?;
write!(f, "code at address {:p}", self.rip_addr as *const c_void)?;
if let Some(ref addr_details) = self.rip_addr_details {
if let Some(ref fname) = addr_details.file_name {
let sname = addr_details
.sym_name
.as_ref()
.map(String::as_str)
.unwrap_or("<unknown>");
write!(f, " (symbol {}:{})", fname, sname)?;
}
if addr_details.in_module_code {
write!(f, " (inside module code)")
} else {
write!(f, " (not inside module code)")
}
} else {
write!(f, " (unknown whether in module)")
}
}
}
/// Information about a terminated guest.
///
/// Guests are terminated either explicitly by `Vmctx::terminate()`, or implicitly by signal
/// handlers that return `SignalBehavior::Terminate`. It usually indicates that an unrecoverable
/// error has occurred in a hostcall, rather than in WebAssembly code.
#[derive(Clone)]
pub enum TerminationDetails {
Signal,
GetEmbedCtx,
/// Calls to `Vmctx::terminate()` may attach an arbitrary pointer for extra debugging
/// information.
Provided(Arc<dyn Any>),
}
impl TerminationDetails {
pub fn provide<A: Any>(details: A) -> Self {
TerminationDetails::Provided(Arc::new(details))
}
pub fn provided_details(&self) -> Option<&dyn Any> {
match self {
TerminationDetails::Provided(a) => Some(a.as_ref()),
_ => None,
}
}
}
// Because of deref coercions, the code above was tricky to get right;
// test that a string makes it through.
#[test]
fn termination_details_any_typing() {
let hello = "hello, world".to_owned();
let details = TerminationDetails::provide(hello.clone());
let provided = details.provided_details().expect("got Provided");
assert_eq!(
provided.downcast_ref::<String>().expect("right type"),
&hello
);
}
impl std::fmt::Debug for TerminationDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"TerminationDetails::{}",
match self {
TerminationDetails::Signal => "Signal",
TerminationDetails::GetEmbedCtx => "GetEmbedCtx",
TerminationDetails::Provided(_) => "Provided(Any)",
}
)
}
}
unsafe impl Send for TerminationDetails {}
unsafe impl Sync for TerminationDetails {}
impl std::fmt::Display for State {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
State::Ready { .. } => write!(f, "ready"),
State::Running => write!(f, "running"),
State::Fault {
details, siginfo, ..
} => {
write!(f, "{}", details)?;
write!(
f,
" triggered by {}: ",
strsignal_wrapper(siginfo.si_signo)
.into_string()
.expect("strsignal returns valid UTF-8")
)?;
if siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS {
// We know this is inside the heap guard, because by the time we get here,
// `lucet_error_verify_trap_safety` will have run and validated it.
write!(
f,
" accessed memory at {:p} (inside heap guard)",
siginfo.si_addr()
)?;
}
Ok(())
}
State::Terminated { .. } => write!(f, "terminated"),
}
}
}
impl State {
pub fn is_ready(&self) -> bool {
if let State::Ready { .. } = self {
true
} else {
false
}
}
pub fn is_running(&self) -> bool {
if let State::Running = self {
true
} else {
false
}
}
pub fn is_fault(&self) -> bool {
if let State::Fault { .. } = self {
true
} else {
false
}
}
pub fn is_fatal(&self) -> bool {
if let State::Fault {
details: FaultDetails { fatal, .. },
..
} = self
{
*fatal
} else {
false
}
}
pub fn is_terminated(&self) -> bool {
if let State::Terminated { .. } = self {
true
} else {
false
}
}
}
fn default_fatal_handler(inst: &Instance) -> ! {
panic!("> instance {:p} had fatal error: {}", inst, inst.state);
}
// TODO: PR into `libc`
extern "C" {
#[no_mangle]
fn strsignal(sig: libc::c_int) -> *mut libc::c_char;
}
// TODO: PR into `nix`
fn strsignal_wrapper(sig: libc::c_int) -> CString {
unsafe { CStr::from_ptr(strsignal(sig)).to_owned() }
}
#[cfg(test)]
mod tests {
use super::*;
use memoffset::offset_of;
#[test]
fn instance_size_correct() {
assert_eq!(mem::size_of::<Instance>(), 4096);
}
#[test]
fn instance_globals_offset_correct() {
let offset = offset_of!(Instance, globals_ptr) as isize;
if offset != 4096 - 8 {
let diff = 4096 - 8 - offset;
let new_padding = INSTANCE_PADDING as isize + diff;
panic!("new padding should be: {:?}", new_padding);
}
assert_eq!(offset_of!(Instance, globals_ptr), 4096 - 8);
}
} | random_line_split |
|
instance.rs | mod siginfo_ext;
pub mod signals;
pub use crate::instance::signals::{signal_handler_none, SignalBehavior, SignalHandler};
use crate::alloc::Alloc;
use crate::context::Context;
use crate::embed_ctx::CtxMap;
use crate::error::Error;
use crate::instance::siginfo_ext::SiginfoExt;
use crate::module::{self, Global, Module};
use crate::trapcode::{TrapCode, TrapCodeType};
use crate::val::{UntypedRetVal, Val};
use crate::WASM_PAGE_SIZE;
use libc::{c_void, siginfo_t, uintptr_t, SIGBUS, SIGSEGV};
use std::any::Any;
use std::cell::{RefCell, UnsafeCell};
use std::ffi::{CStr, CString};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::ptr::{self, NonNull};
use std::sync::Arc;
pub const LUCET_INSTANCE_MAGIC: u64 = 746932922;
pub const INSTANCE_PADDING: usize = 2328;
thread_local! {
/// The host context.
///
/// Control returns here implicitly due to the setup in `Context::init()` when guest functions
/// return normally. Control can return here explicitly from signal handlers when the guest
/// program needs to be terminated.
///
/// This is an `UnsafeCell` due to nested borrows. The context must be borrowed mutably when
/// swapping to the guest context, which means that borrow exists for the entire time the guest
/// function runs even though the mutation to the host context is done only at the beginning of
/// the swap. Meanwhile, the signal handler can run at any point during the guest function, and
/// so it also must be able to immutably borrow the host context if it needs to swap back. The
/// runtime borrowing constraints for a `RefCell` are therefore too strict for this variable.
pub(crate) static HOST_CTX: UnsafeCell<Context> = UnsafeCell::new(Context::new());
/// The currently-running `Instance`, if one exists.
pub(crate) static CURRENT_INSTANCE: RefCell<Option<NonNull<Instance>>> = RefCell::new(None);
}
/// A smart pointer to an [`Instance`](struct.Instance.html) that properly manages cleanup when dropped.
///
/// Instances are always stored in memory backed by a `Region`; we never want to create one directly
/// with the Rust allocator. This type allows us to abide by that rule while also having an owned
/// type that cleans up the instance when we are done with it.
///
/// Since this type implements `Deref` and `DerefMut` to `Instance`, it can usually be treated as
/// though it were a `&mut Instance`.
pub struct InstanceHandle {
inst: NonNull<Instance>,
}
/// Create a new `InstanceHandle`.
///
/// This is not meant for public consumption, but rather is used to make implementations of
/// `Region`.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
pub fn new_instance_handle(
instance: *mut Instance,
module: Arc<dyn Module>,
alloc: Alloc,
embed_ctx: CtxMap,
) -> Result<InstanceHandle, Error> {
let inst = NonNull::new(instance)
.ok_or(lucet_format_err!("instance pointer is null; this is a bug"))?;
// do this check first so we don't run `InstanceHandle::drop()` for a failure
lucet_ensure!(
unsafe { inst.as_ref().magic } != LUCET_INSTANCE_MAGIC,
"created a new instance handle in memory with existing instance magic; this is a bug"
);
let mut handle = InstanceHandle { inst };
let inst = Instance::new(alloc, module, embed_ctx);
unsafe {
// this is wildly unsafe! you must be very careful to not let the drop impls run on the
// uninitialized fields; see
// <https://doc.rust-lang.org/std/mem/fn.forget.html#use-case-1>
// write the whole struct into place over the uninitialized page
ptr::write(&mut *handle, inst);
};
handle.reset()?;
Ok(handle)
}
pub fn instance_handle_to_raw(inst: InstanceHandle) -> *mut Instance {
let ptr = inst.inst.as_ptr();
std::mem::forget(inst);
ptr
}
pub unsafe fn instance_handle_from_raw(ptr: *mut Instance) -> InstanceHandle {
InstanceHandle {
inst: NonNull::new_unchecked(ptr),
}
}
// Safety argument for these deref impls: the instance's `Alloc` field contains an `Arc` to the
// region that backs this memory, keeping the page containing the `Instance` alive as long as the
// region exists
impl Deref for InstanceHandle {
type Target = Instance;
fn deref(&self) -> &Self::Target {
unsafe { self.inst.as_ref() }
}
}
impl DerefMut for InstanceHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { self.inst.as_mut() }
}
}
impl Drop for InstanceHandle {
fn drop(&mut self) {
// eprintln!("InstanceHandle::drop()");
// zero out magic, then run the destructor by taking and dropping the inner `Instance`
self.magic = 0;
unsafe {
mem::replace(self.inst.as_mut(), mem::uninitialized());
}
}
}
/// A Lucet program, together with its dedicated memory and signal handlers.
///
/// This is the primary interface for running programs, examining return values, and accessing the
/// WebAssembly heap.
///
/// `Instance`s are never created by runtime users directly, but rather are acquired from
/// [`Region`](trait.Region.html)s and often accessed through
/// [`InstanceHandle`](struct.InstanceHandle.html) smart pointers. This guarantees that instances
/// and their fields are never moved in memory, otherwise raw pointers in the metadata could be
/// unsafely invalidated.
#[repr(C)]
pub struct Instance {
/// Used to catch bugs in pointer math used to find the address of the instance
magic: u64,
/// The embedding context is a map containing embedder-specific values that are used to
/// implement hostcalls
pub(crate) embed_ctx: CtxMap,
/// The program (WebAssembly module) that is the entrypoint for the instance.
module: Arc<dyn Module>,
/// The `Context` in which the guest program runs
ctx: Context,
/// Instance state and error information
pub(crate) state: State,
/// The memory allocated for this instance
alloc: Alloc,
/// Handler run for signals that do not arise from a known WebAssembly trap, or that involve
/// memory outside of the current instance.
fatal_handler: fn(&Instance) -> !,
/// A fatal handler set from C
c_fatal_handler: Option<unsafe extern "C" fn(*mut Instance)>,
/// Handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the instance thread.
signal_handler: Box<
dyn Fn(
&Instance,
&TrapCode,
libc::c_int,
*const siginfo_t,
*const c_void,
) -> SignalBehavior,
>,
/// Pointer to the function used as the entrypoint (for use in backtraces)
entrypoint: *const extern "C" fn(),
/// Padding to ensure the pointer to globals sits at the end of the page occupied by the `Instance`
_reserved: [u8; INSTANCE_PADDING],
/// Pointer to the globals
///
/// This is accessed through the `vmctx` pointer, which points to the heap that begins
/// immediately after this struct, so it has to come at the very end.
globals_ptr: *const i64,
}
/// APIs that are internal, but useful to implementors of extension modules; you probably don't want
/// this trait!
///
/// This is a trait rather than inherent `impl`s in order to keep the `lucet-runtime` API clean and
/// safe.
pub trait InstanceInternal {
fn alloc(&self) -> &Alloc;
fn alloc_mut(&mut self) -> &mut Alloc;
fn module(&self) -> &dyn Module;
fn state(&self) -> &State;
fn valid_magic(&self) -> bool;
}
impl InstanceInternal for Instance {
/// Get a reference to the instance's `Alloc`.
fn alloc(&self) -> &Alloc {
&self.alloc
}
/// Get a mutable reference to the instance's `Alloc`.
fn alloc_mut(&mut self) -> &mut Alloc {
&mut self.alloc
}
/// Get a reference to the instance's `Module`.
fn module(&self) -> &dyn Module {
self.module.deref()
}
/// Get a reference to the instance's `State`.
fn state(&self) -> &State {
&self.state
}
/// Check whether the instance magic is valid.
fn valid_magic(&self) -> bool {
self.magic == LUCET_INSTANCE_MAGIC
}
}
// Public API
impl Instance {
/// Run a function with arguments in the guest context at the given entrypoint.
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let mut instance: InstanceHandle = unimplemented!();
/// // regular execution yields `Ok(UntypedRetVal)`
/// let retval = instance.run(b"factorial", &[5u64.into()]).unwrap();
/// assert_eq!(u64::from(retval), 120u64);
///
/// // runtime faults yield `Err(Error)`
/// let result = instance.run(b"faulting_function", &[]);
/// assert!(result.is_err());
/// ```
///
/// # Safety
///
/// This is unsafe in two ways:
///
/// - The type of the entrypoint might not be correct. It might take a different number or
/// different types of arguments than are provided to `args`. It might not even point to a
/// function! We will likely add type information to `lucetc` output so we can dynamically check
/// the type in the future.
///
/// - The entrypoint is foreign code. While we may be convinced that WebAssembly compiled to
/// native code by `lucetc` is safe, we do not have the same guarantee for the hostcalls that a
/// guest may invoke. They might be implemented in an unsafe language, so we must treat this
/// call as unsafe, just like any other FFI call.
///
/// For the moment, we do not mark this as `unsafe` in the Rust type system, but that may change
/// in the future.
pub fn run(&mut self, entrypoint: &[u8], args: &[Val]) -> Result<UntypedRetVal, Error> {
let func = self.module.get_export_func(entrypoint)?;
self.run_func(func, &args)
}
/// Run a function with arguments in the guest context from the [WebAssembly function
/// table](https://webassembly.github.io/spec/core/syntax/modules.html#tables).
///
/// The same safety caveats of [`Instance::run()`](struct.Instance.html#method.run) apply.
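///
/// # Example
///
/// A sketch; the table and function indices are illustrative and depend on the module:
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let mut instance: InstanceHandle = unimplemented!();
/// let retval = instance.run_func_idx(0, 3, &[5u64.into()]).unwrap();
/// # let _ = retval;
/// ```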
pub fn run_func_idx(
&mut self,
table_idx: u32,
func_idx: u32,
args: &[Val],
) -> Result<UntypedRetVal, Error> {
let func = self.module.get_func_from_idx(table_idx, func_idx)?;
self.run_func(func, &args)
}
/// Reset the instance's heap and global variables to their initial state.
///
/// The WebAssembly `start` section will also be run, if one exists.
///
/// The embedder contexts present at instance creation or added with
/// [`Instance::insert_embed_ctx()`](struct.Instance.html#method.insert_embed_ctx) are not
/// modified by this call; it is the embedder's responsibility to clear or reset their state if
/// necessary.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
pub fn reset(&mut self) -> Result<(), Error> {
self.alloc.reset_heap(self.module.as_ref())?;
let globals = unsafe { self.alloc.globals_mut() };
let mod_globals = self.module.globals();
for (i, v) in mod_globals.iter().enumerate() {
globals[i] = match v.global() {
Global::Import { .. } => {
return Err(Error::Unsupported(format!(
"global imports are unsupported; found: {:?}",
i
)));
}
Global::Def { def } => def.init_val(),
};
}
self.state = State::Ready {
retval: UntypedRetVal::default(),
};
self.run_start()?;
Ok(())
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
let orig_len = self
.alloc
.expand_heap(additional_pages * WASM_PAGE_SIZE, self.module.as_ref())?;
Ok(orig_len / WASM_PAGE_SIZE)
}
/// Return the WebAssembly heap as a slice of bytes.
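///
/// A minimal sketch of reading guest memory:
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let instance: InstanceHandle = unimplemented!();
/// // read the first four bytes of the guest heap
/// let prefix = &instance.heap()[0..4];
/// # let _ = prefix;
/// ```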
pub fn heap(&self) -> &[u8] {
unsafe { self.alloc.heap() }
}
/// Return the WebAssembly heap as a mutable slice of bytes.
pub fn heap_mut(&mut self) -> &mut [u8] {
unsafe { self.alloc.heap_mut() }
}
/// Return the WebAssembly heap as a slice of `u32`s.
pub fn heap_u32(&self) -> &[u32] {
unsafe { self.alloc.heap_u32() }
}
/// Return the WebAssembly heap as a mutable slice of `u32`s.
pub fn heap_u32_mut(&mut self) -> &mut [u32] {
unsafe { self.alloc.heap_u32_mut() }
}
/// Return the WebAssembly globals as a slice of `i64`s.
pub fn globals(&self) -> &[i64] {
unsafe { self.alloc.globals() }
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
pub fn globals_mut(&mut self) -> &mut [i64] {
unsafe { self.alloc.globals_mut() }
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.alloc.mem_in_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.embed_ctx.contains::<T>()
}
/// Get a reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx<T: Any>(&self) -> Option<&T> {
self.embed_ctx.get::<T>()
}
/// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To add a context for an instance's entire lifetime,
/// including the execution of its `start` section, see
/// [`Region::new_instance_builder()`](trait.Region.html#method.new_instance_builder).
pub fn insert_embed_ctx<T: Any>(&mut self, x: T) -> Option<T> {
self.embed_ctx.insert(x)
}
/// Remove a context value of a particular type, returning it if it exists.
pub fn remove_embed_ctx<T: Any>(&mut self) -> Option<T> {
self.embed_ctx.remove::<T>()
}
/// Set the handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the
/// instance thread.
///
/// In most cases, these signals are unrecoverable for the instance that raised them, but do not
/// affect the rest of the process.
///
/// The default signal handler returns
/// [`SignalBehavior::Default`](enum.SignalBehavior.html#variant.Default), which yields a
/// runtime fault error.
///
/// The signal handler must be
/// [signal-safe](http://man7.org/linux/man-pages/man7/signal-safety.7.html).
pub fn set_signal_handler<H>(&mut self, handler: H)
where
H: 'static
+ Fn(&Instance, &TrapCode, libc::c_int, *const siginfo_t, *const c_void) -> SignalBehavior,
{
self.signal_handler = Box::new(handler) as Box<SignalHandler>;
}
/// Set the handler run for signals that do not arise from a known WebAssembly trap, or that
/// involve memory outside of the current instance.
///
/// Fatal signals are not only unrecoverable for the instance that raised them, but may
/// compromise the correctness of the rest of the process if unhandled.
///
/// The default fatal handler calls `panic!()`.
pub fn set_fatal_handler(&mut self, handler: fn(&Instance) -> !) {
self.fatal_handler = handler;
}
/// Set the fatal handler to a C-compatible function.
///
/// This is a separate interface, because C functions can't return the `!` type. Like the
/// regular `fatal_handler`, it is not expected to return, but we cannot enforce that through
/// types.
///
/// When a fatal error occurs, this handler is run first, and then the regular `fatal_handler`
/// runs in case it returns.
pub fn set_c_fatal_handler(&mut self, handler: unsafe extern "C" fn(*mut Instance)) {
self.c_fatal_handler = Some(handler);
}
}
// Private API
impl Instance {
fn new(alloc: Alloc, module: Arc<dyn Module>, embed_ctx: CtxMap) -> Self {
let globals_ptr = alloc.slot().globals as *mut i64;
Instance {
magic: LUCET_INSTANCE_MAGIC,
embed_ctx,
module,
ctx: Context::new(),
state: State::Ready {
retval: UntypedRetVal::default(),
},
alloc,
fatal_handler: default_fatal_handler,
c_fatal_handler: None,
signal_handler: Box::new(signal_handler_none) as Box<SignalHandler>,
entrypoint: ptr::null(),
_reserved: [0; INSTANCE_PADDING],
globals_ptr,
}
}
/// Run a function in guest context at the given entrypoint.
fn run_func(
&mut self,
func: *const extern "C" fn(),
args: &[Val],
) -> Result<UntypedRetVal, Error> {
lucet_ensure!(
self.state.is_ready(),
"instance must be ready; this is a bug"
);
if func.is_null() {
return Err(Error::InvalidArgument(
"entrypoint function cannot be null; this is probably a malformed module",
));
}
self.entrypoint = func;
let mut args_with_vmctx = vec![Val::from(self.alloc.slot().heap)];
args_with_vmctx.extend_from_slice(args);
HOST_CTX.with(|host_ctx| {
Context::init(
unsafe { self.alloc.stack_u64_mut() },
unsafe { &mut *host_ctx.get() },
&mut self.ctx,
func,
&args_with_vmctx,
)
})?;
self.state = State::Running;
// there should never be another instance running on this thread when we enter this function
CURRENT_INSTANCE.with(|current_instance| {
let mut current_instance = current_instance.borrow_mut();
assert!(
current_instance.is_none(),
"no other instance is running on this thread"
);
// safety: `self` is not null if we are in this function
*current_instance = Some(unsafe { NonNull::new_unchecked(self) });
});
self.with_signals_on(|i| {
HOST_CTX.with(|host_ctx| {
// Save the current context into `host_ctx`, and jump to the guest context. The
// lucet context is linked to host_ctx, so it will return here after it finishes,
// successfully or otherwise.
unsafe { Context::swap(&mut *host_ctx.get(), &mut i.ctx) };
Ok(())
})
})?;
CURRENT_INSTANCE.with(|current_instance| {
*current_instance.borrow_mut() = None;
});
// Sandbox has jumped back to the host process, indicating it has either:
//
// * trapped, or called hostcall_error: state tag changed to something other than `Running`
// * function body returned: set state back to `Ready` with return value
match &self.state {
State::Running => {
let retval = self.ctx.get_untyped_retval();
self.state = State::Ready { retval };
Ok(retval)
}
State::Terminated { details, .. } => Err(Error::RuntimeTerminated(details.clone())),
State::Fault { .. } => |
State::Ready { .. } => {
panic!("instance in Ready state after returning from guest context")
}
}
}
fn run_start(&mut self) -> Result<(), Error> {
if let Some(start) = self.module.get_start_func()? {
self.run_func(start, &[])?;
}
Ok(())
}
fn populate_fault_detail(&mut self) -> Result<(), Error> {
if let State::Fault {
details:
FaultDetails {
rip_addr,
trapcode,
ref mut fatal,
ref mut rip_addr_details,
..
},
siginfo,
..
} = self.state
{
// We do this after returning from the signal handler because it requires `dladdr`
// calls, which are not signal safe
*rip_addr_details = self.module.addr_details(rip_addr as *const c_void)?.clone();
// If the trap table lookup returned unknown, it is a fatal error
let unknown_fault = trapcode.ty == TrapCodeType::Unknown;
// If the trap was a segv or bus fault and the addressed memory was outside the
// guard pages, it is also a fatal error
let outside_guard = (siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS)
&& !self.alloc.addr_in_heap_guard(siginfo.si_addr());
*fatal = unknown_fault || outside_guard;
}
Ok(())
}
}
pub enum State {
Ready {
retval: UntypedRetVal,
},
Running,
Fault {
details: FaultDetails,
siginfo: libc::siginfo_t,
context: libc::ucontext_t,
},
Terminated {
details: TerminationDetails,
},
}
/// Information about a runtime fault.
///
/// Runtime faults are raised implicitly by signal handlers that return `SignalBehavior::Default` in
/// response to signals arising while a guest is running.
#[derive(Clone, Debug)]
pub struct FaultDetails {
/// If true, the instance's `fatal_handler` will be called.
pub fatal: bool,
/// Information about the type of fault that occurred.
pub trapcode: TrapCode,
/// The instruction pointer where the fault occurred.
pub rip_addr: uintptr_t,
/// Extra information about the instruction pointer's location, if available.
pub rip_addr_details: Option<module::AddrDetails>,
}
impl std::fmt::Display for FaultDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.fatal {
write!(f, "fault FATAL ")?;
} else {
write!(f, "fault ")?;
}
self.trapcode.fmt(f)?;
write!(f, "code at address {:p}", self.rip_addr as *const c_void)?;
if let Some(ref addr_details) = self.rip_addr_details {
if let Some(ref fname) = addr_details.file_name {
let sname = addr_details
.sym_name
.as_ref()
.map(String::as_str)
.unwrap_or("<unknown>");
write!(f, " (symbol {}:{})", fname, sname)?;
}
if addr_details.in_module_code {
write!(f, " (inside module code)")
} else {
write!(f, " (not inside module code)")
}
} else {
write!(f, " (unknown whether in module)")
}
}
}
/// Information about a terminated guest.
///
/// Guests are terminated either explicitly by `Vmctx::terminate()`, or implicitly by signal
/// handlers that return `SignalBehavior::Terminate`. It usually indicates that an unrecoverable
/// error has occurred in a hostcall, rather than in WebAssembly code.
#[derive(Clone)]
pub enum TerminationDetails {
Signal,
GetEmbedCtx,
/// Calls to `Vmctx::terminate()` may attach an arbitrary pointer for extra debugging
/// information.
Provided(Arc<dyn Any>),
}
impl TerminationDetails {
pub fn provide<A: Any>(details: A) -> Self {
TerminationDetails::Provided(Arc::new(details))
}
pub fn provided_details(&self) -> Option<&dyn Any> {
match self {
TerminationDetails::Provided(a) => Some(a.as_ref()),
_ => None,
}
}
}
// Because of deref coercions, the code above was tricky to get right;
// test that a string makes it through.
#[test]
fn termination_details_any_typing() {
let hello = "hello, world".to_owned();
let details = TerminationDetails::provide(hello.clone());
let provided = details.provided_details().expect("got Provided");
assert_eq!(
provided.downcast_ref::<String>().expect("right type"),
&hello
);
}
impl std::fmt::Debug for TerminationDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"TerminationDetails::{}",
match self {
TerminationDetails::Signal => "Signal",
TerminationDetails::GetEmbedCtx => "GetEmbedCtx",
TerminationDetails::Provided(_) => "Provided(Any)",
}
)
}
}
unsafe impl Send for TerminationDetails {}
unsafe impl Sync for TerminationDetails {}
impl std::fmt::Display for State {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
State::Ready { .. } => write!(f, "ready"),
State::Running => write!(f, "running"),
State::Fault {
details, siginfo, ..
} => {
write!(f, "{}", details)?;
write!(
f,
" triggered by {}: ",
strsignal_wrapper(siginfo.si_signo)
.into_string()
.expect("strsignal returns valid UTF-8")
)?;
if siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS {
// We know this is inside the heap guard, because by the time we get here,
// `lucet_error_verify_trap_safety` will have run and validated it.
write!(
f,
" accessed memory at {:p} (inside heap guard)",
siginfo.si_addr()
)?;
}
Ok(())
}
State::Terminated { .. } => write!(f, "terminated"),
}
}
}
impl State {
pub fn is_ready(&self) -> bool {
if let State::Ready { .. } = self {
true
} else {
false
}
}
pub fn is_running(&self) -> bool {
if let State::Running = self {
true
} else {
false
}
}
pub fn is_fault(&self) -> bool {
if let State::Fault { .. } = self {
true
} else {
false
}
}
pub fn is_fatal(&self) -> bool {
if let State::Fault {
details: FaultDetails { fatal, .. },
..
} = self
{
*fatal
} else {
false
}
}
pub fn is_terminated(&self) -> bool {
if let State::Terminated { .. } = self {
true
} else {
false
}
}
}
fn default_fatal_handler(inst: &Instance) -> ! {
panic!("> instance {:p} had fatal error: {}", inst, inst.state);
}
// TODO: PR into `libc`
extern "C" {
#[no_mangle]
fn strsignal(sig: libc::c_int) -> *mut libc::c_char;
}
// TODO: PR into `nix`
fn strsignal_wrapper(sig: libc::c_int) -> CString {
unsafe { CStr::from_ptr(strsignal(sig)).to_owned() }
}
#[cfg(test)]
mod tests {
use super::*;
use memoffset::offset_of;
#[test]
fn instance_size_correct() {
assert_eq!(mem::size_of::<Instance>(), 4096);
}
#[test]
fn instance_globals_offset_correct() {
let offset = offset_of!(Instance, globals_ptr) as isize;
if offset != 4096 - 8 {
let diff = 4096 - 8 - offset;
let new_padding = INSTANCE_PADDING as isize + diff;
panic!("new padding should be: {:?}", new_padding);
}
assert_eq!(offset_of!(Instance, globals_ptr), 4096 - 8);
}
}
| {
// Sandbox is no longer runnable. It's unsafe to determine all error details in the signal
// handler, so we fill in extra details here.
self.populate_fault_detail()?;
if let State::Fault { ref details, .. } = self.state {
if details.fatal {
// Some errors indicate that the guest is not functioning correctly or that
// the loaded code violated some assumption, so bail out via the fatal
// handler.
// Run the C-style fatal handler, if it exists.
self.c_fatal_handler
.map(|h| unsafe { h(self as *mut Instance) });
// If there is no C-style fatal handler, or if it (erroneously) returns,
// call the Rust handler that we know will not return
(self.fatal_handler)(self)
} else {
// leave the full fault details in the instance state, and return the
// higher-level info to the user
Err(Error::RuntimeFault(details.clone()))
}
} else {
panic!("state remains Fault after populate_fault_detail()")
}
} | conditional_block |
mod siginfo_ext;
pub mod signals;
pub use crate::instance::signals::{signal_handler_none, SignalBehavior, SignalHandler};
use crate::alloc::Alloc;
use crate::context::Context;
use crate::embed_ctx::CtxMap;
use crate::error::Error;
use crate::instance::siginfo_ext::SiginfoExt;
use crate::module::{self, Global, Module};
use crate::trapcode::{TrapCode, TrapCodeType};
use crate::val::{UntypedRetVal, Val};
use crate::WASM_PAGE_SIZE;
use libc::{c_void, siginfo_t, uintptr_t, SIGBUS, SIGSEGV};
use std::any::Any;
use std::cell::{RefCell, UnsafeCell};
use std::ffi::{CStr, CString};
use std::mem;
use std::ops::{Deref, DerefMut};
use std::ptr::{self, NonNull};
use std::sync::Arc;
pub const LUCET_INSTANCE_MAGIC: u64 = 746932922;
pub const INSTANCE_PADDING: usize = 2328;
thread_local! {
/// The host context.
///
/// Control returns here implicitly due to the setup in `Context::init()` when guest functions
/// return normally. Control can return here explicitly from signal handlers when the guest
/// program needs to be terminated.
///
/// This is an `UnsafeCell` due to nested borrows. The context must be borrowed mutably when
/// swapping to the guest context, which means that borrow exists for the entire time the guest
/// function runs even though the mutation to the host context is done only at the beginning of
/// the swap. Meanwhile, the signal handler can run at any point during the guest function, and
/// so it also must be able to immutably borrow the host context if it needs to swap back. The
/// runtime borrowing constraints for a `RefCell` are therefore too strict for this variable.
pub(crate) static HOST_CTX: UnsafeCell<Context> = UnsafeCell::new(Context::new());
/// The currently-running `Instance`, if one exists.
pub(crate) static CURRENT_INSTANCE: RefCell<Option<NonNull<Instance>>> = RefCell::new(None);
}
/// A smart pointer to an [`Instance`](struct.Instance.html) that properly manages cleanup when dropped.
///
/// Instances are always stored in memory backed by a `Region`; we never want to create one directly
/// with the Rust allocator. This type allows us to abide by that rule while also having an owned
/// type that cleans up the instance when we are done with it.
///
/// Since this type implements `Deref` and `DerefMut` to `Instance`, it can usually be treated as
/// though it were a `&mut Instance`.
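///
/// For example, `Instance` methods can be called directly on the handle. A minimal
/// sketch (nothing here is run; the handle is assumed to come from a `Region`):
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let handle: InstanceHandle = unimplemented!();
/// // deref coercion lets us call `Instance::heap()` on the handle
/// let heap_len = handle.heap().len();
/// ```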
pub struct InstanceHandle {
inst: NonNull<Instance>,
}
/// Create a new `InstanceHandle`.
///
/// This is not meant for public consumption, but rather is used to make implementations of
/// `Region`.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
pub fn new_instance_handle(
instance: *mut Instance,
module: Arc<dyn Module>,
alloc: Alloc,
embed_ctx: CtxMap,
) -> Result<InstanceHandle, Error> {
let inst = NonNull::new(instance)
.ok_or(lucet_format_err!("instance pointer is null; this is a bug"))?;
// do this check first so we don't run `InstanceHandle::drop()` for a failure
lucet_ensure!(
unsafe { inst.as_ref().magic } != LUCET_INSTANCE_MAGIC,
"created a new instance handle in memory with existing instance magic; this is a bug"
);
let mut handle = InstanceHandle { inst };
let inst = Instance::new(alloc, module, embed_ctx);
unsafe {
// this is wildly unsafe! you must be very careful to not let the drop impls run on the
// uninitialized fields; see
// <https://doc.rust-lang.org/std/mem/fn.forget.html#use-case-1>
// write the whole struct into place over the uninitialized page
ptr::write(&mut *handle, inst);
};
handle.reset()?;
Ok(handle)
}
pub fn instance_handle_to_raw(inst: InstanceHandle) -> *mut Instance {
let ptr = inst.inst.as_ptr();
std::mem::forget(inst);
ptr
}
pub unsafe fn instance_handle_from_raw(ptr: *mut Instance) -> InstanceHandle {
InstanceHandle {
inst: NonNull::new_unchecked(ptr),
}
}
// Safety argument for these deref impls: the instance's `Alloc` field contains an `Arc` to the
// region that backs this memory, keeping the page containing the `Instance` alive as long as the
// region exists
impl Deref for InstanceHandle {
type Target = Instance;
fn deref(&self) -> &Self::Target {
unsafe { self.inst.as_ref() }
}
}
impl DerefMut for InstanceHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { self.inst.as_mut() }
}
}
impl Drop for InstanceHandle {
fn drop(&mut self) {
// eprintln!("InstanceHandle::drop()");
// zero out magic, then run the destructor by taking and dropping the inner `Instance`
self.magic = 0;
unsafe {
mem::replace(self.inst.as_mut(), mem::uninitialized());
}
}
}
/// A Lucet program, together with its dedicated memory and signal handlers.
///
/// This is the primary interface for running programs, examining return values, and accessing the
/// WebAssembly heap.
///
/// `Instance`s are never created by runtime users directly, but rather are acquired from
/// [`Region`](trait.Region.html)s and often accessed through
/// [`InstanceHandle`](struct.InstanceHandle.html) smart pointers. This guarantees that instances
/// and their fields are never moved in memory, otherwise raw pointers in the metadata could be
/// unsafely invalidated.
#[repr(C)]
pub struct Instance {
/// Used to catch bugs in pointer math used to find the address of the instance
magic: u64,
/// The embedding context is a map containing embedder-specific values that are used to
/// implement hostcalls
pub(crate) embed_ctx: CtxMap,
/// The program (WebAssembly module) that is the entrypoint for the instance.
module: Arc<dyn Module>,
/// The `Context` in which the guest program runs
ctx: Context,
/// Instance state and error information
pub(crate) state: State,
/// The memory allocated for this instance
alloc: Alloc,
/// Handler run for signals that do not arise from a known WebAssembly trap, or that involve
/// memory outside of the current instance.
fatal_handler: fn(&Instance) -> !,
/// A fatal handler set from C
c_fatal_handler: Option<unsafe extern "C" fn(*mut Instance)>,
/// Handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the instance thread.
signal_handler: Box<
dyn Fn(
&Instance,
&TrapCode,
libc::c_int,
*const siginfo_t,
*const c_void,
) -> SignalBehavior,
>,
/// Pointer to the function used as the entrypoint (for use in backtraces)
entrypoint: *const extern "C" fn(),
    /// Padding to ensure the pointer to globals is at the end of the page occupied by the `Instance`
_reserved: [u8; INSTANCE_PADDING],
/// Pointer to the globals
///
/// This is accessed through the `vmctx` pointer, which points to the heap that begins
/// immediately after this struct, so it has to come at the very end.
globals_ptr: *const i64,
}
/// APIs that are internal, but useful to implementors of extension modules; you probably don't want
/// this trait!
///
/// This is a trait rather than inherent `impl`s in order to keep the `lucet-runtime` API clean and
/// safe.
pub trait InstanceInternal {
fn alloc(&self) -> &Alloc;
fn alloc_mut(&mut self) -> &mut Alloc;
fn module(&self) -> &dyn Module;
fn state(&self) -> &State;
fn valid_magic(&self) -> bool;
}
impl InstanceInternal for Instance {
/// Get a reference to the instance's `Alloc`.
    fn alloc(&self) -> &Alloc {
        &self.alloc
    }
/// Get a mutable reference to the instance's `Alloc`.
fn alloc_mut(&mut self) -> &mut Alloc {
&mut self.alloc
}
/// Get a reference to the instance's `Module`.
fn module(&self) -> &dyn Module {
self.module.deref()
}
/// Get a reference to the instance's `State`.
fn state(&self) -> &State {
&self.state
}
/// Check whether the instance magic is valid.
fn valid_magic(&self) -> bool {
self.magic == LUCET_INSTANCE_MAGIC
}
}
// Public API
impl Instance {
/// Run a function with arguments in the guest context at the given entrypoint.
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
    /// # let mut instance: InstanceHandle = unimplemented!();
/// // regular execution yields `Ok(UntypedRetVal)`
/// let retval = instance.run(b"factorial", &[5u64.into()]).unwrap();
/// assert_eq!(u64::from(retval), 120u64);
///
/// // runtime faults yield `Err(Error)`
/// let result = instance.run(b"faulting_function", &[]);
/// assert!(result.is_err());
/// ```
///
/// # Safety
///
/// This is unsafe in two ways:
///
/// - The type of the entrypoint might not be correct. It might take a different number or
/// different types of arguments than are provided to `args`. It might not even point to a
/// function! We will likely add type information to `lucetc` output so we can dynamically check
/// the type in the future.
///
/// - The entrypoint is foreign code. While we may be convinced that WebAssembly compiled to
/// native code by `lucetc` is safe, we do not have the same guarantee for the hostcalls that a
/// guest may invoke. They might be implemented in an unsafe language, so we must treat this
/// call as unsafe, just like any other FFI call.
///
/// For the moment, we do not mark this as `unsafe` in the Rust type system, but that may change
/// in the future.
pub fn run(&mut self, entrypoint: &[u8], args: &[Val]) -> Result<UntypedRetVal, Error> {
let func = self.module.get_export_func(entrypoint)?;
self.run_func(func, &args)
}
/// Run a function with arguments in the guest context from the [WebAssembly function
/// table](https://webassembly.github.io/spec/core/syntax/modules.html#tables).
///
/// The same safety caveats of [`Instance::run()`](struct.Instance.html#method.run) apply.
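    ///
    /// A minimal sketch; the table and function indices below are hypothetical and
    /// depend on the module's function table layout:
    ///
    /// ```no_run
    /// # use lucet_runtime_internals::instance::InstanceHandle;
    /// # let mut instance: InstanceHandle = unimplemented!();
    /// let retval = instance.run_func_idx(0, 2, &[5u64.into()]).unwrap();
    /// ```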
pub fn run_func_idx(
&mut self,
table_idx: u32,
func_idx: u32,
args: &[Val],
) -> Result<UntypedRetVal, Error> {
let func = self.module.get_func_from_idx(table_idx, func_idx)?;
self.run_func(func, &args)
}
/// Reset the instance's heap and global variables to their initial state.
///
/// The WebAssembly `start` section will also be run, if one exists.
///
/// The embedder contexts present at instance creation or added with
/// [`Instance::insert_embed_ctx()`](struct.Instance.html#method.insert_embed_ctx) are not
/// modified by this call; it is the embedder's responsibility to clear or reset their state if
/// necessary.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
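    ///
    /// A minimal sketch of reusing an instance between runs (the entrypoint name is
    /// hypothetical):
    ///
    /// ```no_run
    /// # use lucet_runtime_internals::instance::InstanceHandle;
    /// # let mut instance: InstanceHandle = unimplemented!();
    /// instance.run(b"main", &[]).unwrap();
    /// // restore the heap and globals to their initial values before the next run
    /// instance.reset().unwrap();
    /// instance.run(b"main", &[]).unwrap();
    /// ```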
pub fn reset(&mut self) -> Result<(), Error> {
self.alloc.reset_heap(self.module.as_ref())?;
let globals = unsafe { self.alloc.globals_mut() };
let mod_globals = self.module.globals();
for (i, v) in mod_globals.iter().enumerate() {
globals[i] = match v.global() {
Global::Import { .. } => {
return Err(Error::Unsupported(format!(
"global imports are unsupported; found: {:?}",
i
)));
}
Global::Def { def } => def.init_val(),
};
}
self.state = State::Ready {
retval: UntypedRetVal::default(),
};
self.run_start()?;
Ok(())
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
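    ///
    /// A minimal sketch; whether growth succeeds depends on the heap limits of the
    /// instance's allocation:
    ///
    /// ```no_run
    /// # use lucet_runtime_internals::instance::InstanceHandle;
    /// # let mut instance: InstanceHandle = unimplemented!();
    /// let orig_pages = instance.grow_memory(1).unwrap();
    /// println!("heap was {} pages before growing", orig_pages);
    /// ```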
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
let orig_len = self
.alloc
.expand_heap(additional_pages * WASM_PAGE_SIZE, self.module.as_ref())?;
Ok(orig_len / WASM_PAGE_SIZE)
}
/// Return the WebAssembly heap as a slice of bytes.
pub fn heap(&self) -> &[u8] {
unsafe { self.alloc.heap() }
}
/// Return the WebAssembly heap as a mutable slice of bytes.
pub fn heap_mut(&mut self) -> &mut [u8] {
unsafe { self.alloc.heap_mut() }
}
/// Return the WebAssembly heap as a slice of `u32`s.
pub fn heap_u32(&self) -> &[u32] {
unsafe { self.alloc.heap_u32() }
}
/// Return the WebAssembly heap as a mutable slice of `u32`s.
pub fn heap_u32_mut(&mut self) -> &mut [u32] {
unsafe { self.alloc.heap_u32_mut() }
}
/// Return the WebAssembly globals as a slice of `i64`s.
pub fn globals(&self) -> &[i64] {
unsafe { self.alloc.globals() }
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
pub fn globals_mut(&mut self) -> &mut [i64] {
unsafe { self.alloc.globals_mut() }
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.alloc.mem_in_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.embed_ctx.contains::<T>()
}
/// Get a reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx<T: Any>(&self) -> Option<&T> {
self.embed_ctx.get::<T>()
}
/// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To add a context for an instance's entire lifetime,
/// including the execution of its `start` section, see
/// [`Region::new_instance_builder()`](trait.Region.html#method.new_instance_builder).
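    ///
    /// A minimal sketch using a `String` as the embedder context type:
    ///
    /// ```no_run
    /// # use lucet_runtime_internals::instance::InstanceHandle;
    /// # let mut instance: InstanceHandle = unimplemented!();
    /// assert!(instance.insert_embed_ctx(String::from("hello")).is_none());
    /// assert_eq!(
    ///     instance.get_embed_ctx::<String>().map(String::as_str),
    ///     Some("hello")
    /// );
    /// ```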
pub fn insert_embed_ctx<T: Any>(&mut self, x: T) -> Option<T> {
self.embed_ctx.insert(x)
}
/// Remove a context value of a particular type, returning it if it exists.
pub fn remove_embed_ctx<T: Any>(&mut self) -> Option<T> {
self.embed_ctx.remove::<T>()
}
/// Set the handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the
/// instance thread.
///
/// In most cases, these signals are unrecoverable for the instance that raised them, but do not
/// affect the rest of the process.
///
/// The default signal handler returns
/// [`SignalBehavior::Default`](enum.SignalBehavior.html#variant.Default), which yields a
/// runtime fault error.
///
/// The signal handler must be
/// [signal-safe](http://man7.org/linux/man-pages/man7/signal-safety.7.html).
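    ///
    /// A minimal sketch of a handler that defers to the default behavior for every
    /// signal:
    ///
    /// ```no_run
    /// # use lucet_runtime_internals::instance::InstanceHandle;
    /// # use lucet_runtime_internals::instance::SignalBehavior;
    /// # let mut instance: InstanceHandle = unimplemented!();
    /// instance.set_signal_handler(|_inst, _trapcode, _signum, _siginfo, _ucontext| {
    ///     SignalBehavior::Default
    /// });
    /// ```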
pub fn set_signal_handler<H>(&mut self, handler: H)
where
H: 'static
+ Fn(&Instance, &TrapCode, libc::c_int, *const siginfo_t, *const c_void) -> SignalBehavior,
{
self.signal_handler = Box::new(handler) as Box<SignalHandler>;
}
/// Set the handler run for signals that do not arise from a known WebAssembly trap, or that
/// involve memory outside of the current instance.
///
/// Fatal signals are not only unrecoverable for the instance that raised them, but may
/// compromise the correctness of the rest of the process if unhandled.
///
/// The default fatal handler calls `panic!()`.
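    ///
    /// A minimal sketch of a fatal handler that aborts the whole process:
    ///
    /// ```no_run
    /// # use lucet_runtime_internals::instance::{Instance, InstanceHandle};
    /// # let mut instance: InstanceHandle = unimplemented!();
    /// fn abort_handler(_inst: &Instance) -> ! {
    ///     std::process::abort()
    /// }
    /// instance.set_fatal_handler(abort_handler);
    /// ```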
pub fn set_fatal_handler(&mut self, handler: fn(&Instance) -> !) {
self.fatal_handler = handler;
}
/// Set the fatal handler to a C-compatible function.
///
/// This is a separate interface, because C functions can't return the `!` type. Like the
/// regular `fatal_handler`, it is not expected to return, but we cannot enforce that through
/// types.
///
/// When a fatal error occurs, this handler is run first, and then the regular `fatal_handler`
/// runs in case it returns.
pub fn set_c_fatal_handler(&mut self, handler: unsafe extern "C" fn(*mut Instance)) {
self.c_fatal_handler = Some(handler);
}
}
// Private API
impl Instance {
fn new(alloc: Alloc, module: Arc<dyn Module>, embed_ctx: CtxMap) -> Self {
let globals_ptr = alloc.slot().globals as *mut i64;
Instance {
magic: LUCET_INSTANCE_MAGIC,
embed_ctx: embed_ctx,
module,
ctx: Context::new(),
state: State::Ready {
retval: UntypedRetVal::default(),
},
alloc,
fatal_handler: default_fatal_handler,
c_fatal_handler: None,
signal_handler: Box::new(signal_handler_none) as Box<SignalHandler>,
entrypoint: ptr::null(),
_reserved: [0; INSTANCE_PADDING],
globals_ptr,
}
}
/// Run a function in guest context at the given entrypoint.
fn run_func(
&mut self,
func: *const extern "C" fn(),
args: &[Val],
) -> Result<UntypedRetVal, Error> {
lucet_ensure!(
self.state.is_ready(),
"instance must be ready; this is a bug"
);
if func.is_null() {
return Err(Error::InvalidArgument(
"entrypoint function cannot be null; this is probably a malformed module",
));
}
self.entrypoint = func;
let mut args_with_vmctx = vec![Val::from(self.alloc.slot().heap)];
args_with_vmctx.extend_from_slice(args);
HOST_CTX.with(|host_ctx| {
Context::init(
unsafe { self.alloc.stack_u64_mut() },
unsafe { &mut *host_ctx.get() },
&mut self.ctx,
func,
&args_with_vmctx,
)
})?;
self.state = State::Running;
// there should never be another instance running on this thread when we enter this function
CURRENT_INSTANCE.with(|current_instance| {
let mut current_instance = current_instance.borrow_mut();
assert!(
current_instance.is_none(),
"no other instance is running on this thread"
);
// safety: `self` is not null if we are in this function
*current_instance = Some(unsafe { NonNull::new_unchecked(self) });
});
self.with_signals_on(|i| {
HOST_CTX.with(|host_ctx| {
// Save the current context into `host_ctx`, and jump to the guest context. The
// lucet context is linked to host_ctx, so it will return here after it finishes,
// successfully or otherwise.
unsafe { Context::swap(&mut *host_ctx.get(), &mut i.ctx) };
Ok(())
})
})?;
CURRENT_INSTANCE.with(|current_instance| {
*current_instance.borrow_mut() = None;
});
// Sandbox has jumped back to the host process, indicating it has either:
//
// * trapped, or called hostcall_error: state tag changed to something other than `Running`
// * function body returned: set state back to `Ready` with return value
match &self.state {
State::Running => {
let retval = self.ctx.get_untyped_retval();
self.state = State::Ready { retval };
Ok(retval)
}
State::Terminated { details, .. } => Err(Error::RuntimeTerminated(details.clone())),
State::Fault { .. } => {
// Sandbox is no longer runnable. It's unsafe to determine all error details in the signal
// handler, so we fill in extra details here.
self.populate_fault_detail()?;
if let State::Fault { ref details, .. } = self.state {
if details.fatal {
// Some errors indicate that the guest is not functioning correctly or that
// the loaded code violated some assumption, so bail out via the fatal
// handler.
// Run the C-style fatal handler, if it exists.
self.c_fatal_handler
.map(|h| unsafe { h(self as *mut Instance) });
// If there is no C-style fatal handler, or if it (erroneously) returns,
// call the Rust handler that we know will not return
(self.fatal_handler)(self)
} else {
// leave the full fault details in the instance state, and return the
// higher-level info to the user
Err(Error::RuntimeFault(details.clone()))
}
} else {
panic!("state remains Fault after populate_fault_detail()")
}
}
State::Ready { .. } => {
panic!("instance in Ready state after returning from guest context")
}
}
}
fn run_start(&mut self) -> Result<(), Error> {
if let Some(start) = self.module.get_start_func()? {
self.run_func(start, &[])?;
}
Ok(())
}
fn populate_fault_detail(&mut self) -> Result<(), Error> {
if let State::Fault {
details:
FaultDetails {
rip_addr,
trapcode,
ref mut fatal,
ref mut rip_addr_details,
..
},
siginfo,
..
} = self.state
{
// We do this after returning from the signal handler because it requires `dladdr`
// calls, which are not signal safe
*rip_addr_details = self.module.addr_details(rip_addr as *const c_void)?.clone();
// If the trap table lookup returned unknown, it is a fatal error
let unknown_fault = trapcode.ty == TrapCodeType::Unknown;
// If the trap was a segv or bus fault and the addressed memory was outside the
// guard pages, it is also a fatal error
let outside_guard = (siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS)
&& !self.alloc.addr_in_heap_guard(siginfo.si_addr());
*fatal = unknown_fault || outside_guard;
}
Ok(())
}
}
pub enum State {
Ready {
retval: UntypedRetVal,
},
Running,
Fault {
details: FaultDetails,
siginfo: libc::siginfo_t,
context: libc::ucontext_t,
},
Terminated {
details: TerminationDetails,
},
}
/// Information about a runtime fault.
///
/// Runtime faults are raised implicitly by signal handlers that return `SignalBehavior::Default` in
/// response to signals arising while a guest is running.
#[derive(Clone, Debug)]
pub struct FaultDetails {
/// If true, the instance's `fatal_handler` will be called.
pub fatal: bool,
/// Information about the type of fault that occurred.
pub trapcode: TrapCode,
/// The instruction pointer where the fault occurred.
pub rip_addr: uintptr_t,
/// Extra information about the instruction pointer's location, if available.
pub rip_addr_details: Option<module::AddrDetails>,
}
impl std::fmt::Display for FaultDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.fatal {
write!(f, "fault FATAL ")?;
} else {
write!(f, "fault ")?;
}
self.trapcode.fmt(f)?;
        write!(f, " code at address {:p}", self.rip_addr as *const c_void)?;
if let Some(ref addr_details) = self.rip_addr_details {
if let Some(ref fname) = addr_details.file_name {
let sname = addr_details
.sym_name
.as_ref()
.map(String::as_str)
.unwrap_or("<unknown>");
write!(f, " (symbol {}:{})", fname, sname)?;
}
if addr_details.in_module_code {
write!(f, " (inside module code)")
} else {
write!(f, " (not inside module code)")
}
} else {
write!(f, " (unknown whether in module)")
}
}
}
/// Information about a terminated guest.
///
/// Guests are terminated either explicitly by `Vmctx::terminate()`, or implicitly by signal
/// handlers that return `SignalBehavior::Terminate`. It usually indicates that an unrecoverable
/// error has occurred in a hostcall, rather than in WebAssembly code.
#[derive(Clone)]
pub enum TerminationDetails {
Signal,
GetEmbedCtx,
/// Calls to `Vmctx::terminate()` may attach an arbitrary pointer for extra debugging
/// information.
Provided(Arc<dyn Any>),
}
impl TerminationDetails {
pub fn provide<A: Any>(details: A) -> Self {
TerminationDetails::Provided(Arc::new(details))
}
pub fn provided_details(&self) -> Option<&dyn Any> {
match self {
TerminationDetails::Provided(a) => Some(a.as_ref()),
_ => None,
}
}
}
// Because of deref coercions, the code above was tricky to get right;
// test that a string makes it through.
#[test]
fn termination_details_any_typing() {
let hello = "hello, world".to_owned();
let details = TerminationDetails::provide(hello.clone());
let provided = details.provided_details().expect("got Provided");
assert_eq!(
provided.downcast_ref::<String>().expect("right type"),
&hello
);
}
impl std::fmt::Debug for TerminationDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"TerminationDetails::{}",
match self {
TerminationDetails::Signal => "Signal",
TerminationDetails::GetEmbedCtx => "GetEmbedCtx",
TerminationDetails::Provided(_) => "Provided(Any)",
}
)
}
}
unsafe impl Send for TerminationDetails {}
unsafe impl Sync for TerminationDetails {}
impl std::fmt::Display for State {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
State::Ready { .. } => write!(f, "ready"),
State::Running => write!(f, "running"),
State::Fault {
details, siginfo, ..
} => {
write!(f, "{}", details)?;
write!(
f,
" triggered by {}: ",
strsignal_wrapper(siginfo.si_signo)
.into_string()
.expect("strsignal returns valid UTF-8")
)?;
if siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS {
// We know this is inside the heap guard, because by the time we get here,
// `lucet_error_verify_trap_safety` will have run and validated it.
write!(
f,
" accessed memory at {:p} (inside heap guard)",
siginfo.si_addr()
)?;
}
Ok(())
}
State::Terminated { .. } => write!(f, "terminated"),
}
}
}
impl State {
pub fn is_ready(&self) -> bool {
if let State::Ready { .. } = self {
true
} else {
false
}
}
pub fn is_running(&self) -> bool {
if let State::Running = self {
true
} else {
false
}
}
pub fn is_fault(&self) -> bool {
if let State::Fault { .. } = self {
true
} else {
false
}
}
pub fn is_fatal(&self) -> bool {
if let State::Fault {
details: FaultDetails { fatal, .. },
..
} = self
{
*fatal
} else {
false
}
}
pub fn is_terminated(&self) -> bool {
if let State::Terminated { .. } = self {
true
} else {
false
}
}
}
fn default_fatal_handler(inst: &Instance) -> ! {
panic!("> instance {:p} had fatal error: {}", inst, inst.state);
}
// TODO: PR into `libc`
extern "C" {
#[no_mangle]
fn strsignal(sig: libc::c_int) -> *mut libc::c_char;
}
// TODO: PR into `nix`
fn strsignal_wrapper(sig: libc::c_int) -> CString {
unsafe { CStr::from_ptr(strsignal(sig)).to_owned() }
}
#[cfg(test)]
mod tests {
use super::*;
use memoffset::offset_of;
#[test]
fn instance_size_correct() {
assert_eq!(mem::size_of::<Instance>(), 4096);
}
#[test]
fn instance_globals_offset_correct() {
let offset = offset_of!(Instance, globals_ptr) as isize;
if offset != 4096 - 8 {
let diff = 4096 - 8 - offset;
let new_padding = INSTANCE_PADDING as isize + diff;
panic!("new padding should be: {:?}", new_padding);
}
assert_eq!(offset_of!(Instance, globals_ptr), 4096 - 8);
}
}