file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-39k) | suffix (large_string, lengths 0-36.1k) | middle (large_string, lengths 0-29.4k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
demo.py | #!/usr/bin/env python
########################################
# Mario Rosasco, 2017
########################################
from Model import *
from Visualization import *
from scipy.optimize import minimize
from allensdk.ephys.ephys_features import detect_putative_spikes
import numpy as np
import sys
def | (modelID):
print "Loading parameters for model", modelID
selection=raw_input('Would you like to download NWB data for model? [Y/N] ')
if selection[0] == 'y' or selection[0] == 'Y':
currModel = Model(modelID, cache_stim = True)
if selection[0] == 'n' or selection[0] == 'N':
currModel = Model(modelID, cache_stim = False)
currModel.init_model()
while(True):
print "Initialized biophysical model", modelID
print '''
Please select from the following options:
1 - Run test pulse on model
2 - Fit model parameter to data
3 - Display static neuron model
4 - Visualize model dynamics
5 - Quit
'''
try:
selection=int(raw_input('Please choose an option above: '))
except ValueError:
print "Invalid selection."
continue
# test pulse example
if selection == 1:
# Run the model with a test pulse of the 'long square' type
print "Running model with a long square current injection pulse of 210pA"
output = currModel.long_square(0.21)
currModel.plot_output()
# fit parameter example
elif selection == 2:
if not currModel.bp.cache_stimulus:
print "Current model was not instantiated with NWB data cached. Please reload the current model and cache experimental stimulus data."
continue
print "Fitting somatic sodium conductance for model", modelID, "to experimental data in sweep 41."
print "Please be patient, this may take some time."
# Define which section and which parameter to fit.
# Here we'll fit the somatic sodium conductance.
currModel.set_fit_section('soma', 0)
currModel.set_parameter_to_fit('gbar_NaV')
# Running the model with an NWB pulse as stimulus takes a
# very long time because of the high sampling rate.
# As a computationally-cheaper approximation for stimuli of
# type Long Square pulse, we can rebuild the stimulus with the
# default (lower) sampling rate in h.IClamp
# currModel.run_nwb_pulse(41) # too slow
output = currModel.long_square(0.21)
# Set the experimental reference sweep and set up the variables for the objective function
currModel.set_reference_sweep(ref_index=41)
currModel.set_up_objective(measure='spike frequency')
# Use SciPy's minimize functions to fit the specified parameter
#results = minimize(currModel.objective_function, currModel.theta, method='Nelder-Mead', tol=1e-3)
#results = minimize(currModel.objective_function, currModel.theta, method='Powell', tol=1e-3)
#results = minimize(currModel.objective_function, currModel.theta, method='COBYLA', tol=1e-5)
currModel.gradient_descent(alpha=0.00005, epsilon=0.001, threshold=0.01, max_cycles=1000)
currModel.plot_fit()
output = currModel.long_square(0.21)
currModel.plot_output()
times = np.array(output['t'])/1000
spikes = detect_putative_spikes(np.array(output['v']), times, 0.1, 1.1)
avg_rate = currModel.average_rate_from_delays(times, spikes, 0.1, 1.1)
print "spike rate for theta of", currModel.theta, ":", avg_rate
# static visualization example
elif selection == 3:
run_visualization(currModel)
elif selection == 4:
run_visualization(currModel, show_simulation_dynamics = True)
elif selection == 5:
quit()
else:
print "Invalid selection."
continue
def run_visualization(currModel, show_simulation_dynamics = False):
print "Setting up visualization..."
morphology = currModel.get_reconstruction()
# Prepare model coordinates for uploading to OpenGL.
tempIndices = []
tempVertices = []
n_index = 0
tempX = []
tempY = []
tempZ = []
tempCol = []
if not show_simulation_dynamics:
print '''
Soma - Red
Axon - Green
Dendrites - Blue
Apical Dendrites - Purple'''
# array of colors to denote individual compartment types
compartmentColors=[[0.0,0.0,0.0,0.0], # padding for index convenience
[1.0, 0.0, 0.0, 1.0], #1: soma - red
[0.0, 1.0, 0.0, 1.0], #2: axon - green
[0.0, 0.0, 1.0, 1.0], #3: dendrites - blue
[1.0, 0.0, 1.0, 1.0]] #4: apical dendrites - purple
color_dim = 4
# used to set up section monitoring for visualization of dynamics
compartmentNames=['none', # padding for index convenience
'soma', #1: soma
'axon', #2: axon
'dend', #3: dendrites - blue
'dend'] #4: apical dendrites - purple
sectionIndices=[0,0,0,0,0]
segmentsPerSection = {}
sec_name = ''
# initialize storage arrays for each vertex.
index = 0
n_compartments = len(morphology.compartment_list)
tempX = [0] * n_compartments
tempY = [0] * n_compartments
tempZ = [0] * n_compartments
tempCol = [0] * n_compartments * color_dim
for n in morphology.compartment_list:
# add parent coords
tempX[n['id']] = n['x']
tempY[n['id']] = -n['y']
tempZ[n['id']] = n['z']
# add color data for parent
col_i = 0
offset = n['id']*color_dim
for cval in compartmentColors[n['type']]:
tempCol[offset+col_i] = cval
col_i += 1
# if at a branch point or an end of a section, set up a vector to monitor that segment's voltage
type = compartmentNames[n['type']]
sec_index = sectionIndices[n['type']]
if not (len(morphology.children_of(n)) == 1): #either branch pt or end
sec_name = type + '[' + str(sec_index) + ']'
sectionIndices[n['type']] += 1
currModel.monitor_section_voltage(type, sec_index)
segmentsPerSection[sec_name] = 1
else:
segmentsPerSection[sec_name] += 1
index += 1
for c in morphology.children_of(n):
# add child coords
tempX[c['id']] = c['x']
tempY[c['id']] = -c['y']
tempZ[c['id']] = c['z']
# add index data:
# draw from parent to child, for each child
tempIndices.append(n['id'])
tempIndices.append(c['id'])
index += 1
# add color data for child
col_i = 0
offset = c['id']*color_dim
for cval in compartmentColors[c['type']]:
tempCol[offset+col_i] = cval
col_i += 1
segmentsPerSection[sec_name] += 1
# get ranges for scaling
maxX = max(tempX)
maxY = max(tempY)
maxZ = max(tempZ)
minX = min(tempX)
minY = min(tempY)
minZ = min(tempZ)
xHalfRange = (maxX - minX)/2.0
yHalfRange = (maxY - minY)/2.0
zHalfRange = (maxZ - minZ)/2.0
longestDimLen = max(xHalfRange, yHalfRange, zHalfRange)
# center coords about 0,0,0, with range -1 to 1
tempX = [((((x-minX)*(2*xHalfRange))/(2*xHalfRange)) - xHalfRange)/longestDimLen for x in tempX]
tempY = [((((y-minY)*(2*yHalfRange))/(2*yHalfRange)) - yHalfRange)/longestDimLen for y in tempY]
tempZ = [((((z-minZ)*(2*zHalfRange))/(2*zHalfRange)) - zHalfRange)/longestDimLen for z in tempZ]
# convert everything to a numpy array so OpenGL can use it
indexData = np.array(tempIndices, dtype='uint16')
vertexData = np.array([tempX,tempY,tempZ], dtype='float32')
tempCol = np.array(tempCol, dtype='float32')
vertexData = np.append(vertexData.transpose().flatten(), tempCol)
#################### /Preparing Model Coords
# Set up the Visualization instance
n_vertices = len(tempX)
currVis = Visualization(data=vertexData, indices=indexData, nVert=n_vertices, colorDim=color_dim)
if show_simulation_dynamics:
currModel.run_test_pulse(amp=0.25, delay=20.0, dur=20.0, tstop=60.0)
#currModel.plot_output() # uncomment this line to display the somatic potential over time before the visualization begins
sectionOutput = currModel.section_output
n_segments = n_vertices
# set up looping color change data
all_voltages = []
n_pts = len(sectionOutput['t'])
for t in range(n_pts): # for each timepoint...
for key in sectionOutput.keys(): # for each section...
if key != 't':
for s in range(segmentsPerSection[key]): # for each segment...
all_voltages.append(sectionOutput[key][t]) # ...set up color for segment
all_voltages = np.array(all_voltages, dtype='float32')
all_voltages -= min(all_voltages)
all_voltages /= max(all_voltages)
temp_col = []
n_pts = 0
for v in all_voltages:
temp_col.append(v)
temp_col.append(0.0)
temp_col.append(1.0-v)
temp_col.append(1.0)
n_pts += 1
voltage_col = np.array(temp_col, dtype='float32')
currVis.change_color_loop(voltage_col, n_colors=n_segments, n_timepoints=n_pts, offset=0, rate=0.10)
currVis.run()
if __name__ == '__main__':
if len(sys.argv) == 1: # no model ID passed as argument
modelID = 497233230
else:
try:
modelID=int(sys.argv[1])
except ValueError:
print "Could not interpret model ID. Initializing with example model 497233230"
modelID = 497233230
main(modelID) | main | identifier_name |
demo.py | #!/usr/bin/env python
########################################
# Mario Rosasco, 2017
########################################
from Model import *
from Visualization import *
from scipy.optimize import minimize
from allensdk.ephys.ephys_features import detect_putative_spikes
import numpy as np
import sys
def main(modelID):
print "Loading parameters for model", modelID
selection=raw_input('Would you like to download NWB data for model? [Y/N] ')
if selection[0] == 'y' or selection[0] == 'Y':
currModel = Model(modelID, cache_stim = True)
if selection[0] == 'n' or selection[0] == 'N':
currModel = Model(modelID, cache_stim = False)
currModel.init_model()
while(True):
print "Initialized biophysical model", modelID
print '''
Please select from the following options:
1 - Run test pulse on model
2 - Fit model parameter to data
3 - Display static neuron model
4 - Visualize model dynamics
5 - Quit
'''
try:
selection=int(raw_input('Please choose an option above: '))
except ValueError:
print "Invalid selection."
continue
# test pulse example
if selection == 1:
# Run the model with a test pulse of the 'long square' type
print "Running model with a long square current injection pulse of 210pA"
output = currModel.long_square(0.21)
currModel.plot_output()
# fit parameter example
elif selection == 2:
if not currModel.bp.cache_stimulus:
print "Current model was not instantiated with NWB data cached. Please reload the current model and cache experimental stimulus data."
continue
print "Fitting somatic sodium conductance for model", modelID, "to experimental data in sweep 41."
print "Please be patient, this may take some time."
# Define which section and which parameter to fit.
# Here we'll fit the somatic sodium conductance.
currModel.set_fit_section('soma', 0)
currModel.set_parameter_to_fit('gbar_NaV')
# Running the model with an NWB pulse as stimulus takes a
# very long time because of the high sampling rate.
# As a computationally-cheaper approximation for stimuli of
# type Long Square pulse, we can rebuild the stimulus with the
# default (lower) sampling rate in h.IClamp
# currModel.run_nwb_pulse(41) # too slow
output = currModel.long_square(0.21)
# Set the experimental reference sweep and set up the variables for the objective function
currModel.set_reference_sweep(ref_index=41)
currModel.set_up_objective(measure='spike frequency')
# Use SciPy's minimize functions to fit the specified parameter
#results = minimize(currModel.objective_function, currModel.theta, method='Nelder-Mead', tol=1e-3)
#results = minimize(currModel.objective_function, currModel.theta, method='Powell', tol=1e-3)
#results = minimize(currModel.objective_function, currModel.theta, method='COBYLA', tol=1e-5)
currModel.gradient_descent(alpha=0.00005, epsilon=0.001, threshold=0.01, max_cycles=1000)
currModel.plot_fit()
output = currModel.long_square(0.21)
currModel.plot_output()
times = np.array(output['t'])/1000
spikes = detect_putative_spikes(np.array(output['v']), times, 0.1, 1.1)
avg_rate = currModel.average_rate_from_delays(times, spikes, 0.1, 1.1)
print "spike rate for theta of", currModel.theta, ":", avg_rate
# static visualization example
elif selection == 3:
run_visualization(currModel)
elif selection == 4:
run_visualization(currModel, show_simulation_dynamics = True)
elif selection == 5:
quit()
else:
print "Invalid selection."
continue
def run_visualization(currModel, show_simulation_dynamics = False):
print "Setting up visualization..."
morphology = currModel.get_reconstruction()
# Prepare model coordinates for uploading to OpenGL.
tempIndices = []
tempVertices = []
n_index = 0
tempX = []
tempY = []
tempZ = []
tempCol = []
if not show_simulation_dynamics:
print '''
Soma - Red
Axon - Green
Dendrites - Blue
Apical Dendrites - Purple'''
# array of colors to denote individual compartment types
compartmentColors=[[0.0,0.0,0.0,0.0], # padding for index convenience
[1.0, 0.0, 0.0, 1.0], #1: soma - red
[0.0, 1.0, 0.0, 1.0], #2: axon - green
[0.0, 0.0, 1.0, 1.0], #3: dendrites - blue
[1.0, 0.0, 1.0, 1.0]] #4: apical dendrites - purple
color_dim = 4
# used to set up section monitoring for visualization of dynamics
compartmentNames=['none', # padding for index convenience
'soma', #1: soma
'axon', #2: axon
'dend', #3: dendrites - blue
'dend'] #4: apical dendrites - purple
sectionIndices=[0,0,0,0,0]
segmentsPerSection = {}
sec_name = ''
# initialize storage arrays for each vertex.
index = 0
n_compartments = len(morphology.compartment_list)
tempX = [0] * n_compartments
tempY = [0] * n_compartments
tempZ = [0] * n_compartments
tempCol = [0] * n_compartments * color_dim
for n in morphology.compartment_list:
# add parent coords
tempX[n['id']] = n['x']
tempY[n['id']] = -n['y']
tempZ[n['id']] = n['z']
# add color data for parent
col_i = 0
offset = n['id']*color_dim
for cval in compartmentColors[n['type']]:
|
# if at a branch point or an end of a section, set up a vector to monitor that segment's voltage
type = compartmentNames[n['type']]
sec_index = sectionIndices[n['type']]
if not (len(morphology.children_of(n)) == 1): #either branch pt or end
sec_name = type + '[' + str(sec_index) + ']'
sectionIndices[n['type']] += 1
currModel.monitor_section_voltage(type, sec_index)
segmentsPerSection[sec_name] = 1
else:
segmentsPerSection[sec_name] += 1
index += 1
for c in morphology.children_of(n):
# add child coords
tempX[c['id']] = c['x']
tempY[c['id']] = -c['y']
tempZ[c['id']] = c['z']
# add index data:
# draw from parent to child, for each child
tempIndices.append(n['id'])
tempIndices.append(c['id'])
index += 1
# add color data for child
col_i = 0
offset = c['id']*color_dim
for cval in compartmentColors[c['type']]:
tempCol[offset+col_i] = cval
col_i += 1
segmentsPerSection[sec_name] += 1
# get ranges for scaling
maxX = max(tempX)
maxY = max(tempY)
maxZ = max(tempZ)
minX = min(tempX)
minY = min(tempY)
minZ = min(tempZ)
xHalfRange = (maxX - minX)/2.0
yHalfRange = (maxY - minY)/2.0
zHalfRange = (maxZ - minZ)/2.0
longestDimLen = max(xHalfRange, yHalfRange, zHalfRange)
# center coords about 0,0,0, with range -1 to 1
tempX = [((((x-minX)*(2*xHalfRange))/(2*xHalfRange)) - xHalfRange)/longestDimLen for x in tempX]
tempY = [((((y-minY)*(2*yHalfRange))/(2*yHalfRange)) - yHalfRange)/longestDimLen for y in tempY]
tempZ = [((((z-minZ)*(2*zHalfRange))/(2*zHalfRange)) - zHalfRange)/longestDimLen for z in tempZ]
# convert everything to a numpy array so OpenGL can use it
indexData = np.array(tempIndices, dtype='uint16')
vertexData = np.array([tempX,tempY,tempZ], dtype='float32')
tempCol = np.array(tempCol, dtype='float32')
vertexData = np.append(vertexData.transpose().flatten(), tempCol)
#################### /Preparing Model Coords
# Set up the Visualization instance
n_vertices = len(tempX)
currVis = Visualization(data=vertexData, indices=indexData, nVert=n_vertices, colorDim=color_dim)
if show_simulation_dynamics:
currModel.run_test_pulse(amp=0.25, delay=20.0, dur=20.0, tstop=60.0)
#currModel.plot_output() # uncomment this line to display the somatic potential over time before the visualization begins
sectionOutput = currModel.section_output
n_segments = n_vertices
# set up looping color change data
all_voltages = []
n_pts = len(sectionOutput['t'])
for t in range(n_pts): # for each timepoint...
for key in sectionOutput.keys(): # for each section...
if key != 't':
for s in range(segmentsPerSection[key]): # for each segment...
all_voltages.append(sectionOutput[key][t]) # ...set up color for segment
all_voltages = np.array(all_voltages, dtype='float32')
all_voltages -= min(all_voltages)
all_voltages /= max(all_voltages)
temp_col = []
n_pts = 0
for v in all_voltages:
temp_col.append(v)
temp_col.append(0.0)
temp_col.append(1.0-v)
temp_col.append(1.0)
n_pts += 1
voltage_col = np.array(temp_col, dtype='float32')
currVis.change_color_loop(voltage_col, n_colors=n_segments, n_timepoints=n_pts, offset=0, rate=0.10)
currVis.run()
if __name__ == '__main__':
if len(sys.argv) == 1: # no model ID passed as argument
modelID = 497233230
else:
try:
modelID=int(sys.argv[1])
except ValueError:
print "Could not interpret model ID. Initializing with example model 497233230"
modelID = 497233230
main(modelID) | tempCol[offset+col_i] = cval
col_i += 1 | conditional_block |
fs.rs | use core_collections::borrow::ToOwned;
use io::{self, BufRead, BufReader, Read, Error, Result, Write, Seek, SeekFrom};
use os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use mem;
use path::{PathBuf, Path};
use string::String;
use sys_common::AsInner;
use vec::Vec;
use syscall::{open, dup, close, fpath, fstat, ftruncate, read,
write, lseek, fsync, mkdir, rmdir, unlink};
use syscall::{O_RDWR, O_RDONLY, O_WRONLY, O_APPEND, O_CREAT, O_TRUNC, MODE_DIR, MODE_FILE, MODE_PERM, SEEK_SET, SEEK_CUR, SEEK_END, Stat};
/// A Unix-style file
#[derive(Debug)]
pub struct File {
/// The id for the file
fd: usize,
}
impl File {
/// Open a new file using a path
pub fn open<P: AsRef<Path>>(path: P) -> Result<File> {
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, O_RDONLY).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Create a new file using a path
pub fn create<P: AsRef<Path>>(path: P) -> Result<File> {
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, O_CREAT | O_RDWR | O_TRUNC | 0o664).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Duplicate the file
pub fn dup(&self, buf: &[u8]) -> Result<File> {
dup(self.fd, buf).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Get information about a file
pub fn metadata(&self) -> Result<Metadata> {
let mut stat = Stat::default();
try!(fstat(self.fd, &mut stat).map_err(|x| Error::from_sys(x)));
Ok(Metadata {
stat: stat
})
}
/// Get the canonical path of the file
pub fn path(&self) -> Result<PathBuf> {
let mut buf: [u8; 4096] = [0; 4096];
match fpath(self.fd, &mut buf) {
Ok(count) => Ok(PathBuf::from(unsafe { String::from_utf8_unchecked(Vec::from(&buf[0..count])) })),
Err(err) => Err(Error::from_sys(err)),
}
}
/// Flush the file data and metadata
pub fn sync_all(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Flush the file data
pub fn sync_data(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Truncates the file
pub fn set_len(&self, size: u64) -> Result<()> {
ftruncate(self.fd, size as usize).and(Ok(())).map_err(|x| Error::from_sys(x))
}
}
impl AsRawFd for File {
fn as_raw_fd(&self) -> RawFd {
self.fd
}
}
impl FromRawFd for File {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
File {
fd: fd
}
}
}
impl IntoRawFd for File {
fn into_raw_fd(self) -> RawFd {
let fd = self.fd;
mem::forget(self);
fd
}
}
impl Read for File {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
read(self.fd, buf).map_err(|x| Error::from_sys(x))
}
}
impl Write for File {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
write(self.fd, buf).map_err(|x| Error::from_sys(x))
}
fn flush(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
}
impl Seek for File {
/// Seek a given position
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
let (whence, offset) = match pos {
SeekFrom::Start(offset) => (SEEK_SET, offset as isize),
SeekFrom::Current(offset) => (SEEK_CUR, offset as isize),
SeekFrom::End(offset) => (SEEK_END, offset as isize),
};
lseek(self.fd, offset, whence).map(|position| position as u64).map_err(|x| Error::from_sys(x))
}
}
impl Drop for File {
fn drop(&mut self) {
let _ = close(self.fd);
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct FileType {
dir: bool,
file: bool,
}
impl FileType {
pub fn is_dir(&self) -> bool {
self.dir
}
pub fn is_file(&self) -> bool {
self.file
}
pub fn is_symlink(&self) -> bool {
false
}
}
impl ::os::unix::fs::FileTypeExt for FileType {
fn is_block_device(&self) -> bool { false }
fn is_char_device(&self) -> bool { false }
fn is_fifo(&self) -> bool { false }
fn is_socket(&self) -> bool { false }
}
pub struct OpenOptions {
read: bool,
write: bool,
append: bool,
create: bool,
truncate: bool,
mode: u16,
}
impl OpenOptions {
pub fn new() -> OpenOptions {
OpenOptions {
read: false,
write: false,
append: false,
create: false,
truncate: false,
mode: 0,
}
}
pub fn read(&mut self, read: bool) -> &mut OpenOptions {
self.read = read;
self
}
pub fn write(&mut self, write: bool) -> &mut OpenOptions {
self.write = write;
self
}
pub fn append(&mut self, append: bool) -> &mut OpenOptions {
self.append = append;
self
}
pub fn create(&mut self, create: bool) -> &mut OpenOptions {
self.create = create;
self
}
pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions {
self.truncate = truncate;
self
}
pub fn open<P: AsRef<Path>>(&self, path: P) -> Result<File> {
let mut flags = 0;
if self.read && self.write {
flags |= O_RDWR;
} else if self.read {
flags |= O_RDONLY;
} else if self.write {
flags |= O_WRONLY;
}
if self.append {
flags |= O_APPEND;
}
if self.create {
flags |= O_CREAT;
}
if self.truncate {
flags |= O_TRUNC;
}
flags |= (self.mode & MODE_PERM) as usize;
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, flags).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
}
impl ::os::unix::fs::OpenOptionsExt for OpenOptions {
fn mode(&mut self, mode: u32) -> &mut Self {
self.mode = mode as u16;
self
}
}
pub struct Metadata {
stat: Stat
}
impl Metadata {
pub fn file_type(&self) -> FileType {
FileType {
dir: self.stat.st_mode & MODE_DIR == MODE_DIR,
file: self.stat.st_mode & MODE_FILE == MODE_FILE
}
}
pub fn is_dir(&self) -> bool {
self.stat.st_mode & MODE_DIR == MODE_DIR
}
pub fn is_file(&self) -> bool {
self.stat.st_mode & MODE_FILE == MODE_FILE
}
pub fn len(&self) -> u64 {
self.stat.st_size
}
pub fn permissions(&self) -> Permissions {
Permissions {
mode: self.stat.st_mode & MODE_PERM
}
}
}
impl ::os::unix::fs::MetadataExt for Metadata {
fn mode(&self) -> u32 {
self.stat.st_mode as u32
}
fn uid(&self) -> u32 {
self.stat.st_uid
}
fn gid(&self) -> u32 {
self.stat.st_gid
}
fn size(&self) -> u64 {
self.stat.st_size
}
}
pub struct Permissions {
mode: u16
}
impl Permissions {
pub fn readonly(&self) -> bool {
self.mode & 0o222 == 0
}
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.mode &= !0o222;
} else {
self.mode |= 0o222;
}
}
}
impl ::os::unix::fs::PermissionsExt for Permissions {
fn mode(&self) -> u32 {
self.mode as u32
}
fn set_mode(&mut self, mode: u32) {
self.mode = mode as u16;
}
fn from_mode(mode: u32) -> Self {
Permissions {
mode: mode as u16
}
}
}
pub struct DirEntry {
path: PathBuf,
}
impl DirEntry {
pub fn file_name(&self) -> &Path {
unsafe { mem::transmute(self.path.file_name().unwrap().to_str().unwrap()) }
}
pub fn file_type(&self) -> Result<FileType> {
self.metadata().map(|metadata| metadata.file_type())
}
pub fn metadata(&self) -> Result<Metadata> {
metadata(&self.path)
}
pub fn path(&self) -> PathBuf {
self.path.clone()
}
}
pub struct ReadDir {
path: PathBuf,
file: BufReader<File>,
}
impl Iterator for ReadDir {
type Item = Result<DirEntry>;
fn next(&mut self) -> Option<Result<DirEntry>> {
let mut name = String::new();
match self.file.read_line(&mut name) {
Ok(0) => None,
Ok(_) => {
if name.ends_with('\n') {
name.pop();
}
let mut path = self.path.clone();
path.push(name);
Some(Ok(DirEntry {
path: path
}))
},
Err(err) => Some(Err(err))
}
}
}
/// Find the canonical path of a file
pub fn canonicalize<P: AsRef<Path>>(path: P) -> Result<PathBuf> {
match File::open(path) {
Ok(file) => {
match file.path() {
Ok(realpath) => Ok(realpath),
Err(err) => Err(err)
}
},
Err(err) => Err(err)
}
}
/// Get information about a file
pub fn metadata<P: AsRef<Path>>(path: P) -> Result<Metadata> {
try!(File::open(path)).metadata()
}
/// Get information about a file without following symlinks
/// Warning: Redox does not currently support symlinks
pub fn symlink_metadata<P: AsRef<Path>>(path: P) -> Result<Metadata> {
metadata(path)
}
/// Create a new directory, using a path
/// The default mode of the directory is 775
pub fn create_dir<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
mkdir(path_str, 0o775).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Recursively create a directory and all of its parent components if they are missing.
pub fn create_dir_all<P: AsRef<Path>>(path: P) -> Result<()> {
if let Some(parent) = path.as_ref().parent() {
try!(create_dir_all(&parent));
}
if let Err(_err) = metadata(&path) {
try!(create_dir(&path));
}
Ok(())
}
/// Copy the contents of one file to another
pub fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<u64> {
let mut infile = try!(File::open(from));
let mut outfile = try!(File::create(to));
io::copy(&mut infile, &mut outfile)
}
/// Rename a file or directory to a new name
pub fn rename<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<()> {
try!(copy(Path::new(from.as_ref()), to));
remove_file(from)
}
/// Return an iterator over the entries within a directory
pub fn read_dir<P: AsRef<Path>>(path: P) -> Result<ReadDir> {
let path_buf = path.as_ref().to_owned();
File::open(&path_buf).map(|file| ReadDir { path: path_buf, file: BufReader::new(file) })
}
/// Removes an existing, empty directory
pub fn remove_dir<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
rmdir(path_str).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Removes a directory at this path, after removing all its contents. Use carefully!
pub fn remove_dir_all<P: AsRef<Path>>(path: P) -> Result<()> {
for child in try!(read_dir(&path)) {
let child = try!(child);
if try!(child.file_type()).is_dir() {
try!(remove_dir_all(&child.path()));
} else |
}
remove_dir(path)
}
/// Removes a file from the filesystem
pub fn remove_file<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
unlink(path_str).and(Ok(())).map_err(|x| Error::from_sys(x))
}
| {
try!(remove_file(&child.path()));
} | conditional_block |
fs.rs | use core_collections::borrow::ToOwned;
use io::{self, BufRead, BufReader, Read, Error, Result, Write, Seek, SeekFrom};
use os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use mem;
use path::{PathBuf, Path};
use string::String;
use sys_common::AsInner;
use vec::Vec;
use syscall::{open, dup, close, fpath, fstat, ftruncate, read,
write, lseek, fsync, mkdir, rmdir, unlink};
use syscall::{O_RDWR, O_RDONLY, O_WRONLY, O_APPEND, O_CREAT, O_TRUNC, MODE_DIR, MODE_FILE, MODE_PERM, SEEK_SET, SEEK_CUR, SEEK_END, Stat};
/// A Unix-style file
#[derive(Debug)]
pub struct File {
/// The id for the file
fd: usize,
}
impl File {
/// Open a new file using a path
pub fn open<P: AsRef<Path>>(path: P) -> Result<File> {
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, O_RDONLY).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Create a new file using a path
pub fn create<P: AsRef<Path>>(path: P) -> Result<File> {
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, O_CREAT | O_RDWR | O_TRUNC | 0o664).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Duplicate the file
pub fn dup(&self, buf: &[u8]) -> Result<File> {
dup(self.fd, buf).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Get information about a file
pub fn metadata(&self) -> Result<Metadata> {
let mut stat = Stat::default();
try!(fstat(self.fd, &mut stat).map_err(|x| Error::from_sys(x)));
Ok(Metadata {
stat: stat
})
}
/// Get the canonical path of the file
pub fn path(&self) -> Result<PathBuf> {
let mut buf: [u8; 4096] = [0; 4096];
match fpath(self.fd, &mut buf) {
Ok(count) => Ok(PathBuf::from(unsafe { String::from_utf8_unchecked(Vec::from(&buf[0..count])) })),
Err(err) => Err(Error::from_sys(err)),
}
}
/// Flush the file data and metadata
pub fn sync_all(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Flush the file data
pub fn sync_data(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Truncates the file
pub fn set_len(&self, size: u64) -> Result<()> {
ftruncate(self.fd, size as usize).and(Ok(())).map_err(|x| Error::from_sys(x))
}
}
impl AsRawFd for File {
fn as_raw_fd(&self) -> RawFd {
self.fd
}
}
impl FromRawFd for File {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
File {
fd: fd
}
}
}
impl IntoRawFd for File {
fn into_raw_fd(self) -> RawFd {
let fd = self.fd;
mem::forget(self);
fd
}
}
impl Read for File {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
read(self.fd, buf).map_err(|x| Error::from_sys(x))
}
}
impl Write for File {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
write(self.fd, buf).map_err(|x| Error::from_sys(x))
}
fn flush(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
}
impl Seek for File {
/// Seek a given position
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
let (whence, offset) = match pos {
SeekFrom::Start(offset) => (SEEK_SET, offset as isize),
SeekFrom::Current(offset) => (SEEK_CUR, offset as isize),
SeekFrom::End(offset) => (SEEK_END, offset as isize),
};
lseek(self.fd, offset, whence).map(|position| position as u64).map_err(|x| Error::from_sys(x))
}
}
impl Drop for File {
fn drop(&mut self) {
let _ = close(self.fd);
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct FileType {
dir: bool,
file: bool,
}
impl FileType {
pub fn is_dir(&self) -> bool {
self.dir
}
pub fn is_file(&self) -> bool {
self.file
}
pub fn is_symlink(&self) -> bool {
false
}
}
impl ::os::unix::fs::FileTypeExt for FileType {
fn is_block_device(&self) -> bool { false }
fn is_char_device(&self) -> bool { false }
fn is_fifo(&self) -> bool { false }
fn is_socket(&self) -> bool { false }
}
pub struct OpenOptions {
read: bool,
write: bool,
append: bool,
create: bool,
truncate: bool,
mode: u16,
}
impl OpenOptions {
pub fn new() -> OpenOptions {
OpenOptions {
read: false,
write: false,
append: false,
create: false,
truncate: false,
mode: 0,
}
}
pub fn read(&mut self, read: bool) -> &mut OpenOptions {
self.read = read;
self
}
pub fn write(&mut self, write: bool) -> &mut OpenOptions {
self.write = write;
self
}
pub fn append(&mut self, append: bool) -> &mut OpenOptions {
self.append = append;
self
}
pub fn create(&mut self, create: bool) -> &mut OpenOptions {
self.create = create;
self
}
pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions {
self.truncate = truncate;
self
}
pub fn open<P: AsRef<Path>>(&self, path: P) -> Result<File> {
let mut flags = 0;
if self.read && self.write {
flags |= O_RDWR;
} else if self.read {
flags |= O_RDONLY;
} else if self.write {
flags |= O_WRONLY;
}
if self.append {
flags |= O_APPEND;
}
if self.create {
flags |= O_CREAT;
}
if self.truncate {
flags |= O_TRUNC;
}
flags |= (self.mode & MODE_PERM) as usize;
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, flags).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
}
impl ::os::unix::fs::OpenOptionsExt for OpenOptions {
fn mode(&mut self, mode: u32) -> &mut Self {
self.mode = mode as u16;
self
}
}
pub struct Metadata {
stat: Stat
}
impl Metadata {
pub fn file_type(&self) -> FileType {
FileType {
dir: self.stat.st_mode & MODE_DIR == MODE_DIR,
file: self.stat.st_mode & MODE_FILE == MODE_FILE
}
}
pub fn is_dir(&self) -> bool {
self.stat.st_mode & MODE_DIR == MODE_DIR
}
pub fn is_file(&self) -> bool {
self.stat.st_mode & MODE_FILE == MODE_FILE
}
pub fn len(&self) -> u64 {
self.stat.st_size
}
pub fn permissions(&self) -> Permissions {
Permissions {
mode: self.stat.st_mode & MODE_PERM
}
}
}
impl ::os::unix::fs::MetadataExt for Metadata {
fn mode(&self) -> u32 {
self.stat.st_mode as u32
}
fn uid(&self) -> u32 {
self.stat.st_uid
}
fn gid(&self) -> u32 {
self.stat.st_gid
}
fn size(&self) -> u64 {
self.stat.st_size
}
}
pub struct Permissions {
mode: u16
}
impl Permissions {
pub fn readonly(&self) -> bool {
self.mode & 0o222 == 0
}
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.mode &= !0o222;
} else {
self.mode |= 0o222;
}
}
}
impl ::os::unix::fs::PermissionsExt for Permissions {
fn mode(&self) -> u32 {
self.mode as u32
}
fn set_mode(&mut self, mode: u32) { | self.mode = mode as u16;
}
fn from_mode(mode: u32) -> Self {
Permissions {
mode: mode as u16
}
}
}
pub struct DirEntry {
path: PathBuf,
}
impl DirEntry {
pub fn file_name(&self) -> &Path {
unsafe { mem::transmute(self.path.file_name().unwrap().to_str().unwrap()) }
}
pub fn file_type(&self) -> Result<FileType> {
self.metadata().map(|metadata| metadata.file_type())
}
pub fn metadata(&self) -> Result<Metadata> {
metadata(&self.path)
}
pub fn path(&self) -> PathBuf {
self.path.clone()
}
}
pub struct ReadDir {
path: PathBuf,
file: BufReader<File>,
}
impl Iterator for ReadDir {
type Item = Result<DirEntry>;
fn next(&mut self) -> Option<Result<DirEntry>> {
let mut name = String::new();
match self.file.read_line(&mut name) {
Ok(0) => None,
Ok(_) => {
if name.ends_with('\n') {
name.pop();
}
let mut path = self.path.clone();
path.push(name);
Some(Ok(DirEntry {
path: path
}))
},
Err(err) => Some(Err(err))
}
}
}
/// Find the canonical path of a file
pub fn canonicalize<P: AsRef<Path>>(path: P) -> Result<PathBuf> {
match File::open(path) {
Ok(file) => {
match file.path() {
Ok(realpath) => Ok(realpath),
Err(err) => Err(err)
}
},
Err(err) => Err(err)
}
}
/// Get information about a file
pub fn metadata<P: AsRef<Path>>(path: P) -> Result<Metadata> {
try!(File::open(path)).metadata()
}
/// Get information about a file without following symlinks
/// Warning: Redox does not currently support symlinks
pub fn symlink_metadata<P: AsRef<Path>>(path: P) -> Result<Metadata> {
metadata(path)
}
/// Create a new directory, using a path
/// The default mode of the directory is 775
pub fn create_dir<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
mkdir(path_str, 0o775).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Recursively create a directory and all of its parent components if they are missing.
pub fn create_dir_all<P: AsRef<Path>>(path: P) -> Result<()> {
if let Some(parent) = path.as_ref().parent() {
try!(create_dir_all(&parent));
}
if let Err(_err) = metadata(&path) {
try!(create_dir(&path));
}
Ok(())
}
/// Copy the contents of one file to another
pub fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<u64> {
let mut infile = try!(File::open(from));
let mut outfile = try!(File::create(to));
io::copy(&mut infile, &mut outfile)
}
/// Rename a file or directory to a new name
pub fn rename<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<()> {
try!(copy(Path::new(from.as_ref()), to));
remove_file(from)
}
/// Return an iterator over the entries within a directory
pub fn read_dir<P: AsRef<Path>>(path: P) -> Result<ReadDir> {
let path_buf = path.as_ref().to_owned();
File::open(&path_buf).map(|file| ReadDir { path: path_buf, file: BufReader::new(file) })
}
/// Removes an existing, empty directory
pub fn remove_dir<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
rmdir(path_str).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Removes a directory at this path, after removing all its contents. Use carefully!
pub fn remove_dir_all<P: AsRef<Path>>(path: P) -> Result<()> {
for child in try!(read_dir(&path)) {
let child = try!(child);
if try!(child.file_type()).is_dir() {
try!(remove_dir_all(&child.path()));
} else {
try!(remove_file(&child.path()));
}
}
remove_dir(path)
}
/// Removes a file from the filesystem
pub fn remove_file<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
unlink(path_str).and(Ok(())).map_err(|x| Error::from_sys(x))
} | random_line_split |
|
fs.rs | use core_collections::borrow::ToOwned;
use io::{self, BufRead, BufReader, Read, Error, Result, Write, Seek, SeekFrom};
use os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use mem;
use path::{PathBuf, Path};
use string::String;
use sys_common::AsInner;
use vec::Vec;
use syscall::{open, dup, close, fpath, fstat, ftruncate, read,
write, lseek, fsync, mkdir, rmdir, unlink};
use syscall::{O_RDWR, O_RDONLY, O_WRONLY, O_APPEND, O_CREAT, O_TRUNC, MODE_DIR, MODE_FILE, MODE_PERM, SEEK_SET, SEEK_CUR, SEEK_END, Stat};
/// A Unix-style file
#[derive(Debug)]
pub struct File {
/// The id for the file
fd: usize,
}
impl File {
/// Open a new file using a path
pub fn open<P: AsRef<Path>>(path: P) -> Result<File> {
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, O_RDONLY).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Create a new file using a path
pub fn create<P: AsRef<Path>>(path: P) -> Result<File> {
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, O_CREAT | O_RDWR | O_TRUNC | 0o664).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Duplicate the file
pub fn dup(&self, buf: &[u8]) -> Result<File> {
dup(self.fd, buf).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Get information about a file
pub fn metadata(&self) -> Result<Metadata> {
let mut stat = Stat::default();
try!(fstat(self.fd, &mut stat).map_err(|x| Error::from_sys(x)));
Ok(Metadata {
stat: stat
})
}
/// Get the canonical path of the file
pub fn path(&self) -> Result<PathBuf> {
let mut buf: [u8; 4096] = [0; 4096];
match fpath(self.fd, &mut buf) {
Ok(count) => Ok(PathBuf::from(unsafe { String::from_utf8_unchecked(Vec::from(&buf[0..count])) })),
Err(err) => Err(Error::from_sys(err)),
}
}
/// Flush the file data and metadata
pub fn sync_all(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Flush the file data
pub fn sync_data(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Truncates the file
pub fn set_len(&self, size: u64) -> Result<()> {
ftruncate(self.fd, size as usize).and(Ok(())).map_err(|x| Error::from_sys(x))
}
}
impl AsRawFd for File {
fn as_raw_fd(&self) -> RawFd |
}
impl FromRawFd for File {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
File {
fd: fd
}
}
}
impl IntoRawFd for File {
fn into_raw_fd(self) -> RawFd {
let fd = self.fd;
mem::forget(self);
fd
}
}
impl Read for File {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
read(self.fd, buf).map_err(|x| Error::from_sys(x))
}
}
impl Write for File {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
write(self.fd, buf).map_err(|x| Error::from_sys(x))
}
fn flush(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
}
impl Seek for File {
/// Seek a given position
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
let (whence, offset) = match pos {
SeekFrom::Start(offset) => (SEEK_SET, offset as isize),
SeekFrom::Current(offset) => (SEEK_CUR, offset as isize),
SeekFrom::End(offset) => (SEEK_END, offset as isize),
};
lseek(self.fd, offset, whence).map(|position| position as u64).map_err(|x| Error::from_sys(x))
}
}
impl Drop for File {
fn drop(&mut self) {
let _ = close(self.fd);
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct FileType {
dir: bool,
file: bool,
}
impl FileType {
pub fn is_dir(&self) -> bool {
self.dir
}
pub fn is_file(&self) -> bool {
self.file
}
pub fn is_symlink(&self) -> bool {
false
}
}
impl ::os::unix::fs::FileTypeExt for FileType {
fn is_block_device(&self) -> bool { false }
fn is_char_device(&self) -> bool { false }
fn is_fifo(&self) -> bool { false }
fn is_socket(&self) -> bool { false }
}
pub struct OpenOptions {
read: bool,
write: bool,
append: bool,
create: bool,
truncate: bool,
mode: u16,
}
impl OpenOptions {
pub fn new() -> OpenOptions {
OpenOptions {
read: false,
write: false,
append: false,
create: false,
truncate: false,
mode: 0,
}
}
pub fn read(&mut self, read: bool) -> &mut OpenOptions {
self.read = read;
self
}
pub fn write(&mut self, write: bool) -> &mut OpenOptions {
self.write = write;
self
}
pub fn append(&mut self, append: bool) -> &mut OpenOptions {
self.append = append;
self
}
pub fn create(&mut self, create: bool) -> &mut OpenOptions {
self.create = create;
self
}
pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions {
self.truncate = truncate;
self
}
pub fn open<P: AsRef<Path>>(&self, path: P) -> Result<File> {
let mut flags = 0;
if self.read && self.write {
flags |= O_RDWR;
} else if self.read {
flags |= O_RDONLY;
} else if self.write {
flags |= O_WRONLY;
}
if self.append {
flags |= O_APPEND;
}
if self.create {
flags |= O_CREAT;
}
if self.truncate {
flags |= O_TRUNC;
}
flags |= (self.mode & MODE_PERM) as usize;
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, flags).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
}
impl ::os::unix::fs::OpenOptionsExt for OpenOptions {
fn mode(&mut self, mode: u32) -> &mut Self {
self.mode = mode as u16;
self
}
}
pub struct Metadata {
stat: Stat
}
impl Metadata {
pub fn file_type(&self) -> FileType {
FileType {
dir: self.stat.st_mode & MODE_DIR == MODE_DIR,
file: self.stat.st_mode & MODE_FILE == MODE_FILE
}
}
pub fn is_dir(&self) -> bool {
self.stat.st_mode & MODE_DIR == MODE_DIR
}
pub fn is_file(&self) -> bool {
self.stat.st_mode & MODE_FILE == MODE_FILE
}
pub fn len(&self) -> u64 {
self.stat.st_size
}
pub fn permissions(&self) -> Permissions {
Permissions {
mode: self.stat.st_mode & MODE_PERM
}
}
}
impl ::os::unix::fs::MetadataExt for Metadata {
fn mode(&self) -> u32 {
self.stat.st_mode as u32
}
fn uid(&self) -> u32 {
self.stat.st_uid
}
fn gid(&self) -> u32 {
self.stat.st_gid
}
fn size(&self) -> u64 {
self.stat.st_size
}
}
pub struct Permissions {
mode: u16
}
impl Permissions {
pub fn readonly(&self) -> bool {
self.mode & 0o222 == 0
}
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.mode &= !0o222;
} else {
self.mode |= 0o222;
}
}
}
impl ::os::unix::fs::PermissionsExt for Permissions {
fn mode(&self) -> u32 {
self.mode as u32
}
fn set_mode(&mut self, mode: u32) {
self.mode = mode as u16;
}
fn from_mode(mode: u32) -> Self {
Permissions {
mode: mode as u16
}
}
}
pub struct DirEntry {
path: PathBuf,
}
impl DirEntry {
pub fn file_name(&self) -> &Path {
unsafe { mem::transmute(self.path.file_name().unwrap().to_str().unwrap()) }
}
pub fn file_type(&self) -> Result<FileType> {
self.metadata().map(|metadata| metadata.file_type())
}
pub fn metadata(&self) -> Result<Metadata> {
metadata(&self.path)
}
pub fn path(&self) -> PathBuf {
self.path.clone()
}
}
pub struct ReadDir {
path: PathBuf,
file: BufReader<File>,
}
impl Iterator for ReadDir {
type Item = Result<DirEntry>;
fn next(&mut self) -> Option<Result<DirEntry>> {
let mut name = String::new();
match self.file.read_line(&mut name) {
Ok(0) => None,
Ok(_) => {
if name.ends_with('\n') {
name.pop();
}
let mut path = self.path.clone();
path.push(name);
Some(Ok(DirEntry {
path: path
}))
},
Err(err) => Some(Err(err))
}
}
}
/// Find the canonical path of a file
pub fn canonicalize<P: AsRef<Path>>(path: P) -> Result<PathBuf> {
match File::open(path) {
Ok(file) => {
match file.path() {
Ok(realpath) => Ok(realpath),
Err(err) => Err(err)
}
},
Err(err) => Err(err)
}
}
/// Get information about a file
pub fn metadata<P: AsRef<Path>>(path: P) -> Result<Metadata> {
try!(File::open(path)).metadata()
}
/// Get information about a file without following symlinks
/// Warning: Redox does not currently support symlinks
pub fn symlink_metadata<P: AsRef<Path>>(path: P) -> Result<Metadata> {
metadata(path)
}
/// Create a new directory, using a path
/// The default mode of the directory is 775
pub fn create_dir<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
mkdir(path_str, 0o775).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Recursively create a directory and all of its parent components if they are missing.
pub fn create_dir_all<P: AsRef<Path>>(path: P) -> Result<()> {
if let Some(parent) = path.as_ref().parent() {
try!(create_dir_all(&parent));
}
if let Err(_err) = metadata(&path) {
try!(create_dir(&path));
}
Ok(())
}
/// Copy the contents of one file to another
pub fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<u64> {
let mut infile = try!(File::open(from));
let mut outfile = try!(File::create(to));
io::copy(&mut infile, &mut outfile)
}
/// Rename a file or directory to a new name
pub fn rename<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<()> {
try!(copy(Path::new(from.as_ref()), to));
remove_file(from)
}
/// Return an iterator over the entries within a directory
pub fn read_dir<P: AsRef<Path>>(path: P) -> Result<ReadDir> {
let path_buf = path.as_ref().to_owned();
File::open(&path_buf).map(|file| ReadDir { path: path_buf, file: BufReader::new(file) })
}
/// Removes an existing, empty directory
pub fn remove_dir<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
rmdir(path_str).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Removes a directory at this path, after removing all its contents. Use carefully!
pub fn remove_dir_all<P: AsRef<Path>>(path: P) -> Result<()> {
for child in try!(read_dir(&path)) {
let child = try!(child);
if try!(child.file_type()).is_dir() {
try!(remove_dir_all(&child.path()));
} else {
try!(remove_file(&child.path()));
}
}
remove_dir(path)
}
/// Removes a file from the filesystem
pub fn remove_file<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
unlink(path_str).and(Ok(())).map_err(|x| Error::from_sys(x))
}
| {
self.fd
} | identifier_body |
fs.rs | use core_collections::borrow::ToOwned;
use io::{self, BufRead, BufReader, Read, Error, Result, Write, Seek, SeekFrom};
use os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
use mem;
use path::{PathBuf, Path};
use string::String;
use sys_common::AsInner;
use vec::Vec;
use syscall::{open, dup, close, fpath, fstat, ftruncate, read,
write, lseek, fsync, mkdir, rmdir, unlink};
use syscall::{O_RDWR, O_RDONLY, O_WRONLY, O_APPEND, O_CREAT, O_TRUNC, MODE_DIR, MODE_FILE, MODE_PERM, SEEK_SET, SEEK_CUR, SEEK_END, Stat};
/// A Unix-style file
#[derive(Debug)]
pub struct File {
/// The id for the file
fd: usize,
}
impl File {
/// Open a new file using a path
pub fn open<P: AsRef<Path>>(path: P) -> Result<File> {
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, O_RDONLY).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Create a new file using a path
pub fn create<P: AsRef<Path>>(path: P) -> Result<File> {
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, O_CREAT | O_RDWR | O_TRUNC | 0o664).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Duplicate the file
pub fn dup(&self, buf: &[u8]) -> Result<File> {
dup(self.fd, buf).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
/// Get information about a file
pub fn metadata(&self) -> Result<Metadata> {
let mut stat = Stat::default();
try!(fstat(self.fd, &mut stat).map_err(|x| Error::from_sys(x)));
Ok(Metadata {
stat: stat
})
}
/// Get the canonical path of the file
pub fn path(&self) -> Result<PathBuf> {
let mut buf: [u8; 4096] = [0; 4096];
match fpath(self.fd, &mut buf) {
Ok(count) => Ok(PathBuf::from(unsafe { String::from_utf8_unchecked(Vec::from(&buf[0..count])) })),
Err(err) => Err(Error::from_sys(err)),
}
}
/// Flush the file data and metadata
pub fn sync_all(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Flush the file data
pub fn sync_data(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Truncates the file
pub fn set_len(&self, size: u64) -> Result<()> {
ftruncate(self.fd, size as usize).and(Ok(())).map_err(|x| Error::from_sys(x))
}
}
impl AsRawFd for File {
fn as_raw_fd(&self) -> RawFd {
self.fd
}
}
impl FromRawFd for File {
unsafe fn from_raw_fd(fd: RawFd) -> Self {
File {
fd: fd
}
}
}
impl IntoRawFd for File {
fn into_raw_fd(self) -> RawFd {
let fd = self.fd;
mem::forget(self);
fd
}
}
impl Read for File {
fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
read(self.fd, buf).map_err(|x| Error::from_sys(x))
}
}
impl Write for File {
fn write(&mut self, buf: &[u8]) -> Result<usize> {
write(self.fd, buf).map_err(|x| Error::from_sys(x))
}
fn flush(&mut self) -> Result<()> {
fsync(self.fd).and(Ok(())).map_err(|x| Error::from_sys(x))
}
}
impl Seek for File {
/// Seek a given position
fn seek(&mut self, pos: SeekFrom) -> Result<u64> {
let (whence, offset) = match pos {
SeekFrom::Start(offset) => (SEEK_SET, offset as isize),
SeekFrom::Current(offset) => (SEEK_CUR, offset as isize),
SeekFrom::End(offset) => (SEEK_END, offset as isize),
};
lseek(self.fd, offset, whence).map(|position| position as u64).map_err(|x| Error::from_sys(x))
}
}
impl Drop for File {
fn drop(&mut self) {
let _ = close(self.fd);
}
}
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct FileType {
dir: bool,
file: bool,
}
impl FileType {
pub fn is_dir(&self) -> bool {
self.dir
}
pub fn is_file(&self) -> bool {
self.file
}
pub fn is_symlink(&self) -> bool {
false
}
}
impl ::os::unix::fs::FileTypeExt for FileType {
fn is_block_device(&self) -> bool { false }
fn is_char_device(&self) -> bool { false }
fn is_fifo(&self) -> bool { false }
fn is_socket(&self) -> bool { false }
}
pub struct OpenOptions {
read: bool,
write: bool,
append: bool,
create: bool,
truncate: bool,
mode: u16,
}
impl OpenOptions {
pub fn new() -> OpenOptions {
OpenOptions {
read: false,
write: false,
append: false,
create: false,
truncate: false,
mode: 0,
}
}
pub fn read(&mut self, read: bool) -> &mut OpenOptions {
self.read = read;
self
}
pub fn write(&mut self, write: bool) -> &mut OpenOptions {
self.write = write;
self
}
pub fn append(&mut self, append: bool) -> &mut OpenOptions {
self.append = append;
self
}
pub fn create(&mut self, create: bool) -> &mut OpenOptions {
self.create = create;
self
}
pub fn truncate(&mut self, truncate: bool) -> &mut OpenOptions {
self.truncate = truncate;
self
}
pub fn open<P: AsRef<Path>>(&self, path: P) -> Result<File> {
let mut flags = 0;
if self.read && self.write {
flags |= O_RDWR;
} else if self.read {
flags |= O_RDONLY;
} else if self.write {
flags |= O_WRONLY;
}
if self.append {
flags |= O_APPEND;
}
if self.create {
flags |= O_CREAT;
}
if self.truncate {
flags |= O_TRUNC;
}
flags |= (self.mode & MODE_PERM) as usize;
let path_str = path.as_ref().as_os_str().as_inner();
open(path_str, flags).map(|fd| unsafe { File::from_raw_fd(fd) }).map_err(|x| Error::from_sys(x))
}
}
impl ::os::unix::fs::OpenOptionsExt for OpenOptions {
fn mode(&mut self, mode: u32) -> &mut Self {
self.mode = mode as u16;
self
}
}
pub struct Metadata {
stat: Stat
}
impl Metadata {
pub fn file_type(&self) -> FileType {
FileType {
dir: self.stat.st_mode & MODE_DIR == MODE_DIR,
file: self.stat.st_mode & MODE_FILE == MODE_FILE
}
}
pub fn is_dir(&self) -> bool {
self.stat.st_mode & MODE_DIR == MODE_DIR
}
pub fn is_file(&self) -> bool {
self.stat.st_mode & MODE_FILE == MODE_FILE
}
pub fn len(&self) -> u64 {
self.stat.st_size
}
pub fn permissions(&self) -> Permissions {
Permissions {
mode: self.stat.st_mode & MODE_PERM
}
}
}
impl ::os::unix::fs::MetadataExt for Metadata {
fn mode(&self) -> u32 {
self.stat.st_mode as u32
}
fn uid(&self) -> u32 {
self.stat.st_uid
}
fn gid(&self) -> u32 {
self.stat.st_gid
}
fn size(&self) -> u64 {
self.stat.st_size
}
}
pub struct Permissions {
mode: u16
}
impl Permissions {
pub fn readonly(&self) -> bool {
self.mode & 0o222 == 0
}
pub fn set_readonly(&mut self, readonly: bool) {
if readonly {
self.mode &= !0o222;
} else {
self.mode |= 0o222;
}
}
}
impl ::os::unix::fs::PermissionsExt for Permissions {
fn mode(&self) -> u32 {
self.mode as u32
}
fn set_mode(&mut self, mode: u32) {
self.mode = mode as u16;
}
fn from_mode(mode: u32) -> Self {
Permissions {
mode: mode as u16
}
}
}
pub struct DirEntry {
path: PathBuf,
}
impl DirEntry {
pub fn file_name(&self) -> &Path {
unsafe { mem::transmute(self.path.file_name().unwrap().to_str().unwrap()) }
}
pub fn file_type(&self) -> Result<FileType> {
self.metadata().map(|metadata| metadata.file_type())
}
pub fn metadata(&self) -> Result<Metadata> {
metadata(&self.path)
}
pub fn path(&self) -> PathBuf {
self.path.clone()
}
}
pub struct ReadDir {
path: PathBuf,
file: BufReader<File>,
}
impl Iterator for ReadDir {
type Item = Result<DirEntry>;
fn next(&mut self) -> Option<Result<DirEntry>> {
let mut name = String::new();
match self.file.read_line(&mut name) {
Ok(0) => None,
Ok(_) => {
if name.ends_with('\n') {
name.pop();
}
let mut path = self.path.clone();
path.push(name);
Some(Ok(DirEntry {
path: path
}))
},
Err(err) => Some(Err(err))
}
}
}
/// Find the canonical path of a file
pub fn canonicalize<P: AsRef<Path>>(path: P) -> Result<PathBuf> {
match File::open(path) {
Ok(file) => {
match file.path() {
Ok(realpath) => Ok(realpath),
Err(err) => Err(err)
}
},
Err(err) => Err(err)
}
}
/// Get information about a file
pub fn metadata<P: AsRef<Path>>(path: P) -> Result<Metadata> {
try!(File::open(path)).metadata()
}
/// Get information about a file without following symlinks
/// Warning: Redox does not currently support symlinks
pub fn symlink_metadata<P: AsRef<Path>>(path: P) -> Result<Metadata> {
metadata(path)
}
/// Create a new directory, using a path
/// The default mode of the directory is 775
pub fn create_dir<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
mkdir(path_str, 0o775).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Recursively create a directory and all of its parent components if they are missing.
pub fn | <P: AsRef<Path>>(path: P) -> Result<()> {
if let Some(parent) = path.as_ref().parent() {
try!(create_dir_all(&parent));
}
if let Err(_err) = metadata(&path) {
try!(create_dir(&path));
}
Ok(())
}
/// Copy the contents of one file to another
pub fn copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<u64> {
let mut infile = try!(File::open(from));
let mut outfile = try!(File::create(to));
io::copy(&mut infile, &mut outfile)
}
/// Rename a file or directory to a new name
pub fn rename<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> Result<()> {
try!(copy(Path::new(from.as_ref()), to));
remove_file(from)
}
/// Return an iterator over the entries within a directory
pub fn read_dir<P: AsRef<Path>>(path: P) -> Result<ReadDir> {
let path_buf = path.as_ref().to_owned();
File::open(&path_buf).map(|file| ReadDir { path: path_buf, file: BufReader::new(file) })
}
/// Removes an existing, empty directory
pub fn remove_dir<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
rmdir(path_str).and(Ok(())).map_err(|x| Error::from_sys(x))
}
/// Removes a directory at this path, after removing all its contents. Use carefully!
pub fn remove_dir_all<P: AsRef<Path>>(path: P) -> Result<()> {
for child in try!(read_dir(&path)) {
let child = try!(child);
if try!(child.file_type()).is_dir() {
try!(remove_dir_all(&child.path()));
} else {
try!(remove_file(&child.path()));
}
}
remove_dir(path)
}
/// Removes a file from the filesystem
pub fn remove_file<P: AsRef<Path>>(path: P) -> Result<()> {
let path_str = path.as_ref().as_os_str().as_inner();
unlink(path_str).and(Ok(())).map_err(|x| Error::from_sys(x))
}
| create_dir_all | identifier_name |
dnp3.go | // Copyright 2019, The GoPacket Authors, All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
//
//******************************************************************************
package layers
import (
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"strconv"
"github.com/google/gopacket"
)
//******************************************************************************
//
// DNP3 Decoding Layer
// ------------------------------------------
// This file provides a GoPacket decoding layer for DNP3.
//
//******************************************************************************
// DNP3 is the struct for storing DNP3 packet headers.
const (
MIN_HEADER_LENGTH = 10
START_FIELD = 0x0564
)
var FCodes = map[byte]string{
0: "Confirm",
1: "Read",
2: "Write",
3: "Select",
4: "Operate",
5: "Direct Operate",
6: "Direct Operate No ACK",
7: "Immediate Freeze",
8: "Immediate Freeze No ACK",
9: "Freeze and Clear",
10: "Freeze and Clear No ACK",
11: "Freeze With Time",
12: "Freeze With Time No ACK",
13: "Cold Restart",
14: "Warm Restart",
15: "Initialize Data",
16: "Initialize Application",
17: "Start Application",
18: "Stop Application",
19: "Save Configuration",
20: "Enable Spontaneous Msg",
21: "Disable Spontaneous Msg",
22: "Assign Classes",
23: "Delay Measurement",
24: "Record Current Time",
25: "Open File",
26: "Close File",
27: "Delete File",
28: "Get File Info",
29: "Authenticate File",
30: "Abort File",
31: "Activate Config",
32: "Authentication Request",
33: "Authentication Error",
129: "Response",
130: "Unsolicited Response",
131: "Authentication Response",
}
// "-" Reserved or Obsolete
var PfCodes = map[byte]string{
0: "Reset of Remote Link", // 0x10
1: "Reset of User Process",
2: "Test Function For Link", // 0x12
3: "User Data", // 0x13
4: "Unconfirmed User Data", // 0x14
5: "-",
6: "-",
7: "-",
8: "-",
9: "Request Link Status", // 0x19
10: "-",
11: "-",
12: "-",
13: "-",
14: "-",
15: "-",
}
var SfCodes = map[byte]string{
0: "ACK", // 0x00
1: "NAK", // 0x01
2: "-",
3: "-",
4: "-",
5: "-",
6: "-",
7: "-",
8: "-",
9: "-",
10: "-",
11: "Status of Link", // 0x0B
12: "-",
13: "-",
14: "Link Service Not Functioning",
15: "Link Service Not Used or Implemented", // 0x0F
}
/***************************************************************************/
/* Application Layer Internal Indication (IIN) bits */
/* 2 Bytes, message formatting: [First Octet] | [Second Octet] */
/***************************************************************************/
var IINCodes = map[string]string{
/* Octet 1 */
"0x0100": "Broadcast message rx'd",
"0x0200": "Class 1 Data Available",
"0x0400": "Class 2 Data Available",
"0x0800": "Class 3 Data Available",
"0x1000": "Time Sync Req'd from Master",
"0x2000": "Outputs in Local Mode",
"0x4000": "Device Trouble",
"0x8000": "Device Restart",
/* Octet 2 */
"0x0001": "Function code not implemented",
"0x0002": "Requested Objects Unknown",
"0x0004": "Parameters Invalid or Out of Range",
"0x0008": "Event Buffer Overflow",
"0x0010": "Operation Already Executing",
"0x0020": "Device Configuration Corrupt",
"0x0040": "Reserved",
"0x0080": "Reserved",
}
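// Illustrative sketch, not part of the original decoder: the IIN field can carry
// several indications at once, so looking up the whole two-octet value in IINCodes
// only matches when exactly one bit is set. A hypothetical helper that reports every
// active bit, using the same "0x%02x%02x"-style keys as the map above, could look
// like this (the name decodeIINBits is an assumption for the example).
func decodeIINBits(octet1, octet2 byte) []string {
	var indications []string
	for bit := uint(0); bit < 8; bit++ {
		mask := byte(1) << bit
		if octet1&mask != 0 {
			indications = append(indications, IINCodes[fmt.Sprintf("0x%02x00", mask)])
		}
		if octet2&mask != 0 {
			indications = append(indications, IINCodes[fmt.Sprintf("0x00%02x", mask)])
		}
	}
	return indications
}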
/***************************************************************************/
/* Application Layer Object Prefix codes bits */
/***************************************************************************/
var ObjPrefixCodes = map[byte]string{
0: "Objects packed without a prefix",
1: "Objects prefixed with 1-octet index",
2: "Objects prefixed with 2-octet index",
3: "Objects prefixed with 4-octet index",
4: "Objects prefixed with 1-octet object size",
5: "Objects prefixed with 2-octet object size",
6: "Objects prefixed with 4-octet object size",
7: "Reserved",
}
/***************************************************************************/
/* Application Layer Object Range Specifier codes bits                     */
/***************************************************************************/
var ObjRangeSpecifierCodes = map[byte]string{
0: "8-bit Start and Stop Indices in Range Field",
1: "16-bit Start and Stop Indices in Range Field",
2: "32-bit Start and Stop Indices in Range Field",
3: "8-bit Absolute Address in Range Field",
4: "16-bit Absolute Address in Range Field",
5: "32-bit Absolute Address in Range Field",
6: "Length of Range field is 0 (no range field)",
7: "8-bit Single Field Quantity",
8: "16-bit Single Field Quantity",
9: "32-bit Single Field Quantity",
10: "Reserved",
11: "Free-format Qualifier, range field has 1 octet count of objects",
12: "Reserved",
13: "Reserved",
14: "Reserved",
15: "Reserved",
}
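// Illustrative sketch, not part of the original decoder: the qualifier octet of an
// object header carries the prefix code in bits 4-6 and the range specifier code in
// bits 0-3, which is how ObjPrefixCodes and ObjRangeSpecifierCodes are meant to be
// indexed. For example, a qualifier of 0x28 splits into prefix code 2 ("Objects
// prefixed with 2-octet index") and range specifier code 8 ("16-bit Single Field
// Quantity"). The helper name splitQualifier is an assumption for the example.
func splitQualifier(qualifier byte) (prefixCode, rangeCode byte) {
	prefixCode = (qualifier & 0x70) >> 4
	rangeCode = qualifier & 0x0F
	return prefixCode, rangeCode
}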
var (
errDNP3PacketTooShort = errors.New("DNP3 packet too short")
)
type DNP3 struct {
BaseLayer // Stores the packet bytes and payload bytes.
DNP3DataLinkLayer DNP3DataLinkLayer
DNP3TransportLayer DNP3TransportLayer
DNP3ApplicationLayer DNP3ApplicationLayer
SomeByte byte
AnotherByte byte
restOfData []byte
}
type DNP3DataLinkLayer struct {
Start string
Length int
Control struct {
ControlByte string
IsMaster int `json:"Is Master"`
PRM int `json:"Primary"`
FCB int `json:"Frame Count Bit"`
FCV int `json:"Frame Count Valid"`
FUNC string `json:"Function Code"`
}
Destination int
Source int
CRC string
}
type DNP3TransportLayer struct {
TransportByte string
Final int
First int
Sequence int
}
type DNP3ApplicationLayer struct {
Control struct {
ControlByte string
First int
Final int
Confirm int
Unsolicited int
Sequence int
}
Function string `json:"Function Code"`
IINCode string `json:"Internal Indication (IIN)"`
}
type DNP3AppObject struct {
Group int
Variation int
Qualifier int
RangeStart int
RangeStop int
DataType int
Length int
}
func (d *DNP3) LayerType() gopacket.LayerType { return LayerTypeDNP3 }
func (d *DNP3) LayerContents() []byte {
return []byte{d.SomeByte, d.AnotherByte}
}
func (d *DNP3) LayerPayload() []byte {
return d.restOfData
}
func (d *DNP3) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
// If the data block is too short to be a DNP3 layer, then return an error.
if len(data) < 10 {
df.SetTruncated()
return errDNP3PacketTooShort
}
d.linkLayer(data)
d.transportLayer(data)
d.applicationLayer(data)
return nil
}
func decodeDNP3(data []byte, p gopacket.PacketBuilder) error {
// Attempt to decode the byte slice.
d := &DNP3{}
err := d.DecodeFromBytes(data, p)
if err != nil {
return err
}
// If the decoding worked, add the layer to the packet and set it
// as the application layer too, if there isn't already one.
p.AddLayer(d)
p.SetApplicationLayer(d)
d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
d.BaseLayer.Payload = nil
return p.NextDecoder(gopacket.LayerTypePayload)
}
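// Illustrative sketch, not part of the original file: decoding a captured frame
// through gopacket with this decoder. It assumes LayerTypeDNP3 has been registered
// elsewhere in the package (as the methods below imply) and that raw holds a complete
// link-layer frame; the function name decodeDNP3Example is an assumption.
func decodeDNP3Example(raw []byte) (*DNP3, error) {
	packet := gopacket.NewPacket(raw, LayerTypeDNP3, gopacket.Default)
	if layer := packet.Layer(LayerTypeDNP3); layer != nil {
		return layer.(*DNP3), nil
	}
	return nil, errors.New("no DNP3 layer found")
}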
// CanDecode implements gopacket.DecodingLayer.
func (d *DNP3) CanDecode() gopacket.LayerClass {
return LayerTypeDNP3
}
// NextLayerType returns the layer type contained by this DecodingLayer.
func (d *DNP3) NextLayerType() gopacket.LayerType {
return gopacket.LayerTypePayload
}
// Payload returns nil; the decoded DNP3 fields are exposed through the link, transport and application sub-layer structs instead
func (d *DNP3) Payload() []byte {
return nil
}
func appObject(bytesRead []byte) {
object := bytesRead[22:]
// indexSize := uint(object[2] & 0x70 >> 4)
// QualifierCode := uint(object[2] & 0x0F)
// fmt.Println(indexSize)
// fmt.Println(QualifierCode)
group := int(object[0])
variation := int(object[1])
qualifier := int(object[2])
rangeStart := int(object[3])
rangeStop := int(object[4])
dataType := int(object[5])
length := int(object[6])
appObject := DNP3AppObject{
Group: group,
Variation: variation,
Qualifier: qualifier,
RangeStart: rangeStart,
RangeStop: rangeStop,
DataType: dataType,
Length: length,
}
out, err := json.Marshal(appObject)
if err != nil {
panic(err)
}
fmt.Println(string(out))
}
func (d *DNP3) linkLayer(data []byte) {
start := d.hexConvert(data[0:2])
d.DNP3DataLinkLayer.Start = start
length := int(data[2])
d.DNP3DataLinkLayer.Length = length
ctlControl := d.hexConvert([]byte{data[3]})
d.DNP3DataLinkLayer.Control.ControlByte = ctlControl
IsMaster := int((data[3] & 0x80) >> 7)
d.DNP3DataLinkLayer.Control.IsMaster = IsMaster
PRM := int((data[3] & 0x40) >> 6)
d.DNP3DataLinkLayer.Control.PRM = PRM
FCB := int((data[3] & 0x20) >> 5)
d.DNP3DataLinkLayer.Control.FCB = FCB
FCV := int((data[3] & 0x10) >> 4)
d.DNP3DataLinkLayer.Control.FCV = FCV
FUNCCODE := data[3] & 0x0F
ctlFUNCCODE := fmt.Sprintf("%d", FUNCCODE)
var ctlFUNC string
if PRM == 0x00 {
ctlFUNC = SfCodes[FUNCCODE]
}
if PRM == 0x01 {
ctlFUNC = PfCodes[FUNCCODE]
}
ctlFUNC = ctlFUNC + " (" + ctlFUNCCODE + ")"
d.DNP3DataLinkLayer.Control.FUNC = ctlFUNC
// Destination address: 16-bit, transmitted least-significant octet first (0 to 65535)
destination := fmt.Sprintf("%02x%02x", data[5], data[4])
destinationValue, _ := strconv.ParseUint(destination, 16, 16)
d.DNP3DataLinkLayer.Destination = int(destinationValue)
// Source address: 16-bit, transmitted least-significant octet first (0 to 65535)
source := fmt.Sprintf("%02x%02x", data[7], data[6])
sourceValue, _ := strconv.ParseUint(source, 16, 16)
d.DNP3DataLinkLayer.Source = int(sourceValue)
// TODO: the CRC octets below are only copied from the frame; they are not recalculated or verified here (see the crc16DNP sketch after this function)
crc := fmt.Sprintf("0x%x%x", data[9], data[8])
d.DNP3DataLinkLayer.CRC = crc
}
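// Illustrative sketch, not part of the original decoder, addressing the CRC TODO above:
// DNP3 protects each block with CRC-16/DNP (reflected polynomial 0xA6BC, final one's
// complement, transmitted least-significant octet first). Under that assumption, the
// header CRC could be recomputed over data[0:8] and compared with data[8] and data[9];
// the helper name crc16DNP is an assumption for the example.
func crc16DNP(block []byte) uint16 {
	var crc uint16
	for _, b := range block {
		crc ^= uint16(b)
		for i := 0; i < 8; i++ {
			if crc&0x0001 != 0 {
				crc = (crc >> 1) ^ 0xA6BC
			} else {
				crc >>= 1
			}
		}
	}
	return ^crc // DNP3 transmits the one's complement of the remainder
}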
func (d *DNP3) transportLayer(data []byte) {
transport := fmt.Sprintf("0x%x", data[10])
d.DNP3TransportLayer.TransportByte = transport
final := data[10] & 0x80 >> 7
d.DNP3TransportLayer.Final = int(final)
first := data[10] & 0x40 >> 6
d.DNP3TransportLayer.First = int(first)
sequence := data[10] & 0x3f // 6bit
d.DNP3TransportLayer.Sequence = int(sequence)
}
func (d *DNP3) applicationLayer(data []byte) {
// /***************************************************************************/
// /* Application Layer Bit-Masks */
// /***************************************************************************/
// #define DNP3_AL_UNS 0x10
// #define DNP3_AL_CON 0x20
// #define DNP3_AL_FIN 0x40
// #define DNP3_AL_FIR 0x80
// #define DNP3_AL_SEQ 0x0f
// #define DNP3_AL_FUNC 0xff
controlByte := fmt.Sprintf("0x%x", data[11])
d.DNP3ApplicationLayer.Control.ControlByte = controlByte
first := data[11] & 0x80 >> 7
d.DNP3ApplicationLayer.Control.First = int(first)
final := data[11] & 0x40 >> 6
d.DNP3ApplicationLayer.Control.Final = int(final)
confirm := data[11] & 0x20 >> 5
d.DNP3ApplicationLayer.Control.Confirm = int(confirm)
unsolicited := data[11] & 0x10 >> 4
d.DNP3ApplicationLayer.Control.Unsolicited = int(unsolicited)
sequence := data[11] & 0x0f
d.DNP3ApplicationLayer.Control.Sequence = int(sequence)
functionCode := data[12]
// TODO: refactor this hex convert
src := []byte{functionCode}
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
FUNC := fmt.Sprintf("0x%s", dst)
function := FCodes[functionCode] + " (" + FUNC + ")"
d.DNP3ApplicationLayer.Function = function
objectStart := 13
if d.DNP3DataLinkLayer.Control.IsMaster == 0 {
objectStart = 15
// TODO: refactor this hex convert
src := []byte{data[13], data[14]}
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
IIN := fmt.Sprintf("0x%s", dst)
IINCode := IINCodes[IIN] + " (" + IIN + ")"
d.DNP3ApplicationLayer.IINCode = IINCode
}
dataSize := len(data[12:])
fmt.Printf("DataSize %d\n", dataSize)
switch functionCode {
case 0: // Confirm
case 1: // Read
case 2: // Write
case 3: // Select
case 4: // Operate
case 5: // Direct Operate
case 6: // Direct Operate No ACK
case 7: // Immediate Freeze
case 8: // Immediate Freeze No ACK
case 9: // Freeze and Clear
case 10: // Freeze and Clear No ACK
case 11: // Freeze With Time
case 12: // Freeze With Time No ACK
case 13: // Cold Restart
case 14: // Warm Restart
case 15: // Initialize Data
case 16: // Initialize Application
case 17: // Start Application
case 18: // Stop Application
case 19: // Save Configuration
case 20: // Enable Spontaneous Msg
case 21: // Disable Spontaneous Msg
case 22: // Assign Classes
case 23: // Delay Measurement
case 24: // Record Current Time
case 25: // Open File
case 26: // Close File
case 27: // Delete File
case 28: // Get File Info
case 29: // Authenticate File
case 30: // Abort File
case 31: // Activate Config
case 32: // Authentication Request
case 33: // Authentication Error
case 129: // Response
case 130: // Unsolicited Response
case 131: // Authentication Response
}
objTypeField := binary.BigEndian.Uint16([]byte{data[objectStart], data[objectStart+1]})
objectGroup := objTypeField & 0xFF00
objectVariation := objTypeField & 0x00FF
object := d.hexConvert([]byte{data[objectStart], data[objectStart+1]})
objectPrefixCode := (data[objectStart+2] & 0x70) >> 4 // OPC (prefix code 0-7, indexes ObjPrefixCodes)
objectRangeSpecifierCode := data[objectStart+2] & 0x0F // RSC
fmt.Println(object)
fmt.Println(objectGroup)
fmt.Println(objectVariation)
fmt.Printf("Prefix Code %d\n", objectPrefixCode)
fmt.Println(ObjPrefixCodes[objectPrefixCode])
fmt.Printf("Range Specifier Code %d\n", objectRangeSpecifierCode) // 6 means no range field
fmt.Println(ObjRangeSpecifierCodes[objectRangeSpecifierCode])
fmt.Println(d.hexConvert([]byte{data[objectStart+3]}))
offset := objectStart + 3
rangebytes := 0
fmt.Println(offset)
switch objectRangeSpecifierCode {
case 0:
// start := offset
numItems := int(data[offset+1]) - int(data[offset]) + 1
rangebytes = 2
fmt.Println(numItems)
pointAddress := int(data[offset])
fmt.Println(pointAddress)
// num_items = ( tvb_get_guint8(tvb, offset+1) - tvb_get_guint8(tvb, offset) + 1);
// proto_item_set_generated(range_item);
// al_ptaddr = tvb_get_guint8(tvb, offset);
// proto_tree_add_item(range_tree, hf_dnp3_al_range_start8, tvb, offset, 1, ENC_LITTLE_ENDIAN);
// proto_tree_add_item(range_tree, hf_dnp3_al_range_stop8, tvb, offset + 1, 1, ENC_LITTLE_ENDIAN);
// rangebytes = 2;
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
case 8:
case 9:
case 10:
case 11:
case 12:
case 13:
case 14:
case 15:
}
/* Move offset past any range field */
offset += rangebytes
fmt.Println(offset)
// RSCArrayFirst := []byte{0, 1, 2, 3, 4, 5}
// if d.contains(RSCArrayFirst, objectRangeSpecifierCode) {
// }
/* Special handling for Octet string objects as the variation is the length of the string */
// temp = objTypeField & 0xFF00
// if (temp == AL_OBJ_OCT) || (temp == AL_OBJ_OCT_EVT) {
// al_oct_len = al_obj & 0xFF
// al_obj = temp
// }
// objectGroup := data[objectStart] & 0x0f
// objectGroup := fmt.Sprintf("0x%x%x", data[objectStart], data[objectStart+1])
// fmt.Println(objectGroup)
// objectGroup, _ := strconv.Atoi(fmt.Sprintf("%d", data[objectStart]))
// objectVariation, _ := strconv.Atoi(fmt.Sprintf("%d", data[objectStart+1]))
// fmt.Println(objectGroup)
// fmt.Println(objectVariation)
/* Index Size (3-bits x111xxxx) */
// /* When Qualifier Code != 11 */
// #define AL_OBJQL_PREFIX_NI 0x00 /* Objects are Packed with no index */
// #define AL_OBJQL_PREFIX_1O 0x01 /* Objects are prefixed w/ 1-octet index */
// #define AL_OBJQL_PREFIX_2O 0x02 /* Objects are prefixed w/ 2-octet index */
// #define AL_OBJQL_PREFIX_4O 0x03 /* Objects are prefixed w/ 4-octet index */
// #define AL_OBJQL_PREFIX_1OS 0x04 /* Objects are prefixed w/ 1-octet object size */
// #define AL_OBJQL_PREFIX_2OS 0x05 /* Objects are prefixed w/ 2-octet object size */
// #define AL_OBJQL_PREFIX_4OS 0x06 /* Objects are prefixed w/ 4-octet object size */
// /* When Qualifier Code == 11 */
// #define AL_OBJQL_IDX11_1OIS 0x01 /* 1 octet identifier size */
// #define AL_OBJQL_IDX11_2OIS 0x02 /* 2 octet identifier size */
// #define AL_OBJQL_IDX11_4OIS 0x03 /* 4 octet identifier size */
// /* Qualifier Code (4-bits) */
// /* 4-bits ( xxxx1111 ) */
// #define AL_OBJQL_RANGE_SSI8 0x00 /* 00 8-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_SSI16 0x01 /* 01 16-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_SSI32 0x02 /* 02 32-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_AA8 0x03 /* 03 8-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_AA16 0x04 /* 04 16-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_AA32 0x05 /* 05 32-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_R0 0x06 /* 06 Length of Range field is 0 (no range field) */
// #define AL_OBJQL_RANGE_SF8 0x07 /* 07 8-bit Single Field Quantity */
// #define AL_OBJQL_RANGE_SF16 0x08 /* 08 16-bit Single Field Quantity */
// #define AL_OBJQL_RANGE_SF32 0x09 /* 09 32-bit Single Field Quantity */
// /* 0x0A 10 Reserved */
// #define AL_OBJQL_RANGE_FF 0x0B /* 11 Free-format Qualifier, range field has 1 octet count of objects */
// /* 0x0C 12 Reserved */
// /* 0x0D 13 Reserved */
// /* 0x0E 14 Reserved */
// /* 0x0F 15 Reserved */
/***************************************************************************/
/* Application Layer Data Object Qualifier */
/***************************************************************************/
// /* Bit-Masks */
// #define AL_OBJQ_PREFIX 0x70 /* x111xxxx Masks Prefix from Qualifier */
// #define AL_OBJQ_RANGE 0x0F /* xxxx1111 Masks Range from Qualifier */
// objectQualifier := fmt.Sprintf("0x%d", data[objectStart+2])
// fmt.Println(objectQualifier)
// src = []byte{data[objectStart], data[objectStart+1]}
// dst = make([]byte, hex.EncodedLen(len(src)))
// hex.Encode(dst, src)
// prefixCode := fmt.Sprintf("0x%s", dst)
// fmt.Println(prefixCode)
}
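// Illustrative sketch, not part of the original decoder: the switch above only handles
// range specifier code 0 (8-bit start/stop indices). For code 1 the start and stop
// indices are 16-bit little-endian values, so the range field is four octets and the
// item count follows the same stop - start + 1 rule. The helper name parse16BitRange
// is an assumption for the example.
func parse16BitRange(data []byte, offset int) (numItems, startAddress, rangeBytes int) {
	start := int(binary.LittleEndian.Uint16(data[offset : offset+2]))
	stop := int(binary.LittleEndian.Uint16(data[offset+2 : offset+4]))
	return stop - start + 1, start, 4
}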
func (d *DNP3) IsDNP3(bytesRead []byte) bool {
if len(bytesRead) >= MIN_HEADER_LENGTH && binary.BigEndian.Uint16(bytesRead[0:2]) == START_FIELD {
return true
}
return false
}
func (d *DNP3) isMaster(bytesRead []byte) bool {
intValue := int((bytesRead[3] & 0x80) >> 7)
var boolValue bool = intValue != 0
return boolValue
}
func (d *DNP3) hexConvert(byteArray []byte) string {
return "0x" + hex.EncodeToString(byteArray)
}
func (d *DNP3) isMultiPart(bytesRead []byte) bool {
var FirstOfMulti01 byte = 0x40
var NotFirstNotLast00 byte = 0x00 | case FirstOfMulti01:
return false
case NotFirstNotLast00:
return false
case FinalFrame10:
return true
case OneFrame11:
return true
}
return false
}
// Contains tells whether a contains x.
// func (d *DNP3) contains(a []byte, x int) bool {
// for _, n := range a {
// if x == n {
// return true
// }
// }
// return false
// } | var FinalFrame10 byte = 0x80
var OneFrame11 byte = 0xC0
TpFinFir := bytesRead[10] & 0xC0
switch TpFinFir { | random_line_split |
dnp3.go | // Copyright 2019, The GoPacket Authors, All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
//
//******************************************************************************
package layers
import (
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"strconv"
"github.com/google/gopacket"
)
//******************************************************************************
//
// DNP3 Decoding Layer
// ------------------------------------------
// This file provides a GoPacket decoding layer for DNP3.
//
//******************************************************************************
// DNP3 is the struct for storing DNP3 packet headers.
const (
MIN_HEADER_LENGTH = 10
START_FIELD = 0x0564
)
var FCodes = map[byte]string{
0: "Confirm",
1: "Read",
2: "Write",
3: "Select",
4: "Operate",
5: "Direct Operate",
6: "Direct Operate No ACK",
7: "Immediate Freeze",
8: "Immediate Freeze No ACK",
9: "Freeze and Clear",
10: "Freeze and Clear No ACK",
11: "Freeze With Time",
12: "Freeze With Time No ACK",
13: "Cold Restart",
14: "Warm Restart",
15: "Initialize Data",
16: "Initialize Application",
17: "Start Application",
18: "Stop Application",
19: "Save Configuration",
20: "Enable Spontaneous Msg",
21: "Disable Spontaneous Msg",
22: "Assign Classes",
23: "Delay Measurement",
24: "Record Current Time",
25: "Open File",
26: "Close File",
27: "Delete File",
28: "Get File Info",
29: "Authenticate File",
30: "Abort File",
31: "Activate Config",
32: "Authentication Request",
33: "Authentication Error",
129: "Response",
130: "Unsolicited Response",
131: "Authentication Response",
}
// "-" Reserved or Obsolete
var PfCodes = map[byte]string{
0: "Reset of Remote Link", // 0x10
1: "Reset of User Process",
2: "Test Function For Link", // 0x12
3: "User Data", // 0x13
4: "Unconfirmed User Data", // 0x14
5: "-",
6: "-",
7: "-",
8: "-",
9: "Request Link Status", // 0x19
10: "-",
11: "-",
12: "-",
13: "-",
14: "-",
15: "-",
}
var SfCodes = map[byte]string{
0: "ACK", // 0x00
1: "NAK", // 0x01
2: "-",
3: "-",
4: "-",
5: "-",
6: "-",
7: "-",
8: "-",
9: "-",
10: "-",
11: "Status of Link", // 0x0B
12: "-",
13: "-",
14: "Link Service Not Functioning",
15: "Link Service Not Used or Implemented", // 0x0F
}
/***************************************************************************/
/* Application Layer Internal Indication (IIN) bits */
/* 2 Bytes, message formatting: [First Octet] | [Second Octet] */
/***************************************************************************/
var IINCodes = map[string]string{
/* Octet 1 */
"0x0100": "Broadcast message rx'd",
"0x0200": "Class 1 Data Available",
"0x0400": "Class 2 Data Available",
"0x0800": "Class 3 Data Available",
"0x1000": "Time Sync Req'd from Master",
"0x2000": "Outputs in Local Mode",
"0x4000": "Device Trouble",
"0x8000": "Device Restart",
/* Octet 2 */
"0x0001": "Function code not implemented",
"0x0002": "Requested Objects Unknown",
"0x0004": "Parameters Invalid or Out of Range",
"0x0008": "Event Buffer Overflow",
"0x0010": "Operation Already Executing",
"0x0020": "Device Configuration Corrupt",
"0x0040": "Reserved",
"0x0080": "Reserved",
}
/***************************************************************************/
/* Application Layer Object Prefix codes bits */
/***************************************************************************/
var ObjPrefixCodes = map[byte]string{
0: "Objects packed without a prefix",
1: "Objects prefixed with 1-octet index",
2: "Objects prefixed with 2-octet index",
3: "Objects prefixed with 4-octet index",
4: "Objects prefixed with 1-octet object size",
5: "Objects prefixed with 2-octet object size",
6: "Objects prefixed with 4-octet object size",
7: "Reserved",
}
/***************************************************************************/
/* Application Layer Object Range Specifier codes bits                     */
/***************************************************************************/
var ObjRangeSpecifierCodes = map[byte]string{
0: "8-bit Start and Stop Indices in Range Field",
1: "16-bit Start and Stop Indices in Range Field",
2: "32-bit Start and Stop Indices in Range Field",
3: "8-bit Absolute Address in Range Field",
4: "16-bit Absolute Address in Range Field",
5: "32-bit Absolute Address in Range Field",
6: "Length of Range field is 0 (no range field)",
7: "8-bit Single Field Quantity",
8: "16-bit Single Field Quantity",
9: "32-bit Single Field Quantity",
10: "Reserved",
11: "Free-format Qualifier, range field has 1 octet count of objects",
12: "Reserved",
13: "Reserved",
14: "Reserved",
15: "Reserved",
}
var (
errDNP3PacketTooShort = errors.New("DNP3 packet too short")
)
type DNP3 struct {
BaseLayer // Stores the packet bytes and payload bytes.
DNP3DataLinkLayer DNP3DataLinkLayer
DNP3TransportLayer DNP3TransportLayer
DNP3ApplicationLayer DNP3ApplicationLayer
SomeByte byte
AnotherByte byte
restOfData []byte
}
type DNP3DataLinkLayer struct {
Start string
Length int
Control struct {
ControlByte string
IsMaster int `json:"Is Master"`
PRM int `json:"Primary"`
FCB int `json:"Frame Count Bit"`
FCV int `json:"Frame Count Valid"`
FUNC string `json:"Function Code"`
}
Destination int
Source int
CRC string
}
type DNP3TransportLayer struct {
TransportByte string
Final int
First int
Sequence int
}
type DNP3ApplicationLayer struct {
Control struct {
ControlByte string
First int
Final int
Confirm int
Unsolicited int
Sequence int
}
Function string `json:"Function Code"`
IINCode string `json:"Internal Indication (IIN)"`
}
type DNP3AppObject struct {
Group int
Variation int
Qualifier int
RangeStart int
RangeStop int
DataType int
Length int
}
func (d *DNP3) LayerType() gopacket.LayerType { return LayerTypeDNP3 }
func (d *DNP3) LayerContents() []byte {
return []byte{d.SomeByte, d.AnotherByte}
}
func (d *DNP3) LayerPayload() []byte {
return d.restOfData
}
func (d *DNP3) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
// If the data block is too short to be a DNP3 layer, then return an error.
if len(data) < 10 {
df.SetTruncated()
return errDNP3PacketTooShort
}
d.linkLayer(data)
d.transportLayer(data)
d.applicationLayer(data)
return nil
}
func decodeDNP3(data []byte, p gopacket.PacketBuilder) error {
// Attempt to decode the byte slice.
d := &DNP3{}
err := d.DecodeFromBytes(data, p)
if err != nil {
return err
}
// If the decoding worked, add the layer to the packet and set it
// as the application layer too, if there isn't already one.
p.AddLayer(d)
p.SetApplicationLayer(d)
d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
d.BaseLayer.Payload = nil
return p.NextDecoder(gopacket.LayerTypePayload)
}
// CanDecode implements gopacket.DecodingLayer.
func (d *DNP3) CanDecode() gopacket.LayerClass {
return LayerTypeDNP3
}
// NextLayerType returns the layer type contained by this DecodingLayer.
func (d *DNP3) | () gopacket.LayerType {
return gopacket.LayerTypePayload
}
// Payload returns nil; the decoded DNP3 fields are exposed through the link, transport and application sub-layer structs instead
func (d *DNP3) Payload() []byte {
return nil
}
func appObject(bytesRead []byte) {
object := bytesRead[22:]
// indexSize := uint(object[2] & 0x70 >> 4)
// QualifierCode := uint(object[2] & 0x0F)
// fmt.Println(indexSize)
// fmt.Println(QualifierCode)
group := int(object[0])
variation := int(object[1])
qualifier := int(object[2])
rangeStart := int(object[3])
rangeStop := int(object[4])
dataType := int(object[5])
length := int(object[6])
appObject := DNP3AppObject{
Group: group,
Variation: variation,
Qualifier: qualifier,
RangeStart: rangeStart,
RangeStop: rangeStop,
DataType: dataType,
Length: length,
}
out, err := json.Marshal(appObject)
if err != nil {
panic(err)
}
fmt.Println(string(out))
}
func (d *DNP3) linkLayer(data []byte) {
start := d.hexConvert(data[0:2])
d.DNP3DataLinkLayer.Start = start
length := int(data[2])
d.DNP3DataLinkLayer.Length = length
ctlControl := d.hexConvert([]byte{data[3]})
d.DNP3DataLinkLayer.Control.ControlByte = ctlControl
IsMaster := int((data[3] & 0x80) >> 7)
d.DNP3DataLinkLayer.Control.IsMaster = IsMaster
PRM := int((data[3] & 0x40) >> 6)
d.DNP3DataLinkLayer.Control.PRM = PRM
FCB := int((data[3] & 0x20) >> 5)
d.DNP3DataLinkLayer.Control.FCB = FCB
FCV := int((data[3] & 0x10) >> 4)
d.DNP3DataLinkLayer.Control.FCV = FCV
FUNCCODE := data[3] & 0x0F
ctlFUNCCODE := fmt.Sprintf("%d", FUNCCODE)
var ctlFUNC string
if PRM == 0x00 {
ctlFUNC = SfCodes[FUNCCODE]
}
if PRM == 0x01 {
ctlFUNC = PfCodes[FUNCCODE]
}
ctlFUNC = ctlFUNC + " (" + ctlFUNCCODE + ")"
d.DNP3DataLinkLayer.Control.FUNC = ctlFUNC
// Destination address: 16-bit, transmitted least-significant octet first (0 to 65535)
destination := fmt.Sprintf("%02x%02x", data[5], data[4])
destinationValue, _ := strconv.ParseUint(destination, 16, 16)
d.DNP3DataLinkLayer.Destination = int(destinationValue)
// Source address: 16-bit, transmitted least-significant octet first (0 to 65535)
source := fmt.Sprintf("%02x%02x", data[7], data[6])
sourceValue, _ := strconv.ParseUint(source, 16, 16)
d.DNP3DataLinkLayer.Source = int(sourceValue)
// TODO: the CRC octets below are only copied from the frame; they are not recalculated or verified here
crc := fmt.Sprintf("0x%x%x", data[9], data[8])
d.DNP3DataLinkLayer.CRC = crc
}
func (d *DNP3) transportLayer(data []byte) {
transport := fmt.Sprintf("0x%x", data[10])
d.DNP3TransportLayer.TransportByte = transport
final := data[10] & 0x80 >> 7
d.DNP3TransportLayer.Final = int(final)
first := data[10] & 0x40 >> 6
d.DNP3TransportLayer.First = int(first)
sequence := data[10] & 0x3f // 6bit
d.DNP3TransportLayer.Sequence = int(sequence)
}
func (d *DNP3) applicationLayer(data []byte) {
// /***************************************************************************/
// /* Application Layer Bit-Masks */
// /***************************************************************************/
// #define DNP3_AL_UNS 0x10
// #define DNP3_AL_CON 0x20
// #define DNP3_AL_FIN 0x40
// #define DNP3_AL_FIR 0x80
// #define DNP3_AL_SEQ 0x0f
// #define DNP3_AL_FUNC 0xff
controlByte := fmt.Sprintf("0x%x", data[11])
d.DNP3ApplicationLayer.Control.ControlByte = controlByte
first := data[11] & 0x80 >> 7
d.DNP3ApplicationLayer.Control.First = int(first)
final := data[11] & 0x40 >> 6
d.DNP3ApplicationLayer.Control.Final = int(final)
confirm := data[11] & 0x20 >> 5
d.DNP3ApplicationLayer.Control.Confirm = int(confirm)
unsolicited := data[11] & 0x10 >> 4
d.DNP3ApplicationLayer.Control.Unsolicited = int(unsolicited)
sequence := data[11] & 0x0f
d.DNP3ApplicationLayer.Control.Sequence = int(sequence)
functionCode := data[12]
// TODO: refactor this hex convert
src := []byte{functionCode}
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
FUNC := fmt.Sprintf("0x%s", dst)
function := FCodes[functionCode] + " (" + FUNC + ")"
d.DNP3ApplicationLayer.Function = function
objectStart := 13
if d.DNP3DataLinkLayer.Control.IsMaster == 0 {
objectStart = 15
// TODO: refactor this hex convert
src := []byte{data[13], data[14]}
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
IIN := fmt.Sprintf("0x%s", dst)
IINCode := IINCodes[IIN] + " (" + IIN + ")"
d.DNP3ApplicationLayer.IINCode = IINCode
}
dataSize := len(data[12:])
fmt.Printf("DataSize %d\n", dataSize)
switch functionCode {
case 0: // Confirm
case 1: // Read
case 2: // Write
case 3: // Select
case 4: // Operate
case 5: // Direct Operate
case 6: // Direct Operate No ACK
case 7: // Immediate Freeze
case 8: // Immediate Freeze No ACK
case 9: // Freeze and Clear
case 10: // Freeze and Clear No ACK
case 11: // Freeze With Time
case 12: // Freeze With Time No ACK
case 13: // Cold Restart
case 14: // Warm Restart
case 15: // Initialize Data
case 16: // Initialize Application
case 17: // Start Application
case 18: // Stop Application
case 19: // Save Configuration
case 20: // Enable Spontaneous Msg
case 21: // Disable Spontaneous Msg
case 22: // Assign Classes
case 23: // Delay Measurement
case 24: // Record Current Time
case 25: // Open File
case 26: // Close File
case 27: // Delete File
case 28: // Get File Info
case 29: // Authenticate File
case 30: // Abort File
case 31: // Activate Config
case 32: // Authentication Request
case 33: // Authentication Error
case 129: // Response
case 130: // Unsolicited Response
case 131: // Authentication Response
}
objTypeField := binary.BigEndian.Uint16([]byte{data[objectStart], data[objectStart+1]})
objectGroup := objTypeField & 0xFF00
objectVariation := objTypeField & 0x00FF
object := d.hexConvert([]byte{data[objectStart], data[objectStart+1]})
objectPrefixCode := (data[objectStart+2] & 0x70) >> 4 // OPC (prefix code 0-7, indexes ObjPrefixCodes)
objectRangeSpecifierCode := data[objectStart+2] & 0x0F // RSC
fmt.Println(object)
fmt.Println(objectGroup)
fmt.Println(objectVariation)
fmt.Printf("Prefix Code %d\n", objectPrefixCode)
fmt.Println(ObjPrefixCodes[objectPrefixCode])
fmt.Printf("Range Specifier Code %d\n", objectRangeSpecifierCode) // 6 means no range field
fmt.Println(ObjRangeSpecifierCodes[objectRangeSpecifierCode])
fmt.Println(d.hexConvert([]byte{data[objectStart+3]}))
offset := objectStart + 3
rangebytes := 0
fmt.Println(offset)
switch objectRangeSpecifierCode {
case 0:
// start := offset
numItems := int(data[offset+1]) - int(data[offset]) + 1
rangebytes = 2
fmt.Println(numItems)
pointAddress := int(data[offset])
fmt.Println(pointAddress)
// num_items = ( tvb_get_guint8(tvb, offset+1) - tvb_get_guint8(tvb, offset) + 1);
// proto_item_set_generated(range_item);
// al_ptaddr = tvb_get_guint8(tvb, offset);
// proto_tree_add_item(range_tree, hf_dnp3_al_range_start8, tvb, offset, 1, ENC_LITTLE_ENDIAN);
// proto_tree_add_item(range_tree, hf_dnp3_al_range_stop8, tvb, offset + 1, 1, ENC_LITTLE_ENDIAN);
// rangebytes = 2;
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
case 8:
case 9:
case 10:
case 11:
case 12:
case 13:
case 14:
case 15:
}
/* Move offset past any range field */
offset += rangebytes
fmt.Println(offset)
// RSCArrayFirst := []byte{0, 1, 2, 3, 4, 5}
// if d.contains(RSCArrayFirst, objectRangeSpecifierCode) {
// }
/* Special handling for Octet string objects as the variation is the length of the string */
// temp = objTypeField & 0xFF00
// if (temp == AL_OBJ_OCT) || (temp == AL_OBJ_OCT_EVT) {
// al_oct_len = al_obj & 0xFF
// al_obj = temp
// }
// objectGroup := data[objectStart] & 0x0f
// objectGroup := fmt.Sprintf("0x%x%x", data[objectStart], data[objectStart+1])
// fmt.Println(objectGroup)
// objectGroup, _ := strconv.Atoi(fmt.Sprintf("%d", data[objectStart]))
// objectVariation, _ := strconv.Atoi(fmt.Sprintf("%d", data[objectStart+1]))
// fmt.Println(objectGroup)
// fmt.Println(objectVariation)
/* Index Size (3-bits x111xxxx) */
// /* When Qualifier Code != 11 */
// #define AL_OBJQL_PREFIX_NI 0x00 /* Objects are Packed with no index */
// #define AL_OBJQL_PREFIX_1O 0x01 /* Objects are prefixed w/ 1-octet index */
// #define AL_OBJQL_PREFIX_2O 0x02 /* Objects are prefixed w/ 2-octet index */
// #define AL_OBJQL_PREFIX_4O 0x03 /* Objects are prefixed w/ 4-octet index */
// #define AL_OBJQL_PREFIX_1OS 0x04 /* Objects are prefixed w/ 1-octet object size */
// #define AL_OBJQL_PREFIX_2OS 0x05 /* Objects are prefixed w/ 2-octet object size */
// #define AL_OBJQL_PREFIX_4OS 0x06 /* Objects are prefixed w/ 4-octet object size */
// /* When Qualifier Code == 11 */
// #define AL_OBJQL_IDX11_1OIS 0x01 /* 1 octet identifier size */
// #define AL_OBJQL_IDX11_2OIS 0x02 /* 2 octet identifier size */
// #define AL_OBJQL_IDX11_4OIS 0x03 /* 4 octet identifier size */
// /* Qualifier Code (4-bits) */
// /* 4-bits ( xxxx1111 ) */
// #define AL_OBJQL_RANGE_SSI8 0x00 /* 00 8-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_SSI16 0x01 /* 01 16-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_SSI32 0x02 /* 02 32-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_AA8 0x03 /* 03 8-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_AA16 0x04 /* 04 16-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_AA32 0x05 /* 05 32-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_R0 0x06 /* 06 Length of Range field is 0 (no range field) */
// #define AL_OBJQL_RANGE_SF8 0x07 /* 07 8-bit Single Field Quantity */
// #define AL_OBJQL_RANGE_SF16 0x08 /* 08 16-bit Single Field Quantity */
// #define AL_OBJQL_RANGE_SF32 0x09 /* 09 32-bit Single Field Quantity */
// /* 0x0A 10 Reserved */
// #define AL_OBJQL_RANGE_FF 0x0B /* 11 Free-format Qualifier, range field has 1 octet count of objects */
// /* 0x0C 12 Reserved */
// /* 0x0D 13 Reserved */
// /* 0x0E 14 Reserved */
// /* 0x0F 15 Reserved */
/***************************************************************************/
/* Application Layer Data Object Qualifier */
/***************************************************************************/
// /* Bit-Masks */
// #define AL_OBJQ_PREFIX 0x70 /* x111xxxx Masks Prefix from Qualifier */
// #define AL_OBJQ_RANGE 0x0F /* xxxx1111 Masks Range from Qualifier */
// objectQualifier := fmt.Sprintf("0x%d", data[objectStart+2])
// fmt.Println(objectQualifier)
// src = []byte{data[objectStart], data[objectStart+1]}
// dst = make([]byte, hex.EncodedLen(len(src)))
// hex.Encode(dst, src)
// prefixCode := fmt.Sprintf("0x%s", dst)
// fmt.Println(prefixCode)
}
func (d *DNP3) IsDNP3(bytesRead []byte) bool {
if len(bytesRead) >= MIN_HEADER_LENGTH && binary.BigEndian.Uint16(bytesRead[0:2]) == START_FIELD {
return true
}
return false
}
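// Illustrative sketch, not part of the original file: IsDNP3 only checks the 0x05 0x64
// start octets and the 10-octet minimum header length. The header below is a
// hypothetical request frame (length 5, control 0xC4, destination 1, source 2) with a
// placeholder CRC, shown only to exercise the check; isDNP3Example is an assumed name.
func isDNP3Example() bool {
	header := []byte{0x05, 0x64, 0x05, 0xC4, 0x01, 0x00, 0x02, 0x00, 0x00, 0x00}
	d := &DNP3{}
	return d.IsDNP3(header) // true: starts with 0x0564 and meets MIN_HEADER_LENGTH
}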
func (d *DNP3) isMaster(bytesRead []byte) bool {
intValue := int((bytesRead[3] & 0x80) >> 7)
var boolValue bool = intValue != 0
return boolValue
}
func (d *DNP3) hexConvert(byteArray []byte) string {
return "0x" + hex.EncodeToString(byteArray)
}
func (d *DNP3) isMultiPart(bytesRead []byte) bool {
var FirstOfMulti01 byte = 0x40
var NotFirstNotLast00 byte = 0x00
var FinalFrame10 byte = 0x80
var OneFrame11 byte = 0xC0
TpFinFir := bytesRead[10] & 0xC0
switch TpFinFir {
case FirstOfMulti01:
return false
case NotFirstNotLast00:
return false
case FinalFrame10:
return true
case OneFrame11:
return true
}
return false
}
// Contains tells whether a contains x.
// func (d *DNP3) contains(a []byte, x int) bool {
// for _, n := range a {
// if x == n {
// return true
// }
// }
// return false
// }
| NextLayerType | identifier_name |
dnp3.go | // Copyright 2019, The GoPacket Authors, All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
//
//******************************************************************************
package layers
import (
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"strconv"
"github.com/google/gopacket"
)
//******************************************************************************
//
// DNP3 Decoding Layer
// ------------------------------------------
// This file provides a GoPacket decoding layer for DNP3.
//
//******************************************************************************
// DNP3 is the struct for storing DNP3 packet headers.
const (
MIN_HEADER_LENGTH = 10
START_FIELD = 0x0564
)
var FCodes = map[byte]string{
0: "Confirm",
1: "Read",
2: "Write",
3: "Select",
4: "Operate",
5: "Direct Operate",
6: "Direct Operate No ACK",
7: "Immediate Freeze",
8: "Immediate Freeze No ACK",
9: "Freeze and Clear",
10: "Freeze and Clear No ACK",
11: "Freeze With Time",
12: "Freeze With Time No ACK",
13: "Cold Restart",
14: "Warm Restart",
15: "Initialize Data",
16: "Initialize Application",
17: "Start Application",
18: "Stop Application",
19: "Save Configuration",
20: "Enable Spontaneous Msg",
21: "Disable Spontaneous Msg",
22: "Assign Classes",
23: "Delay Measurement",
24: "Record Current Time",
25: "Open File",
26: "Close File",
27: "Delete File",
28: "Get File Info",
29: "Authenticate File",
30: "Abort File",
31: "Activate Config",
32: "Authentication Request",
33: "Authentication Error",
129: "Response",
130: "Unsolicited Response",
131: "Authentication Response",
}
// "-" Reserved or Obsolete
var PfCodes = map[byte]string{
0: "Reset of Remote Link", // 0x10
1: "Reset of User Process",
2: "Test Function For Link", // 0x12
3: "User Data", // 0x13
4: "Unconfirmed User Data", // 0x14
5: "-",
6: "-",
7: "-",
8: "-",
9: "Request Link Status", // 0x19
10: "-",
11: "-",
12: "-",
13: "-",
14: "-",
15: "-",
}
var SfCodes = map[byte]string{
0: "ACK", // 0x00
1: "NAK", // 0x01
2: "-",
3: "-",
4: "-",
5: "-",
6: "-",
7: "-",
8: "-",
9: "-",
10: "-",
11: "Status of Link", // 0x0B
12: "-",
13: "-",
14: "Link Service Not Functioning",
15: "Link Service Not Used or Implemented", // 0x0F
}
/***************************************************************************/
/* Application Layer Internal Indication (IIN) bits */
/* 2 Bytes, message formatting: [First Octet] | [Second Octet] */
/***************************************************************************/
var IINCodes = map[string]string{
/* Octet 1 */
"0x0100": "Broadcast message rx'd",
"0x0200": "Class 1 Data Available",
"0x0400": "Class 2 Data Available",
"0x0800": "Class 3 Data Available",
"0x1000": "Time Sync Req'd from Master",
"0x2000": "Outputs in Local Mode",
"0x4000": "Device Trouble",
"0x8000": "Device Restart",
/* Octet 2 */
"0x0001": "Function code not implemented",
"0x0002": "Requested Objects Unknown",
"0x0004": "Parameters Invalid or Out of Range",
"0x0008": "Event Buffer Overflow",
"0x0010": "Operation Already Executing",
"0x0020": "Device Configuration Corrupt",
"0x0040": "Reserved",
"0x0080": "Reserved",
}
/***************************************************************************/
/* Application Layer Object Prefix codes bits */
/***************************************************************************/
var ObjPrefixCodes = map[byte]string{
0: "Objects packed without a prefix",
1: "Objects prefixed with 1-octet index",
2: "Objects prefixed with 2-octet index",
3: "Objects prefixed with 4-octet index",
4: "Objects prefixed with 1-octet object size",
5: "Objects prefixed with 2-octet object size",
6: "Objects prefixed with 4-octet object size",
7: "Reserved",
}
/***************************************************************************/
/* Application Layer Object Range Specifier codes bits                     */
/***************************************************************************/
var ObjRangeSpecifierCodes = map[byte]string{
0: "8-bit Start and Stop Indices in Range Field",
1: "16-bit Start and Stop Indices in Range Field",
2: "32-bit Start and Stop Indices in Range Field",
3: "8-bit Absolute Address in Range Field",
4: "16-bit Absolute Address in Range Field",
5: "32-bit Absolute Address in Range Field",
6: "Length of Range field is 0 (no range field)",
7: "8-bit Single Field Quantity",
8: "16-bit Single Field Quantity",
9: "32-bit Single Field Quantity",
10: "Reserved",
11: "Free-format Qualifier, range field has 1 octet count of objects",
12: "Reserved",
13: "Reserved",
14: "Reserved",
15: "Reserved",
}
var (
errDNP3PacketTooShort = errors.New("DNP3 packet too short")
)
type DNP3 struct {
BaseLayer // Stores the packet bytes and payload bytes.
DNP3DataLinkLayer DNP3DataLinkLayer
DNP3TransportLayer DNP3TransportLayer
DNP3ApplicationLayer DNP3ApplicationLayer
SomeByte byte
AnotherByte byte
restOfData []byte
}
type DNP3DataLinkLayer struct {
Start string
Length int
Control struct {
ControlByte string
IsMaster int `json:"Is Master"`
PRM int `json:"Primary"`
FCB int `json:"Frame Count Bit"`
FCV int `json:"Frame Count Valid"`
FUNC string `json:"Function Code"`
}
Destination int
Source int
CRC string
}
type DNP3TransportLayer struct {
TransportByte string
Final int
First int
Sequence int
}
type DNP3ApplicationLayer struct {
Control struct {
ControlByte string
First int
Final int
Confirm int
Unsolicited int
Sequence int
}
Function string `json:"Function Code"`
IINCode string `json:"Internal Indication (IIN)"`
}
type DNP3AppObject struct {
Group int
Variation int
Qualifier int
RangeStart int
RangeStop int
DataType int
Length int
}
func (d *DNP3) LayerType() gopacket.LayerType { return LayerTypeDNP3 }
func (d *DNP3) LayerContents() []byte {
return []byte{d.SomeByte, d.AnotherByte}
}
func (d *DNP3) LayerPayload() []byte |
func (d *DNP3) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
// If the data block is too short to be a DNP3 layer, then return an error.
if len(data) < 10 {
df.SetTruncated()
return errDNP3PacketTooShort
}
d.linkLayer(data)
d.transportLayer(data)
d.applicationLayer(data)
return nil
}
func decodeDNP3(data []byte, p gopacket.PacketBuilder) error {
// Attempt to decode the byte slice.
d := &DNP3{}
err := d.DecodeFromBytes(data, p)
if err != nil {
return err
}
// If the decoding worked, add the layer to the packet and set it
// as the application layer too, if there isn't already one.
p.AddLayer(d)
p.SetApplicationLayer(d)
d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
d.BaseLayer.Payload = nil
return p.NextDecoder(gopacket.LayerTypePayload)
}
// CanDecode implements gopacket.DecodingLayer.
func (d *DNP3) CanDecode() gopacket.LayerClass {
return LayerTypeDNP3
}
// NextLayerType returns the layer type contained by this DecodingLayer.
func (d *DNP3) NextLayerType() gopacket.LayerType {
return gopacket.LayerTypePayload
}
// Payload returns nil; the decoded DNP3 fields are exposed through the link, transport and application sub-layer structs instead
func (d *DNP3) Payload() []byte {
return nil
}
func appObject(bytesRead []byte) {
object := bytesRead[22:]
// indexSize := uint(object[2] & 0x70 >> 4)
// QualifierCode := uint(object[2] & 0x0F)
// fmt.Println(indexSize)
// fmt.Println(QualifierCode)
group := int(object[0])
variation := int(object[1])
qualifier := int(object[2])
rangeStart := int(object[3])
rangeStop := int(object[4])
dataType := int(object[5])
length := int(object[6])
appObject := DNP3AppObject{
Group: group,
Variation: variation,
Qualifier: qualifier,
RangeStart: rangeStart,
RangeStop: rangeStop,
DataType: dataType,
Length: length,
}
out, err := json.Marshal(appObject)
if err != nil {
panic(err)
}
fmt.Println(string(out))
}
func (d *DNP3) linkLayer(data []byte) {
start := d.hexConvert(data[0:2])
d.DNP3DataLinkLayer.Start = start
length := int(data[2])
d.DNP3DataLinkLayer.Length = length
ctlControl := d.hexConvert([]byte{data[3]})
d.DNP3DataLinkLayer.Control.ControlByte = ctlControl
IsMaster := int((data[3] & 0x80) >> 7)
d.DNP3DataLinkLayer.Control.IsMaster = IsMaster
PRM := int((data[3] & 0x40) >> 6)
d.DNP3DataLinkLayer.Control.PRM = PRM
FCB := int((data[3] & 0x20) >> 5)
d.DNP3DataLinkLayer.Control.FCB = FCB
FCV := int((data[3] & 0x10) >> 4)
d.DNP3DataLinkLayer.Control.FCV = FCV
FUNCCODE := data[3] & 0x0F
ctlFUNCCODE := fmt.Sprintf("%d", FUNCCODE)
var ctlFUNC string
if PRM == 0x00 {
ctlFUNC = SfCodes[FUNCCODE]
}
if PRM == 0x01 {
ctlFUNC = PfCodes[FUNCCODE]
}
ctlFUNC = ctlFUNC + " (" + ctlFUNCCODE + ")"
d.DNP3DataLinkLayer.Control.FUNC = ctlFUNC
// Destination address: 16-bit, transmitted least-significant octet first (0 to 65535)
destination := fmt.Sprintf("%02x%02x", data[5], data[4])
destinationValue, _ := strconv.ParseUint(destination, 16, 16)
d.DNP3DataLinkLayer.Destination = int(destinationValue)
// Source address: 16-bit, transmitted least-significant octet first (0 to 65535)
source := fmt.Sprintf("%02x%02x", data[7], data[6])
sourceValue, _ := strconv.ParseUint(source, 16, 16)
d.DNP3DataLinkLayer.Source = int(sourceValue)
// TODO: the CRC octets below are only copied from the frame; they are not recalculated or verified here
crc := fmt.Sprintf("0x%x%x", data[9], data[8])
d.DNP3DataLinkLayer.CRC = crc
}
func (d *DNP3) transportLayer(data []byte) {
transport := fmt.Sprintf("0x%x", data[10])
d.DNP3TransportLayer.TransportByte = transport
final := data[10] & 0x80 >> 7
d.DNP3TransportLayer.Final = int(final)
first := data[10] & 0x40 >> 6
d.DNP3TransportLayer.First = int(first)
sequence := data[10] & 0x3f // 6bit
d.DNP3TransportLayer.Sequence = int(sequence)
}
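// Illustrative sketch, not part of the original decoder: the transport octet packs FIN
// into bit 7, FIR into bit 6 and a 6-bit sequence number into the low bits, exactly as
// extracted above. An octet of 0xC5, for instance, decodes to FIN=1, FIR=1 and
// sequence 5 (a single-fragment message). The helper name splitTransportOctet is an
// assumption for the example.
func splitTransportOctet(octet byte) (fin, fir, sequence int) {
	return int(octet >> 7), int((octet >> 6) & 0x01), int(octet & 0x3F)
}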
func (d *DNP3) applicationLayer(data []byte) {
// /***************************************************************************/
// /* Application Layer Bit-Masks */
// /***************************************************************************/
// #define DNP3_AL_UNS 0x10
// #define DNP3_AL_CON 0x20
// #define DNP3_AL_FIN 0x40
// #define DNP3_AL_FIR 0x80
// #define DNP3_AL_SEQ 0x0f
// #define DNP3_AL_FUNC 0xff
controlByte := fmt.Sprintf("0x%x", data[11])
d.DNP3ApplicationLayer.Control.ControlByte = controlByte
first := data[11] & 0x80 >> 7
d.DNP3ApplicationLayer.Control.First = int(first)
final := data[11] & 0x40 >> 6
d.DNP3ApplicationLayer.Control.Final = int(final)
confirm := data[11] & 0x20 >> 5
d.DNP3ApplicationLayer.Control.Confirm = int(confirm)
unsolicited := data[11] & 0x10 >> 4
d.DNP3ApplicationLayer.Control.Unsolicited = int(unsolicited)
sequence := data[11] & 0x0f
d.DNP3ApplicationLayer.Control.Sequence = int(sequence)
functionCode := data[12]
// TODO: refactor this hex convert
src := []byte{functionCode}
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
FUNC := fmt.Sprintf("0x%s", dst)
function := FCodes[functionCode] + " (" + FUNC + ")"
d.DNP3ApplicationLayer.Function = function
objectStart := 13
if d.DNP3DataLinkLayer.Control.IsMaster == 0 {
objectStart = 15
// TODO: refactor this hex convert
src := []byte{data[13], data[14]}
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
IIN := fmt.Sprintf("0x%s", dst)
IINCode := IINCodes[IIN] + " (" + IIN + ")"
d.DNP3ApplicationLayer.IINCode = IINCode
}
dataSize := len(data[12:])
fmt.Printf("DataSize %d\n", dataSize)
switch functionCode {
case 0: // Confirm
case 1: // Read
case 2: // Write
case 3: // Select
case 4: // Operate
case 5: // Direct Operate
case 6: // Direct Operate No ACK
case 7: // Immediate Freeze
case 8: // Immediate Freeze No ACK
case 9: // Freeze and Clear
case 10: // Freeze and Clear No ACK
case 11: // Freeze With Time
case 12: // Freeze With Time No ACK
case 13: // Cold Restart
case 14: // Warm Restart
case 15: // Initialize Data
case 16: // Initialize Application
case 17: // Start Application
case 18: // Stop Application
case 19: // Save Configuration
case 20: // Enable Spontaneous Msg
case 21: // Disable Spontaneous Msg
case 22: // Assign Classes
case 23: // Delay Measurement
case 24: // Record Current Time
case 25: // Open File
case 26: // Close File
case 27: // Delete File
case 28: // Get File Info
case 29: // Authenticate File
case 30: // Abort File
case 31: // Activate Config
case 32: // Authentication Request
case 33: // Authentication Error
case 129: // Response
case 130: // Unsolicited Response
case 131: // Authentication Response
}
objTypeField := binary.BigEndian.Uint16([]byte{data[objectStart], data[objectStart+1]})
objectGroup := objTypeField & 0xFF00
objectVariation := objTypeField & 0x00FF
object := d.hexConvert([]byte{data[objectStart], data[objectStart+1]})
objectPrefixCode := (data[objectStart+2] & 0x70) >> 4 // OPC (prefix code 0-7, indexes ObjPrefixCodes)
objectRangeSpecifierCode := data[objectStart+2] & 0x0F // RSC
fmt.Println(object)
fmt.Println(objectGroup)
fmt.Println(objectVariation)
fmt.Printf("Prefix Code %d\n", objectPrefixCode)
fmt.Println(ObjPrefixCodes[objectPrefixCode])
fmt.Printf("Range Specifier Code %d\n", objectRangeSpecifierCode) // 6 means no range field
fmt.Println(ObjRangeSpecifierCodes[objectRangeSpecifierCode])
fmt.Println(d.hexConvert([]byte{data[objectStart+3]}))
offset := objectStart + 3
rangebytes := 0
fmt.Println(offset)
switch objectRangeSpecifierCode {
case 0:
// start := offset
numItems := int(data[offset+1]) - int(data[offset]) + 1
rangebytes = 2
fmt.Println(numItems)
pointAddress := int(data[offset])
fmt.Println(pointAddress)
// num_items = ( tvb_get_guint8(tvb, offset+1) - tvb_get_guint8(tvb, offset) + 1);
// proto_item_set_generated(range_item);
// al_ptaddr = tvb_get_guint8(tvb, offset);
// proto_tree_add_item(range_tree, hf_dnp3_al_range_start8, tvb, offset, 1, ENC_LITTLE_ENDIAN);
// proto_tree_add_item(range_tree, hf_dnp3_al_range_stop8, tvb, offset + 1, 1, ENC_LITTLE_ENDIAN);
// rangebytes = 2;
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
case 8:
case 9:
case 10:
case 11:
case 12:
case 13:
case 14:
case 15:
}
/* Move offset past any range field */
offset += rangebytes
fmt.Println(offset)
// RSCArrayFirst := []byte{0, 1, 2, 3, 4, 5}
// if d.contains(RSCArrayFirst, objectRangeSpecifierCode) {
// }
/* Special handling for Octet string objects as the variation is the length of the string */
// temp = objTypeField & 0xFF00
// if (temp == AL_OBJ_OCT) || (temp == AL_OBJ_OCT_EVT) {
// al_oct_len = al_obj & 0xFF
// al_obj = temp
// }
// objectGroup := data[objectStart] & 0x0f
// objectGroup := fmt.Sprintf("0x%x%x", data[objectStart], data[objectStart+1])
// fmt.Println(objectGroup)
// objectGroup, _ := strconv.Atoi(fmt.Sprintf("%d", data[objectStart]))
// objectVariation, _ := strconv.Atoi(fmt.Sprintf("%d", data[objectStart+1]))
// fmt.Println(objectGroup)
// fmt.Println(objectVariation)
/* Index Size (3-bits x111xxxx) */
// /* When Qualifier Code != 11 */
// #define AL_OBJQL_PREFIX_NI 0x00 /* Objects are Packed with no index */
// #define AL_OBJQL_PREFIX_1O 0x01 /* Objects are prefixed w/ 1-octet index */
// #define AL_OBJQL_PREFIX_2O 0x02 /* Objects are prefixed w/ 2-octet index */
// #define AL_OBJQL_PREFIX_4O 0x03 /* Objects are prefixed w/ 4-octet index */
// #define AL_OBJQL_PREFIX_1OS 0x04 /* Objects are prefixed w/ 1-octet object size */
// #define AL_OBJQL_PREFIX_2OS 0x05 /* Objects are prefixed w/ 2-octet object size */
// #define AL_OBJQL_PREFIX_4OS 0x06 /* Objects are prefixed w/ 4-octet object size */
// /* When Qualifier Code == 11 */
// #define AL_OBJQL_IDX11_1OIS 0x01 /* 1 octet identifier size */
// #define AL_OBJQL_IDX11_2OIS 0x02 /* 2 octet identifier size */
// #define AL_OBJQL_IDX11_4OIS 0x03 /* 4 octet identifier size */
// /* Qualifier Code (4-bits) */
// /* 4-bits ( xxxx1111 ) */
// #define AL_OBJQL_RANGE_SSI8 0x00 /* 00 8-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_SSI16 0x01 /* 01 16-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_SSI32 0x02 /* 02 32-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_AA8 0x03 /* 03 8-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_AA16 0x04 /* 04 16-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_AA32 0x05 /* 05 32-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_R0 0x06 /* 06 Length of Range field is 0 (no range field) */
// #define AL_OBJQL_RANGE_SF8 0x07 /* 07 8-bit Single Field Quantity */
// #define AL_OBJQL_RANGE_SF16 0x08 /* 08 16-bit Single Field Quantity */
// #define AL_OBJQL_RANGE_SF32 0x09 /* 09 32-bit Single Field Quantity */
// /* 0x0A 10 Reserved */
// #define AL_OBJQL_RANGE_FF 0x0B /* 11 Free-format Qualifier, range field has 1 octet count of objects */
// /* 0x0C 12 Reserved */
// /* 0x0D 13 Reserved */
// /* 0x0E 14 Reserved */
// /* 0x0F 15 Reserved */
/***************************************************************************/
/* Application Layer Data Object Qualifier */
/***************************************************************************/
// /* Bit-Masks */
// #define AL_OBJQ_PREFIX 0x70 /* x111xxxx Masks Prefix from Qualifier */
// #define AL_OBJQ_RANGE 0x0F /* xxxx1111 Masks Range from Qualifier */
// objectQualifier := fmt.Sprintf("0x%d", data[objectStart+2])
// fmt.Println(objectQualifier)
// src = []byte{data[objectStart], data[objectStart+1]}
// dst = make([]byte, hex.EncodedLen(len(src)))
// hex.Encode(dst, src)
// prefixCode := fmt.Sprintf("0x%s", dst)
// fmt.Println(prefixCode)
}
func (d *DNP3) IsDNP3(bytesRead []byte) bool {
if len(bytesRead) >= MIN_HEADER_LENGTH && binary.BigEndian.Uint16(bytesRead[0:2]) == START_FIELD {
return true
}
return false
}
func (d *DNP3) isMaster(bytesRead []byte) bool {
intValue := int((bytesRead[3] & 0x80) >> 7)
var boolValue bool = intValue != 0
return boolValue
}
func (d *DNP3) hexConvert(byteArray []byte) string {
return "0x" + hex.EncodeToString(byteArray)
}
func (d *DNP3) isMultiPart(bytesRead []byte) bool {
var FirstOfMulti01 byte = 0x40
var NotFirstNotLast00 byte = 0x00
var FinalFrame10 byte = 0x80
var OneFrame11 byte = 0xC0
TpFinFir := bytesRead[10] & 0xC0
switch TpFinFir {
case FirstOfMulti01:
return false
case NotFirstNotLast00:
return false
case FinalFrame10:
return true
case OneFrame11:
return true
}
return false
}
// Contains tells whether a contains x.
// func (d *DNP3) contains(a []byte, x int) bool {
// for _, n := range a {
// if x == n {
// return true
// }
// }
// return false
// }
| {
return d.restOfData
} | identifier_body |
dnp3.go | // Copyright 2019, The GoPacket Authors, All rights reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the LICENSE file in the root of the source
// tree.
//
//******************************************************************************
package layers
import (
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"strconv"
"github.com/google/gopacket"
)
//******************************************************************************
//
// DNP3 Decoding Layer
// ------------------------------------------
// This file provides a GoPacket decoding layer for DNP3.
//
//******************************************************************************
// DNP3 is the struct for storing DNP3 packet headers.
const (
MIN_HEADER_LENGTH = 10
START_FIELD = 0x0564
)
var FCodes = map[byte]string{
0: "Confirm",
1: "Read",
2: "Write",
3: "Select",
4: "Operate",
5: "Direct Operate",
6: "Direct Operate No ACK",
7: "Immediate Freeze",
8: "Immediate Freeze No ACK",
9: "Freeze and Clear",
10: "Freeze and Clear No ACK",
11: "Freeze With Time",
12: "Freeze With Time No ACK",
13: "Cold Restart",
14: "Warm Restart",
15: "Initialize Data",
16: "Initialize Application",
17: "Start Application",
18: "Stop Application",
19: "Save Configuration",
20: "Enable Spontaneous Msg",
21: "Disable Spontaneous Msg",
22: "Assign Classes",
23: "Delay Measurement",
24: "Record Current Time",
25: "Open File",
26: "Close File",
27: "Delete File",
28: "Get File Info",
29: "Authenticate File",
30: "Abort File",
31: "Activate Config",
32: "Authentication Request",
33: "Authentication Error",
129: "Response",
130: "Unsolicited Response",
131: "Authentication Response",
}
// "-" Reserved or Obsolete
var PfCodes = map[byte]string{
0: "Reset of Remote Link", // 0x10
1: "Reset of User Process",
2: "Test Function For Link", // 0x12
3: "User Data", // 0x13
4: "Unconfirmed User Data", // 0x14
5: "-",
6: "-",
7: "-",
8: "-",
9: "Request Link Status", // 0x19
10: "-",
11: "-",
12: "-",
13: "-",
14: "-",
15: "-",
}
var SfCodes = map[byte]string{
0: "ACK", // 0x00
1: "NAK", // 0x01
2: "-",
3: "-",
4: "-",
5: "-",
6: "-",
7: "-",
8: "-",
9: "-",
10: "-",
11: "Status of Link", // 0x0B
12: "-",
13: "-",
14: "Link Service Not Functioning",
15: "Link Service Not Used or Implemented", // 0x0F
}
/***************************************************************************/
/* Application Layer Internal Indication (IIN) bits */
/* 2 Bytes, message formatting: [First Octet] | [Second Octet] */
/***************************************************************************/
var IINCodes = map[string]string{
/* Octet 1 */
"0x0100": "Broadcast message rx'd",
"0x0200": "Class 1 Data Available",
"0x0400": "Class 2 Data Available",
"0x0800": "Class 3 Data Available",
"0x1000": "Time Sync Req'd from Master",
"0x2000": "Outputs in Local Mode",
"0x4000": "Device Trouble",
"0x8000": "Device Restart",
/* Octet 2 */
"0x0001": "Function code not implemented",
"0x0002": "Requested Objects Unknown",
"0x0004": "Parameters Invalid or Out of Range",
"0x0008": "Event Buffer Overflow",
"0x0010": "Operation Already Executing",
"0x0020": "Device Configuration Corrupt",
"0x0040": "Reserved",
"0x0080": "Reserved",
}
/***************************************************************************/
/* Application Layer Object Prefix codes bits */
/***************************************************************************/
var ObjPrefixCodes = map[byte]string{
0: "Objects packed without a prefix",
1: "Objects prefixed with 1-octet index",
2: "Objects prefixed with 2-octet index",
3: "Objects prefixed with 4-octet index",
4: "Objects prefixed with 1-octet object size",
5: "Objects prefixed with 2-octet object size",
6: "Objects prefixed with 4-octet object size",
7: "Reserved",
}
/***************************************************************************/
/* Application Layer Object Range Specifier codes bits                     */
/***************************************************************************/
var ObjRangeSpecifierCodes = map[byte]string{
0: "8-bit Start and Stop Indices in Range Field",
1: "16-bit Start and Stop Indices in Range Field",
2: "32-bit Start and Stop Indices in Range Field",
3: "8-bit Absolute Address in Range Field",
4: "16-bit Absolute Address in Range Field",
5: "32-bit Absolute Address in Range Field",
6: "Length of Range field is 0 (no range field)",
7: "8-bit Single Field Quantity",
8: "16-bit Single Field Quantity",
9: "32-bit Single Field Quantity",
10: "Reserved",
11: "Free-format Qualifier, range field has 1 octet count of objects",
12: "Reserved",
13: "Reserved",
14: "Reserved",
15: "Reserved",
}
var (
errDNP3PacketTooShort = errors.New("DNP3 packet too short")
)
// DNP3 is the struct for storing DNP3 packet headers.
type DNP3 struct {
BaseLayer // Stores the packet bytes and payload bytes.
DNP3DataLinkLayer DNP3DataLinkLayer
DNP3TransportLayer DNP3TransportLayer
DNP3ApplicationLayer DNP3ApplicationLayer
SomeByte byte
AnotherByte byte
restOfData []byte
}
type DNP3DataLinkLayer struct {
Start string
Length int
Control struct {
ControlByte string
IsMaster int `json:"Is Master"`
PRM int `json:"Primary"`
FCB int `json:"Frame Count Bit"`
FCV int `json:"Frame Count Valid"`
FUNC string `json:"Function Code"`
}
Destination int
Source int
CRC string
}
type DNP3TransportLayer struct {
TransportByte string
Final int
First int
Sequence int
}
type DNP3ApplicationLayer struct {
Control struct {
ControlByte string
First int
Final int
Confirm int
Unsolicited int
Sequence int
}
Function string `json:"Function Code"`
IINCode string `json:"Internal Indication (IIN)"`
}
type DNP3AppObject struct {
Group int
Variation int
Qualifier int
RangeStart int
RangeStop int
DataType int
Length int
}
func (d *DNP3) LayerType() gopacket.LayerType { return LayerTypeDNP3 }
func (d *DNP3) LayerContents() []byte {
return []byte{d.SomeByte, d.AnotherByte}
}
func (d *DNP3) LayerPayload() []byte {
return d.restOfData
}
func (d *DNP3) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
// If the data block is too short to be a DNP3 layer, then return an error.
if len(data) < MIN_HEADER_LENGTH {
df.SetTruncated()
return errDNP3PacketTooShort
}
d.linkLayer(data)
d.transportLayer(data)
d.applicationLayer(data)
return nil
}
func decodeDNP3(data []byte, p gopacket.PacketBuilder) error {
// Attempt to decode the byte slice.
d := &DNP3{}
err := d.DecodeFromBytes(data, p)
if err != nil {
return err
}
// If the decoding worked, add the layer to the packet and set it
// as the application layer too, if there isn't already one.
p.AddLayer(d)
p.SetApplicationLayer(d)
d.BaseLayer = BaseLayer{Contents: data}
d.BaseLayer.Payload = nil
return p.NextDecoder(gopacket.LayerTypePayload)
}
// CanDecode implements gopacket.DecodingLayer.
func (d *DNP3) CanDecode() gopacket.LayerClass {
return LayerTypeDNP3
}
// NextLayerType returns the layer type contained by this DecodingLayer.
func (d *DNP3) NextLayerType() gopacket.LayerType {
return gopacket.LayerTypePayload
}
// Payload returns nil; the DNP3 data is exposed through the decoded link, transport, and application layer fields instead.
func (d *DNP3) Payload() []byte {
return nil
}
func appObject(bytesRead []byte) {
object := bytesRead[22:]
// indexSize := uint(object[2] & 0x70 >> 4)
// QualifierCode := uint(object[2] & 0x0F)
// fmt.Println(indexSize)
// fmt.Println(QualifierCode)
group := int(object[0])
variation := int(object[1])
qualifier := int(object[2])
rangeStart := int(object[3])
rangeStop := int(object[4])
dataType := int(object[5])
length := int(object[6])
appObject := DNP3AppObject{
Group: group,
Variation: variation,
Qualifier: qualifier,
RangeStart: rangeStart,
RangeStop: rangeStop,
DataType: dataType,
Length: length,
}
out, err := json.Marshal(appObject)
if err != nil {
panic(err)
}
fmt.Println(string(out))
}
func (d *DNP3) linkLayer(data []byte) {
start := d.hexConvert(data[0:2])
d.DNP3DataLinkLayer.Start = start
length := int(data[2])
d.DNP3DataLinkLayer.Length = length
ctlControl := d.hexConvert([]byte{data[3]})
d.DNP3DataLinkLayer.Control.ControlByte = ctlControl
IsMaster := int((data[3] & 0x80) >> 7)
d.DNP3DataLinkLayer.Control.IsMaster = IsMaster
PRM := int((data[3] & 0x40) >> 6)
d.DNP3DataLinkLayer.Control.PRM = PRM
FCB := int((data[3] & 0x20) >> 5)
d.DNP3DataLinkLayer.Control.FCB = FCB
FCV := int((data[3] & 0x10) >> 4)
d.DNP3DataLinkLayer.Control.FCV = FCV
FUNCCODE := data[3] & 0x0F
ctlFUNCCODE := fmt.Sprintf("%d", FUNCCODE)
var ctlFUNC string
if PRM == 0x00 {
ctlFUNC = SfCodes[FUNCCODE]
}
if PRM == 0x01 |
ctlFUNC = ctlFUNC + " (" + ctlFUNCCODE + ")"
d.DNP3DataLinkLayer.Control.FUNC = ctlFUNC
// Destination and source are 16-bit little-endian addresses (0 to 65535).
destination := fmt.Sprintf("%02x%02x", data[5], data[4])
destinationInt64, _ := strconv.ParseUint(destination, 16, 16)
d.DNP3DataLinkLayer.Destination = int(destinationInt64)
source := fmt.Sprintf("%02x%02x", data[7], data[6])
sourceInt64, _ := strconv.ParseUint(source, 16, 16)
d.DNP3DataLinkLayer.Source = int(sourceInt64)
// TODO: verify the received CRC by recalculating it over the preceding header octets.
crc := fmt.Sprintf("0x%02x%02x", data[9], data[8])
d.DNP3DataLinkLayer.CRC = crc
}
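// Worked example (illustrative): for a frame starting 05 64 0a c4 01 00 02 00 ...,
// linkLayer yields Start="0x0564", Length=10, ControlByte="0xc4", IsMaster=1, PRM=1,
// FCB=0, FCV=0, FUNC="Unconfirmed User Data (4)", Destination=1 and Source=2.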
func (d *DNP3) transportLayer(data []byte) {
transport := fmt.Sprintf("0x%x", data[10])
d.DNP3TransportLayer.TransportByte = transport
final := data[10] & 0x80 >> 7
d.DNP3TransportLayer.Final = int(final)
first := data[10] & 0x40 >> 6
d.DNP3TransportLayer.First = int(first)
sequence := data[10] & 0x3f // 6bit
d.DNP3TransportLayer.Sequence = int(sequence)
}
func (d *DNP3) applicationLayer(data []byte) {
// /***************************************************************************/
// /* Application Layer Bit-Masks */
// /***************************************************************************/
// #define DNP3_AL_UNS 0x10
// #define DNP3_AL_CON 0x20
// #define DNP3_AL_FIN 0x40
// #define DNP3_AL_FIR 0x80
// #define DNP3_AL_SEQ 0x0f
// #define DNP3_AL_FUNC 0xff
controlByte := fmt.Sprintf("0x%x", data[11])
d.DNP3ApplicationLayer.Control.ControlByte = controlByte
first := data[11] & 0x80 >> 7
d.DNP3ApplicationLayer.Control.First = int(first)
final := data[11] & 0x40 >> 6
d.DNP3ApplicationLayer.Control.Final = int(final)
confirm := data[11] & 0x20 >> 5
d.DNP3ApplicationLayer.Control.Confirm = int(confirm)
unsolicited := data[11] & 0x10 >> 4
d.DNP3ApplicationLayer.Control.Unsolicited = int(unsolicited)
sequence := data[11] & 0x0f
d.DNP3ApplicationLayer.Control.Sequence = int(sequence)
functionCode := data[12]
// TODO: refactor this hex convert
src := []byte{functionCode}
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
FUNC := fmt.Sprintf("0x%s", dst)
function := FCodes[functionCode] + " (" + FUNC + ")"
d.DNP3ApplicationLayer.Function = function
objectStart := 13
if d.DNP3DataLinkLayer.Control.IsMaster == 0 {
objectStart = 15
// TODO: refactor this hex convert
src := []byte{data[13], data[14]}
dst := make([]byte, hex.EncodedLen(len(src)))
hex.Encode(dst, src)
IIN := fmt.Sprintf("0x%s", dst)
IINCode := IINCodes[IIN] + " (" + IIN + ")"
d.DNP3ApplicationLayer.IINCode = IINCode
}
dataSize := len(data[12:])
fmt.Printf("DataSize %d\n", dataSize)
switch functionCode {
case 0: // Confirm
case 1: // Read
case 2: // Write
case 3: // Select
case 4: // Operate
case 5: // Direct Operate
case 6: // Direct Operate No ACK
case 7: // Immediate Freeze
case 8: // Immediate Freeze No ACK
case 9: // Freeze and Clear
case 10: // Freeze and Clear No ACK
case 11: // Freeze With Time
case 12: // Freeze With Time No ACK
case 13: // Cold Restart
case 14: // Warm Restart
case 15: // Initialize Data
case 16: // Initialize Application
case 17: // Start Application
case 18: // Stop Application
case 19: // Save Configuration
case 20: // Enable Spontaneous Msg
case 21: // Disable Spontaneous Msg
case 22: // Assign Classes
case 23: // Delay Measurement
case 24: // Record Current Time
case 25: // Open File
case 26: // Close File
case 27: // Delete File
case 28: // Get File Info
case 29: // Authenticate File
case 30: // Abort File
case 31: // Activate Config
case 32: // Authentication Request
case 33: // Authentication Error
case 129: // Response
case 130: // Unsolicited Response
case 131: // Authentication Response
}
objTypeField := binary.BigEndian.Uint16([]byte{data[objectStart], data[objectStart+1]})
objectGroup := objTypeField & 0xFF00
objectVariation := objTypeField & 0x00FF
object := d.hexConvert([]byte{data[objectStart], data[objectStart+1]})
objectPrefixCode := (data[objectStart+2] & 0x70) >> 4 // OPC, bits x111xxxx shifted down to index ObjPrefixCodes
objectRangeSpecifierCode := data[objectStart+2] & 0x0F // RSC
fmt.Println(object)
fmt.Println(objectGroup)
fmt.Println(objectVariation)
fmt.Printf("Prefix Code %d\n", objectPrefixCode)
fmt.Println(ObjPrefixCodes[objectPrefixCode])
fmt.Printf("Range Specifier Code %d\n", objectRangeSpecifierCode) // 6 means no range field
fmt.Println(ObjRangeSpecifierCodes[objectRangeSpecifierCode])
fmt.Println(d.hexConvert([]byte{data[objectStart+3]}))
offset := objectStart + 3
rangebytes := 0
fmt.Println(offset)
switch objectRangeSpecifierCode {
case 0:
// start := offset
numItems := int(data[offset+1]) - int(data[offset]) + 1
rangebytes = 2
fmt.Println(numItems)
pointAddress := int(data[offset])
fmt.Println(pointAddress)
// num_items = ( tvb_get_guint8(tvb, offset+1) - tvb_get_guint8(tvb, offset) + 1);
// proto_item_set_generated(range_item);
// al_ptaddr = tvb_get_guint8(tvb, offset);
// proto_tree_add_item(range_tree, hf_dnp3_al_range_start8, tvb, offset, 1, ENC_LITTLE_ENDIAN);
// proto_tree_add_item(range_tree, hf_dnp3_al_range_stop8, tvb, offset + 1, 1, ENC_LITTLE_ENDIAN);
// rangebytes = 2;
case 1:
case 2:
case 3:
case 4:
case 5:
case 6:
case 7:
case 8:
case 9:
case 10:
case 11:
case 12:
case 13:
case 14:
case 15:
}
/* Move offset past any range field */
offset += rangebytes
fmt.Println(offset)
// RSCArrayFirst := []byte{0, 1, 2, 3, 4, 5}
// if d.contains(RSCArrayFirst, objectRangeSpecifierCode) {
// }
/* Special handling for Octet string objects as the variation is the length of the string */
// temp = objTypeField & 0xFF00
// if (temp == AL_OBJ_OCT) || (temp == AL_OBJ_OCT_EVT) {
// al_oct_len = al_obj & 0xFF
// al_obj = temp
// }
// objectGroup := data[objectStart] & 0x0f
// objectGroup := fmt.Sprintf("0x%x%x", data[objectStart], data[objectStart+1])
// fmt.Println(objectGroup)
// objectGroup, _ := strconv.Atoi(fmt.Sprintf("%d", data[objectStart]))
// objectVariation, _ := strconv.Atoi(fmt.Sprintf("%d", data[objectStart+1]))
// fmt.Println(objectGroup)
// fmt.Println(objectVariation)
/* Index Size (3-bits x111xxxx) */
// /* When Qualifier Code != 11 */
// #define AL_OBJQL_PREFIX_NI 0x00 /* Objects are Packed with no index */
// #define AL_OBJQL_PREFIX_1O 0x01 /* Objects are prefixed w/ 1-octet index */
// #define AL_OBJQL_PREFIX_2O 0x02 /* Objects are prefixed w/ 2-octet index */
// #define AL_OBJQL_PREFIX_4O 0x03 /* Objects are prefixed w/ 4-octet index */
// #define AL_OBJQL_PREFIX_1OS 0x04 /* Objects are prefixed w/ 1-octet object size */
// #define AL_OBJQL_PREFIX_2OS 0x05 /* Objects are prefixed w/ 2-octet object size */
// #define AL_OBJQL_PREFIX_4OS 0x06 /* Objects are prefixed w/ 4-octet object size */
// /* When Qualifier Code == 11 */
// #define AL_OBJQL_IDX11_1OIS 0x01 /* 1 octet identifier size */
// #define AL_OBJQL_IDX11_2OIS 0x02 /* 2 octet identifier size */
// #define AL_OBJQL_IDX11_4OIS 0x03 /* 4 octet identifier size */
// /* Qualifier Code (4-bits) */
// /* 4-bits ( xxxx1111 ) */
// #define AL_OBJQL_RANGE_SSI8 0x00 /* 00 8-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_SSI16 0x01 /* 01 16-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_SSI32 0x02 /* 02 32-bit Start and Stop Indices in Range Field */
// #define AL_OBJQL_RANGE_AA8 0x03 /* 03 8-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_AA16 0x04 /* 04 16-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_AA32 0x05 /* 05 32-bit Absolute Address in Range Field */
// #define AL_OBJQL_RANGE_R0 0x06 /* 06 Length of Range field is 0 (no range field) */
// #define AL_OBJQL_RANGE_SF8 0x07 /* 07 8-bit Single Field Quantity */
// #define AL_OBJQL_RANGE_SF16 0x08 /* 08 16-bit Single Field Quantity */
// #define AL_OBJQL_RANGE_SF32 0x09 /* 09 32-bit Single Field Quantity */
// /* 0x0A 10 Reserved */
// #define AL_OBJQL_RANGE_FF 0x0B /* 11 Free-format Qualifier, range field has 1 octet count of objects */
// /* 0x0C 12 Reserved */
// /* 0x0D 13 Reserved */
// /* 0x0E 14 Reserved */
// /* 0x0F 15 Reserved */
/***************************************************************************/
/* Application Layer Data Object Qualifier */
/***************************************************************************/
// /* Bit-Masks */
// #define AL_OBJQ_PREFIX 0x70 /* x111xxxx Masks Prefix from Qualifier */
// #define AL_OBJQ_RANGE 0x0F /* xxxx1111 Masks Range from Qualifier */
// objectQualifier := fmt.Sprintf("0x%d", data[objectStart+2])
// fmt.Println(objectQualifier)
// src = []byte{data[objectStart], data[objectStart+1]}
// dst = make([]byte, hex.EncodedLen(len(src)))
// hex.Encode(dst, src)
// prefixCode := fmt.Sprintf("0x%s", dst)
// fmt.Println(prefixCode)
}
func (d *DNP3) IsDNP3(bytesRead []byte) bool {
if len(bytesRead) >= MIN_HEADER_LENGTH && binary.BigEndian.Uint16(bytesRead[0:2]) == START_FIELD {
return true
}
return false
}
func (d *DNP3) isMaster(bytesRead []byte) bool {
intValue := int((bytesRead[3] & 0x80) >> 7)
var boolValue bool = intValue != 0
return boolValue
}
func (d *DNP3) hexConvert(byteArray []byte) string {
return "0x" + hex.EncodeToString(byteArray)
}
func (d *DNP3) isMultiPart(bytesRead []byte) bool {
var FirstOfMulti01 byte = 0x40
var NotFirstNotLast00 byte = 0x00
var FinalFrame10 byte = 0x80
var OneFrame11 byte = 0xC0
TpFinFir := bytesRead[10] & 0xC0
switch TpFinFir {
case FirstOfMulti01:
return false
case NotFirstNotLast00:
return false
case FinalFrame10:
return true
case OneFrame11:
return true
}
return false
}
// Contains tells whether a contains x.
// func (d *DNP3) contains(a []byte, x int) bool {
// for _, n := range a {
// if x == n {
// return true
// }
// }
// return false
// }
| {
ctlFUNC = PfCodes[FUNCCODE]
} | conditional_block |
config.js | 'use strict';
// load modules
const path = require('path');
const { flow, reduce } = require('lodash');
const { map, max } = require('lodash/fp');
const moment = require('moment-timezone');
const PUSH_NOTIFICATION_STAGING_URL = ''; // TODO([email protected])
const PUSH_NOTIFICATION_PRODUCTION_URL = ''; // TODO([email protected])
const { IntervalEnum, DevicePropertyEnum, LogTypeEnum, LogLevelEnum, NmsUpdateStatusEnum } = require('./lib/enums');
const toMs = (type, num) => moment.duration(num, type).asMilliseconds();
// declare internals
const internals = { Config: {} };
internals.Config.defaultNmsHostname = 'your.unmsdomain.com';
internals.Config.isTest = process.env.NODE_ENV === 'test';
internals.Config.isProduction = process.env.NODE_ENV === 'production';
internals.Config.isDevelopment = process.env.NODE_ENV === 'development';
internals.Config.demo = process.env.DEMO === 'true';
internals.Config.cloud = process.env.CLOUD === 'true';
internals.Config.hostTag = process.env.HOST_TAG;
// NOTE: if you use AWS Elastic Beanstalk, it sets many env variables like {CONTAINER_NAME}_PORT
// or {CONTAINER_NAME}_HOST which may collide with ours
internals.Config.redisHost = process.env.UNMS_REDISDB_HOST || '127.0.0.1';
internals.Config.redisPort = parseInt(process.env.UNMS_REDISDB_PORT, 10) || 6379;
internals.Config.redisDb = parseInt(process.env.UNMS_REDISDB_DB, 10) || 0;
internals.Config.fluentdHost = process.env.UNMS_FLUENTD_HOST || '127.0.0.1';
internals.Config.fluentdPort = parseInt(process.env.UNMS_FLUENTD_PORT, 10) || 24224;
internals.Config.nginxHost = process.env.UNMS_NGINX_HOST || '127.0.0.1';
internals.Config.nginxPort = parseInt(process.env.UNMS_NGINX_PORT, 10) || 12345;
internals.Config.defaultInternalHttpPort = 8081;
internals.Config.defaultInternalWsPort = 8082;
internals.Config.defaultHttpsPort = 443;
internals.Config.httpPort = parseInt(process.env.HTTP_PORT, 10) || internals.Config.defaultInternalHttpPort;
internals.Config.wsPort = parseInt(process.env.WS_PORT, 10) || internals.Config.defaultInternalWsPort;
internals.Config.publicHttpsPort = parseInt(process.env.PUBLIC_HTTPS_PORT, 10) || internals.Config.defaultHttpsPort;
internals.Config.publicWsPort = parseInt(process.env.PUBLIC_WS_PORT, 10) || internals.Config.publicHttpsPort;
internals.Config.secureLinkSecret = process.env.SECURE_LINK_SECRET || 'enigma';
internals.Config.leChallengeDir = './challenge';
internals.Config.useCustomSslCert = Boolean(process.env.SSL_CERT);
internals.Config.socketRPCTimeout = 15000; // generic rpc call timeout
internals.Config.socketRPCBackupTimeout = 180000; // backup rpc call timeout
internals.Config.pg = {
host: process.env.UNMS_PG_HOST || '127.0.0.1',
port: parseInt(process.env.UNMS_PG_PORT, 10) || 5432,
database: process.env.UNMS_PG_DATABASE || 'unms',
user: process.env.UNMS_PG_USER || 'postgres',
password: process.env.UNMS_PG_PASSWORD || '',
schema: process.env.UNMS_PG_SCHEMA || 'public',
};
internals.Config.rabbitMQ = {
host: process.env.UNMS_RABBITMQ_HOST || '127.0.0.1',
port: parseInt(process.env.UNMS_RABBITMQ_PORT, 10) || 5672,
exchange: 'unms',
};
internals.Config.publicDir = path.join(__dirname, 'public');
internals.Config.templatePaths = map(viewPath => path.join(__dirname, viewPath), [
'/public',
'/lib/api-docs/templates',
]);
internals.Config.publicPaths = map(viewPath => path.join(__dirname, viewPath), [
'/public',
'/lib/api-docs/public',
]);
// UNMS cloud related settings
internals.Config.cloudSettings = internals.Config.cloud ? {
domain: process.env.CLOUD_DOMAIN,
googleMapsApiKey: process.env.CLOUD_MAPS_API_KEY,
mapsProvider: process.env.CLOUD_MAPS_PROVIDER,
billingApiUrl: process.env.CLOUD_BILLING_API_URL,
ssoApiUrl: process.env.CLOUD_SSO_API_URL,
maintenanceWindowApiUrl: process.env.CLOUD_MAINTENANCE_WINDOW_API_URL,
redisInstanceId: process.env.UNMS_REDISDB_INSTANCE_ID,
smtpHostname: process.env.CLOUD_SMTP_HOSTNAME,
smtpPort: process.env.CLOUD_SMTP_PORT,
smtpUsername: process.env.CLOUD_SMTP_USERNAME,
smtpPassword: process.env.CLOUD_SMTP_PASSWORD,
smtpSender: process.env.CLOUD_SMTP_SENDER_ADDRESS,
smtpTimeout: process.env.CLOUD_SMTP_TIMEOUT,
smtpTlsAllowUnauthorized: process.env.CLOUD_SMTP_TLS_ALLOW_UNAUTHORIZED,
smtpSecurityMode: process.env.CLOUD_SMTP_SECURITY_MODE,
smtpAuthEnabled: process.env.CLOUD_SMTP_AUTH_ENABLED,
storage: {
gcsProjectId: process.env.CLOUD_GCS_PROJECT_ID,
gcsKeyFilename: process.env.CLOUD_GCS_KEY_FILENAME,
},
} : null;
internals.Config.branch = process.env.BRANCH || 'master';
internals.Config.unmsLatestVersionUrl =
`https://api.github.com/repos/Ubiquiti-App/UNMS/contents/latest-version?ref=${internals.Config.branch}`;
// discovery
internals.Config.discoveryScanTimeout = toMs('second', 5);
internals.Config.discoveryIpRangeMaxSize = 65536; // 2^16
// Reporting
internals.Config.sentryDSN =
'https://3a0cdfa562074c6ca018c01b666d37c5:[email protected]/120911';
// production log token in Logentries
internals.Config.logentriesToken = 'b740d3fa-9176-43b8-b259-58a6660590dc';
// affects password hashing time, see https://github.com/kelektiv/node.bcrypt.js#a-note-on-rounds
internals.Config.pwdSaltRounds = 8;
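// Illustrative only (not part of the original config): the bcrypt package referenced above
// would consume this value as its cost factor, e.g.
//   const bcrypt = require('bcrypt');
//   const hash = await bcrypt.hash(plaintextPassword, internals.Config.pwdSaltRounds);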
internals.Config.jwtToken = 'x-auth-token';
internals.Config.authStrategy = 'jwt';
internals.Config.defaultEmail = '[email protected]';
internals.Config.defaultUsername = 'ubnt';
internals.Config.defaultPwd = 'ubntubnt';
internals.Config.passwordTokenExpiry = toMs('minute', 30);
internals.Config.sessionTimeout = toMs('minute', 30);
internals.Config.extendedSessionTimeout = toMs('hour', 24);
internals.Config.hashAlgorithm = 'aes-256-cbc';
internals.Config.totpAuthSecretOptions = {
issuer: 'UNMS',
};
internals.Config.httpConnection = {
port: internals.Config.httpPort,
routes: {
cors: {
origin: ['*'],
headers: ['Accept', 'Authorization', 'Content-Type', 'If-None-Match', internals.Config.jwtToken],
exposedHeaders: [internals.Config.jwtToken],
},
files: {
relativeTo: internals.Config.publicDir,
},
security: true,
},
state: {
strictHeader: false,
},
};
internals.Config.deviceConfigBackup = {
dir: './data/config-backups',
minimumFiles: 6,
ttl: toMs('day', 30),
multiBackup: {
dir: 'multi',
ttl: toMs('hour', 1),
},
fileMaxBytes: 10000000,
queue: {
delay: toMs('minute', 2),
concurency: 5,
},
};
internals.Config.deviceTypes = {
all: ['olt', 'erouter'],
};
internals.Config.deviceServices = {
ntpServers: {
ntpServer1: '0.pool.ntp.org',
ntpServer2: '1.pool.ntp.org',
},
};
internals.Config.statisticsIntervals = {
minute: { length: toMs('minute', 2), period: toMs('second', 1) },
hour: { length: toMs('hour', 1), period: toMs('second', 15) },
day: { length: toMs('day', 1), period: toMs('minute', 15) },
month: { length: toMs('month', 1), period: toMs('hour', 8) },
quarter: { length: toMs('month', 3), period: toMs('hour', 24) },
year: { length: toMs('year', 1), period: toMs('hour', 106) },
};
internals.Config.statisticsIntervalMapping = {
[IntervalEnum.Hour]: ['hour'],
[IntervalEnum.Day]: ['day'],
[IntervalEnum.Month]: ['month'],
[IntervalEnum.Quarter]: ['quarter'],
[IntervalEnum.Year]: ['year'],
};
const findMaxStatisticsIntervalPeriod = flow(
map(name => internals.Config.statisticsIntervals[name].period),
max
);
internals.Config.statisticsIntervalPeriodMapping = reduce(
internals.Config.statisticsIntervalMapping,
(acc, values, interval) => (Object.assign(acc, { [interval]: findMaxStatisticsIntervalPeriod(values) })),
{}
);
const findMaxStatisticsIntervalLength = flow(
map(name => internals.Config.statisticsIntervals[name].length),
max
);
internals.Config.statisticsIntervalLengthMapping = reduce(
internals.Config.statisticsIntervalMapping,
(acc, values, interval) => (Object.assign(acc, { [interval]: findMaxStatisticsIntervalLength(values) })),
{}
);
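// Derived values, shown for illustration (these follow from the tables above, they are not extra config):
// statisticsIntervalPeriodMapping[IntervalEnum.Hour] -> toMs('second', 15), i.e. 15000 ms
// statisticsIntervalLengthMapping[IntervalEnum.Quarter] -> toMs('month', 3)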
internals.Config.deviceLog = {
[DevicePropertyEnum.Cpu]: {
name: 'CPU',
unit: '%',
limit: 90,
interval: toMs('second', 30),
level: LogLevelEnum.Warning,
logType: LogTypeEnum.DeviceCpuOverLimit,
},
[DevicePropertyEnum.Ram]: {
name: 'RAM',
unit: '%',
limit: 90,
interval: toMs('second', 10),
level: LogLevelEnum.Warning,
logType: LogTypeEnum.DeviceRamOverLimit,
},
};
internals.Config.outages = {
unmsStartGracePeriod: toMs('minute', 2),
lagGracePeriod: toMs('second', 5), // avoid responding to possible lagging device's communication
maxAge: toMs('month', 3),
};
internals.Config.siteImages = { | publicDir: './public',
imagesUrl: 'site-images',
imagesDir: './public/site-images', // images directory is mapped to /data/images in docker
maxBytes: 20000000, // 20MB
maxResolution: 10000 ** 2, // limit in pixels
thumb: {
width: 700, // px
height: 700, // px
},
};
internals.Config.nmsBackup = {
dir: './data/unms-backups',
restoreDir: 'restore',
downloadDir: 'download',
downloadTtl: toMs('hour', 1),
restoreTtl: toMs('hour', 1),
fileMaxBytes: 1000000000, // 1GB
backupFormat: '1',
};
internals.Config.logs = {
dir: './data/logs',
ttl: toMs('day', 5),
downloadDir: 'supportinfo',
downloadTtl: toMs('hour', 1),
packageName: 'logs.tgz',
};
internals.Config.import = {
dir: './data/import',
};
internals.Config.firmwares = {
dir: path.join(__dirname, 'public', 'firmwares'), // firmwares directory is mapped to /data/firmwares in docker
publicDir: 'firmwares',
urlExpiration: toMs('hour', 1),
allowAutoUpdateUbntFirmwares: true,
fetchUbntFirmwaresConcurrency: 1,
fetchUbntFirmwaresInterval: toMs('hour', 1),
};
internals.Config.interfaces = {
mtuDefault: 1500,
mtuMin: 68,
mtuMax: 2018,
pppoeMtuDefault: 1492,
pppoeMtuMin: 68,
pppoeMtuMax: 1500,
};
internals.Config.periodSelectOptions = {
defaults: {
log: toMs('hour', 1),
outage: toMs('hour', 1),
},
};
internals.Config.eventLog = {
mailNotification: {
level: [LogLevelEnum.Error],
type: [LogTypeEnum.DeviceReappear],
maxItems: 100,
failedLogPeriod: toMs('day', 1),
},
maxAge: toMs('day', 30),
};
internals.Config.nmsUpdate = {
dir: './data/update',
requestFile: './data/update/request-update',
daemonActiveLimit: toMs('minute', 3),
lastUpdateFile: './data/update/last-update',
logFile: './data/update/update.log',
timeouts: {
[NmsUpdateStatusEnum.Requested]: toMs('minute', 2),
[NmsUpdateStatusEnum.Started]: toMs('minute', 1),
[NmsUpdateStatusEnum.Updating]: toMs('minute', 15),
},
backupFile: `${internals.Config.nmsBackup.dir}/update-backup.tar.gz`,
};
internals.Config.fixtures = {
site: {
count: 30,
},
endpoint: {
minCount: 1,
maxCount: 5,
},
device: {
count: 100,
},
};
internals.Config.pushNotificationServiceUrl =
process.env.PUSH_NOTIFICATION_URL ||
(internals.Config.isProduction ? PUSH_NOTIFICATION_PRODUCTION_URL : PUSH_NOTIFICATION_STAGING_URL);
// push notifications currently on self-signed cert
process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0';
internals.Config.connectionLogEventInterval = toMs('day', 1);
internals.Config.apiRateLimit = {
userCacheExpiresIn: toMs('minute', 2),
};
internals.Config.newsFeedUrl =
internals.Config.isProduction ?
'https://unms.com/assets/notifications/news.json' :
'https://dev-unms-micro.ubnt.com/assets/notifications/news.json';
module.exports = internals.Config; | random_line_split |
|
packet.go | package minq
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"encoding/hex"
"fmt"
)
// Encode a QUIC packet.
/*
Long header
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+
|1| Type (7) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Version (32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|DCIL(4)|SCIL(4)|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// Initial Header: same as long header but with Token
+-+-+-+-+-+-+-+-+
|1| 0x7f |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Version (32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|DCIL(4)|SCIL(4)|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Token Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Token (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+
|0|K|1|1|0|R R R|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Protected Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
const (
packetFlagLongHeader = byte(0x80)
packetFlagK = byte(0x40)
packetFlagShortHeader = byte(0x30)
)
// This packet type differs considerably from the spec. It includes both
// long and short headers in the same value space. Long headers are from
// 0-0x7f (inclusive); short headers are represented by packetTypeProtectedShort (0x00).
type packetType byte
const (
packetTypeInitial = packetType(0x7f)
packetTypeRetry = packetType(0x7e)
packetTypeHandshake = packetType(0x7d)
packetType0RTTProtected = packetType(0x7c)
packetTypeProtectedShort = packetType(0x00) // Not a real type
)
func (pt packetType) isLongHeader() bool {
return pt&packetType(packetFlagLongHeader) != 0
}
func (pt packetType) isProtected() bool {
if !pt.isLongHeader() {
return true
}
switch pt & 0x7f {
case packetTypeInitial, packetTypeHandshake, packetTypeRetry:
return false
}
return true
}
func (pt packetType) String() string {
switch pt {
case packetTypeInitial:
return "Initial"
case packetTypeRetry:
return "Retry"
case packetTypeHandshake:
return "Handshake"
case packetType0RTTProtected:
return "0-RTT"
case packetTypeProtectedShort:
return "1-RTT"
default:
return fmt.Sprintf("%x", uint8(pt))
}
}
// kCidDefaultLength is the length of connection ID we generate.
// TODO: make this configurable.
const kCidDefaultLength = 5
// ConnectionId identifies the connection that a packet belongs to.
type ConnectionId []byte
// String stringifies a connection ID in the natural way.
func (c ConnectionId) String() string {
return hex.EncodeToString(c)
}
// EncodeLength produces the length encoding used in the long packet header.
func (c ConnectionId) EncodeLength() byte {
if len(c) == 0 {
return 0
}
assert(len(c) >= 4 && len(c) <= 18)
return byte(len(c) - 3)
}
// The PDU definition for the header.
// These types are capitalized so that |codec| can use them.
type packetHeader struct {
// Type is the on-the-wire form of the packet type.
// Consult getHeaderType if you want a value that corresponds to the
// definition of packetType.
Type packetType
Version VersionNumber
ConnectionIDLengths byte
DestinationConnectionID ConnectionId
SourceConnectionID ConnectionId
TokenLength uint8
Token []byte
PayloadLength uint64 `tls:"varint"`
// In order to decode a short header, the length of the connection
// ID must be set in |shortCidLength| before decoding.
shortCidLength uintptr
}
func (p packetHeader) String() string {
ht := "SHORT"
if p.Type.isLongHeader() {
ht = "LONG"
}
return fmt.Sprintf("%s PT=%v", ht, p.getHeaderType())
}
func (p *packetHeader) getHeaderType() packetType {
if p.Type.isLongHeader() {
return p.Type & 0x7f
}
return packetTypeProtectedShort
}
type packet struct {
packetHeader
PacketNumber uint64 // Never more than 32 bits on the wire.
payload []byte
}
// This reads from p.ConnectionIDLengths.
func (p packetHeader) ConnectionIDLengths__length() uintptr {
if p.Type.isLongHeader() {
return 1
}
return 0
}
func (p packetHeader) TokenLength__length() uintptr {
if p.getHeaderType() != packetTypeInitial {
assert(len(p.Token) == 0)
return 0
}
return 1
}
func (p packetHeader) Token__length() uintptr {
if p.getHeaderType() != packetTypeInitial {
assert(len(p.Token) == 0)
return 0
}
return uintptr(p.TokenLength)
}
func (p packetHeader) DestinationConnectionID__length() uintptr {
if !p.Type.isLongHeader() {
return p.shortCidLength
}
l := p.ConnectionIDLengths >> 4
if l != 0 {
l += 3
}
return uintptr(l)
}
func (p packetHeader) SourceConnectionID__length() uintptr {
if !p.Type.isLongHeader() {
return 0
}
l := p.ConnectionIDLengths & 0xf
if l != 0 {
l += 3
}
return uintptr(l)
}
func (p packetHeader) PayloadLength__length() uintptr {
if p.Type.isLongHeader() {
return codecDefaultSize
}
return 0
}
func (p packetHeader) Version__length() uintptr {
if p.Type.isLongHeader() {
return 4
}
return 0
}
func newPacket(pt packetType, destCid ConnectionId, srcCid ConnectionId, ver VersionNumber, pn uint64, payload []byte, aeadOverhead int) *packet {
if pt == packetTypeProtectedShort {
// Only support writing the 32-bit packet number.
pt = packetType(0x2 | packetFlagShortHeader)
srcCid = nil
} else {
pt = pt | packetType(packetFlagLongHeader)
}
lengths := (destCid.EncodeLength() << 4) | srcCid.EncodeLength()
return &packet{
packetHeader: packetHeader{
Type: pt,
ConnectionIDLengths: lengths,
DestinationConnectionID: destCid,
SourceConnectionID: srcCid,
Version: ver,
PayloadLength: uint64(len(payload) + 4 + aeadOverhead),
},
PacketNumber: pn,
payload: payload,
}
}
type versionNegotiationPacket struct {
Versions []byte
}
func newVersionNegotiationPacket(versions []VersionNumber) *versionNegotiationPacket {
var buf bytes.Buffer
for _, v := range versions {
buf.Write(encodeArgs(v))
}
return &versionNegotiationPacket{buf.Bytes()}
}
/*
We don't use these.
func encodePacket(c ConnectionState, aead Aead, p *Packet) ([]byte, error) {
hdr, err := encode(&p.packetHeader)
if err != nil {
return nil, err
}
b, err := aead.protect(p.packetHeader.PacketNumber, hdr, p.payload)
if err != nil {
return nil, err
}
return encodeArgs(hdr, b), nil
}
func decodePacket(c ConnectionState, aead Aead, b []byte) (*Packet, error) {
// Parse the header
var hdr packetHeader
br, err := decode(&hdr, b)
if err != nil {
return nil, err
}
hdr.PacketNumber = c.expandPacketNumber(hdr.PacketNumber)
pt, err := aead.unprotect(hdr.PacketNumber, b[0:br], b[br:])
if err != nil {
return nil, err
}
return &Packet{hdr, pt}, nil
}
*/
func dumpPacket(payload []byte) string {
first := true
ret := fmt.Sprintf("%d=[", len(payload))
for len(payload) > 0 {
if !first {
ret += ", "
}
first = false
n, f, err := decodeFrame(payload)
if err != nil {
ret += fmt.Sprintf("Undecoded: [%x]", payload)
break
}
payload = payload[n:]
// TODO([email protected]): Not sure why %v doesn't work
ret += f.String()
}
ret += "]"
return ret
}
type pneCipherFactory interface {
create(sample []byte) cipher.Stream
}
type pneCipherFactoryAES struct {
block cipher.Block
}
func newPneCipherFactoryAES(key []byte) pneCipherFactory {
inner, err := aes.NewCipher(key)
assert(err == nil)
if err != nil {
return nil
}
return &pneCipherFactoryAES{block: inner}
}
func (f *pneCipherFactoryAES) create(sample []byte) cipher.Stream {
if len(sample) != 16 {
return nil
}
return cipher.NewCTR(f.block, sample)
}
func xorPacketNumber(hdr *packetHeader, hdrlen int, pnbuf []byte, p []byte, factory pneCipherFactory) error {
logf(logTypeTrace, "PNE Operation: hdrlen=%v, hdr=%x, payload=%x", hdrlen, p[:hdrlen], p)
// The packet must be at least long enough to contain
// the header, plus a minimum 1-byte PN, plus the sample.
sample_length := 16
if sample_length > len(p)-(hdrlen+1) {
logf(logTypePacket, "Packet too short")
return nil
}
// Now compute the offset
sample_offset := hdrlen + 4
if sample_offset+sample_length > len(p) {
sample_offset = len(p) - sample_length
}
sample := p[sample_offset : sample_offset+sample_length]
logf(logTypeTrace, "PNE sample_offset=%d sample=%x", sample_offset, sample)
stream := factory.create(sample)
stream.XORKeyStream(pnbuf, p[hdrlen:hdrlen+len(pnbuf)])
return nil
}
var pnPatterns = []struct {
prefix byte
mask byte
length int
}{
{
0, 0x80, 1,
},
{
0x80, 0xc0, 2,
},
{
0xc0, 0xc0, 4,
},
}
const ()
func encodePacketNumber(pn uint64, l int) []byte {
var buf bytes.Buffer
i := 0
for i, _ = range pnPatterns {
if pnPatterns[i].length == l {
break
}
}
uintEncodeInt(&buf, pn, uintptr(l))
b := buf.Bytes()
b[0] &= ^pnPatterns[i].mask
b[0] |= pnPatterns[i].prefix
return b
}
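// Round-trip sketch (illustrative addition, assuming the codec writes packet numbers
// big-endian): encodePacketNumber(0x12345, 4) produces c0 01 23 45 -- the 0xc0 prefix marks
// a 4-byte encoding -- and decodePacketNumber strips the prefix to recover 0x12345, length 4.
func examplePacketNumberRoundTrip() (uint64, int, error) {
enc := encodePacketNumber(0x12345, 4)
return decodePacketNumber(enc)
}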
func decodePacketNumber(buf []byte) (uint64, int, error) {
if len(buf) < 1 {
return 0, 0, fmt.Errorf("Zero-length packet number")
}
i := 0
for i, _ = range pnPatterns {
if pnPatterns[i].mask&buf[0] == pnPatterns[i].prefix {
break
}
}
pat := &pnPatterns[i]
if len(buf) < pat.length |
buf = dup(buf[:pat.length])
buf[0] &= ^pat.mask
return uintDecodeIntBuf(buf), pat.length, nil
}
| {
return 0, 0, fmt.Errorf("Buffer too short for packet number (%v < %v)", len(buf), pat.length)
} | conditional_block |
packet.go | package minq
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"encoding/hex"
"fmt"
)
// Encode a QUIC packet.
/*
Long header
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+
|1| Type (7) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Version (32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|DCIL(4)|SCIL(4)|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// Initial Header: same as long header but with Token
+-+-+-+-+-+-+-+-+
|1| 0x7f |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Version (32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|DCIL(4)|SCIL(4)|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Token Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Token (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+
|0|K|1|1|0|R R R|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Protected Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
const (
packetFlagLongHeader = byte(0x80)
packetFlagK = byte(0x40)
packetFlagShortHeader = byte(0x30)
)
// This packet type differs considerably from the spec. It includes both
// long and short headers in the same value space. Long headers are from
// 0-0x7f (inclusive); short headers are always represented as 0xff.
type packetType byte
const (
packetTypeInitial = packetType(0x7f)
packetTypeRetry = packetType(0x7e)
packetTypeHandshake = packetType(0x7d)
packetType0RTTProtected = packetType(0x7c)
packetTypeProtectedShort = packetType(0x00) // Not a real type
)
func (pt packetType) isLongHeader() bool {
return pt&packetType(packetFlagLongHeader) != 0
}
func (pt packetType) isProtected() bool {
if !pt.isLongHeader() {
return true
}
switch pt & 0x7f {
case packetTypeInitial, packetTypeHandshake, packetTypeRetry:
return false
}
return true
}
func (pt packetType) String() string {
switch pt {
case packetTypeInitial:
return "Initial"
case packetTypeRetry:
return "Retry"
case packetTypeHandshake:
return "Handshake"
case packetType0RTTProtected:
return "0-RTT"
case packetTypeProtectedShort:
return "1-RTT"
default:
return fmt.Sprintf("%x", uint8(pt))
}
}
// kCidDefaultLength is the length of connection ID we generate.
// TODO: make this configurable.
const kCidDefaultLength = 5
// ConnectionId identifies the connection that a packet belongs to.
type ConnectionId []byte
// String stringifies a connection ID in the natural way.
func (c ConnectionId) String() string {
return hex.EncodeToString(c)
}
// EncodeLength produces the length encoding used in the long packet header.
func (c ConnectionId) EncodeLength() byte {
if len(c) == 0 {
return 0
}
assert(len(c) >= 4 && len(c) <= 18)
return byte(len(c) - 3)
}
// The PDU definition for the header.
// These types are capitalized so that |codec| can use them.
type packetHeader struct {
// Type is the on-the-wire form of the packet type.
// Consult getHeaderType if you want a value that corresponds to the
// definition of packetType.
Type packetType
Version VersionNumber
ConnectionIDLengths byte
DestinationConnectionID ConnectionId
SourceConnectionID ConnectionId
TokenLength uint8
Token []byte
PayloadLength uint64 `tls:"varint"`
// In order to decode a short header, the length of the connection
// ID must be set in |shortCidLength| before decoding.
shortCidLength uintptr
}
func (p packetHeader) String() string {
ht := "SHORT"
if p.Type.isLongHeader() {
ht = "LONG"
}
return fmt.Sprintf("%s PT=%v", ht, p.getHeaderType())
}
func (p *packetHeader) getHeaderType() packetType {
if p.Type.isLongHeader() {
return p.Type & 0x7f
}
return packetTypeProtectedShort
}
type packet struct {
packetHeader
PacketNumber uint64 // Never more than 32 bits on the wire.
payload []byte
}
// This reads from p.ConnectionIDLengths.
func (p packetHeader) ConnectionIDLengths__length() uintptr {
if p.Type.isLongHeader() {
return 1
}
return 0
}
func (p packetHeader) TokenLength__length() uintptr {
if p.getHeaderType() != packetTypeInitial {
assert(len(p.Token) == 0)
return 0
}
return 1
}
func (p packetHeader) Token__length() uintptr {
if p.getHeaderType() != packetTypeInitial {
assert(len(p.Token) == 0)
return 0
}
return uintptr(p.TokenLength)
}
func (p packetHeader) DestinationConnectionID__length() uintptr {
if !p.Type.isLongHeader() {
return p.shortCidLength
}
l := p.ConnectionIDLengths >> 4
if l != 0 {
l += 3
}
return uintptr(l)
}
func (p packetHeader) SourceConnectionID__length() uintptr {
if !p.Type.isLongHeader() {
return 0
}
l := p.ConnectionIDLengths & 0xf
if l != 0 {
l += 3
}
return uintptr(l)
}
func (p packetHeader) PayloadLength__length() uintptr {
if p.Type.isLongHeader() {
return codecDefaultSize
}
return 0
}
func (p packetHeader) Version__length() uintptr {
if p.Type.isLongHeader() {
return 4
}
return 0
}
func newPacket(pt packetType, destCid ConnectionId, srcCid ConnectionId, ver VersionNumber, pn uint64, payload []byte, aeadOverhead int) *packet {
if pt == packetTypeProtectedShort {
// Only support writing the 32-bit packet number.
pt = packetType(0x2 | packetFlagShortHeader)
srcCid = nil
} else {
pt = pt | packetType(packetFlagLongHeader)
}
lengths := (destCid.EncodeLength() << 4) | srcCid.EncodeLength()
return &packet{
packetHeader: packetHeader{
Type: pt,
ConnectionIDLengths: lengths,
DestinationConnectionID: destCid,
SourceConnectionID: srcCid,
Version: ver,
PayloadLength: uint64(len(payload) + 4 + aeadOverhead),
},
PacketNumber: pn,
payload: payload,
}
}
type versionNegotiationPacket struct {
Versions []byte
}
func newVersionNegotiationPacket(versions []VersionNumber) *versionNegotiationPacket {
var buf bytes.Buffer
for _, v := range versions {
buf.Write(encodeArgs(v))
}
return &versionNegotiationPacket{buf.Bytes()}
}
/*
We don't use these.
func encodePacket(c ConnectionState, aead Aead, p *Packet) ([]byte, error) {
hdr, err := encode(&p.packetHeader)
if err != nil {
return nil, err
}
b, err := aead.protect(p.packetHeader.PacketNumber, hdr, p.payload)
if err != nil {
return nil, err
}
return encodeArgs(hdr, b), nil
}
func decodePacket(c ConnectionState, aead Aead, b []byte) (*Packet, error) {
// Parse the header
var hdr packetHeader
br, err := decode(&hdr, b)
if err != nil {
return nil, err
}
hdr.PacketNumber = c.expandPacketNumber(hdr.PacketNumber)
pt, err := aead.unprotect(hdr.PacketNumber, b[0:br], b[br:])
if err != nil {
return nil, err
}
return &Packet{hdr, pt}, nil
}
*/
func dumpPacket(payload []byte) string {
first := true
ret := fmt.Sprintf("%d=[", len(payload))
for len(payload) > 0 {
if !first {
ret += ", "
}
first = false
n, f, err := decodeFrame(payload)
if err != nil {
ret += fmt.Sprintf("Undecoded: [%x]", payload)
break
}
payload = payload[n:]
// TODO([email protected]): Not sure why %v doesn't work
ret += f.String()
}
ret += "]"
return ret
}
type pneCipherFactory interface {
create(sample []byte) cipher.Stream
}
type pneCipherFactoryAES struct {
block cipher.Block
}
func newPneCipherFactoryAES(key []byte) pneCipherFactory {
inner, err := aes.NewCipher(key)
assert(err == nil)
if err != nil {
return nil
}
return &pneCipherFactoryAES{block: inner}
}
func (f *pneCipherFactoryAES) create(sample []byte) cipher.Stream {
if len(sample) != 16 {
return nil
}
return cipher.NewCTR(f.block, sample)
}
func xorPacketNumber(hdr *packetHeader, hdrlen int, pnbuf []byte, p []byte, factory pneCipherFactory) error {
logf(logTypeTrace, "PNE Operation: hdrlen=%v, hdr=%x, payload=%x", hdrlen, p[:hdrlen], p)
// The packet must be at least long enough to contain
// the header, plus a minimum 1-byte PN, plus the sample. | logf(logTypePacket, "Packet too short")
return nil
}
// Now compute the offset
sample_offset := hdrlen + 4
if sample_offset+sample_length > len(p) {
sample_offset = len(p) - sample_length
}
sample := p[sample_offset : sample_offset+sample_length]
logf(logTypeTrace, "PNE sample_offset=%d sample=%x", sample_offset, sample)
stream := factory.create(sample)
stream.XORKeyStream(pnbuf, p[hdrlen:hdrlen+len(pnbuf)])
return nil
}
var pnPatterns = []struct {
prefix byte
mask byte
length int
}{
{
0, 0x80, 1,
},
{
0x80, 0xc0, 2,
},
{
0xc0, 0xc0, 4,
},
}
const ()
func encodePacketNumber(pn uint64, l int) []byte {
var buf bytes.Buffer
i := 0
for i, _ = range pnPatterns {
if pnPatterns[i].length == l {
break
}
}
uintEncodeInt(&buf, pn, uintptr(l))
b := buf.Bytes()
b[0] &= ^pnPatterns[i].mask
b[0] |= pnPatterns[i].prefix
return b
}
func decodePacketNumber(buf []byte) (uint64, int, error) {
if len(buf) < 1 {
return 0, 0, fmt.Errorf("Zero-length packet number")
}
i := 0
for i, _ = range pnPatterns {
if pnPatterns[i].mask&buf[0] == pnPatterns[i].prefix {
break
}
}
pat := &pnPatterns[i]
if len(buf) < pat.length {
return 0, 0, fmt.Errorf("Buffer too short for packet number (%v < %v)", len(buf), pat.length)
}
buf = dup(buf[:pat.length])
buf[0] &= ^pat.mask
return uintDecodeIntBuf(buf), pat.length, nil
} | sample_length := 16
if sample_length > len(p)-(hdrlen+1) { | random_line_split |
packet.go | package minq
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"encoding/hex"
"fmt"
)
// Encode a QUIC packet.
/*
Long header
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+
|1| Type (7) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Version (32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|DCIL(4)|SCIL(4)|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// Initial Header: same as long header but with Token
+-+-+-+-+-+-+-+-+
|1| 0x7f |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Version (32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|DCIL(4)|SCIL(4)|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Token Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Token (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+
|0|K|1|1|0|R R R|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Protected Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
const (
packetFlagLongHeader = byte(0x80)
packetFlagK = byte(0x40)
packetFlagShortHeader = byte(0x30)
)
// This packet type differs considerably from the spec. It includes both
// long and short headers in the same value space. Long headers are from
// 0-0x7f (inclusive); short headers are represented by packetTypeProtectedShort (0x00).
type packetType byte
const (
packetTypeInitial = packetType(0x7f)
packetTypeRetry = packetType(0x7e)
packetTypeHandshake = packetType(0x7d)
packetType0RTTProtected = packetType(0x7c)
packetTypeProtectedShort = packetType(0x00) // Not a real type
)
func (pt packetType) isLongHeader() bool {
return pt&packetType(packetFlagLongHeader) != 0
}
func (pt packetType) isProtected() bool {
if !pt.isLongHeader() {
return true
}
switch pt & 0x7f {
case packetTypeInitial, packetTypeHandshake, packetTypeRetry:
return false
}
return true
}
func (pt packetType) String() string {
switch pt {
case packetTypeInitial:
return "Initial"
case packetTypeRetry:
return "Retry"
case packetTypeHandshake:
return "Handshake"
case packetType0RTTProtected:
return "0-RTT"
case packetTypeProtectedShort:
return "1-RTT"
default:
return fmt.Sprintf("%x", uint8(pt))
}
}
// kCidDefaultLength is the length of connection ID we generate.
// TODO: make this configurable.
const kCidDefaultLength = 5
// ConnectionId identifies the connection that a packet belongs to.
type ConnectionId []byte
// String stringifies a connection ID in the natural way.
func (c ConnectionId) String() string {
return hex.EncodeToString(c)
}
// EncodeLength produces the length encoding used in the long packet header.
func (c ConnectionId) EncodeLength() byte {
if len(c) == 0 {
return 0
}
assert(len(c) >= 4 && len(c) <= 18)
return byte(len(c) - 3)
}
// The PDU definition for the header.
// These types are capitalized so that |codec| can use them.
type packetHeader struct {
// Type is the on-the-wire form of the packet type.
// Consult getHeaderType if you want a value that corresponds to the
// definition of packetType.
Type packetType
Version VersionNumber
ConnectionIDLengths byte
DestinationConnectionID ConnectionId
SourceConnectionID ConnectionId
TokenLength uint8
Token []byte
PayloadLength uint64 `tls:"varint"`
// In order to decode a short header, the length of the connection
// ID must be set in |shortCidLength| before decoding.
shortCidLength uintptr
}
func (p packetHeader) String() string {
ht := "SHORT"
if p.Type.isLongHeader() {
ht = "LONG"
}
return fmt.Sprintf("%s PT=%v", ht, p.getHeaderType())
}
func (p *packetHeader) getHeaderType() packetType {
if p.Type.isLongHeader() {
return p.Type & 0x7f
}
return packetTypeProtectedShort
}
type packet struct {
packetHeader
PacketNumber uint64 // Never more than 32 bits on the wire.
payload []byte
}
// This reads from p.ConnectionIDLengths.
func (p packetHeader) ConnectionIDLengths__length() uintptr {
if p.Type.isLongHeader() {
return 1
}
return 0
}
func (p packetHeader) TokenLength__length() uintptr {
if p.getHeaderType() != packetTypeInitial {
assert(len(p.Token) == 0)
return 0
}
return 1
}
func (p packetHeader) Token__length() uintptr {
if p.getHeaderType() != packetTypeInitial {
assert(len(p.Token) == 0)
return 0
}
return uintptr(p.TokenLength)
}
func (p packetHeader) DestinationConnectionID__length() uintptr {
if !p.Type.isLongHeader() {
return p.shortCidLength
}
l := p.ConnectionIDLengths >> 4
if l != 0 {
l += 3
}
return uintptr(l)
}
func (p packetHeader) SourceConnectionID__length() uintptr {
if !p.Type.isLongHeader() {
return 0
}
l := p.ConnectionIDLengths & 0xf
if l != 0 {
l += 3
}
return uintptr(l)
}
func (p packetHeader) PayloadLength__length() uintptr {
if p.Type.isLongHeader() {
return codecDefaultSize
}
return 0
}
func (p packetHeader) Version__length() uintptr {
if p.Type.isLongHeader() {
return 4
}
return 0
}
func newPacket(pt packetType, destCid ConnectionId, srcCid ConnectionId, ver VersionNumber, pn uint64, payload []byte, aeadOverhead int) *packet {
if pt == packetTypeProtectedShort {
// Only support writing the 32-bit packet number.
pt = packetType(0x2 | packetFlagShortHeader)
srcCid = nil
} else {
pt = pt | packetType(packetFlagLongHeader)
}
lengths := (destCid.EncodeLength() << 4) | srcCid.EncodeLength()
return &packet{
packetHeader: packetHeader{
Type: pt,
ConnectionIDLengths: lengths,
DestinationConnectionID: destCid,
SourceConnectionID: srcCid,
Version: ver,
PayloadLength: uint64(len(payload) + 4 + aeadOverhead),
},
PacketNumber: pn,
payload: payload,
}
}
type versionNegotiationPacket struct {
Versions []byte
}
func | (versions []VersionNumber) *versionNegotiationPacket {
var buf bytes.Buffer
for _, v := range versions {
buf.Write(encodeArgs(v))
}
return &versionNegotiationPacket{buf.Bytes()}
}
/*
We don't use these.
func encodePacket(c ConnectionState, aead Aead, p *Packet) ([]byte, error) {
hdr, err := encode(&p.packetHeader)
if err != nil {
return nil, err
}
b, err := aead.protect(p.packetHeader.PacketNumber, hdr, p.payload)
if err != nil {
return nil, err
}
return encodeArgs(hdr, b), nil
}
func decodePacket(c ConnectionState, aead Aead, b []byte) (*Packet, error) {
// Parse the header
var hdr packetHeader
br, err := decode(&hdr, b)
if err != nil {
return nil, err
}
hdr.PacketNumber = c.expandPacketNumber(hdr.PacketNumber)
pt, err := aead.unprotect(hdr.PacketNumber, b[0:br], b[br:])
if err != nil {
return nil, err
}
return &Packet{hdr, pt}, nil
}
*/
func dumpPacket(payload []byte) string {
first := true
ret := fmt.Sprintf("%d=[", len(payload))
for len(payload) > 0 {
if !first {
ret += ", "
}
first = false
n, f, err := decodeFrame(payload)
if err != nil {
ret += fmt.Sprintf("Undecoded: [%x]", payload)
break
}
payload = payload[n:]
// TODO([email protected]): Not sure why %v doesn't work
ret += f.String()
}
ret += "]"
return ret
}
type pneCipherFactory interface {
create(sample []byte) cipher.Stream
}
type pneCipherFactoryAES struct {
block cipher.Block
}
func newPneCipherFactoryAES(key []byte) pneCipherFactory {
inner, err := aes.NewCipher(key)
assert(err == nil)
if err != nil {
return nil
}
return &pneCipherFactoryAES{block: inner}
}
func (f *pneCipherFactoryAES) create(sample []byte) cipher.Stream {
if len(sample) != 16 {
return nil
}
return cipher.NewCTR(f.block, sample)
}
func xorPacketNumber(hdr *packetHeader, hdrlen int, pnbuf []byte, p []byte, factory pneCipherFactory) error {
logf(logTypeTrace, "PNE Operation: hdrlen=%v, hdr=%x, payload=%x", hdrlen, p[:hdrlen], p)
// The packet must be at least long enough to contain
// the header, plus a minimum 1-byte PN, plus the sample.
sample_length := 16
if sample_length > len(p)-(hdrlen+1) {
logf(logTypePacket, "Packet too short")
return nil
}
// Now compute the offset
sample_offset := hdrlen + 4
if sample_offset+sample_length > len(p) {
sample_offset = len(p) - sample_length
}
sample := p[sample_offset : sample_offset+sample_length]
logf(logTypeTrace, "PNE sample_offset=%d sample=%x", sample_offset, sample)
stream := factory.create(sample)
stream.XORKeyStream(pnbuf, p[hdrlen:hdrlen+len(pnbuf)])
return nil
}
var pnPatterns = []struct {
prefix byte
mask byte
length int
}{
{
0, 0x80, 1,
},
{
0x80, 0xc0, 2,
},
{
0xc0, 0xc0, 4,
},
}
const ()
func encodePacketNumber(pn uint64, l int) []byte {
var buf bytes.Buffer
i := 0
for i, _ = range pnPatterns {
if pnPatterns[i].length == l {
break
}
}
uintEncodeInt(&buf, pn, uintptr(l))
b := buf.Bytes()
b[0] &= ^pnPatterns[i].mask
b[0] |= pnPatterns[i].prefix
return b
}
func decodePacketNumber(buf []byte) (uint64, int, error) {
if len(buf) < 1 {
return 0, 0, fmt.Errorf("Zero-length packet number")
}
i := 0
for i, _ = range pnPatterns {
if pnPatterns[i].mask&buf[0] == pnPatterns[i].prefix {
break
}
}
pat := &pnPatterns[i]
if len(buf) < pat.length {
return 0, 0, fmt.Errorf("Buffer too short for packet number (%v < %v)", len(buf), pat.length)
}
buf = dup(buf[:pat.length])
buf[0] &= ^pat.mask
return uintDecodeIntBuf(buf), pat.length, nil
}
| newVersionNegotiationPacket | identifier_name |
packet.go | package minq
import (
"bytes"
"crypto/aes"
"crypto/cipher"
"encoding/hex"
"fmt"
)
// Encode a QUIC packet.
/*
Long header
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+
|1| Type (7) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Version (32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|DCIL(4)|SCIL(4)|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// Initial Header: same as long header but with Token
+-+-+-+-+-+-+-+-+
|1| 0x7f |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Version (32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|DCIL(4)|SCIL(4)|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Source Connection ID (0/32..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Token Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Token (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Length (i) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+
|0|K|1|1|0|R R R|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Destination Connection ID (0..144) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Packet Number (8/16/32) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Protected Payload (*) ...
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
const (
packetFlagLongHeader = byte(0x80)
packetFlagK = byte(0x40)
packetFlagShortHeader = byte(0x30)
)
// This packet type differs considerably from the spec. It includes both
// long and short headers in the same value space. Long headers are from
// 0-0x7f (inclusive); short headers are always represented as packetTypeProtectedShort (0x00).
type packetType byte
const (
packetTypeInitial = packetType(0x7f)
packetTypeRetry = packetType(0x7e)
packetTypeHandshake = packetType(0x7d)
packetType0RTTProtected = packetType(0x7c)
packetTypeProtectedShort = packetType(0x00) // Not a real type
)
func (pt packetType) isLongHeader() bool {
return pt&packetType(packetFlagLongHeader) != 0
}
func (pt packetType) isProtected() bool |
func (pt packetType) String() string {
switch pt {
case packetTypeInitial:
return "Initial"
case packetTypeRetry:
return "Retry"
case packetTypeHandshake:
return "Handshake"
case packetType0RTTProtected:
return "0-RTT"
case packetTypeProtectedShort:
return "1-RTT"
default:
return fmt.Sprintf("%x", uint8(pt))
}
}
// kCidDefaultLength is the length of connection ID we generate.
// TODO: make this configurable.
const kCidDefaultLength = 5
// ConnectionId identifies the connection that a packet belongs to.
type ConnectionId []byte
// String stringifies a connection ID in the natural way.
func (c ConnectionId) String() string {
return hex.EncodeToString(c)
}
// EncodeLength produces the length encoding used in the long packet header.
func (c ConnectionId) EncodeLength() byte {
if len(c) == 0 {
return 0
}
assert(len(c) >= 4 && len(c) <= 18)
return byte(len(c) - 3)
}
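// A hedged, self-contained illustration (the example* helper names are made up
// and not used by this package) of how the long header packs the two connection
// ID lengths into one DCIL/SCIL byte: each nibble stores len-3, and 0 means
// "no connection ID".
func exampleEncodeCIL(dcidLen, scidLen int) byte {
	nib := func(l int) byte {
		if l == 0 {
			return 0
		}
		return byte(l - 3) // valid lengths are 4..18, encoded as 1..15
	}
	return nib(dcidLen)<<4 | nib(scidLen)
}

// exampleDecodeCIL reverses exampleEncodeCIL; for instance exampleEncodeCIL(5, 0)
// yields 0x20, which decodes back to a 5-byte destination CID and no source CID.
func exampleDecodeCIL(b byte) (dcidLen, scidLen int) {
	un := func(n byte) int {
		if n == 0 {
			return 0
		}
		return int(n) + 3
	}
	return un(b >> 4), un(b & 0x0f)
}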
// The PDU definition for the header.
// These types are capitalized so that |codec| can use them.
type packetHeader struct {
// Type is the on-the-wire form of the packet type.
// Consult getHeaderType if you want a value that corresponds to the
// definition of packetType.
Type packetType
Version VersionNumber
ConnectionIDLengths byte
DestinationConnectionID ConnectionId
SourceConnectionID ConnectionId
TokenLength uint8
Token []byte
PayloadLength uint64 `tls:"varint"`
// In order to decode a short header, the length of the connection
// ID must be set in |shortCidLength| before decoding.
shortCidLength uintptr
}
func (p packetHeader) String() string {
ht := "SHORT"
if p.Type.isLongHeader() {
ht = "LONG"
}
return fmt.Sprintf("%s PT=%v", ht, p.getHeaderType())
}
func (p *packetHeader) getHeaderType() packetType {
if p.Type.isLongHeader() {
return p.Type & 0x7f
}
return packetTypeProtectedShort
}
type packet struct {
packetHeader
PacketNumber uint64 // Never more than 32 bits on the wire.
payload []byte
}
// This reads from p.ConnectionIDLengths.
func (p packetHeader) ConnectionIDLengths__length() uintptr {
if p.Type.isLongHeader() {
return 1
}
return 0
}
func (p packetHeader) TokenLength__length() uintptr {
if p.getHeaderType() != packetTypeInitial {
assert(len(p.Token) == 0)
return 0
}
return 1
}
func (p packetHeader) Token__length() uintptr {
if p.getHeaderType() != packetTypeInitial {
assert(len(p.Token) == 0)
return 0
}
return uintptr(p.TokenLength)
}
func (p packetHeader) DestinationConnectionID__length() uintptr {
if !p.Type.isLongHeader() {
return p.shortCidLength
}
l := p.ConnectionIDLengths >> 4
if l != 0 {
l += 3
}
return uintptr(l)
}
func (p packetHeader) SourceConnectionID__length() uintptr {
if !p.Type.isLongHeader() {
return 0
}
l := p.ConnectionIDLengths & 0xf
if l != 0 {
l += 3
}
return uintptr(l)
}
func (p packetHeader) PayloadLength__length() uintptr {
if p.Type.isLongHeader() {
return codecDefaultSize
}
return 0
}
func (p packetHeader) Version__length() uintptr {
if p.Type.isLongHeader() {
return 4
}
return 0
}
func newPacket(pt packetType, destCid ConnectionId, srcCid ConnectionId, ver VersionNumber, pn uint64, payload []byte, aeadOverhead int) *packet {
if pt == packetTypeProtectedShort {
// Only support writing the 32-bit packet number.
pt = packetType(0x2 | packetFlagShortHeader)
srcCid = nil
} else {
pt = pt | packetType(packetFlagLongHeader)
}
lengths := (destCid.EncodeLength() << 4) | srcCid.EncodeLength()
return &packet{
packetHeader: packetHeader{
Type: pt,
ConnectionIDLengths: lengths,
DestinationConnectionID: destCid,
SourceConnectionID: srcCid,
Version: ver,
PayloadLength: uint64(len(payload) + 4 + aeadOverhead),
},
PacketNumber: pn,
payload: payload,
}
}
type versionNegotiationPacket struct {
Versions []byte
}
func newVersionNegotiationPacket(versions []VersionNumber) *versionNegotiationPacket {
var buf bytes.Buffer
for _, v := range versions {
buf.Write(encodeArgs(v))
}
return &versionNegotiationPacket{buf.Bytes()}
}
/*
We don't use these.
func encodePacket(c ConnectionState, aead Aead, p *Packet) ([]byte, error) {
hdr, err := encode(&p.packetHeader)
if err != nil {
return nil, err
}
b, err := aead.protect(p.packetHeader.PacketNumber, hdr, p.payload)
if err != nil {
return nil, err
}
return encodeArgs(hdr, b), nil
}
func decodePacket(c ConnectionState, aead Aead, b []byte) (*Packet, error) {
// Parse the header
var hdr packetHeader
br, err := decode(&hdr, b)
if err != nil {
return nil, err
}
hdr.PacketNumber = c.expandPacketNumber(hdr.PacketNumber)
pt, err := aead.unprotect(hdr.PacketNumber, b[0:br], b[br:])
if err != nil {
return nil, err
}
return &Packet{hdr, pt}, nil
}
*/
func dumpPacket(payload []byte) string {
first := true
ret := fmt.Sprintf("%d=[", len(payload))
for len(payload) > 0 {
if !first {
ret += ", "
}
first = false
n, f, err := decodeFrame(payload)
if err != nil {
ret += fmt.Sprintf("Undecoded: [%x]", payload)
break
}
payload = payload[n:]
// TODO([email protected]): Not sure why %v doesn't work
ret += f.String()
}
ret += "]"
return ret
}
type pneCipherFactory interface {
create(sample []byte) cipher.Stream
}
type pneCipherFactoryAES struct {
block cipher.Block
}
func newPneCipherFactoryAES(key []byte) pneCipherFactory {
inner, err := aes.NewCipher(key)
assert(err == nil)
if err != nil {
return nil
}
return &pneCipherFactoryAES{block: inner}
}
func (f *pneCipherFactoryAES) create(sample []byte) cipher.Stream {
if len(sample) != 16 {
return nil
}
return cipher.NewCTR(f.block, sample)
}
func xorPacketNumber(hdr *packetHeader, hdrlen int, pnbuf []byte, p []byte, factory pneCipherFactory) error {
logf(logTypeTrace, "PNE Operation: hdrlen=%v, hdr=%x, payload=%x", hdrlen, p[:hdrlen], p)
// The packet must be at least long enough to contain
// the header, plus a minimum 1-byte PN, plus the sample.
sample_length := 16
if sample_length > len(p)-(hdrlen+1) {
logf(logTypePacket, "Packet too short")
return nil
}
// Now compute the offset
sample_offset := hdrlen + 4
if sample_offset+sample_length > len(p) {
sample_offset = len(p) - sample_length
}
sample := p[sample_offset : sample_offset+sample_length]
logf(logTypeTrace, "PNE sample_offset=%d sample=%x", sample_offset, sample)
stream := factory.create(sample)
stream.XORKeyStream(pnbuf, p[hdrlen:hdrlen+len(pnbuf)])
return nil
}
var pnPatterns = []struct {
prefix byte
mask byte
length int
}{
{
0, 0x80, 1,
},
{
0x80, 0xc0, 2,
},
{
0xc0, 0xc0, 4,
},
}
const ()
func encodePacketNumber(pn uint64, l int) []byte {
var buf bytes.Buffer
i := 0
for i, _ = range pnPatterns {
if pnPatterns[i].length == l {
break
}
}
uintEncodeInt(&buf, pn, uintptr(l))
b := buf.Bytes()
b[0] &= ^pnPatterns[i].mask
b[0] |= pnPatterns[i].prefix
return b
}
func decodePacketNumber(buf []byte) (uint64, int, error) {
if len(buf) < 1 {
return 0, 0, fmt.Errorf("Zero-length packet number")
}
i := 0
for i, _ = range pnPatterns {
if pnPatterns[i].mask&buf[0] == pnPatterns[i].prefix {
break
}
}
pat := &pnPatterns[i]
if len(buf) < pat.length {
return 0, 0, fmt.Errorf("Buffer too short for packet number (%v < %v)", len(buf), pat.length)
}
buf = dup(buf[:pat.length])
buf[0] &= ^pat.mask
return uintDecodeIntBuf(buf), pat.length, nil
}
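// A hedged, self-contained illustration of the prefix/mask scheme described by
// pnPatterns, written against plain byte slices instead of the package's internal
// uintEncodeInt/uintDecodeIntBuf helpers; the example* names are made up. Note the
// caller must pick a length large enough that the prefix bits do not clobber
// significant bits of the packet number.
func examplePnEncode(pn uint64, length int) []byte {
	b := make([]byte, length)
	for i := length - 1; i >= 0; i-- { // big-endian: low-order bytes go last
		b[i] = byte(pn)
		pn >>= 8
	}
	switch length {
	case 1:
		b[0] &= 0x7f // 0xxxxxxx
	case 2:
		b[0] = b[0]&^0xc0 | 0x80 // 10xxxxxx
	case 4:
		b[0] = b[0]&^0xc0 | 0xc0 // 11xxxxxx
	}
	return b
}

// examplePnDecode(examplePnEncode(0x1234, 2)) returns (0x1234, 2): the number
// encodes to 0x92 0x34, the leading "10" prefix marks a 2-byte packet number,
// and masking the prefix off restores 0x12 0x34.
func examplePnDecode(buf []byte) (pn uint64, length int) {
	switch {
	case buf[0]&0x80 == 0:
		length = 1
	case buf[0]&0xc0 == 0x80:
		length = 2
	default:
		length = 4
	}
	first := buf[0] & 0x3f
	if length == 1 {
		first = buf[0] & 0x7f
	}
	pn = uint64(first)
	for _, c := range buf[1:length] {
		pn = pn<<8 | uint64(c)
	}
	return pn, length
}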
| {
if !pt.isLongHeader() {
return true
}
switch pt & 0x7f {
case packetTypeInitial, packetTypeHandshake, packetTypeRetry:
return false
}
return true
} | identifier_body |
TCP_echo_server.py | import asyncio
import hashlib
import re
import os
import random
import json
import ssl
import sqlite3
import concurrent.futures
from config.config import *
from xmlrpc.server import SimpleXMLRPCServer
user_list = {}  # connected, authenticated users, keyed by str(ip) + str(port)
server_list = {}  # connections opened towards the destination client (server side of the relay)
rules = []  # rules pushed in over XML-RPC
def create_sqlite_db():
con = sqlite3.connect("user.db")
cur = con.cursor()
sql = "CREATE TABLE IF NOT EXISTS user(id INTEGER PRIMARY KEY,username TEXT,password TEXT,email TEXT)"
cur.execute(sql)
return con, cur
def transfer_json(msg, method):
"""字符串与json格式互相转换"""
if method:
return json.dumps(msg)
else:
return json.loads(msg)
async def connect_dest_server(dest_addr, local_reader, local_writer, dest_reader, dest_writer):
try:
        local_addr = local_writer.get_extra_info('peername')  # requester's (ip, port)
        request_msg = {'local_addr': local_addr, 'request_addr': dest_addr, 'code': 'Request connection'}  # connection request for the destination client
        request_msg = transfer_json(request_msg, method=True)
        # print('connection request sent to the destination client: ' + request_msg)
        dest_writer.write(request_msg.encode())  # forward the connection request to the destination client
await dest_writer.drain()
try:
ensure_connection = await dest_reader.read(500)
ensure_connection = transfer_json(ensure_connection.decode(), method=False)
if ensure_connection['code'] == 'Accept connection':
try:
connect_success = {'local_addr': local_addr, 'request_addr': dest_addr, 'code': 'Ready'}
connect_success = transfer_json(connect_success, method=True)
local_writer.write(connect_success.encode())
await local_writer.drain()
print('请求成功:' + str(local_addr) + '正在与' + str(dest_addr) + '通讯...\n')
dest = {'addr': dest_addr, 'Reader': dest_reader, 'Writer': dest_writer}
return dest
except ConnectionResetError:
# print('已断开用户连接:', local_addr)
return False
except Exception as e:
print('0',e)
elif ensure_connection['code'] == 'Refuse connection':
try:
connect_fail = {'local_addr': local_addr, 'request_addr': dest_addr, 'code': 'No'}
connect_fail = transfer_json(connect_fail, method=True)
local_writer.write(connect_fail.encode())
await local_writer.drain()
print('请求失败:' + str(dest_addr) + '拒绝与' + str(local_addr) + '通讯...\n')
dest_writer.close()
local_writer.close()
return False
except Exception as e:
print('1',e)
else:
pass
except Exception as e:
print('2',e)
pass
except Exception as e:
print('3',e)
pass
def hold_user_info(ip, addr, reader, writer):
"""存储已连接客户端的相关内容"""
user = {'addr': addr, 'Reader': reader, 'Writer': writer}
user_list[ip] = user
def hold_server_info(ip, addr, reader, writer):
"""存储已连接目标服务器(客户端)的相关内容"""
user = {'addr': addr, 'Reader': reader, 'Writer': writer}
server_list[ip] = user
async def server_authenticate(reader, writer, secret_key):
"""客户端合法认证"""
message = os.urandom(32) # 随机产生 n=32 个字节的字符串
writer.write(message)
await writer.drain()
s = hashlib.sha512()
    s.update(message + secret_key.encode('utf-8'))  # hash challenge + shared secret
digest = s.hexdigest()
response = await reader.read(1024)
if digest == response.decode('utf-8'):
client_addr = writer.get_extra_info('peername')
        client_addr_str = str(client_addr[0]) + str(client_addr[1])  # key: ip concatenated with port
hold_user_info(client_addr_str, client_addr, reader, writer)
print('\n客户端:' + str(client_addr) + '连接成功\n')
return digest
else:
        writer.write('connection_error'.encode())  # authentication failed: report the error
writer.close()
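# --- Hedged sketch of the client side of the challenge/response above ---
# Assumes the client shares the same SECRET_KEY and already holds a
# (reader, writer) pair from asyncio.open_connection(); the function name and
# read size are illustrative, not taken from the real client.
async def client_authenticate(reader, writer, secret_key):
    challenge = await reader.read(32)  # the 32 random bytes sent by server_authenticate()
    digest = hashlib.sha512(challenge + secret_key.encode('utf-8')).hexdigest()
    writer.write(digest.encode('utf-8'))  # the server compares this with its own digest
    await writer.drain()
    return digest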
async def user_login(reader, writer):
global search_result, account
try:
search_result = None
account = await reader.read(1024)
account = transfer_json(account.decode(), False)
sql = "select * from user where username = '{}' and password = '{}'".format(account['username'],
account['password'])
cur.execute(sql)
search_result = cur.fetchall()
except sqlite3.OperationalError:
search_result = False
except ssl.SSLError:
search_result = False
if search_result:
print('\n用户' + account['username'] + '登陆成功!\n')
writer.write('Login Success'.encode())
await writer.drain()
return True
else:
writer.write('Need Email'.encode())
await writer.drain()
email = await reader.read(1024)
        # (com|cn|net) replaces the original character class [com,cn,net], which only matched single letters
        verify_email = re.match(r'^[0-9a-zA-Z_]{0,19}@[0-9a-zA-Z]{1,13}\.(com|cn|net)(\.cn)?$', email.decode())
if verify_email:
email = verify_email.group()
sql = "insert into user(username,password,email) values ('{}','{}','{}')".format(
str(account['username']), str(account['password']), str(email))
try:
cur.execute(sql)
con.commit()
print('\n用户' + account['username'] + '注册成功!\n')
writer.write('Register Success'.encode())
await writer.drain()
return True
except Exception as e:
writer.write('Register Fail'.encode())
await writer.drain()
return False
else:
writer.write('Register Fail'.encode())
await writer.drain()
return False
def creat_server_ssl():
ssl_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ssl_ctx.options |= ssl.OP_NO_TLSv1
ssl_ctx.options |= ssl.OP_NO_TLSv1_1
ssl_ctx.options |= ssl.OP_SINGLE_DH_USE
ssl_ctx.options |= ssl.OP_SINGLE_ECDH_USE
ssl_ctx.load_cert_chain(certfile='./server_ssl/mycertfile.pem', keyfile='./server_ssl/mykeyfile.pem')
ssl_ctx.load_verify_locations(cafile='./server_ssl/mycertfile.pem')
ssl_ctx.check_hostname = False
ssl_ctx.verify_mode = ssl.VerifyMode.CERT_REQUIRED
ssl_ctx.set_ciphers('ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384')
return ssl_ctx
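# --- Hedged sketch of a matching client-side TLS context (paths are assumptions) ---
# Because verify_mode above is CERT_REQUIRED, a connecting client must both trust
# the server certificate and present its own certificate signed by the same CA.
def creat_client_ssl():
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.load_verify_locations(cafile='./client_ssl/mycertfile.pem')  # trust anchor for the server cert
    ctx.load_cert_chain(certfile='./client_ssl/mycertfile.pem', keyfile='./client_ssl/mykeyfile.pem')
    ctx.check_hostname = False  # the demo certificate may not carry a matching hostname
    return ctx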
def set_rule(rule_json):
global rule
rule = json.loads(rule_json)
rules.append(rule)
rpc_server.server_close()
return rule_json
def RPC_server(rpc_port):
global rpc_server
# port = 0
    rpc_server = SimpleXMLRPCServer(('localhost', rpc_port))  # create the listener
    rpc_server.register_function(set_rule, "set_rule")  # expose set_rule to RPC clients
    print("等待RPC规则配置......")
    try:
        rpc_server.handle_request()  # block until a single rule has been pushed
print('配置完成......')
except OSError:
print('配置完成......')
return rule
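# --- Hedged sketch of the client side of the rule configuration (values are examples) ---
# RPC_server() above blocks in handle_request() until exactly one rule arrives;
# the field names match what handle_echo() reads out of config_rule below.
def push_rule_example(rpc_port):
    import xmlrpc.client  # local import: the server module itself does not need it
    rule = {'ident': 'demo', 'src_ip': '127.0.0.1', 'src_port': 10001,
            'dst_ip': '127.0.0.1', 'dst_port': 10002}
    proxy = xmlrpc.client.ServerProxy('http://localhost:{}/'.format(rpc_port))
    return proxy.set_rule(json.dumps(rule))  # set_rule() json.loads() it on the server side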
async def handle_echo(reader, writer):
client_addr = writer.get_extra_info('peername')
connect_result = await server_authenticate(reader, writer, SECRET_KEY) # 用户合法性验证
if not connect_result:
print('客户端:' + str(client_addr) + '连接失败')
writer.close()
return
try:
login_result = await user_login(reader, writer)
if not login_result:
user_list.pop(str(client_addr[0]) + str(client_addr[1]))
print('已断开用户连接:', client_addr)
writer.close()
return
except ConnectionResetError:
user_list.pop(str(client_addr[0]) + str(client_addr[1]))
print('用户已断开连接:', client_addr)
writer.close()
return
except ssl.SSLError as e:
return
    rpc_port = random.randint(49995, 50000)
    # await asyncio.sleep(0.01)
    rpc_port = 12039  # NOTE: the random port above is overridden by this fixed debug value
# print(rpc_port)
writer.write(str(rpc_port).encode())
await writer.drain()
loop = asyncio.get_running_loop()
with concurrent.futures.ProcessPoolExecutor() as pool:
config_rule = await loop.run_in_executor(pool, RPC_server, rpc_port)
# config_rule = RPC_server()
    ident, src_ip, src_port, dst_ip, dst_port = \
        config_rule['ident'], config_rule['src_ip'], config_rule['src_port'], config_rule['dst_ip'], config_rule['dst_port']
    # the configured destination is overridden with the default server from the config file
    dst_ip = Client_Ip[0]
    dst_port = Client_Port[0]
l_reader, l_writer = await asyncio.open_connection(dst_ip, dst_port)
server_addr = l_writer.get_extra_info('peername')
server_addr_str = str(server_addr[0]) + str(server_addr[1])
    hold_server_info(server_addr_str, server_addr, l_reader, l_writer)  # remember the connection to the destination client (server side of the relay)
# print(server_list)
try:
        # dest_ip = await reader.read(100)  # originally the requested destination ip came from the client
        # dest_ip = dest_ip.decode()
        dest_ip = server_addr  # default to the server_addr opened above
print('正在请求:' + str(client_addr) + '请求目的ip:' + str(dest_ip))
find_dest = await connect_dest_server(dest_ip, reader, writer, l_reader, l_writer)
if find_dest:
config_rule['ident'] = 'hi'
config_rule_json = json.dumps(config_rule)
writer.write(config_rule_json.encode())
await writer.drain()
# print(config_rule)
s_reader = find_dest['Reader']
s_writer = find_dest['Writer']
while True:
data = await reader.read(100)
message = data.decode()
if message == 'Heart beat!':
writer.write(message.encode())
print('心跳响应' + message)
continue
if message == '' or message == 'exit':
message = 'Disconnect Request'
s_writer.write(message.encode())
await s_writer.drain()
re_msg = await s_reader.read(1024)
if re_msg.decode() == 'ByeBye':
writer.write(re_msg)
await writer.drain()
user_list.pop(str(client_addr[0]) + str(client_addr[1]))
print('用户已断开连接:', client_addr)
writer.close()
s_writer.close()
break
else:
pass
s_writer.write(data)
await s_writer.drain()
print(str(client_addr) + '正在给' + str(server_addr) + '发送信息:' + data.decode())
try:
re_msg = await s_reader.read(1024)
print('已收到' + str(server_addr) + '的回复:\n' + re_msg.decode())
try:
writer.write(re_msg)
await writer.drain()
print('成功给' + str(client_addr) + '发送回复:\n' + re_msg.decode())
except Exception as e:
print('5',e)
except Exception as e:
print('6',e)
else:
print("请求失败,连接已断开!")
except ConnectionResetError:
message = 'Force Disconnect'
l_writer.write(message.encode())
await l_writer.drain()
l_writer.close()
# user_list.pop(str(client_addr[0]) + str(client_addr[1]))
print('用户已断开连接:', client_addr)
writer.close()
except ssl.SSLError as e:
pass
async def get_general_control():
reader, writer = await asyncio.open_connection(Operat_server_IP[0], Operat_server_Port[0])
while True:
cmd = await reader.read(100)
cmd = cmd.decode()
if cmd == '1' or cmd == 'find -all':
user_info = []
for user in user_list:
user_info.append(user_list[user]['addr'])
re_cmd = transfer_json(user_info, True)
writer.write(re_cmd.encode())
await writer.drain()
elif cmd == '2' or cmd == 'break -all':
for user in user_list:
                try:
                    user_writer = user_list[user]['Writer']
                    user_writer.write('exit'.encode())
                    await user_writer.drain()  # drain the user's writer, not the control connection
                    user_writer.close()
print('已断开用户连接:', user_list[user]['addr'])
except Exception as e:
print('7',e)
user_list.clear()
writer.write('-----全部关闭成功-----'.encode())
await writer.drain()
else:
writer.write('-----命令有误!请重试-----'.encode())
await writer.drain()
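# --- Hedged sketch of the operations console that get_general_control() connects to ---
# It listens on Operat_server_IP/Operat_server_Port, sends '1' (list users) or
# '2' (disconnect all) and prints the proxy's reply; host/port values here are
# purely illustrative.
async def console_example(host='127.0.0.1', port=9999):
    async def handle_proxy(reader, writer):
        for cmd in ('1', '2'):
            writer.write(cmd.encode())
            await writer.drain()
            reply = await reader.read(4096)
            print(cmd, '->', reply.decode())
    console = await asyncio.start_server(handle_proxy, host, port)
    async with console:
        await console.serve_forever()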
async def main():
ssl_server = creat_server_ssl()
server = await asyncio.start_server(handle_echo, Server_Ip[0], Server_Port[0], ssl=ssl_server)
    # server.sockets returns a copy of the server's internal listening sockets
addr = server.sockets[0].getsockname()
print('成功开启服务器:', addr)
print('等待客户端连接...\n')
try:
await get_general_control()
except Exception as e:
print('8',e)
async with server:
        # keep accepting connections until the coroutine is cancelled; cancelling the serve_forever task closes the server
await server.serve_forever()
def open_agent_server():
asyncio.run(main())
con, cur = create_sqlite_db()
if __name__ == '__main__':
open_agent_server()
| identifier_body |
||
localization.rs | // Copyright 2019 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Localization handling.
//!
//! Localization is backed by [Fluent], via [fluent-rs].
//!
//! In Druid, the main way you will deal with localization is via the
//! [`LocalizedString`] struct.
//!
//! You construct a [`LocalizedString`] with a key, which identifies a 'message'
//! in your `.ftl` files. If your string requires arguments, you supply it with
//! closures that can extract those arguments from the current [`Env`] and
//! [`Data`].
//!
//! At runtime, you resolve your [`LocalizedString`] into an actual string,
//! passing it the current [`Env`] and [`Data`].
//!
//!
//! [Fluent]: https://projectfluent.org
//! [fluent-rs]: https://github.com/projectfluent/fluent-rs
//! [`LocalizedString`]: struct.LocalizedString.html
//! [`Env`]: struct.Env.html
//! [`Data`]: trait.Data.html
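// A hedged usage sketch of the trimmed-down API kept in this port: build the
// string with a key, attach an argument closure, and read it back once the
// framework has resolved it. The message key, the `count` field, the AppData
// type and the `Into<ArgValue>` conversion are all assumptions for illustration.
//
//     let label: LocalizedString<AppData> = LocalizedString::new("hello-counter")
//         .with_arg("count", |data: &AppData, _env: &Env| data.count.into());
//     // after resolution against the current data and Env:
//     // let text: &str = label.localized_str();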
/* ////
use std::collections::HashMap;
use std::sync::Arc;
use std::{fs, io};
use log::{debug, error, warn};
*/ ////
use crate::data::Data;
use crate::env::Env;
use crate::{ArgValue, ArgValues}; ////
/* ////
use fluent_bundle::{
FluentArgs, FluentBundle, FluentError, FluentMessage, FluentResource, FluentValue,
};
use fluent_locale::{negotiate_languages, NegotiationStrategy};
use fluent_syntax::ast::Pattern as FluentPattern;
use unic_langid::LanguageIdentifier;
// Localization looks for string files in druid/resources, but this path is hardcoded;
// it will only work if you're running an example from the druid/ directory.
// At some point we will need to bundle strings with applications, and choose
// the path dynamically.
static FALLBACK_STRINGS: &str = include_str!("../resources/i18n/en-US/builtin.ftl");
/// Provides access to the localization strings for the current locale.
#[allow(dead_code)]
pub(crate) struct L10nManager {
// these two are not currently used; will be used when we let the user
// add additional localization files.
res_mgr: ResourceManager,
resources: Vec<String>,
current_bundle: BundleStack,
current_locale: LanguageIdentifier,
}
/// Manages a collection of localization files.
struct ResourceManager {
resources: HashMap<String, Arc<FluentResource>>,
locales: Vec<LanguageIdentifier>,
default_locale: LanguageIdentifier,
path_scheme: String,
}
*/ ////
type MaxLocalizedString = heapless::consts::U20; //// Max length of localized strings
type String = heapless::String::<MaxLocalizedString>; ////
type MaxLocalizedArg = heapless::consts::U2; //// Max number of localized args
type Vec<T> = heapless::Vec::<T, MaxLocalizedArg>; //// Max number of localized args per string
//////NOTE: instead of a closure, at some point we can use something like a lens for this.
//////TODO: this is an Arc so that it can be clone, which is a bound on things like `Menu`.
/////// A closure that generates a localization value.
type ArgClosure<T> = fn(&T, &Env) -> ArgValue; ////
////type ArgClosure<T> = Arc<dyn Fn(&T, &Env) -> FluentValue<'static> + 'static>;
/// Wraps a closure that generates an argument for localization.
#[derive(Clone)]
struct ArgSource<T>(ArgClosure<T>);
/// A string that can be localized based on the current locale.
///
/// At its simplest, a `LocalizedString` is a key that can be resolved
/// against a map of localized strings for a given locale.
#[derive(Clone)]////
////#[derive(Debug, Clone)]
pub struct LocalizedString<T> {
pub(crate) key: &'static str,
placeholder: Option<String>,
args: Option<Vec<(&'static str, ArgSource<T>)>>,
resolved: Option<String>,
////resolved_lang: Option<LanguageIdentifier>,
}
/* ////
/// A stack of localization resources, used for fallback.
struct BundleStack(Vec<FluentBundle<Arc<FluentResource>>>);
impl BundleStack {
fn get_message(&self, id: &str) -> Option<FluentMessage> {
self.0.iter().flat_map(|b| b.get_message(id)).next()
}
fn format_pattern(
&self,
id: &str,
pattern: &FluentPattern,
args: Option<&FluentArgs>,
errors: &mut Vec<FluentError>,
) -> String {
for bundle in self.0.iter() {
if bundle.has_message(id) {
return bundle.format_pattern(pattern, args, errors).to_string();
}
}
format!("localization failed for key '{}'", id)
}
}
//NOTE: much of this is adapted from https://github.com/projectfluent/fluent-rs/blob/master/fluent-resmgr/src/resource_manager.rs
impl ResourceManager {
/// Loads a new localization resource from disk, as needed.
fn get_resource(&mut self, res_id: &str, locale: &str) -> Arc<FluentResource> {
let path = self
.path_scheme
.replace("{locale}", locale)
.replace("{res_id}", res_id);
if let Some(res) = self.resources.get(&path) {
res.clone()
} else {
let string = fs::read_to_string(&path).unwrap_or_else(|_| {
if (res_id, locale) == ("builtin.ftl", "en-US") {
FALLBACK_STRINGS.to_string()
} else {
error!("missing resouce {}/{}", locale, res_id);
String::new()
}
});
let res = match FluentResource::try_new(string) {
Ok(res) => Arc::new(res),
Err((res, _err)) => Arc::new(res),
};
self.resources.insert(path, res.clone());
res
}
}
/// Return the best localization bundle for the provided `LanguageIdentifier`.
fn get_bundle(&mut self, locale: &LanguageIdentifier, resource_ids: &[String]) -> BundleStack {
let resolved_locales = self.resolve_locales(locale.clone());
debug!("resolved: {}", PrintLocales(resolved_locales.as_slice()));
let mut stack = Vec::new();
for locale in &resolved_locales {
let mut bundle = FluentBundle::new(&resolved_locales);
for res_id in resource_ids {
let res = self.get_resource(&res_id, &locale.to_string());
bundle.add_resource(res).unwrap();
}
stack.push(bundle);
}
BundleStack(stack)
}
/// Given a locale, returns the best set of available locales.
pub(crate) fn resolve_locales(&self, locale: LanguageIdentifier) -> Vec<LanguageIdentifier> {
negotiate_languages(
&[locale],
&self.locales,
Some(&self.default_locale),
NegotiationStrategy::Filtering,
)
.into_iter()
.map(|l| l.to_owned())
.collect()
}
}
impl L10nManager {
/// Create a new localization manager.
///
/// `resources` is a list of file names that contain strings. `base_dir`
/// is a path to a directory that includes per-locale subdirectories.
///
/// This directory should be of the structure `base_dir/{locale}/{resource}`,
/// where '{locale}' is a valid BCP47 language tag, and {resource} is a `.ftl`
/// included in `resources`.
pub fn new(resources: Vec<String>, base_dir: &str) -> Self {
fn get_available_locales(base_dir: &str) -> Result<Vec<LanguageIdentifier>, io::Error> {
let mut locales = vec![];
let res_dir = fs::read_dir(base_dir)?;
for entry in res_dir {
if let Ok(entry) = entry {
let path = entry.path();
if path.is_dir() {
if let Some(name) = path.file_name() {
if let Some(name) = name.to_str() {
let langid: LanguageIdentifier =
name.parse().expect("Parsing failed.");
locales.push(langid);
}
}
}
}
}
Ok(locales)
}
let default_locale: LanguageIdentifier =
"en-US".parse().expect("failed to parse default locale");
let current_locale = Application::get_locale()
.parse()
.unwrap_or_else(|_| default_locale.clone());
let locales = get_available_locales(base_dir).unwrap_or_default();
debug!(
"available locales {}, current {}",
PrintLocales(&locales),
current_locale,
);
let mut path_scheme = base_dir.to_string();
path_scheme.push_str("/{locale}/{res_id}");
let mut res_mgr = ResourceManager {
resources: HashMap::new(),
path_scheme,
default_locale,
locales,
};
let current_bundle = res_mgr.get_bundle(&current_locale, &resources);
L10nManager {
res_mgr,
current_bundle,
resources,
current_locale,
}
}
/// Fetch a localized string from the current bundle by key.
///
/// In general, this should not be used directly; [`LocalizedString`]
/// should be used for localization, and you should call
/// [`LocalizedString::resolve`] to update the string as required.
///
///[`LocalizedString`]: struct.LocalizedString.html
///[`LocalizedString::resolve`]: struct.LocalizedString.html#method.resolve
pub fn localize<'args>(
&'args self,
key: &str,
args: impl Into<Option<&'args FluentArgs<'args>>>,
) -> Option<String> {
let args = args.into();
let value = match self
.current_bundle
.get_message(key)
.and_then(|msg| msg.value)
{
Some(v) => v,
None => return None,
};
let mut errs = Vec::new();
let result = self
.current_bundle
.format_pattern(key, value, args, &mut errs);
for err in errs {
warn!("localization error {:?}", err);
}
// fluent inserts bidi controls when interpolating, and they can
// cause rendering issues; for now we just strip them.
// https://www.w3.org/International/questions/qa-bidi-unicode-controls#basedirection
const START_ISOLATE: char = '\u{2068}';
const END_ISOLATE: char = '\u{2069}';
if args.is_some() && result.chars().any(|c| c == START_ISOLATE) {
Some(
result
.chars()
.filter(|c| c != &START_ISOLATE && c != &END_ISOLATE)
.collect(),
)
} else {
Some(result)
}
}
//TODO: handle locale change
}
*/ ////
impl<T> LocalizedString<T> {
/// Create a new `LocalizedString` with the given key.
pub const fn new(key: &'static str) -> Self {
LocalizedString {
key,
args: None,
placeholder: None,
resolved: None,
////resolved_lang: None,
}
}
/* ////
/// Add a placeholder value. This will be used if localization fails.
///
/// This is intended for use during prototyping.
pub fn with_placeholder(mut self, placeholder: String) -> Self {
self.placeholder = Some(placeholder);
self
}
*/ ////
/// Return the localized value for this string, or the placeholder, if
/// the localization is missing, or the key if there is no placeholder.
pub fn localized_str(&self) -> &str {
//cortex_m::asm::bkpt(); ////
self.resolved ////
.as_ref()
.expect("not resolved")
.as_str()
/* ////
self.resolved
.as_ref()
.map(|s| s.as_str())
.or_else(|| self.placeholder.as_ref().map(String::as_ref))
.unwrap_or(self.key)
*/ ////
}
}
impl<T: Data> LocalizedString<T> {
/// Add a named argument and a corresponding [`ArgClosure`]. This closure
/// is a function that will return a value for the given key from the current
/// environment and data.
///
/// [`ArgClosure`]: type.ArgClosure.html
pub fn with_arg(
mut self,
key: &'static str,
f: fn(&T, &Env) -> ArgValue, ////
////f: impl Fn(&T, &Env) -> FluentValue<'static> + 'static,
) -> Self {
self.args
.get_or_insert(Vec::new())
.push((key, ArgSource(f)))
.expect("with arg failed"); ////
////.push((key, ArgSource(Arc::new(f))));
self
}
/// Lazily compute the localized value for this string based on the provided
/// environment and data.
///
/// Returns `true` if the current value of the string has changed.
pub fn resolve<'a>(&'a mut self, data: &T, env: &Env) -> bool {
//TODO: this recomputes the string if either the language has changed,
//or *anytime* we have arguments. Ideally we would be using a lens
//to only recompute when our actual data has changed.
if self.args.is_some()
////|| self.resolved_lang.as_ref() != Some(&env.localization_manager().current_locale)
{
//// Resolve all args
let mut args = ArgValues::new();
for arg in self.args.as_ref().expect("resolve fail") {
let (k, v) = arg;
let argvalue = (v.0)(data, env);
args.insert(k, argvalue.clone())
.expect("resolve fail");
//// Convert the first arg to text and exit
self.resolved = Some(argvalue.to_string());
//cortex_m::asm::bkpt(); ////
return true;
}
//// No args to resolve
false
/* ////
let args: Option<FluentArgs> = self
.args
.as_ref()
.map(|a| a.iter().map(|(k, v)| (*k, (v.0)(data, env))).collect());
self.resolved_lang = Some(env.localization_manager().current_locale.clone());
let next = env.localization_manager().localize(self.key, args.as_ref());
let result = next != self.resolved;
self.resolved = next;
result
*/ ////
} else |
}
}
/* ////
impl<T> std::fmt::Debug for ArgSource<T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Arg Resolver {:p}", self.0)
}
}
/// Helper to impl display for slices of displayable things.
struct PrintLocales<'a, T>(&'a [T]);
impl<'a, T: std::fmt::Display> std::fmt::Display for PrintLocales<'a, T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[")?;
let mut prev = false;
for l in self.0 {
if prev {
write!(f, ", ")?;
}
prev = true;
write!(f, "{}", l)?;
}
write!(f, "]")
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn resolve() {
let en_us: LanguageIdentifier = "en-US".parse().unwrap();
let en_ca: LanguageIdentifier = "en-CA".parse().unwrap();
let en_gb: LanguageIdentifier = "en-GB".parse().unwrap();
let fr_fr: LanguageIdentifier = "fr-FR".parse().unwrap();
let pt_pt: LanguageIdentifier = "pt-PT".parse().unwrap();
let resmgr = ResourceManager {
resources: HashMap::new(),
locales: vec![en_us.clone(), en_ca.clone(), en_gb.clone(), fr_fr.clone()],
default_locale: en_us.clone(),
path_scheme: String::new(),
};
let en_za: LanguageIdentifier = "en-GB".parse().unwrap();
let cn_hk: LanguageIdentifier = "cn-HK".parse().unwrap();
let fr_ca: LanguageIdentifier = "fr-CA".parse().unwrap();
assert_eq!(
resmgr.resolve_locales(en_ca.clone()),
vec![en_ca.clone(), en_us.clone(), en_gb.clone()]
);
assert_eq!(
resmgr.resolve_locales(en_za.clone()),
vec![en_gb.clone(), en_us.clone(), en_ca.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_ca.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_fr.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(resmgr.resolve_locales(cn_hk), vec![en_us.clone()]);
assert_eq!(resmgr.resolve_locales(pt_pt), vec![en_us.clone()]);
}
}
*/ ////
/// Implement formatted output for ArgSource
impl<T> core::fmt::Debug for ArgSource<T> { ////
fn fmt(&self, _fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
// TODO
Ok(())
}
} | {
false
} | conditional_block |
localization.rs | // Copyright 2019 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Localization handling.
//!
//! Localization is backed by [Fluent], via [fluent-rs].
//!
//! In Druid, the main way you will deal with localization is via the
//! [`LocalizedString`] struct.
//!
//! You construct a [`LocalizedString`] with a key, which identifies a 'message'
//! in your `.ftl` files. If your string requires arguments, you supply it with
//! closures that can extract those arguments from the current [`Env`] and
//! [`Data`].
//!
//! At runtime, you resolve your [`LocalizedString`] into an actual string,
//! passing it the current [`Env`] and [`Data`].
//!
//!
//! [Fluent]: https://projectfluent.org
//! [fluent-rs]: https://github.com/projectfluent/fluent-rs
//! [`LocalizedString`]: struct.LocalizedString.html
//! [`Env`]: struct.Env.html
//! [`Data`]: trait.Data.html
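//!
//! A minimal sketch of that flow (the `AppState` data type, the
//! `hello-counter` key, and building an `ArgValue` from an integer are
//! assumptions made for illustration only):
//!
//! ```ignore
//! let mut greeting = LocalizedString::new("hello-counter")
//!     .with_arg("count", |data: &AppState, _env| ArgValue::from(data.count));
//! if greeting.resolve(&data, &env) {
//!     // the string changed; fetch the freshly localized text
//!     let text = greeting.localized_str();
//! }
//! ```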
/* ////
use std::collections::HashMap;
use std::sync::Arc;
use std::{fs, io};
use log::{debug, error, warn};
*/ ////
use crate::data::Data;
use crate::env::Env;
use crate::{ArgValue, ArgValues}; ////
/* ////
use fluent_bundle::{
FluentArgs, FluentBundle, FluentError, FluentMessage, FluentResource, FluentValue,
};
use fluent_locale::{negotiate_languages, NegotiationStrategy};
use fluent_syntax::ast::Pattern as FluentPattern;
use unic_langid::LanguageIdentifier;
// Localization looks for string files in druid/resources, but this path is hardcoded;
// it will only work if you're running an example from the druid/ directory.
// At some point we will need to bundle strings with applications, and choose
// the path dynamically.
static FALLBACK_STRINGS: &str = include_str!("../resources/i18n/en-US/builtin.ftl");
/// Provides access to the localization strings for the current locale.
#[allow(dead_code)]
pub(crate) struct L10nManager {
// these two are not currently used; will be used when we let the user
// add additional localization files.
res_mgr: ResourceManager,
resources: Vec<String>,
current_bundle: BundleStack,
current_locale: LanguageIdentifier,
}
/// Manages a collection of localization files.
struct ResourceManager {
resources: HashMap<String, Arc<FluentResource>>,
locales: Vec<LanguageIdentifier>,
default_locale: LanguageIdentifier,
path_scheme: String,
}
*/ ////
type MaxLocalizedString = heapless::consts::U20; //// Max length of localized strings
type String = heapless::String::<MaxLocalizedString>; ////
type MaxLocalizedArg = heapless::consts::U2; //// Max number of localized args
type Vec<T> = heapless::Vec::<T, MaxLocalizedArg>; //// Bounded Vec for localized args
//////NOTE: instead of a closure, at some point we can use something like a lens for this.
//////TODO: this is an Arc so that it can be cloned, which is a bound on things like `Menu`.
/////// A closure that generates a localization value.
type ArgClosure<T> = fn(&T, &Env) -> ArgValue; ////
////type ArgClosure<T> = Arc<dyn Fn(&T, &Env) -> FluentValue<'static> + 'static>;
/// Wraps a closure that generates an argument for localization.
#[derive(Clone)]
struct ArgSource<T>(ArgClosure<T>);
/// A string that can be localized based on the current locale.
///
/// At its simplest, a `LocalizedString` is a key that can be resolved
/// against a map of localized strings for a given locale.
#[derive(Clone)]////
////#[derive(Debug, Clone)]
pub struct LocalizedString<T> {
pub(crate) key: &'static str,
placeholder: Option<String>,
args: Option<Vec<(&'static str, ArgSource<T>)>>,
resolved: Option<String>,
////resolved_lang: Option<LanguageIdentifier>,
}
/* ////
/// A stack of localization resources, used for fallback.
struct BundleStack(Vec<FluentBundle<Arc<FluentResource>>>);
impl BundleStack {
fn get_message(&self, id: &str) -> Option<FluentMessage> {
self.0.iter().flat_map(|b| b.get_message(id)).next()
}
fn format_pattern(
&self,
id: &str,
pattern: &FluentPattern,
args: Option<&FluentArgs>,
errors: &mut Vec<FluentError>,
) -> String {
for bundle in self.0.iter() {
if bundle.has_message(id) {
return bundle.format_pattern(pattern, args, errors).to_string();
}
}
format!("localization failed for key '{}'", id)
}
}
//NOTE: much of this is adapted from https://github.com/projectfluent/fluent-rs/blob/master/fluent-resmgr/src/resource_manager.rs
impl ResourceManager {
/// Loads a new localization resource from disk, as needed.
fn get_resource(&mut self, res_id: &str, locale: &str) -> Arc<FluentResource> {
let path = self
.path_scheme
.replace("{locale}", locale)
.replace("{res_id}", res_id);
if let Some(res) = self.resources.get(&path) {
res.clone()
} else {
let string = fs::read_to_string(&path).unwrap_or_else(|_| {
if (res_id, locale) == ("builtin.ftl", "en-US") {
FALLBACK_STRINGS.to_string()
} else {
error!("missing resouce {}/{}", locale, res_id);
String::new()
}
});
let res = match FluentResource::try_new(string) {
Ok(res) => Arc::new(res),
Err((res, _err)) => Arc::new(res),
};
self.resources.insert(path, res.clone());
res
}
}
/// Return the best localization bundle for the provided `LanguageIdentifier`.
fn get_bundle(&mut self, locale: &LanguageIdentifier, resource_ids: &[String]) -> BundleStack {
let resolved_locales = self.resolve_locales(locale.clone());
debug!("resolved: {}", PrintLocales(resolved_locales.as_slice()));
let mut stack = Vec::new();
for locale in &resolved_locales {
let mut bundle = FluentBundle::new(&resolved_locales);
for res_id in resource_ids {
let res = self.get_resource(&res_id, &locale.to_string());
bundle.add_resource(res).unwrap();
}
stack.push(bundle);
}
BundleStack(stack)
}
/// Given a locale, returns the best set of available locales.
pub(crate) fn resolve_locales(&self, locale: LanguageIdentifier) -> Vec<LanguageIdentifier> {
negotiate_languages(
&[locale],
&self.locales,
Some(&self.default_locale),
NegotiationStrategy::Filtering,
)
.into_iter()
.map(|l| l.to_owned())
.collect()
}
}
impl L10nManager {
/// Create a new localization manager.
///
/// `resources` is a list of file names that contain strings. `base_dir`
/// is a path to a directory that includes per-locale subdirectories.
///
/// This directory should be of the structure `base_dir/{locale}/{resource}`,
/// where '{locale}' is a valid BCP47 language tag, and {resource} is a `.ftl`
/// included in `resources`.
pub fn new(resources: Vec<String>, base_dir: &str) -> Self {
fn get_available_locales(base_dir: &str) -> Result<Vec<LanguageIdentifier>, io::Error> {
let mut locales = vec![];
let res_dir = fs::read_dir(base_dir)?;
for entry in res_dir {
if let Ok(entry) = entry {
let path = entry.path();
if path.is_dir() {
if let Some(name) = path.file_name() {
if let Some(name) = name.to_str() {
let langid: LanguageIdentifier =
name.parse().expect("Parsing failed.");
locales.push(langid);
}
}
}
}
}
Ok(locales)
}
let default_locale: LanguageIdentifier =
"en-US".parse().expect("failed to parse default locale");
let current_locale = Application::get_locale()
.parse()
.unwrap_or_else(|_| default_locale.clone());
let locales = get_available_locales(base_dir).unwrap_or_default();
debug!(
"available locales {}, current {}",
PrintLocales(&locales),
current_locale,
);
let mut path_scheme = base_dir.to_string();
path_scheme.push_str("/{locale}/{res_id}");
let mut res_mgr = ResourceManager {
resources: HashMap::new(),
path_scheme,
default_locale,
locales,
};
let current_bundle = res_mgr.get_bundle(&current_locale, &resources);
L10nManager {
res_mgr,
current_bundle,
resources,
current_locale,
}
}
/// Fetch a localized string from the current bundle by key.
///
/// In general, this should not be used directly; [`LocalizedString`]
/// should be used for localization, and you should call
/// [`LocalizedString::resolve`] to update the string as required.
///
///[`LocalizedString`]: struct.LocalizedString.html
///[`LocalizedString::resolve`]: struct.LocalizedString.html#method.resolve
pub fn localize<'args>(
&'args self,
key: &str,
args: impl Into<Option<&'args FluentArgs<'args>>>,
) -> Option<String> {
let args = args.into();
let value = match self
.current_bundle
.get_message(key)
.and_then(|msg| msg.value)
{
Some(v) => v,
None => return None,
};
let mut errs = Vec::new();
let result = self
.current_bundle
.format_pattern(key, value, args, &mut errs);
for err in errs {
warn!("localization error {:?}", err);
}
// fluent inserts bidi controls when interpolating, and they can
// cause rendering issues; for now we just strip them.
// https://www.w3.org/International/questions/qa-bidi-unicode-controls#basedirection
const START_ISOLATE: char = '\u{2068}';
const END_ISOLATE: char = '\u{2069}';
if args.is_some() && result.chars().any(|c| c == START_ISOLATE) {
Some(
result
.chars()
.filter(|c| c != &START_ISOLATE && c != &END_ISOLATE)
.collect(),
)
} else {
Some(result)
}
}
//TODO: handle locale change
}
*/ ////
impl<T> LocalizedString<T> {
/// Create a new `LocalizedString` with the given key.
pub const fn new(key: &'static str) -> Self {
LocalizedString {
key,
args: None,
placeholder: None,
resolved: None,
////resolved_lang: None,
}
}
/* ////
/// Add a placeholder value. This will be used if localization fails.
///
/// This is intended for use during prototyping.
pub fn with_placeholder(mut self, placeholder: String) -> Self {
self.placeholder = Some(placeholder);
self
}
*/ ////
/// Return the localized value for this string, or the placeholder, if
/// the localization is missing, or the key if there is no placeholder.
pub fn localized_str(&self) -> &str {
//cortex_m::asm::bkpt(); ////
self.resolved ////
.as_ref()
.expect("not resolved")
.as_str()
/* ////
self.resolved
.as_ref()
.map(|s| s.as_str())
.or_else(|| self.placeholder.as_ref().map(String::as_ref))
.unwrap_or(self.key)
*/ ////
}
}
impl<T: Data> LocalizedString<T> {
/// Add a named argument and a corresponding [`ArgClosure`]. This closure
/// is a function that will return a value for the given key from the current
/// environment and data.
///
/// [`ArgClosure`]: type.ArgClosure.html
pub fn | (
mut self,
key: &'static str,
f: fn(&T, &Env) -> ArgValue, ////
////f: impl Fn(&T, &Env) -> FluentValue<'static> + 'static,
) -> Self {
self.args
.get_or_insert(Vec::new())
.push((key, ArgSource(f)))
.expect("with arg failed"); ////
////.push((key, ArgSource(Arc::new(f))));
self
}
/// Lazily compute the localized value for this string based on the provided
/// environment and data.
///
/// Returns `true` if the current value of the string has changed.
pub fn resolve<'a>(&'a mut self, data: &T, env: &Env) -> bool {
//TODO: this recomputes the string if either the language has changed,
//or *anytime* we have arguments. Ideally we would be using a lens
//to only recompute when our actual data has changed.
if self.args.is_some()
////|| self.resolved_lang.as_ref() != Some(&env.localization_manager().current_locale)
{
//// Resolve all args
let mut args = ArgValues::new();
for arg in self.args.as_ref().expect("resolve fail") {
let (k, v) = arg;
let argvalue = (v.0)(data, env);
args.insert(k, argvalue.clone())
.expect("resolve fail");
//// Convert the first arg to text and exit
self.resolved = Some(argvalue.to_string());
//cortex_m::asm::bkpt(); ////
return true;
}
//// No args to resolve
false
/* ////
let args: Option<FluentArgs> = self
.args
.as_ref()
.map(|a| a.iter().map(|(k, v)| (*k, (v.0)(data, env))).collect());
self.resolved_lang = Some(env.localization_manager().current_locale.clone());
let next = env.localization_manager().localize(self.key, args.as_ref());
let result = next != self.resolved;
self.resolved = next;
result
*/ ////
} else {
false
}
}
}
/* ////
impl<T> std::fmt::Debug for ArgSource<T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Arg Resolver {:p}", self.0)
}
}
/// Helper to impl display for slices of displayable things.
struct PrintLocales<'a, T>(&'a [T]);
impl<'a, T: std::fmt::Display> std::fmt::Display for PrintLocales<'a, T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[")?;
let mut prev = false;
for l in self.0 {
if prev {
write!(f, ", ")?;
}
prev = true;
write!(f, "{}", l)?;
}
write!(f, "]")
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn resolve() {
let en_us: LanguageIdentifier = "en-US".parse().unwrap();
let en_ca: LanguageIdentifier = "en-CA".parse().unwrap();
let en_gb: LanguageIdentifier = "en-GB".parse().unwrap();
let fr_fr: LanguageIdentifier = "fr-FR".parse().unwrap();
let pt_pt: LanguageIdentifier = "pt-PT".parse().unwrap();
let resmgr = ResourceManager {
resources: HashMap::new(),
locales: vec![en_us.clone(), en_ca.clone(), en_gb.clone(), fr_fr.clone()],
default_locale: en_us.clone(),
path_scheme: String::new(),
};
let en_za: LanguageIdentifier = "en-GB".parse().unwrap();
let cn_hk: LanguageIdentifier = "cn-HK".parse().unwrap();
let fr_ca: LanguageIdentifier = "fr-CA".parse().unwrap();
assert_eq!(
resmgr.resolve_locales(en_ca.clone()),
vec![en_ca.clone(), en_us.clone(), en_gb.clone()]
);
assert_eq!(
resmgr.resolve_locales(en_za.clone()),
vec![en_gb.clone(), en_us.clone(), en_ca.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_ca.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_fr.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(resmgr.resolve_locales(cn_hk), vec![en_us.clone()]);
assert_eq!(resmgr.resolve_locales(pt_pt), vec![en_us.clone()]);
}
}
*/ ////
/// Implement formatted output for ArgSource
impl<T> core::fmt::Debug for ArgSource<T> { ////
fn fmt(&self, _fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
// TODO
Ok(())
}
} | with_arg | identifier_name |
localization.rs | // Copyright 2019 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Localization handling.
//!
//! Localization is backed by [Fluent], via [fluent-rs].
//!
//! In Druid, the main way you will deal with localization is via the
//! [`LocalizedString`] struct.
//!
//! You construct a [`LocalizedString`] with a key, which identifies a 'message'
//! in your `.ftl` files. If your string requires arguments, you supply it with
//! closures that can extract those arguments from the current [`Env`] and
//! [`Data`].
//!
//! At runtime, you resolve your [`LocalizedString`] into an actual string,
//! passing it the current [`Env`] and [`Data`].
//!
//!
//! [Fluent]: https://projectfluent.org
//! [fluent-rs]: https://github.com/projectfluent/fluent-rs
//! [`LocalizedString`]: struct.LocalizedString.html
//! [`Env`]: struct.Env.html
//! [`Data`]: trait.Data.html
/* ////
use std::collections::HashMap;
use std::sync::Arc;
use std::{fs, io};
use log::{debug, error, warn};
*/ ////
use crate::data::Data;
use crate::env::Env;
use crate::{ArgValue, ArgValues}; ////
/* ////
use fluent_bundle::{
FluentArgs, FluentBundle, FluentError, FluentMessage, FluentResource, FluentValue,
};
use fluent_locale::{negotiate_languages, NegotiationStrategy};
use fluent_syntax::ast::Pattern as FluentPattern;
use unic_langid::LanguageIdentifier;
// Localization looks for string files in druid/resources, but this path is hardcoded;
// it will only work if you're running an example from the druid/ directory.
// At some point we will need to bundle strings with applications, and choose
// the path dynamically.
static FALLBACK_STRINGS: &str = include_str!("../resources/i18n/en-US/builtin.ftl");
/// Provides access to the localization strings for the current locale.
#[allow(dead_code)]
pub(crate) struct L10nManager {
// these two are not currently used; will be used when we let the user
// add additional localization files.
res_mgr: ResourceManager,
resources: Vec<String>,
current_bundle: BundleStack,
current_locale: LanguageIdentifier,
}
/// Manages a collection of localization files.
struct ResourceManager {
resources: HashMap<String, Arc<FluentResource>>,
locales: Vec<LanguageIdentifier>,
default_locale: LanguageIdentifier,
path_scheme: String,
}
*/ ////
type MaxLocalizedString = heapless::consts::U20; //// Max length of localized strings
type String = heapless::String::<MaxLocalizedString>; ////
type MaxLocalizedArg = heapless::consts::U2; //// Max number of localized args
type Vec<T> = heapless::Vec::<T, MaxLocalizedArg>; //// Bounded Vec for localized args
//////NOTE: instead of a closure, at some point we can use something like a lens for this.
//////TODO: this is an Arc so that it can be cloned, which is a bound on things like `Menu`.
/////// A closure that generates a localization value.
type ArgClosure<T> = fn(&T, &Env) -> ArgValue; ////
////type ArgClosure<T> = Arc<dyn Fn(&T, &Env) -> FluentValue<'static> + 'static>;
/// Wraps a closure that generates an argument for localization.
#[derive(Clone)]
struct ArgSource<T>(ArgClosure<T>);
/// A string that can be localized based on the current locale.
///
/// At its simplest, a `LocalizedString` is a key that can be resolved
/// against a map of localized strings for a given locale.
#[derive(Clone)]////
////#[derive(Debug, Clone)]
pub struct LocalizedString<T> {
pub(crate) key: &'static str,
placeholder: Option<String>,
args: Option<Vec<(&'static str, ArgSource<T>)>>,
resolved: Option<String>,
////resolved_lang: Option<LanguageIdentifier>,
}
/* ////
/// A stack of localization resources, used for fallback.
struct BundleStack(Vec<FluentBundle<Arc<FluentResource>>>);
impl BundleStack {
fn get_message(&self, id: &str) -> Option<FluentMessage> {
self.0.iter().flat_map(|b| b.get_message(id)).next()
}
fn format_pattern(
&self,
id: &str,
pattern: &FluentPattern,
args: Option<&FluentArgs>,
errors: &mut Vec<FluentError>,
) -> String {
for bundle in self.0.iter() {
if bundle.has_message(id) {
return bundle.format_pattern(pattern, args, errors).to_string();
}
}
format!("localization failed for key '{}'", id)
}
}
//NOTE: much of this is adapted from https://github.com/projectfluent/fluent-rs/blob/master/fluent-resmgr/src/resource_manager.rs
impl ResourceManager {
/// Loads a new localization resource from disk, as needed.
fn get_resource(&mut self, res_id: &str, locale: &str) -> Arc<FluentResource> {
let path = self
.path_scheme
.replace("{locale}", locale)
.replace("{res_id}", res_id);
if let Some(res) = self.resources.get(&path) {
res.clone()
} else {
let string = fs::read_to_string(&path).unwrap_or_else(|_| {
if (res_id, locale) == ("builtin.ftl", "en-US") {
FALLBACK_STRINGS.to_string()
} else {
error!("missing resouce {}/{}", locale, res_id);
String::new()
}
});
let res = match FluentResource::try_new(string) {
Ok(res) => Arc::new(res),
Err((res, _err)) => Arc::new(res),
};
self.resources.insert(path, res.clone());
res
}
}
/// Return the best localization bundle for the provided `LanguageIdentifier`.
fn get_bundle(&mut self, locale: &LanguageIdentifier, resource_ids: &[String]) -> BundleStack {
let resolved_locales = self.resolve_locales(locale.clone());
debug!("resolved: {}", PrintLocales(resolved_locales.as_slice()));
let mut stack = Vec::new();
for locale in &resolved_locales {
let mut bundle = FluentBundle::new(&resolved_locales);
for res_id in resource_ids {
let res = self.get_resource(&res_id, &locale.to_string());
bundle.add_resource(res).unwrap();
}
stack.push(bundle);
}
BundleStack(stack)
}
/// Given a locale, returns the best set of available locales.
pub(crate) fn resolve_locales(&self, locale: LanguageIdentifier) -> Vec<LanguageIdentifier> {
negotiate_languages(
&[locale],
&self.locales,
Some(&self.default_locale),
NegotiationStrategy::Filtering,
)
.into_iter()
.map(|l| l.to_owned())
.collect()
}
}
impl L10nManager {
/// Create a new localization manager.
///
/// `resources` is a list of file names that contain strings. `base_dir`
/// is a path to a directory that includes per-locale subdirectories.
///
/// This directory should be of the structure `base_dir/{locale}/{resource}`,
/// where '{locale}' is a valid BCP47 language tag, and {resource} is a `.ftl`
/// included in `resources`.
pub fn new(resources: Vec<String>, base_dir: &str) -> Self {
fn get_available_locales(base_dir: &str) -> Result<Vec<LanguageIdentifier>, io::Error> {
let mut locales = vec![];
let res_dir = fs::read_dir(base_dir)?;
for entry in res_dir {
if let Ok(entry) = entry {
let path = entry.path();
if path.is_dir() {
if let Some(name) = path.file_name() {
if let Some(name) = name.to_str() {
let langid: LanguageIdentifier =
name.parse().expect("Parsing failed.");
locales.push(langid);
}
}
}
}
}
Ok(locales)
}
let default_locale: LanguageIdentifier =
"en-US".parse().expect("failed to parse default locale");
let current_locale = Application::get_locale()
.parse()
.unwrap_or_else(|_| default_locale.clone());
let locales = get_available_locales(base_dir).unwrap_or_default();
debug!(
"available locales {}, current {}",
PrintLocales(&locales),
current_locale,
);
let mut path_scheme = base_dir.to_string();
path_scheme.push_str("/{locale}/{res_id}");
let mut res_mgr = ResourceManager {
resources: HashMap::new(),
path_scheme,
default_locale,
locales,
};
let current_bundle = res_mgr.get_bundle(&current_locale, &resources);
L10nManager {
res_mgr,
current_bundle,
resources,
current_locale,
}
}
/// Fetch a localized string from the current bundle by key.
///
/// In general, this should not be used directly; [`LocalizedString`]
/// should be used for localization, and you should call
/// [`LocalizedString::resolve`] to update the string as required.
///
///[`LocalizedString`]: struct.LocalizedString.html
///[`LocalizedString::resolve`]: struct.LocalizedString.html#method.resolve
pub fn localize<'args>(
&'args self,
key: &str,
args: impl Into<Option<&'args FluentArgs<'args>>>,
) -> Option<String> {
let args = args.into();
let value = match self
.current_bundle
.get_message(key)
.and_then(|msg| msg.value)
{
Some(v) => v,
None => return None,
};
let mut errs = Vec::new();
let result = self
.current_bundle
.format_pattern(key, value, args, &mut errs);
for err in errs {
warn!("localization error {:?}", err);
}
// fluent inserts bidi controls when interpolating, and they can
// cause rendering issues; for now we just strip them.
// https://www.w3.org/International/questions/qa-bidi-unicode-controls#basedirection
const START_ISOLATE: char = '\u{2068}';
const END_ISOLATE: char = '\u{2069}';
if args.is_some() && result.chars().any(|c| c == START_ISOLATE) {
Some(
result
.chars()
.filter(|c| c != &START_ISOLATE && c != &END_ISOLATE)
.collect(),
)
} else {
Some(result)
}
}
//TODO: handle locale change
}
*/ ////
impl<T> LocalizedString<T> {
/// Create a new `LocalizedString` with the given key.
pub const fn new(key: &'static str) -> Self {
LocalizedString {
key,
args: None,
placeholder: None,
resolved: None,
////resolved_lang: None,
}
}
/* ////
/// Add a placeholder value. This will be used if localization fails.
///
/// This is intended for use during prototyping.
pub fn with_placeholder(mut self, placeholder: String) -> Self {
self.placeholder = Some(placeholder);
self
}
*/ ////
/// Return the localized value for this string, or the placeholder, if
/// the localization is missing, or the key if there is no placeholder.
pub fn localized_str(&self) -> &str {
//cortex_m::asm::bkpt(); ////
self.resolved ////
.as_ref()
.expect("not resolved")
.as_str()
/* ////
self.resolved
.as_ref()
.map(|s| s.as_str())
.or_else(|| self.placeholder.as_ref().map(String::as_ref))
.unwrap_or(self.key) | }
}
impl<T: Data> LocalizedString<T> {
/// Add a named argument and a corresponding [`ArgClosure`]. This closure
/// is a function that will return a value for the given key from the current
/// environment and data.
///
/// [`ArgClosure`]: type.ArgClosure.html
pub fn with_arg(
mut self,
key: &'static str,
f: fn(&T, &Env) -> ArgValue, ////
////f: impl Fn(&T, &Env) -> FluentValue<'static> + 'static,
) -> Self {
self.args
.get_or_insert(Vec::new())
.push((key, ArgSource(f)))
.expect("with arg failed"); ////
////.push((key, ArgSource(Arc::new(f))));
self
}
/// Lazily compute the localized value for this string based on the provided
/// environment and data.
///
/// Returns `true` if the current value of the string has changed.
pub fn resolve<'a>(&'a mut self, data: &T, env: &Env) -> bool {
//TODO: this recomputes the string if either the language has changed,
//or *anytime* we have arguments. Ideally we would be using a lens
//to only recompute when our actual data has changed.
if self.args.is_some()
////|| self.resolved_lang.as_ref() != Some(&env.localization_manager().current_locale)
{
//// Resolve all args
let mut args = ArgValues::new();
for arg in self.args.as_ref().expect("resolve fail") {
let (k, v) = arg;
let argvalue = (v.0)(data, env);
args.insert(k, argvalue.clone())
.expect("resolve fail");
//// Convert the first arg to text and exit
self.resolved = Some(argvalue.to_string());
//cortex_m::asm::bkpt(); ////
return true;
}
//// No args to resolve
false
/* ////
let args: Option<FluentArgs> = self
.args
.as_ref()
.map(|a| a.iter().map(|(k, v)| (*k, (v.0)(data, env))).collect());
self.resolved_lang = Some(env.localization_manager().current_locale.clone());
let next = env.localization_manager().localize(self.key, args.as_ref());
let result = next != self.resolved;
self.resolved = next;
result
*/ ////
} else {
false
}
}
}
/* ////
impl<T> std::fmt::Debug for ArgSource<T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Arg Resolver {:p}", self.0)
}
}
/// Helper to impl display for slices of displayable things.
struct PrintLocales<'a, T>(&'a [T]);
impl<'a, T: std::fmt::Display> std::fmt::Display for PrintLocales<'a, T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[")?;
let mut prev = false;
for l in self.0 {
if prev {
write!(f, ", ")?;
}
prev = true;
write!(f, "{}", l)?;
}
write!(f, "]")
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn resolve() {
let en_us: LanguageIdentifier = "en-US".parse().unwrap();
let en_ca: LanguageIdentifier = "en-CA".parse().unwrap();
let en_gb: LanguageIdentifier = "en-GB".parse().unwrap();
let fr_fr: LanguageIdentifier = "fr-FR".parse().unwrap();
let pt_pt: LanguageIdentifier = "pt-PT".parse().unwrap();
let resmgr = ResourceManager {
resources: HashMap::new(),
locales: vec![en_us.clone(), en_ca.clone(), en_gb.clone(), fr_fr.clone()],
default_locale: en_us.clone(),
path_scheme: String::new(),
};
let en_za: LanguageIdentifier = "en-GB".parse().unwrap();
let cn_hk: LanguageIdentifier = "cn-HK".parse().unwrap();
let fr_ca: LanguageIdentifier = "fr-CA".parse().unwrap();
assert_eq!(
resmgr.resolve_locales(en_ca.clone()),
vec![en_ca.clone(), en_us.clone(), en_gb.clone()]
);
assert_eq!(
resmgr.resolve_locales(en_za.clone()),
vec![en_gb.clone(), en_us.clone(), en_ca.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_ca.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_fr.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(resmgr.resolve_locales(cn_hk), vec![en_us.clone()]);
assert_eq!(resmgr.resolve_locales(pt_pt), vec![en_us.clone()]);
}
}
*/ ////
/// Implement formatted output for ArgSource
impl<T> core::fmt::Debug for ArgSource<T> { ////
fn fmt(&self, _fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
// TODO
Ok(())
}
} | */ //// | random_line_split |
localization.rs | // Copyright 2019 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Localization handling.
//!
//! Localization is backed by [Fluent], via [fluent-rs].
//!
//! In Druid, the main way you will deal with localization is via the
//! [`LocalizedString`] struct.
//!
//! You construct a [`LocalizedString`] with a key, which identifies a 'message'
//! in your `.ftl` files. If your string requires arguments, you supply it with
//! closures that can extract those arguments from the current [`Env`] and
//! [`Data`].
//!
//! At runtime, you resolve your [`LocalizedString`] into an actual string,
//! passing it the current [`Env`] and [`Data`].
//!
//!
//! [Fluent]: https://projectfluent.org
//! [fluent-rs]: https://github.com/projectfluent/fluent-rs
//! [`LocalizedString`]: struct.LocalizedString.html
//! [`Env`]: struct.Env.html
//! [`Data`]: trait.Data.html
/* ////
use std::collections::HashMap;
use std::sync::Arc;
use std::{fs, io};
use log::{debug, error, warn};
*/ ////
use crate::data::Data;
use crate::env::Env;
use crate::{ArgValue, ArgValues}; ////
/* ////
use fluent_bundle::{
FluentArgs, FluentBundle, FluentError, FluentMessage, FluentResource, FluentValue,
};
use fluent_locale::{negotiate_languages, NegotiationStrategy};
use fluent_syntax::ast::Pattern as FluentPattern;
use unic_langid::LanguageIdentifier;
// Localization looks for string files in druid/resources, but this path is hardcoded;
// it will only work if you're running an example from the druid/ directory.
// At some point we will need to bundle strings with applications, and choose
// the path dynamically.
static FALLBACK_STRINGS: &str = include_str!("../resources/i18n/en-US/builtin.ftl");
/// Provides access to the localization strings for the current locale.
#[allow(dead_code)]
pub(crate) struct L10nManager {
// these two are not currently used; will be used when we let the user
// add additional localization files.
res_mgr: ResourceManager,
resources: Vec<String>,
current_bundle: BundleStack,
current_locale: LanguageIdentifier,
}
/// Manages a collection of localization files.
struct ResourceManager {
resources: HashMap<String, Arc<FluentResource>>,
locales: Vec<LanguageIdentifier>,
default_locale: LanguageIdentifier,
path_scheme: String,
}
*/ ////
type MaxLocalizedString = heapless::consts::U20; //// Max length of localized strings
type String = heapless::String::<MaxLocalizedString>; ////
type MaxLocalizedArg = heapless::consts::U2; //// Max number of localized args
type Vec<T> = heapless::Vec::<T, MaxLocalizedArg>; //// Bounded Vec for localized args
//////NOTE: instead of a closure, at some point we can use something like a lens for this.
//////TODO: this is an Arc so that it can be cloned, which is a bound on things like `Menu`.
/////// A closure that generates a localization value.
type ArgClosure<T> = fn(&T, &Env) -> ArgValue; ////
////type ArgClosure<T> = Arc<dyn Fn(&T, &Env) -> FluentValue<'static> + 'static>;
/// Wraps a closure that generates an argument for localization.
#[derive(Clone)]
struct ArgSource<T>(ArgClosure<T>);
/// A string that can be localized based on the current locale.
///
/// At its simplest, a `LocalizedString` is a key that can be resolved
/// against a map of localized strings for a given locale.
#[derive(Clone)]////
////#[derive(Debug, Clone)]
pub struct LocalizedString<T> {
pub(crate) key: &'static str,
placeholder: Option<String>,
args: Option<Vec<(&'static str, ArgSource<T>)>>,
resolved: Option<String>,
////resolved_lang: Option<LanguageIdentifier>,
}
/* ////
/// A stack of localization resources, used for fallback.
struct BundleStack(Vec<FluentBundle<Arc<FluentResource>>>);
impl BundleStack {
fn get_message(&self, id: &str) -> Option<FluentMessage> {
self.0.iter().flat_map(|b| b.get_message(id)).next()
}
fn format_pattern(
&self,
id: &str,
pattern: &FluentPattern,
args: Option<&FluentArgs>,
errors: &mut Vec<FluentError>,
) -> String {
for bundle in self.0.iter() {
if bundle.has_message(id) {
return bundle.format_pattern(pattern, args, errors).to_string();
}
}
format!("localization failed for key '{}'", id)
}
}
//NOTE: much of this is adapted from https://github.com/projectfluent/fluent-rs/blob/master/fluent-resmgr/src/resource_manager.rs
impl ResourceManager {
/// Loads a new localization resource from disk, as needed.
fn get_resource(&mut self, res_id: &str, locale: &str) -> Arc<FluentResource> {
let path = self
.path_scheme
.replace("{locale}", locale)
.replace("{res_id}", res_id);
if let Some(res) = self.resources.get(&path) {
res.clone()
} else {
let string = fs::read_to_string(&path).unwrap_or_else(|_| {
if (res_id, locale) == ("builtin.ftl", "en-US") {
FALLBACK_STRINGS.to_string()
} else {
error!("missing resouce {}/{}", locale, res_id);
String::new()
}
});
let res = match FluentResource::try_new(string) {
Ok(res) => Arc::new(res),
Err((res, _err)) => Arc::new(res),
};
self.resources.insert(path, res.clone());
res
}
}
/// Return the best localization bundle for the provided `LanguageIdentifier`.
fn get_bundle(&mut self, locale: &LanguageIdentifier, resource_ids: &[String]) -> BundleStack {
let resolved_locales = self.resolve_locales(locale.clone());
debug!("resolved: {}", PrintLocales(resolved_locales.as_slice()));
let mut stack = Vec::new();
for locale in &resolved_locales {
let mut bundle = FluentBundle::new(&resolved_locales);
for res_id in resource_ids {
let res = self.get_resource(&res_id, &locale.to_string());
bundle.add_resource(res).unwrap();
}
stack.push(bundle);
}
BundleStack(stack)
}
/// Given a locale, returns the best set of available locales.
pub(crate) fn resolve_locales(&self, locale: LanguageIdentifier) -> Vec<LanguageIdentifier> {
negotiate_languages(
&[locale],
&self.locales,
Some(&self.default_locale),
NegotiationStrategy::Filtering,
)
.into_iter()
.map(|l| l.to_owned())
.collect()
}
}
impl L10nManager {
/// Create a new localization manager.
///
/// `resources` is a list of file names that contain strings. `base_dir`
/// is a path to a directory that includes per-locale subdirectories.
///
/// This directory should be of the structure `base_dir/{locale}/{resource}`,
/// where '{locale}' is a valid BCP47 language tag, and {resource} is a `.ftl`
/// included in `resources`.
pub fn new(resources: Vec<String>, base_dir: &str) -> Self {
fn get_available_locales(base_dir: &str) -> Result<Vec<LanguageIdentifier>, io::Error> {
let mut locales = vec![];
let res_dir = fs::read_dir(base_dir)?;
for entry in res_dir {
if let Ok(entry) = entry {
let path = entry.path();
if path.is_dir() {
if let Some(name) = path.file_name() {
if let Some(name) = name.to_str() {
let langid: LanguageIdentifier =
name.parse().expect("Parsing failed.");
locales.push(langid);
}
}
}
}
}
Ok(locales)
}
let default_locale: LanguageIdentifier =
"en-US".parse().expect("failed to parse default locale");
let current_locale = Application::get_locale()
.parse()
.unwrap_or_else(|_| default_locale.clone());
let locales = get_available_locales(base_dir).unwrap_or_default();
debug!(
"available locales {}, current {}",
PrintLocales(&locales),
current_locale,
);
let mut path_scheme = base_dir.to_string();
path_scheme.push_str("/{locale}/{res_id}");
let mut res_mgr = ResourceManager {
resources: HashMap::new(),
path_scheme,
default_locale,
locales,
};
let current_bundle = res_mgr.get_bundle(&current_locale, &resources);
L10nManager {
res_mgr,
current_bundle,
resources,
current_locale,
}
}
/// Fetch a localized string from the current bundle by key.
///
/// In general, this should not be used directly; [`LocalizedString`]
/// should be used for localization, and you should call
/// [`LocalizedString::resolve`] to update the string as required.
///
///[`LocalizedString`]: struct.LocalizedString.html
///[`LocalizedString::resolve`]: struct.LocalizedString.html#method.resolve
pub fn localize<'args>(
&'args self,
key: &str,
args: impl Into<Option<&'args FluentArgs<'args>>>,
) -> Option<String> {
let args = args.into();
let value = match self
.current_bundle
.get_message(key)
.and_then(|msg| msg.value)
{
Some(v) => v,
None => return None,
};
let mut errs = Vec::new();
let result = self
.current_bundle
.format_pattern(key, value, args, &mut errs);
for err in errs {
warn!("localization error {:?}", err);
}
// fluent inserts bidi controls when interpolating, and they can
// cause rendering issues; for now we just strip them.
// https://www.w3.org/International/questions/qa-bidi-unicode-controls#basedirection
const START_ISOLATE: char = '\u{2068}';
const END_ISOLATE: char = '\u{2069}';
if args.is_some() && result.chars().any(|c| c == START_ISOLATE) {
Some(
result
.chars()
.filter(|c| c != &START_ISOLATE && c != &END_ISOLATE)
.collect(),
)
} else {
Some(result)
}
}
//TODO: handle locale change
}
*/ ////
impl<T> LocalizedString<T> {
/// Create a new `LocalizedString` with the given key.
pub const fn new(key: &'static str) -> Self {
LocalizedString {
key,
args: None,
placeholder: None,
resolved: None,
////resolved_lang: None,
}
}
/* ////
/// Add a placeholder value. This will be used if localization fails.
///
/// This is intended for use during prototyping.
pub fn with_placeholder(mut self, placeholder: String) -> Self {
self.placeholder = Some(placeholder);
self
}
*/ ////
/// Return the localized value for this string, or the placeholder, if
/// the localization is missing, or the key if there is no placeholder.
pub fn localized_str(&self) -> &str {
//cortex_m::asm::bkpt(); ////
self.resolved ////
.as_ref()
.expect("not resolved")
.as_str()
/* ////
self.resolved
.as_ref()
.map(|s| s.as_str())
.or_else(|| self.placeholder.as_ref().map(String::as_ref))
.unwrap_or(self.key)
*/ ////
}
}
impl<T: Data> LocalizedString<T> {
/// Add a named argument and a corresponding [`ArgClosure`]. This closure
/// is a function that will return a value for the given key from the current
/// environment and data.
///
/// [`ArgClosure`]: type.ArgClosure.html
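///
/// A minimal sketch (the `AppState` type and the `ArgValue::from` conversion
/// are assumptions made for illustration only):
///
/// ```ignore
/// let s = LocalizedString::new("hello-counter")
///     .with_arg("count", |data: &AppState, _env| ArgValue::from(data.count));
/// ```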
pub fn with_arg(
mut self,
key: &'static str,
f: fn(&T, &Env) -> ArgValue, ////
////f: impl Fn(&T, &Env) -> FluentValue<'static> + 'static,
) -> Self {
self.args
.get_or_insert(Vec::new())
.push((key, ArgSource(f)))
.expect("with arg failed"); ////
////.push((key, ArgSource(Arc::new(f))));
self
}
/// Lazily compute the localized value for this string based on the provided
/// environment and data.
///
/// Returns `true` if the current value of the string has changed.
pub fn resolve<'a>(&'a mut self, data: &T, env: &Env) -> bool {
//TODO: this recomputes the string if either the language has changed,
//or *anytime* we have arguments. Ideally we would be using a lens
//to only recompute when our actual data has changed.
if self.args.is_some()
////|| self.resolved_lang.as_ref() != Some(&env.localization_manager().current_locale)
{
//// Resolve all args
let mut args = ArgValues::new();
for arg in self.args.as_ref().expect("resolve fail") {
let (k, v) = arg;
let argvalue = (v.0)(data, env);
args.insert(k, argvalue.clone())
.expect("resolve fail");
//// Convert the first arg to text and exit
self.resolved = Some(argvalue.to_string());
//cortex_m::asm::bkpt(); ////
return true;
}
//// No args to resolve
false
/* ////
let args: Option<FluentArgs> = self
.args
.as_ref()
.map(|a| a.iter().map(|(k, v)| (*k, (v.0)(data, env))).collect());
self.resolved_lang = Some(env.localization_manager().current_locale.clone());
let next = env.localization_manager().localize(self.key, args.as_ref());
let result = next != self.resolved;
self.resolved = next;
result
*/ ////
} else {
false
}
}
}
/* ////
impl<T> std::fmt::Debug for ArgSource<T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Arg Resolver {:p}", self.0)
}
}
/// Helper to impl display for slices of displayable things.
struct PrintLocales<'a, T>(&'a [T]);
impl<'a, T: std::fmt::Display> std::fmt::Display for PrintLocales<'a, T> {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "[")?;
let mut prev = false;
for l in self.0 {
if prev {
write!(f, ", ")?;
}
prev = true;
write!(f, "{}", l)?;
}
write!(f, "]")
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn resolve() {
let en_us: LanguageIdentifier = "en-US".parse().unwrap();
let en_ca: LanguageIdentifier = "en-CA".parse().unwrap();
let en_gb: LanguageIdentifier = "en-GB".parse().unwrap();
let fr_fr: LanguageIdentifier = "fr-FR".parse().unwrap();
let pt_pt: LanguageIdentifier = "pt-PT".parse().unwrap();
let resmgr = ResourceManager {
resources: HashMap::new(),
locales: vec![en_us.clone(), en_ca.clone(), en_gb.clone(), fr_fr.clone()],
default_locale: en_us.clone(),
path_scheme: String::new(),
};
let en_za: LanguageIdentifier = "en-GB".parse().unwrap();
let cn_hk: LanguageIdentifier = "cn-HK".parse().unwrap();
let fr_ca: LanguageIdentifier = "fr-CA".parse().unwrap();
assert_eq!(
resmgr.resolve_locales(en_ca.clone()),
vec![en_ca.clone(), en_us.clone(), en_gb.clone()]
);
assert_eq!(
resmgr.resolve_locales(en_za.clone()),
vec![en_gb.clone(), en_us.clone(), en_ca.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_ca.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(
resmgr.resolve_locales(fr_fr.clone()),
vec![fr_fr.clone(), en_us.clone()]
);
assert_eq!(resmgr.resolve_locales(cn_hk), vec![en_us.clone()]);
assert_eq!(resmgr.resolve_locales(pt_pt), vec![en_us.clone()]);
}
}
*/ ////
/// Implement formatted output for ArgSource
impl<T> core::fmt::Debug for ArgSource<T> { ////
fn fmt(&self, _fmt: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result |
} | {
// TODO
Ok(())
} | identifier_body |
cardano_wallet.d.ts | /* tslint:disable */
/**
* @param {Entropy} entropy
* @param {Uint8Array} iv
* @param {string} password
* @returns {any}
*/
export function paper_wallet_scramble(entropy: Entropy, iv: Uint8Array, password: string): any;
/**
* @param {Uint8Array} paper
* @param {string} password
* @returns {Entropy}
*/
export function paper_wallet_unscramble(paper: Uint8Array, password: string): Entropy;
/**
* encrypt the given data with a password, a salt and a nonce
*
* Salt: must be 32 bytes long;
* Nonce: must be 12 bytes long;
*
* @param {string} password
* @param {Uint8Array} salt
* @param {Uint8Array} nonce
* @param {Uint8Array} data
* @returns {any}
*/
export function password_encrypt(password: string, salt: Uint8Array, nonce: Uint8Array, data: Uint8Array): any;
/**
* decrypt the data with the password
*
* @param {string} password
* @param {Uint8Array} encrypted_data
* @returns {any}
*/
export function password_decrypt(password: string, encrypted_data: Uint8Array): any;
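/*
 * Example (illustrative sketch, not part of the declarations): round-tripping
 * data through password_encrypt/password_decrypt. The salt must be 32 bytes
 * and the nonce 12 bytes; the zeroed buffers below are placeholders, and
 * treating the encrypted result as a Uint8Array is an assumption.
 *
 *   const salt = new Uint8Array(32); // use random bytes in real code
 *   const nonce = new Uint8Array(12); // use random bytes in real code
 *   const data = new TextEncoder().encode("secret");
 *   const encrypted = password_encrypt("password", salt, nonce, data);
 *   const decrypted = password_decrypt("password", encrypted);
 */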
/**
*/
export class AccountIndex {
free(): void;
static new(index: number): AccountIndex;
}
/**
*/
export class Address {
free(): void;
to_base58(): string;
static from_base58(s: string): Address;
}
/**
*/
export class AddressKeyIndex {
free(): void;
static new(index: number): AddressKeyIndex;
}
/**
*/
export class Bip44AccountPrivate {
free(): void;
static new(key: PrivateKey, derivation_scheme: DerivationScheme): Bip44AccountPrivate;
public(): Bip44AccountPublic;
address_key(internal: boolean, index: AddressKeyIndex): PrivateKey;
key(): PrivateKey;
}
/**
*/
export class Bip44AccountPublic {
free(): void;
static new(key: PublicKey, derivation_scheme: DerivationScheme): Bip44AccountPublic;
address_key(internal: boolean, index: AddressKeyIndex): PublicKey;
key(): PublicKey;
}
/**
* Root Private Key of a BIP44 HD Wallet
*/
export class Bip44RootPrivateKey {
free(): void;
static new(key: PrivateKey, derivation_scheme: DerivationScheme): Bip44RootPrivateKey;
static recover(entropy: Entropy, password: string): Bip44RootPrivateKey;
bip44_account(index: AccountIndex): Bip44AccountPrivate;
key(): PrivateKey;
}
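/*
 * Example (illustrative sketch, not part of the declarations): recovering a
 * root key from mnemonics and deriving the first external address key. The
 * mnemonic string and password are placeholders, and using 0x80000000 as the
 * hardened index for account 0 is an assumption.
 *
 *   const entropy = Entropy.from_english_mnemonics("legal winner thank ...");
 *   const root = Bip44RootPrivateKey.recover(entropy, "password");
 *   const account = root.bip44_account(AccountIndex.new(0x80000000));
 *   const addressKey = account.address_key(false, AddressKeyIndex.new(0));
 */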
/**
* setting of the blockchain
*
* This includes the `ProtocolMagic`, a discriminant value to differentiate
* different instances of the cardano blockchain (Mainnet, Testnet... ).
*/
export class BlockchainSettings {
free(): void;
to_json(): any;
static from_json(value: any): BlockchainSettings;
static mainnet(): BlockchainSettings;
}
/**
*/
export class Coin {
free(): void;
constructor();
static from_str(s: string): Coin;
to_str(): string;
static from(ada: number, lovelace: number): Coin;
ada(): number;
lovelace(): number;
add(other: Coin): Coin;
}
/**
*/
export class CoinDiff {
free(): void;
is_zero(): boolean;
is_negative(): boolean;
is_positive(): boolean;
value(): Coin;
}
/**
*/
export class DaedalusAddressChecker {
free(): void;
static new(wallet: DaedalusWallet): DaedalusAddressChecker;
check_address(address: Address): DaedalusCheckedAddress;
}
/**
* result value of the check_address function of the DaedalusAddressChecker.
*
* If the address passed to check_address was recognised by the daedalus wallet
* then this object will contain the private key associated to this address,
* which is the private key necessary to sign transactions.
*/
export class DaedalusCheckedAddress {
free(): void;
is_checked(): boolean;
private_key(): PrivateKey;
}
/**
*/
export class DaedalusWallet {
free(): void;
static new(key: PrivateKey): DaedalusWallet;
static recover(entropy: Entropy): DaedalusWallet;
}
/**
* There is a special function to use when deriving Addresses. This function
* has been revised to offer stronger properties. This is why there is a
* V2 derivation scheme. V1 is the legacy scheme still used by Daedalus
* today.
*
* It is strongly advised to use V2, as V1 has been deprecated since April 2018.
* It remains supported only for backward compatibility with old
* addresses.
*/
export class DerivationScheme {
free(): void;
static v1(): DerivationScheme;
static v2(): DerivationScheme;
}
/**
* the entropy associated to mnemonics. This is a byte representation of the
* mnemonics the user has to remember in order to generate the root key of an
* HD Wallet.
*
* TODO: interface to generate a new entropy
*
* # Security considerations
*
* * do not store this value without encrypting it;
* * do not leak the mnemonics;
* * make sure the user remembers the mnemonics string;
*
*/
export class Entropy {
free(): void;
static from_english_mnemonics(mnemonics: string): Entropy;
to_english_mnemonics(): string;
to_array(): any;
}
/**
*/
export class InputSelectionBuilder {
free(): void;
static first_match_first(): InputSelectionBuilder;
static largest_first(): InputSelectionBuilder;
static blackjack(dust_threshold: Coin): InputSelectionBuilder;
add_input(tx_input: TxInput): void;
add_output(output: TxOut): void;
select_inputs(fee_algorithm: LinearFeeAlgorithm, output_policy: OutputPolicy): InputSelectionResult;
}
/**
*/
export class InputSelectionResult {
free(): void;
is_input(txo_pointer: TxoPointer): boolean;
estimated_fees(): Coin;
estimated_change(): Coin;
}
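/*
 * Example (illustrative sketch, not part of the declarations): letting the
 * input selection algorithm choose which UTxOs to spend. `availableInputs`
 * (TxInput[]), `payment` (TxOut) and `changeAddress` (Address) are
 * hypothetical values.
 *
 *   const builder = InputSelectionBuilder.largest_first();
 *   availableInputs.forEach((input) => builder.add_input(input));
 *   builder.add_output(payment);
 *   const result = builder.select_inputs(
 *     LinearFeeAlgorithm.default(),
 *     OutputPolicy.change_to_one_address(changeAddress),
 *   );
 *   const fee = result.estimated_fees();
 */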
/**
* This is the linear fee algorithm used by the current cardano blockchain.
*
* However it is possible the linear fee algorithm may change its settings:
*
* It is currently a linear function `fee(x) = a * x + b`. `a` and `b` can be
* re-configured by a protocol update. Users of this object need to be aware
* that it may change and that they might need to update its settings.
*
*/
export class LinearFeeAlgorithm {
free(): void;
static default(): LinearFeeAlgorithm;
}
/**
* This is the Output policy for automatic Input selection.
*/
export class OutputPolicy {
free(): void;
static change_to_one_address(address: Address): OutputPolicy;
}
/**
* A given private key. You can use this key to sign transactions.
*
* # security considerations
*
* * do not store this key without encrypting it;
* * if leaked anyone can _spend_ a UTxO (Unspent Transaction Output)
* with it;
*
*/
export class PrivateKey {
free(): void;
static new(entropy: Entropy, password: string): PrivateKey;
static from_hex(hex: string): PrivateKey;
to_hex(): string;
public(): PublicKey;
sign(data: Uint8Array): Signature;
derive(derivation_scheme: DerivationScheme, index: number): PrivateKey;
}
/**
*/
export class PrivateRedeemKey {
free(): void;
static from_bytes(bytes: Uint8Array): PrivateRedeemKey;
static from_hex(hex: string): PrivateRedeemKey;
to_hex(): string;
public(): PublicRedeemKey;
sign(data: Uint8Array): RedeemSignature;
}
/**
* The public key associated to a given private key.
*
* It is not possible to sign (and then spend) with a public key.
* However it is possible to verify a Signature.
*
* # Security Consideration
*
* * it is rather harmless to leak a public key, in the worst case
* only the privacy is leaked;
*
*/
export class PublicKey {
free(): void;
static from_hex(hex: string): PublicKey;
to_hex(): string;
verify(data: Uint8Array, signature: Signature): boolean;
derive(derivation_scheme: DerivationScheme, index: number): PublicKey;
bootstrap_era_address(blockchain_settings: BlockchainSettings): Address;
}
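/*
 * Example (illustrative sketch, not part of the declarations): signing with a
 * private key and verifying with the matching public key. `entropy` and the
 * password are placeholders.
 *
 *   const priv = PrivateKey.new(entropy, "password");
 *   const data = new Uint8Array([1, 2, 3]);
 *   const sig = priv.sign(data);
 *   const ok = priv.public().verify(data, sig); // true for the matching key
 */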
/**
*/
export class | {
free(): void;
static from_hex(hex: string): PublicRedeemKey;
to_hex(): string;
verify(data: Uint8Array, signature: RedeemSignature): boolean;
address(settings: BlockchainSettings): Address;
}
/**
*/
export class RedeemSignature {
free(): void;
static from_hex(hex: string): RedeemSignature;
to_hex(): string;
}
/**
*/
export class Signature {
free(): void;
static from_hex(hex: string): Signature;
to_hex(): string;
}
/**
* a signed transaction, ready to be sent to the network.
*/
export class SignedTransaction {
free(): void;
id(): string;
to_json(): any;
static from_bytes(bytes: Uint8Array): SignedTransaction;
to_hex(): string;
}
/**
* a transaction type; it is not ready for sending to the network. It is only an
* intermediate type to use between the transaction builder and the transaction
* finalizer. It allows separation of concerns:
*
* 1. build the transaction on one side/thread/machine/...;
* 2. sign the transaction on the other/thread/machines/cold-wallet...;
*
*/
export class Transaction {
free(): void;
id(): TransactionId;
to_json(): any;
to_hex(): string;
}
/**
* The transaction builder provides a set of tools to help build
* a valid Transaction.
*/
export class TransactionBuilder {
free(): void;
constructor();
add_input(txo_pointer: TxoPointer, value: Coin): void;
get_input_total(): Coin;
add_output(output: TxOut): void;
apply_output_policy(fee_algorithm: LinearFeeAlgorithm, policy: OutputPolicy): any;
get_output_total(): Coin;
estimate_fee(fee_algorithm: LinearFeeAlgorithm): Coin;
get_balance(fee_algorithm: LinearFeeAlgorithm): CoinDiff;
get_balance_without_fees(): CoinDiff;
make_transaction(): Transaction;
}
/**
*/
export class TransactionFinalized {
free(): void;
constructor(transaction: Transaction);
id(): TransactionId;
sign(blockchain_settings: BlockchainSettings, key: PrivateKey): void;
add_witness(witness: Witness): void;
finalize(): SignedTransaction;
}
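// Illustrative usage sketch (not part of the generated declarations) of the
// build / sign split described above; values are placeholders, error handling is
// omitted, and `privateKey` is assumed to be a PrivateKey obtained elsewhere.
//
//   const builder = new TransactionBuilder();
//   builder.add_input(TxoPointer.new(TransactionId.from_hex('...'), 0), Coin.from(2, 0));
//   builder.add_output(TxOut.new(Address.from_base58('...'), Coin.from(1, 0)));
//   const transaction = builder.make_transaction();
//   // ...possibly on another thread/machine/cold wallet:
//   const finalizer = new TransactionFinalized(transaction);
//   finalizer.sign(BlockchainSettings.mainnet(), privateKey);
//   const signed = finalizer.finalize();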
/**
*/
export class TransactionId {
free(): void;
to_hex(): string;
static from_hex(s: string): TransactionId;
}
/**
*/
export class TransactionSignature {
free(): void;
static from_hex(hex: string): TransactionSignature;
to_hex(): string;
}
/**
*/
export class TxInput {
free(): void;
static new(ptr: TxoPointer, value: TxOut): TxInput;
to_json(): any;
static from_json(value: any): TxInput;
}
/**
*/
export class TxOut {
free(): void;
static new(address: Address, value: Coin): TxOut;
to_json(): any;
static from_json(value: any): TxOut;
}
/**
*/
export class TxoPointer {
free(): void;
static new(id: TransactionId, index: number): TxoPointer;
to_json(): any;
static from_json(value: any): TxoPointer;
}
/**
*/
export class Witness {
free(): void;
static new_extended_key(blockchain_settings: BlockchainSettings, signing_key: PrivateKey, transaction_id: TransactionId): Witness;
static new_redeem_key(blockchain_settings: BlockchainSettings, signing_key: PrivateRedeemKey, transaction_id: TransactionId): Witness;
static from_external(key: PublicKey, signature: TransactionSignature): Witness;
}
| PublicRedeemKey | identifier_name |
cardano_wallet.d.ts | /* tslint:disable */
/**
* @param {Entropy} entropy
* @param {Uint8Array} iv
* @param {string} password
* @returns {any}
*/
export function paper_wallet_scramble(entropy: Entropy, iv: Uint8Array, password: string): any;
/**
* @param {Uint8Array} paper
* @param {string} password
* @returns {Entropy}
*/
export function paper_wallet_unscramble(paper: Uint8Array, password: string): Entropy;
/**
* encrypt the given data with a password, a salt and a nonce
*
* Salt: must be 32 bytes long;
* Nonce: must be 12 bytes long;
*
* @param {string} password
* @param {Uint8Array} salt
* @param {Uint8Array} nonce
* @param {Uint8Array} data
* @returns {any}
*/
export function password_encrypt(password: string, salt: Uint8Array, nonce: Uint8Array, data: Uint8Array): any;
/**
* decrypt the data with the password
*
* @param {string} password
* @param {Uint8Array} encrypted_data
* @returns {any}
*/
export function password_decrypt(password: string, encrypted_data: Uint8Array): any;
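// Illustrative round-trip sketch (not part of the generated declarations), using the
// documented 32-byte salt and 12-byte nonce. The use of window.crypto for randomness
// and the Uint8Array payload `data` are assumptions about the host environment.
//
//   const salt = window.crypto.getRandomValues(new Uint8Array(32));
//   const nonce = window.crypto.getRandomValues(new Uint8Array(12));
//   const encrypted = password_encrypt('my password', salt, nonce, data);
//   const decrypted = password_decrypt('my password', encrypted);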
/**
*/
export class AccountIndex {
free(): void;
static new(index: number): AccountIndex;
}
/**
*/
export class Address {
free(): void;
to_base58(): string;
static from_base58(s: string): Address;
}
/**
*/
export class AddressKeyIndex {
free(): void;
static new(index: number): AddressKeyIndex;
}
/**
*/
export class Bip44AccountPrivate {
free(): void;
static new(key: PrivateKey, derivation_scheme: DerivationScheme): Bip44AccountPrivate;
public(): Bip44AccountPublic;
address_key(internal: boolean, index: AddressKeyIndex): PrivateKey;
key(): PrivateKey;
}
/**
*/
export class Bip44AccountPublic {
free(): void;
static new(key: PublicKey, derivation_scheme: DerivationScheme): Bip44AccountPublic;
address_key(internal: boolean, index: AddressKeyIndex): PublicKey;
key(): PublicKey;
}
/**
* Root Private Key of a BIP44 HD Wallet
*/
export class Bip44RootPrivateKey {
free(): void;
static new(key: PrivateKey, derivation_scheme: DerivationScheme): Bip44RootPrivateKey;
static recover(entropy: Entropy, password: string): Bip44RootPrivateKey;
bip44_account(index: AccountIndex): Bip44AccountPrivate;
key(): PrivateKey;
}
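// Illustrative usage sketch (not part of the generated declarations): recovering a
// BIP44 root key from mnemonics and deriving the first external address key. The
// mnemonics string is a placeholder, and treating account 0 as the hardened index
// 0x80000000 is an assumption.
//
//   const entropy = Entropy.from_english_mnemonics('...');
//   const root = Bip44RootPrivateKey.recover(entropy, 'optional passphrase');
//   const account = root.bip44_account(AccountIndex.new(0x80000000));
//   const addressKey = account.address_key(false, AddressKeyIndex.new(0));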
/**
* settings of the blockchain
*
* This includes the `ProtocolMagic`, a discriminant value used to differentiate
* different instances of the cardano blockchain (Mainnet, Testnet...).
*/
export class BlockchainSettings {
free(): void;
to_json(): any;
static from_json(value: any): BlockchainSettings;
static mainnet(): BlockchainSettings;
}
/**
*/
export class Coin {
free(): void;
constructor();
static from_str(s: string): Coin;
to_str(): string;
static from(ada: number, lovelace: number): Coin;
ada(): number;
lovelace(): number;
add(other: Coin): Coin;
}
/**
*/
export class CoinDiff {
free(): void;
is_zero(): boolean;
is_negative(): boolean;
is_positive(): boolean;
value(): Coin;
}
/**
*/
export class DaedalusAddressChecker {
free(): void;
static new(wallet: DaedalusWallet): DaedalusAddressChecker;
check_address(address: Address): DaedalusCheckedAddress;
}
/**
* result value of the check_address function of the DaedalusAddressChecker.
*
* If the address passed to check_address was recognised by the daedalus wallet
* then this object will contain the private key associated to this address,
* which is the private key necessary to sign transactions.
*/
export class DaedalusCheckedAddress {
free(): void;
is_checked(): boolean;
private_key(): PrivateKey;
}
/**
*/
export class DaedalusWallet {
free(): void;
static new(key: PrivateKey): DaedalusWallet;
static recover(entropy: Entropy): DaedalusWallet;
}
/**
* There is a special function to use when deriving Addresses. This function
* has been revised to offer stronger properties. This is why there is a
* V2 derivation scheme. V1 is the legacy scheme still used by daedalus
* nowadays.
*
* It is strongly advised to use V2, as V1 has been deprecated since April 2018.
* V1 support is only kept for backward compatibility with old
* addresses.
*/
export class DerivationScheme {
free(): void;
static v1(): DerivationScheme;
static v2(): DerivationScheme;
}
/**
* the entropy associated to mnemonics. This is a byte representation of the
* mnemonics the user has to remember in order to generate the root key of an
* HD Wallet.
*
* TODO: interface to generate a new entropy
*
* # Security considerations
*
* * do not store this value without encrypting it;
* * do not leak the mnemonics;
* * make sure the user remembers the mnemonics string;
* | export class Entropy {
free(): void;
static from_english_mnemonics(mnemonics: string): Entropy;
to_english_mnemonics(): string;
to_array(): any;
}
/**
*/
export class InputSelectionBuilder {
free(): void;
static first_match_first(): InputSelectionBuilder;
static largest_first(): InputSelectionBuilder;
static blackjack(dust_threshold: Coin): InputSelectionBuilder;
add_input(tx_input: TxInput): void;
add_output(output: TxOut): void;
select_inputs(fee_algorithm: LinearFeeAlgorithm, output_policy: OutputPolicy): InputSelectionResult;
}
/**
*/
export class InputSelectionResult {
free(): void;
is_input(txo_pointer: TxoPointer): boolean;
estimated_fees(): Coin;
estimated_change(): Coin;
}
/**
* This is the linear fee algorithm used by the current cardano blockchain.
*
* However it is possible the linear fee algorithm may change its settings:
*
* It is currently a function `fee(n) = a * n + b`. `a` and `b` can be
* re-configured by a protocol update. Users of this object need to be aware
* that it may change and that they might need to update its settings.
*
*/
export class LinearFeeAlgorithm {
free(): void;
static default(): LinearFeeAlgorithm;
}
/**
* This is the Output policy for automatic Input selection.
*/
export class OutputPolicy {
free(): void;
static change_to_one_address(address: Address): OutputPolicy;
}
/**
* A given private key. You can use this key to sign transactions.
*
* # security considerations
*
* * do not store this key without encrypting it;
* * if leaked anyone can _spend_ a UTxO (Unspent Transaction Output)
* with it;
*
*/
export class PrivateKey {
free(): void;
static new(entropy: Entropy, password: string): PrivateKey;
static from_hex(hex: string): PrivateKey;
to_hex(): string;
public(): PublicKey;
sign(data: Uint8Array): Signature;
derive(derivation_scheme: DerivationScheme, index: number): PrivateKey;
}
/**
*/
export class PrivateRedeemKey {
free(): void;
static from_bytes(bytes: Uint8Array): PrivateRedeemKey;
static from_hex(hex: string): PrivateRedeemKey;
to_hex(): string;
public(): PublicRedeemKey;
sign(data: Uint8Array): RedeemSignature;
}
/**
* The public key associated to a given private key.
*
* It is not possible to sign (and then spend) with a public key.
* However it is possible to verify a Signature.
*
* # Security Consideration
*
* * it is rather harmless to leak a public key, in the worst case
* only the privacy is leaked;
*
*/
export class PublicKey {
free(): void;
static from_hex(hex: string): PublicKey;
to_hex(): string;
verify(data: Uint8Array, signature: Signature): boolean;
derive(derivation_scheme: DerivationScheme, index: number): PublicKey;
bootstrap_era_address(blockchain_settings: BlockchainSettings): Address;
}
/**
*/
export class PublicRedeemKey {
free(): void;
static from_hex(hex: string): PublicRedeemKey;
to_hex(): string;
verify(data: Uint8Array, signature: RedeemSignature): boolean;
address(settings: BlockchainSettings): Address;
}
/**
*/
export class RedeemSignature {
free(): void;
static from_hex(hex: string): RedeemSignature;
to_hex(): string;
}
/**
*/
export class Signature {
free(): void;
static from_hex(hex: string): Signature;
to_hex(): string;
}
/**
* a signed transaction, ready to be sent to the network.
*/
export class SignedTransaction {
free(): void;
id(): string;
to_json(): any;
static from_bytes(bytes: Uint8Array): SignedTransaction;
to_hex(): string;
}
/**
* a transaction type; it is not ready for sending to the network. It is only an
* intermediate type to use between the transaction builder and the transaction
* finalizer. It allows separation of concerns:
*
* 1. build the transaction on one side/thread/machine/...;
* 2. sign the transaction on the other/thread/machines/cold-wallet...;
*
*/
export class Transaction {
free(): void;
id(): TransactionId;
to_json(): any;
to_hex(): string;
}
/**
* The transaction builder provides a set of tools to help build
* a valid Transaction.
*/
export class TransactionBuilder {
free(): void;
constructor();
add_input(txo_pointer: TxoPointer, value: Coin): void;
get_input_total(): Coin;
add_output(output: TxOut): void;
apply_output_policy(fee_algorithm: LinearFeeAlgorithm, policy: OutputPolicy): any;
get_output_total(): Coin;
estimate_fee(fee_algorithm: LinearFeeAlgorithm): Coin;
get_balance(fee_algorithm: LinearFeeAlgorithm): CoinDiff;
get_balance_without_fees(): CoinDiff;
make_transaction(): Transaction;
}
/**
*/
export class TransactionFinalized {
free(): void;
constructor(transaction: Transaction);
id(): TransactionId;
sign(blockchain_settings: BlockchainSettings, key: PrivateKey): void;
add_witness(witness: Witness): void;
finalize(): SignedTransaction;
}
/**
*/
export class TransactionId {
free(): void;
to_hex(): string;
static from_hex(s: string): TransactionId;
}
/**
*/
export class TransactionSignature {
free(): void;
static from_hex(hex: string): TransactionSignature;
to_hex(): string;
}
/**
*/
export class TxInput {
free(): void;
static new(ptr: TxoPointer, value: TxOut): TxInput;
to_json(): any;
static from_json(value: any): TxInput;
}
/**
*/
export class TxOut {
free(): void;
static new(address: Address, value: Coin): TxOut;
to_json(): any;
static from_json(value: any): TxOut;
}
/**
*/
export class TxoPointer {
free(): void;
static new(id: TransactionId, index: number): TxoPointer;
to_json(): any;
static from_json(value: any): TxoPointer;
}
/**
*/
export class Witness {
free(): void;
static new_extended_key(blockchain_settings: BlockchainSettings, signing_key: PrivateKey, transaction_id: TransactionId): Witness;
static new_redeem_key(blockchain_settings: BlockchainSettings, signing_key: PrivateRedeemKey, transaction_id: TransactionId): Witness;
static from_external(key: PublicKey, signature: TransactionSignature): Witness;
} | */ | random_line_split |
Upload.js | const UI = window.classes.UI;
const AppIcon = require('../../icons/AppIcon.js');
const Button = window.classes.Button;
const UIInput = window.classes.UIInput;
const Layouter = require('../utils/Layouter.js');
class Upload extends UI {
constructor(params) {
super(params);
this._components.CloseIcon = this.newC(AppIcon, {icon: 'close'});
this._components.Button = this.newC(Button, {title: 'Send', loadingTitle: 'Sending...'});
this._components.CaptionInput = this.newC(UIInput, {label: 'Caption'});
this._events = [
['click', 'uploadClose', 'onClose'],
['click', 'uploadPreviewAlbum', 'onAlbumClick'],
];
this._componentEvents = [
['click', 'Button', 'sendFiles'],
];
this._files = [];
this._uploadingMedia = false;
}
buttonLoading(loading) {
if (loading) {
this.$('.uploadButton').style.display = 'none';
this.$('.uploadLoading').style.display = 'block';
} else {
this.$('.uploadButton').style.display = 'block';
this.$('.uploadLoading').style.display = 'none';
}
}
sendFiles() {
this.emit('sendFiles', {files: this._files, caption: this._components.CaptionInput.val()});
this.buttonLoading(true);
setTimeout(()=>{
this.hide();
}, 2000);
}
onClose() {
this.hide();
}
show() {
this.$('#uploadTop').style.display = 'block';
this.$('.popupOverlay').classList.add('active');
this.loaded();
this.initScrollBarOn(this.$('.uploadPreview'));
}
hide() {
this.$('.popupOverlay').classList.remove('active');
this.$('.popupOverlay').classList.add('fading');
setTimeout(()=>{
this.$('.popupOverlay').classList.remove('fading');
this.$('#uploadTop').style.display = 'none';
this.buttonLoading(false);
}, 500);
}
async readFile(inputFile) {
let readTheData = (ifile, method)=>{
return new Promise((res, rej)=>{
let reader = new FileReader();
reader.onload = (e)=>{
res(e.target.result);
}
reader.onerror = (e)=>{
rej(e);
}
reader.onabort = ()=>{
rej();
}
reader[method](ifile);
});
};
let dims = (ifile)=>{
return new Promise((res,rej)=>{
const img = new Image();
img.onload = function () {
res([this.width, this.height]);
};
img.onerror = function() {
res([100,100]); // ???
};
img.src = ifile.dataURL;
});
};
try {
let ab = await readTheData(inputFile, 'readAsArrayBuffer');
let filename = (''+inputFile.name) || 'undefined';
let ext = (filename.lastIndexOf('.') != -1) ? (filename.substr(filename.lastIndexOf('.') + 1)).toLowerCase() : '';
let fileData = {
ab: ab,
name: inputFile.name,
size: inputFile.size,
filename: filename,
ext: ext,
type: 'doc',
aspectRatio: 1,
};
// nice one. https://stackoverflow.com/a/20732091/1119169 thanks Andrew!
const sizeI = Math.floor( Math.log(fileData.size) / Math.log(1024) );
fileData.sizeHuman = ( fileData.size / Math.pow(1024, sizeI) ).toFixed(2) * 1 + ' ' + ['B', 'kB', 'MB', 'GB', 'TB'][sizeI];
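// e.g. a 1,536,000 byte file gives sizeI = 2 and sizeHuman = "1.46 MB"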
fileData.color = ((''+ext+' ').charCodeAt(0) % 8 + 1);
fileData.random = (''+Math.random()).split('.').join('');
let photoExts = ['png', 'jpg', 'jpeg'];
let videoExts = ['mp4', 'mpeg', 'avi', 'mov'];
if (photoExts.indexOf(ext) != -1 && this._uploadingMedia) {
fileData.type = 'photo';
} else if (videoExts.indexOf(ext) != -1 && this._uploadingMedia) {
fileData.type = 'video';
}
if (fileData.type == 'photo') {
// let dataURL = await readTheData(inputFile, 'readAsDataURL'); // ? URL.createObjectURL(file);
fileData.dataURL = URL.createObjectURL(inputFile);
[fileData.width, fileData.height] = await dims(fileData); // parallelize?
} else if (fileData.type == 'video') {
fileData.width = 100;
fileData.height = 100;
}
this._files.push(fileData);
return fileData;
} catch(e) {
// console.log(e);
return false;
}
}
loaded() {
// console.error(this._files);
this.updateTitle();
this.generateUploadPreview();
}
updateTitle() {
let suffix = {
doc: 'File',
video: 'Video',
photo: 'Photo',
};
let title = 'Send '+( (this._files.length > 1) ? this._files.length : '' ) + ' ' + suffix[this._files[0].type] + ( (this._files.length > 1) ? 's' : '' );
this.$('.uploadTitle').innerHTML = title;
}
onAlbumClick(e) {
const base = this.$();
let closest = e.target.closest('.upiRemove');
if (closest && base.contains(closest)) {
const el = closest.parentElement;
if (el) {
const id = el.id.split('uploadPreviewItem_').join('');
let ri = null;
for (let i = 0; i < this._files.length; i++) {
if (this._files[i].random == id) {
ri = i;
}
}
if (ri !== null) {
this._files.splice(ri, 1);
el.remove();
if (!this._files.length) {
this.hide();
} else {
this.doCalcs();
this.updateTitle();
}
}
}
}
}
doCalcs() {
this._layouter = new Layouter(this._files, {maxWidth: 384});
this._files = this._layouter.layout();
for (let file of this._files) {
const el = this.$('#uploadPreviewItem_'+file.random);
if (el) |
}
this.$('#uploadPreviewAlbum').style.height = this._layouter._height+'px';
this.initScrollBarOn(this.$('.uploadPreview'));
}
swapItems(id1, id2) {
if (id1 == id2) {
return;
}
id1 = id1.split('uploadPreviewItem_').join('');
id2 = id2.split('uploadPreviewItem_').join('');
let toMove1 = null;
let i1 = null;
let toMove2 = null;
let i2 = null;
// let insertBeforeI = null;
for (let i = 0; i < this._files.length; i++) {
if (this._files[i].random == id1) {
toMove1 = this._files[i];
i1 = i;
}
if (this._files[i].random == id2) {
toMove2 = this._files[i];
i2 = i;
}
}
this._files[i1] = toMove2;
this._files[i2] = toMove1;
this.doCalcs();
}
async generateVideoPreview(file, tryToSliceLength) {
let respAB = file.ab;
if (tryToSliceLength) {
respAB = file.ab.slice(0, tryToSliceLength);
}
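// Wrapping the (possibly truncated) buffer in a 206 Partial Content response lets the
// browser begin decoding the video without requiring the complete file.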
let response = new Response(
respAB,
{
status: 206,
statusText: 'Partial Content',
headers: [
['Content-Type', 'video/mp4'],
['Content-Length', file.ab.byteLength],
['Content-Range', '0-'+(respAB.byteLength - 1)+'/'+file.ab.byteLength ]]
});
let blob = await response.blob();
let blobUrl = URL.createObjectURL(blob);
let promise = new Promise((res, rej)=>{
let video = document.createElement('video');
video.addEventListener('error', function(event) {
rej();
}, true);
video.onloadeddata = (event) => {
let width = video.videoWidth;
let height = video.videoHeight;
let canvas = document.createElement('canvas');
canvas.width = width;
canvas.height = height;
file.aspectRatio = (height ? (width/height) : 1);
file.width = width;
file.height = height;
this.doCalcs();
let ctx = canvas.getContext("2d");
ctx.imageSmoothingEnabled = true;
ctx.drawImage(video, 0, 0, width, height);
let blankCanvas = document.createElement('canvas');
blankCanvas.width = width;
blankCanvas.height = height;
if (canvas.toDataURL() == blankCanvas.toDataURL()) {
rej();
}
canvas.toBlob((canvasBlob)=>{
let canvasBlobUrl = URL.createObjectURL(canvasBlob);
res(canvasBlobUrl);
});
};
video.preload = 'metadata';
video.src = blobUrl;
// Load video in Safari / IE11
video.muted = true;
video.playsInline = true;
video.play();
});
return await promise;
}
generateUploadPreviewForFile(file) {
const closeHTML = this._components.CloseIcon.render({noDOM: true});
if (file.type == 'photo') {
return '<div class="uploadPreviewItem" draggable="true" id="uploadPreviewItem_'+file.random+'" style="background-image: url(\''+file.dataURL+'\'); '+file.style+'"><div class="upiRemove">'+closeHTML+'</div></div>';
} else if (file.type == 'video') {
return '<div class="uploadPreviewItem" draggable="true" id="uploadPreviewItem_'+file.random+'" style="border: 1px solid #eee;"><div class="upiRemove">'+closeHTML+'</div></div>';
} else if (file.type == 'doc') {
return `<div class="uploadDocItem">
<div class="rsDoc">
<div class="rsDocIcon avatarC${file.color}">${file.ext}</div>
<div class="rsDocName">${file.filename}</div>
<div class="rsDocMeta">${file.sizeHuman}</div>
</div>
</div>`;
}
}
async generateUploadPreview() {
this.doCalcs();
let html = '';
for (let file of this._files) {
html += this.generateUploadPreviewForFile(file);
}
this.$('.uploadPreviewAlbum').innerHTML = html;
for (let file of this._files) {
const itemEl = this.$('#uploadPreviewItem_'+file.random);
if (itemEl) {
itemEl.addEventListener('dragstart', (ev)=>{
ev.dataTransfer.dropEffect = "move";
ev.dataTransfer.setData("text/plain", ev.target.id);
});
// itemEl.addEventListener('touchstart', (ev)=>{
// this._touchedItem = ev.target.id;
// });
itemEl.addEventListener('touchend', (ev)=>{
if (ev.target && ev.target.classList.contains('uploadPreviewItem')) {
if (ev.changedTouches && ev.changedTouches[0]) {
let x = ev.changedTouches[0].clientX;
let y = ev.changedTouches[0].clientY;
for (let file of this._files) {
const cont = this.$('#uploadPreviewItem_'+file.random);
const clientRect = cont.getBoundingClientRect();
if (x > clientRect.left && x < (clientRect.left + clientRect.width) && y > clientRect.top && y < (clientRect.top+clientRect.height)) {
return this.swapItems(cont.id, ev.target.id);
}
}
}
}
// console.error(ev);
});
itemEl.addEventListener('dragover', (ev)=>{
ev.preventDefault();
ev.dataTransfer.dropEffect = "move";
});
itemEl.addEventListener('drop', (ev)=>{
ev.preventDefault();
let toId = ev.target.id;
let fromId = ev.dataTransfer.getData("text/plain");
this.swapItems(toId, fromId);
});
}
if (itemEl && file.type == 'video' && !file.previewGenerated) {
let blobUrl = null;
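// Try progressively larger slices of the file until a poster frame can be decoded;
// files whose metadata sits near the end may only succeed with the larger slices.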
try {
blobUrl = await this.generateVideoPreview(file, 256*1024);
} catch(e) {
try {
blobUrl = await this.generateVideoPreview(file, 5*1024*1024);
} catch(e) {
try {
blobUrl = await this.generateVideoPreview(file, 10*1024*1024);
} catch(e) {
}
}
}
if (blobUrl) {
itemEl.style.backgroundImage = "url('"+blobUrl+"')";
} else {
itemEl.style.backgroundColor = "black";
}
itemEl.innerHTML+= '<div class=playIcon></div>';
file.previewGenerated = true;
}
}
}
selectFiles(forMedia) {
this._uploadingMedia = forMedia;
let input = this.$('#'+this.domId+'_fileInput_files');
if (forMedia) {
input = this.$('#'+this.domId+'_fileInput_media');
}
this._files = [];
input.onchange = ()=>{
if (input.files) {
let promises = [];
for (let inputFile of input.files) {
promises.push(this.readFile(inputFile));
}
Promise.all(promises)
.then(()=>{
this.show();
input.value = '';
});
}
};
input.click();
}
template() {
return `
<div id="uploadTop">
<div class="popupOverlay">
<div class="popup">
<input type="file" id="{{domId}}_fileInput_media" class="hidden" multiple="multiple" accept="image/*,video/*,.jpg,.png,.mp4">
<input type="file" id="{{domId}}_fileInput_files" class="hidden" multiple="multiple">
<div class="uploadPanel">
<div class="uploadClose" id="uploadClose">{{component(options.components.CloseIcon)}}{{/component}}</div>
<div class="uploadButton">{{component(options.components.Button)}}{{/component}}</div>
<div class="uploadLoading">
<div class="cssload-zenith dark"></div>
</div>
<div class="uploadTitle">Send 3 photos</div>
</div>
<div class="uploadPreview">
<div class="uploadPreviewAlbum" id="uploadPreviewAlbum"></div>
</div>
<div class="uploadCaption">
{{component(options.components.CaptionInput)}}{{/component}}
</div>
</div>
</div>
</div>
`;
}
};
module.exports = Upload;
| {
el.style.left = '' + file.pos.left + 'px';
el.style.top = '' + file.pos.top + 'px';
el.style.width = '' + file.pos.width + 'px';
el.style.height = '' + file.pos.height + 'px';
} | conditional_block |
Upload.js | const UI = window.classes.UI;
const AppIcon = require('../../icons/AppIcon.js');
const Button = window.classes.Button;
const UIInput = window.classes.UIInput;
const Layouter = require('../utils/Layouter.js');
class Upload extends UI {
constructor(params) {
super(params);
this._components.CloseIcon = this.newC(AppIcon, {icon: 'close'});
this._components.Button = this.newC(Button, {title: 'Send', loadingTitle: 'Sending...'});
this._components.CaptionInput = this.newC(UIInput, {label: 'Caption'});
this._events = [
['click', 'uploadClose', 'onClose'],
['click', 'uploadPreviewAlbum', 'onAlbumClick'],
];
this._componentEvents = [
['click', 'Button', 'sendFiles'],
];
this._files = [];
this._uploadingMedia = false;
}
buttonLoading(loading) {
if (loading) {
this.$('.uploadButton').style.display = 'none';
this.$('.uploadLoading').style.display = 'block';
} else {
this.$('.uploadButton').style.display = 'block';
this.$('.uploadLoading').style.display = 'none';
}
}
sendFiles() {
this.emit('sendFiles', {files: this._files, caption: this._components.CaptionInput.val()});
this.buttonLoading(true);
setTimeout(()=>{
this.hide();
}, 2000);
}
onClose() {
this.hide();
}
show() {
this.$('#uploadTop').style.display = 'block';
this.$('.popupOverlay').classList.add('active');
this.loaded();
this.initScrollBarOn(this.$('.uploadPreview'));
}
hide() {
this.$('.popupOverlay').classList.remove('active');
this.$('.popupOverlay').classList.add('fading');
setTimeout(()=>{
this.$('.popupOverlay').classList.remove('fading');
this.$('#uploadTop').style.display = 'none';
this.buttonLoading(false);
}, 500);
}
async readFile(inputFile) {
let readTheData = (ifile, method)=>{
return new Promise((res, rej)=>{
let reader = new FileReader();
reader.onload = (e)=>{
res(e.target.result);
}
reader.onerror = (e)=>{
rej(e);
}
reader.onabort = ()=>{
rej();
}
reader[method](ifile);
});
};
let dims = (ifile)=>{
return new Promise((res,rej)=>{
const img = new Image();
img.onload = function () {
res([this.width, this.height]);
};
img.onerror = function() {
res([100,100]); // ???
};
img.src = ifile.dataURL;
});
};
try {
let ab = await readTheData(inputFile, 'readAsArrayBuffer');
let filename = (''+inputFile.name) || 'undefined';
let ext = (filename.lastIndexOf('.') != -1) ? (filename.substr(filename.lastIndexOf('.') + 1)).toLowerCase() : '';
let fileData = {
ab: ab,
name: inputFile.name,
size: inputFile.size,
filename: filename,
ext: ext,
type: 'doc',
aspectRatio: 1,
};
// nice one. https://stackoverflow.com/a/20732091/1119169 thanks Andrew!
const sizeI = Math.floor( Math.log(fileData.size) / Math.log(1024) );
fileData.sizeHuman = ( fileData.size / Math.pow(1024, sizeI) ).toFixed(2) * 1 + ' ' + ['B', 'kB', 'MB', 'GB', 'TB'][sizeI];
fileData.color = ((''+ext+' ').charCodeAt(0) % 8 + 1);
fileData.random = (''+Math.random()).split('.').join('');
let photoExts = ['png', 'jpg', 'jpeg'];
let videoExts = ['mp4', 'mpeg', 'avi', 'mov'];
if (photoExts.indexOf(ext) != -1 && this._uploadingMedia) {
fileData.type = 'photo';
} else if (videoExts.indexOf(ext) != -1 && this._uploadingMedia) {
fileData.type = 'video';
}
if (fileData.type == 'photo') {
// let dataURL = await readTheData(inputFile, 'readAsDataURL'); // ? URL.createObjectURL(file);
fileData.dataURL = URL.createObjectURL(inputFile);
[fileData.width, fileData.height] = await dims(fileData); // parallelize?
} else if (fileData.type == 'video') {
fileData.width = 100;
fileData.height = 100;
}
this._files.push(fileData);
return fileData;
} catch(e) {
// console.log(e);
return false;
}
}
loaded() {
// console.error(this._files);
this.updateTitle();
this.generateUploadPreview();
}
updateTitle() {
let suffix = {
doc: 'File',
video: 'Video',
photo: 'Photo', | let title = 'Send '+( (this._files.length > 1) ? this._files.length : '' ) + ' ' + suffix[this._files[0].type] + ( (this._files.length > 1) ? 's' : '' );
this.$('.uploadTitle').innerHTML = title;
}
onAlbumClick(e) {
const base = this.$();
let closest = e.target.closest('.upiRemove');
if (closest && base.contains(closest)) {
const el = closest.parentElement;
if (el) {
const id = el.id.split('uploadPreviewItem_').join('');
let ri = null;
for (let i = 0; i < this._files.length; i++) {
if (this._files[i].random == id) {
ri = i;
}
}
if (ri !== null) {
this._files.splice(ri, 1);
el.remove();
if (!this._files.length) {
this.hide();
} else {
this.doCalcs();
this.updateTitle();
}
}
}
}
}
doCalcs() {
this._layouter = new Layouter(this._files, {maxWidth: 384});
this._files = this._layouter.layout();
for (let file of this._files) {
const el = this.$('#uploadPreviewItem_'+file.random);
if (el) {
el.style.left = '' + file.pos.left + 'px';
el.style.top = '' + file.pos.top + 'px';
el.style.width = '' + file.pos.width + 'px';
el.style.height = '' + file.pos.height + 'px';
}
}
this.$('#uploadPreviewAlbum').style.height = this._layouter._height+'px';
this.initScrollBarOn(this.$('.uploadPreview'));
}
swapItems(id1, id2) {
if (id1 == id2) {
return;
}
id1 = id1.split('uploadPreviewItem_').join('');
id2 = id2.split('uploadPreviewItem_').join('');
let toMove1 = null;
let i1 = null;
let toMove2 = null;
let i2 = null;
// let insertBeforeI = null;
for (let i = 0; i < this._files.length; i++) {
if (this._files[i].random == id1) {
toMove1 = this._files[i];
i1 = i;
}
if (this._files[i].random == id2) {
toMove2 = this._files[i];
i2 = i;
}
}
this._files[i1] = toMove2;
this._files[i2] = toMove1;
this.doCalcs();
}
async generateVideoPreview(file, tryToSliceLength) {
let respAB = file.ab;
if (tryToSliceLength) {
respAB = file.ab.slice(0, tryToSliceLength);
}
let response = new Response(
respAB,
{
status: 206,
statusText: 'Partial Content',
headers: [
['Content-Type', 'video/mp4'],
['Content-Length', file.ab.byteLength],
['Content-Range', '0-'+(respAB.byteLength - 1)+'/'+file.ab.byteLength ]]
});
let blob = await response.blob();
let blobUrl = URL.createObjectURL(blob);
let promise = new Promise((res, rej)=>{
let video = document.createElement('video');
video.addEventListener('error', function(event) {
rej();
}, true);
video.onloadeddata = (event) => {
let width = video.videoWidth;
let height = video.videoHeight;
let canvas = document.createElement('canvas');
canvas.width = width;
canvas.height = height;
file.aspectRatio = (height ? (width/height) : 1);
file.width = width;
file.height = height;
this.doCalcs();
let ctx = canvas.getContext("2d");
ctx.imageSmoothingEnabled = true;
ctx.drawImage(video, 0, 0, width, height);
let blankCanvas = document.createElement('canvas');
blankCanvas.width = width;
blankCanvas.height = height;
if (canvas.toDataURL() == blankCanvas.toDataURL()) {
rej();
}
canvas.toBlob((canvasBlob)=>{
let canvasBlobUrl = URL.createObjectURL(canvasBlob);
res(canvasBlobUrl);
});
};
video.preload = 'metadata';
video.src = blobUrl;
// Load video in Safari / IE11
video.muted = true;
video.playsInline = true;
video.play();
});
return await promise;
}
generateUploadPreviewForFile(file) {
const closeHTML = this._components.CloseIcon.render({noDOM: true});
if (file.type == 'photo') {
return '<div class="uploadPreviewItem" draggable="true" id="uploadPreviewItem_'+file.random+'" style="background-image: url(\''+file.dataURL+'\'); '+file.style+'"><div class="upiRemove">'+closeHTML+'</div></div>';
} else if (file.type == 'video') {
return '<div class="uploadPreviewItem" draggable="true" id="uploadPreviewItem_'+file.random+'" style="border: 1px solid #eee;"><div class="upiRemove">'+closeHTML+'</div></div>';
} else if (file.type == 'doc') {
return `<div class="uploadDocItem">
<div class="rsDoc">
<div class="rsDocIcon avatarC${file.color}">${file.ext}</div>
<div class="rsDocName">${file.filename}</div>
<div class="rsDocMeta">${file.sizeHuman}</div>
</div>
</div>`;
}
}
async generateUploadPreview() {
this.doCalcs();
let html = '';
for (let file of this._files) {
html += this.generateUploadPreviewForFile(file);
}
this.$('.uploadPreviewAlbum').innerHTML = html;
for (let file of this._files) {
const itemEl = this.$('#uploadPreviewItem_'+file.random);
if (itemEl) {
itemEl.addEventListener('dragstart', (ev)=>{
ev.dataTransfer.dropEffect = "move";
ev.dataTransfer.setData("text/plain", ev.target.id);
});
// itemEl.addEventListener('touchstart', (ev)=>{
// this._touchedItem = ev.target.id;
// });
itemEl.addEventListener('touchend', (ev)=>{
if (ev.target && ev.target.classList.contains('uploadPreviewItem')) {
if (ev.changedTouches && ev.changedTouches[0]) {
let x = ev.changedTouches[0].clientX;
let y = ev.changedTouches[0].clientY;
for (let file of this._files) {
const cont = this.$('#uploadPreviewItem_'+file.random);
const clientRect = cont.getBoundingClientRect();
if (x > clientRect.left && x < (clientRect.left + clientRect.width) && y > clientRect.top && y < (clientRect.top+clientRect.height)) {
return this.swapItems(cont.id, ev.target.id);
}
}
}
}
// console.error(ev);
});
itemEl.addEventListener('dragover', (ev)=>{
ev.preventDefault();
ev.dataTransfer.dropEffect = "move";
});
itemEl.addEventListener('drop', (ev)=>{
ev.preventDefault();
let toId = ev.target.id;
let fromId = ev.dataTransfer.getData("text/plain");
this.swapItems(toId, fromId);
});
}
if (itemEl && file.type == 'video' && !file.previewGenerated) {
let blobUrl = null;
try {
blobUrl = await this.generateVideoPreview(file, 256*1024);
} catch(e) {
try {
blobUrl = await this.generateVideoPreview(file, 5*1024*1024);
} catch(e) {
try {
blobUrl = await this.generateVideoPreview(file, 10*1024*1024);
} catch(e) {
}
}
}
if (blobUrl) {
itemEl.style.backgroundImage = "url('"+blobUrl+"')";
} else {
itemEl.style.backgroundColor = "black";
}
itemEl.innerHTML+= '<div class=playIcon></div>';
file.previewGenerated = true;
}
}
}
selectFiles(forMedia) {
this._uploadingMedia = forMedia;
let input = this.$('#'+this.domId+'_fileInput_files');
if (forMedia) {
input = this.$('#'+this.domId+'_fileInput_media');
}
this._files = [];
input.onchange = ()=>{
if (input.files) {
let promises = [];
for (let inputFile of input.files) {
promises.push(this.readFile(inputFile));
}
Promise.all(promises)
.then(()=>{
this.show();
input.value = '';
});
}
};
input.click();
}
template() {
return `
<div id="uploadTop">
<div class="popupOverlay">
<div class="popup">
<input type="file" id="{{domId}}_fileInput_media" class="hidden" multiple="multiple" accept="image/*,video/*,.jpg,.png,.mp4">
<input type="file" id="{{domId}}_fileInput_files" class="hidden" multiple="multiple">
<div class="uploadPanel">
<div class="uploadClose" id="uploadClose">{{component(options.components.CloseIcon)}}{{/component}}</div>
<div class="uploadButton">{{component(options.components.Button)}}{{/component}}</div>
<div class="uploadLoading">
<div class="cssload-zenith dark"></div>
</div>
<div class="uploadTitle">Send 3 photos</div>
</div>
<div class="uploadPreview">
<div class="uploadPreviewAlbum" id="uploadPreviewAlbum"></div>
</div>
<div class="uploadCaption">
{{component(options.components.CaptionInput)}}{{/component}}
</div>
</div>
</div>
</div>
`;
}
};
module.exports = Upload; | };
| random_line_split |
Upload.js | const UI = window.classes.UI;
const AppIcon = require('../../icons/AppIcon.js');
const Button = window.classes.Button;
const UIInput = window.classes.UIInput;
const Layouter = require('../utils/Layouter.js');
class Upload extends UI {
constructor(params) {
super(params);
this._components.CloseIcon = this.newC(AppIcon, {icon: 'close'});
this._components.Button = this.newC(Button, {title: 'Send', loadingTitle: 'Sending...'});
this._components.CaptionInput = this.newC(UIInput, {label: 'Caption'});
this._events = [
['click', 'uploadClose', 'onClose'],
['click', 'uploadPreviewAlbum', 'onAlbumClick'],
];
this._componentEvents = [
['click', 'Button', 'sendFiles'],
];
this._files = [];
this._uploadingMedia = false;
}
buttonLoading(loading) {
if (loading) {
this.$('.uploadButton').style.display = 'none';
this.$('.uploadLoading').style.display = 'block';
} else {
this.$('.uploadButton').style.display = 'block';
this.$('.uploadLoading').style.display = 'none';
}
}
sendFiles() {
this.emit('sendFiles', {files: this._files, caption: this._components.CaptionInput.val()});
this.buttonLoading(true);
setTimeout(()=>{
this.hide();
}, 2000);
}
onClose() {
this.hide();
}
| () {
this.$('#uploadTop').style.display = 'block';
this.$('.popupOverlay').classList.add('active');
this.loaded();
this.initScrollBarOn(this.$('.uploadPreview'));
}
hide() {
this.$('.popupOverlay').classList.remove('active');
this.$('.popupOverlay').classList.add('fading');
setTimeout(()=>{
this.$('.popupOverlay').classList.remove('fading');
this.$('#uploadTop').style.display = 'none';
this.buttonLoading(false);
}, 500);
}
async readFile(inputFile) {
let readTheData = (ifile, method)=>{
return new Promise((res, rej)=>{
let reader = new FileReader();
reader.onload = (e)=>{
res(e.target.result);
}
reader.onerror = (e)=>{
rej(e);
}
reader.onabort = ()=>{
rej();
}
reader[method](ifile);
});
};
let dims = (ifile)=>{
return new Promise((res,rej)=>{
const img = new Image();
img.onload = function () {
res([this.width, this.height]);
};
img.onerror = function() {
res([100,100]); // ???
};
img.src = ifile.dataURL;
});
};
try {
let ab = await readTheData(inputFile, 'readAsArrayBuffer');
let filename = (''+inputFile.name) || 'undefined';
let ext = (filename.lastIndexOf('.') != -1) ? (filename.substr(filename.lastIndexOf('.') + 1)).toLowerCase() : '';
let fileData = {
ab: ab,
name: inputFile.name,
size: inputFile.size,
filename: filename,
ext: ext,
type: 'doc',
aspectRatio: 1,
};
// nice one. https://stackoverflow.com/a/20732091/1119169 thanks Andrew!
const sizeI = Math.floor( Math.log(fileData.size) / Math.log(1024) );
fileData.sizeHuman = ( fileData.size / Math.pow(1024, sizeI) ).toFixed(2) * 1 + ' ' + ['B', 'kB', 'MB', 'GB', 'TB'][sizeI];
fileData.color = ((''+ext+' ').charCodeAt(0) % 8 + 1);
fileData.random = (''+Math.random()).split('.').join('');
let photoExts = ['png', 'jpg', 'jpeg'];
let videoExts = ['mp4', 'mpeg', 'avi', 'mov'];
if (photoExts.indexOf(ext) != -1 && this._uploadingMedia) {
fileData.type = 'photo';
} else if (videoExts.indexOf(ext) != -1 && this._uploadingMedia) {
fileData.type = 'video';
}
if (fileData.type == 'photo') {
// let dataURL = await readTheData(inputFile, 'readAsDataURL'); // ? URL.createObjectURL(file);
fileData.dataURL = URL.createObjectURL(inputFile);
[fileData.width, fileData.height] = await dims(fileData); // parallelize?
} else if (fileData.type == 'video') {
fileData.width = 100;
fileData.height = 100;
}
this._files.push(fileData);
return fileData;
} catch(e) {
// console.log(e);
return false;
}
}
loaded() {
// console.error(this._files);
this.updateTitle();
this.generateUploadPreview();
}
updateTitle() {
let suffix = {
doc: 'File',
video: 'Video',
photo: 'Photo',
};
let title = 'Send '+( (this._files.length > 1) ? this._files.length : '' ) + ' ' + suffix[this._files[0].type] + ( (this._files.length > 1) ? 's' : '' );
this.$('.uploadTitle').innerHTML = title;
}
onAlbumClick(e) {
const base = this.$();
let closest = e.target.closest('.upiRemove');
if (closest && base.contains(closest)) {
const el = closest.parentElement;
if (el) {
const id = el.id.split('uploadPreviewItem_').join('');
let ri = null;
for (let i = 0; i < this._files.length; i++) {
if (this._files[i].random == id) {
ri = i;
}
}
if (ri !== null) {
this._files.splice(ri, 1);
el.remove();
if (!this._files.length) {
this.hide();
} else {
this.doCalcs();
this.updateTitle();
}
}
}
}
}
doCalcs() {
this._layouter = new Layouter(this._files, {maxWidth: 384});
this._files = this._layouter.layout();
for (let file of this._files) {
const el = this.$('#uploadPreviewItem_'+file.random);
if (el) {
el.style.left = '' + file.pos.left + 'px';
el.style.top = '' + file.pos.top + 'px';
el.style.width = '' + file.pos.width + 'px';
el.style.height = '' + file.pos.height + 'px';
}
}
this.$('#uploadPreviewAlbum').style.height = this._layouter._height+'px';
this.initScrollBarOn(this.$('.uploadPreview'));
}
swapItems(id1, id2) {
if (id1 == id2) {
return;
}
id1 = id1.split('uploadPreviewItem_').join('');
id2 = id2.split('uploadPreviewItem_').join('');
let toMove1 = null;
let i1 = null;
let toMove2 = null;
let i2 = null;
// let insertBeforeI = null;
for (let i = 0; i < this._files.length; i++) {
if (this._files[i].random == id1) {
toMove1 = this._files[i];
i1 = i;
}
if (this._files[i].random == id2) {
toMove2 = this._files[i];
i2 = i;
}
}
this._files[i1] = toMove2;
this._files[i2] = toMove1;
this.doCalcs();
}
async generateVideoPreview(file, tryToSliceLength) {
let respAB = file.ab;
if (tryToSliceLength) {
respAB = file.ab.slice(0, tryToSliceLength);
}
let response = new Response(
respAB,
{
status: 206,
statusText: 'Partial Content',
headers: [
['Content-Type', 'video/mp4'],
['Content-Length', file.ab.byteLength],
['Content-Range', '0-'+(respAB.byteLength - 1)+'/'+file.ab.byteLength ]]
});
let blob = await response.blob();
let blobUrl = URL.createObjectURL(blob);
let promise = new Promise((res, rej)=>{
let video = document.createElement('video');
video.addEventListener('error', function(event) {
rej();
}, true);
video.onloadeddata = (event) => {
let width = video.videoWidth;
let height = video.videoHeight;
let canvas = document.createElement('canvas');
canvas.width = width;
canvas.height = height;
file.aspectRatio = (height ? (width/height) : 1);
file.width = width;
file.height = height;
this.doCalcs();
let ctx = canvas.getContext("2d");
ctx.imageSmoothingEnabled = true;
ctx.drawImage(video, 0, 0, width, height);
let blankCanvas = document.createElement('canvas');
blankCanvas.width = width;
blankCanvas.height = height;
if (canvas.toDataURL() == blankCanvas.toDataURL()) {
rej();
}
canvas.toBlob((canvasBlob)=>{
let canvasBlobUrl = URL.createObjectURL(canvasBlob);
res(canvasBlobUrl);
});
};
video.preload = 'metadata';
video.src = blobUrl;
// Load video in Safari / IE11
video.muted = true;
video.playsInline = true;
video.play();
});
return await promise;
}
generateUploadPreviewForFile(file) {
const closeHTML = this._components.CloseIcon.render({noDOM: true});
if (file.type == 'photo') {
return '<div class="uploadPreviewItem" draggable="true" id="uploadPreviewItem_'+file.random+'" style="background-image: url(\''+file.dataURL+'\'); '+file.style+'"><div class="upiRemove">'+closeHTML+'</div></div>';
} else if (file.type == 'video') {
return '<div class="uploadPreviewItem" draggable="true" id="uploadPreviewItem_'+file.random+'" style="border: 1px solid #eee;"><div class="upiRemove">'+closeHTML+'</div></div>';
} else if (file.type == 'doc') {
return `<div class="uploadDocItem">
<div class="rsDoc">
<div class="rsDocIcon avatarC${file.color}">${file.ext}</div>
<div class="rsDocName">${file.filename}</div>
<div class="rsDocMeta">${file.sizeHuman}</div>
</div>
</div>`;
}
}
async generateUploadPreview() {
this.doCalcs();
let html = '';
for (let file of this._files) {
html += this.generateUploadPreviewForFile(file);
}
this.$('.uploadPreviewAlbum').innerHTML = html;
for (let file of this._files) {
const itemEl = this.$('#uploadPreviewItem_'+file.random);
if (itemEl) {
itemEl.addEventListener('dragstart', (ev)=>{
ev.dataTransfer.dropEffect = "move";
ev.dataTransfer.setData("text/plain", ev.target.id);
});
// itemEl.addEventListener('touchstart', (ev)=>{
// this._touchedItem = ev.target.id;
// });
itemEl.addEventListener('touchend', (ev)=>{
if (ev.target && ev.target.classList.contains('uploadPreviewItem')) {
if (ev.changedTouches && ev.changedTouches[0]) {
let x = ev.changedTouches[0].clientX;
let y = ev.changedTouches[0].clientY;
for (let file of this._files) {
const cont = this.$('#uploadPreviewItem_'+file.random);
const clientRect = cont.getBoundingClientRect();
if (x > clientRect.left && x < (clientRect.left + clientRect.width) && y > clientRect.top && y < (clientRect.top+clientRect.height)) {
return this.swapItems(cont.id, ev.target.id);
}
}
}
}
// console.error(ev);
});
itemEl.addEventListener('dragover', (ev)=>{
ev.preventDefault();
ev.dataTransfer.dropEffect = "move";
});
itemEl.addEventListener('drop', (ev)=>{
ev.preventDefault();
let toId = ev.target.id;
let fromId = ev.dataTransfer.getData("text/plain");
this.swapItems(toId, fromId);
});
}
if (itemEl && file.type == 'video' && !file.previewGenerated) {
let blobUrl = null;
try {
blobUrl = await this.generateVideoPreview(file, 256*1024);
} catch(e) {
try {
blobUrl = await this.generateVideoPreview(file, 5*1024*1024);
} catch(e) {
try {
blobUrl = await this.generateVideoPreview(file, 10*1024*1024);
} catch(e) {
}
}
}
if (blobUrl) {
itemEl.style.backgroundImage = "url('"+blobUrl+"')";
} else {
itemEl.style.backgroundColor = "black";
}
itemEl.innerHTML+= '<div class=playIcon></div>';
file.previewGenerated = true;
}
}
}
selectFiles(forMedia) {
this._uploadingMedia = forMedia;
let input = this.$('#'+this.domId+'_fileInput_files');
if (forMedia) {
input = this.$('#'+this.domId+'_fileInput_media');
}
this._files = [];
input.onchange = ()=>{
if (input.files) {
let promises = [];
for (let inputFile of input.files) {
promises.push(this.readFile(inputFile));
}
Promise.all(promises)
.then(()=>{
this.show();
input.value = '';
});
}
};
input.click();
}
template() {
return `
<div id="uploadTop">
<div class="popupOverlay">
<div class="popup">
<input type="file" id="{{domId}}_fileInput_media" class="hidden" multiple="multiple" accept="image/*,video/*,.jpg,.png,.mp4">
<input type="file" id="{{domId}}_fileInput_files" class="hidden" multiple="multiple">
<div class="uploadPanel">
<div class="uploadClose" id="uploadClose">{{component(options.components.CloseIcon)}}{{/component}}</div>
<div class="uploadButton">{{component(options.components.Button)}}{{/component}}</div>
<div class="uploadLoading">
<div class="cssload-zenith dark"></div>
</div>
<div class="uploadTitle">Send 3 photos</div>
</div>
<div class="uploadPreview">
<div class="uploadPreviewAlbum" id="uploadPreviewAlbum"></div>
</div>
<div class="uploadCaption">
{{component(options.components.CaptionInput)}}{{/component}}
</div>
</div>
</div>
</div>
`;
}
};
module.exports = Upload;
| show | identifier_name |
image_utils.py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes and utilities for image datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import metrics
import tensorflow as tf
def matplotlib_pyplot():
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
return plt
def image_to_tf_summary_value(image, tag):
"""Converts a NumPy image to a tf.Summary.Value object.
Args:
image: 3-D NumPy array.
tag: name for tf.Summary.Value for display in tensorboard.
Returns:
image_summary: A tf.Summary.Value object.
"""
curr_image = np.asarray(image, dtype=np.uint8)
height, width, n_channels = curr_image.shape
# If monochrome image, then reshape to [height, width]
if n_channels == 1:
curr_image = np.reshape(curr_image, [height, width])
s = io.BytesIO()
matplotlib_pyplot().imsave(s, curr_image, format="png")
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=height, width=width,
colorspace=n_channels)
return tf.Summary.Value(tag=tag, image=img_sum)
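# Illustrative usage (an assumption, not part of this module): wrap the returned
# value in a tf.Summary and hand it to a TF1-style summary writer, e.g.
#   writer.add_summary(tf.Summary(value=[image_to_tf_summary_value(img, "sample")]), step)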
def convert_predictions_to_image_summaries(hook_args):
"""Optionally converts images from hooks_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if hook_args.decode_hparams.display_decoded_images is set to True.
"""
decode_hparams = hook_args.decode_hparams
if not decode_hparams.display_decoded_images:
return []
predictions = hook_args.predictions[0]
# Display ten random inputs and outputs so that tensorboard does not hang.
all_summaries = []
rand_predictions = np.random.choice(predictions, size=10)
for ind, prediction in enumerate(rand_predictions):
output_summary = image_to_tf_summary_value(
prediction["outputs"], tag="%d_output" % ind)
input_summary = image_to_tf_summary_value(
prediction["inputs"], tag="%d_input" % ind)
all_summaries.append(input_summary)
all_summaries.append(output_summary)
return all_summaries
def resize_by_area(img, size):
"""image resize function used by quite a few image problems."""
return tf.to_int64(
tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA))
def make_multiscale(image, resolutions,
resize_method=tf.image.ResizeMethod.BICUBIC,
num_channels=3):
"""Returns list of scaled images, one for each resolution.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to.
resize_method: tf.image.ResizeMethod.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels].
"""
scaled_images = []
for height in resolutions:
scaled_image = tf.image.resize_images(
image,
size=[height, height], # assuming that height = width
method=resize_method)
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([height, height, num_channels])
scaled_images.append(scaled_image)
return scaled_images
def make_multiscale_dilated(image, resolutions, num_channels=3):
"""Returns list of scaled images, one for each resolution.
Resizes by skipping every nth pixel.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to. The function
assumes VALID padding, so the original image's height must be divisible
by each resolution's height to return the exact resolution size.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels] if resolutions properly
divide the original image's height; otherwise the resulting height and width
are determined by the valid skips.
"""
image_height = common_layers.shape_list(image)[0]
scaled_images = []
for height in resolutions:
dilation_rate = image_height // height # assuming height = width
scaled_image = image[::dilation_rate, ::dilation_rate]
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([None, None, num_channels])
scaled_images.append(scaled_image)
return scaled_images
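# Illustrative example (assuming height == width): for a 32x32 image,
# make_multiscale_dilated(image, [32, 16, 8]) keeps every 1st, 2nd and 4th pixel,
# returning images of size 32x32, 16x16 and 8x8.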
class ImageProblem(problem.Problem):
"""Base class for problems with images."""
@property
def num_channels(self):
"""Number of color channels."""
return 3
@property
def vocab_size(self):
"""Number of pixel values."""
return 256
def example_reading_spec(self):
data_fields = {
"image/encoded": tf.FixedLenFeature((), tf.string),
"image/format": tf.FixedLenFeature((), tf.string),
}
data_items_to_decoders = {
"inputs":
tf.contrib.slim.tfexample_decoder.Image(
image_key="image/encoded",
format_key="image/format",
channels=self.num_channels),
}
return data_fields, data_items_to_decoders
def preprocess_example(self, example, mode, hparams):
if not self._was_reversed:
example["inputs"] = tf.image.per_image_standardization(example["inputs"])
return example
def eval_metrics(self):
eval_metrics = [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY
]
if self._was_reversed:
eval_metrics += [metrics.Metrics.IMAGE_SUMMARY]
return eval_metrics
@property
def decode_hooks(self):
return [convert_predictions_to_image_summaries]
class Image2ClassProblem(ImageProblem):
"""Base class for image classification problems."""
@property
def is_small(self):
raise NotImplementedError()
@property
def num_classes(self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
return 1
@property
def class_labels(self):
return ["ID_%d" % i for i in range(self.num_classes)]
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.ImageEncoder(channels=self.num_channels),
"targets": text_encoder.ClassLabelEncoder(self.class_labels)
}
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2ClassProblem, self).example_reading_spec())
data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64)
data_items_to_decoders[
"targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key)
return data_fields, data_items_to_decoders
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.CLASS_LABEL}
p.vocab_size = {"inputs": 256,
"targets": self.num_classes}
p.batch_size_multiplier = 4 if self.is_small else 256
p.loss_multiplier = 3.0 if self.is_small else 1.0
if self._was_reversed:
p.loss_multiplier = 1.0
p.input_space_id = problem.SpaceID.IMAGE
p.target_space_id = problem.SpaceID.IMAGE_LABEL
def generate_data(self, data_dir, tmp_dir, task_id=-1):
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True),
self.training_filepaths(data_dir, self.train_shards, shuffled=False),
self.generator(data_dir, tmp_dir, False),
self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
def encode_images_as_png(images):
"""Yield images encoded as pngs."""
if tf.executing_eagerly():
for image in images:
yield tf.image.encode_png(image).numpy()
else:
(height, width, channels) = images[0].shape
with tf.Graph().as_default():
image_t = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels))
encoded_image_t = tf.image.encode_png(image_t)
with tf.Session() as sess:
for image in images:
enc_string = sess.run(encoded_image_t, feed_dict={image_t: image})
yield enc_string
def image_generator(images, labels):
"""Generator for images that takes image and labels lists and creates pngs.
Args:
images: list of images given as [width x height x channels] numpy arrays.
labels: list of ints, same length as images.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as PNG,
* image/format: the string "png" representing image format,
* image/class/label: an integer representing the label,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a singleton list of the corresponding type.
Raises:
ValueError: if images is an empty list.
"""
if not images:
raise ValueError("Must provide some images for the generator.")
width, height, _ = images[0].shape
for (enc_image, label) in zip(encode_images_as_png(images), labels):
|
class Image2TextProblem(ImageProblem):
"""Base class for image-to-text problems."""
@property
def is_character_level(self):
raise NotImplementedError()
@property
def vocab_problem(self):
raise NotImplementedError() # Not needed if self.is_character_level.
@property
def target_space_id(self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
raise NotImplementedError()
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2TextProblem, self).example_reading_spec())
data_fields[label_key] = tf.VarLenFeature(tf.int64)
data_items_to_decoders[
"targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key)
return data_fields, data_items_to_decoders
def feature_encoders(self, data_dir):
if self.is_character_level:
encoder = text_encoder.ByteTextEncoder()
else:
vocab_filename = os.path.join(
data_dir, self.vocab_problem.vocab_filename)
encoder = text_encoder.SubwordTextEncoder(vocab_filename)
input_encoder = text_encoder.ImageEncoder(channels=self.num_channels)
return {"inputs": input_encoder, "targets": encoder}
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.SYMBOL}
p.vocab_size = {"inputs": 256,
"targets": self._encoders["targets"].vocab_size}
p.batch_size_multiplier = 256
p.loss_multiplier = 1.0
p.input_space_id = problem.SpaceID.IMAGE
p.target_space_id = self.target_space_id
def generate_data(self, data_dir, tmp_dir, task_id=-1):
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True),
self.training_filepaths(data_dir, self.train_shards, shuffled=False),
self.generator(data_dir, tmp_dir, False),
self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
def image_augmentation(images, do_colors=False, crop_size=None):
"""Image augmentation: cropping, flipping, and color transforms."""
if crop_size is None:
crop_size = [299, 299]
images = tf.random_crop(images, crop_size + [3])
images = tf.image.random_flip_left_right(images)
if do_colors: # More augmentation, but might be slow.
images = tf.image.random_brightness(images, max_delta=32. / 255.)
images = tf.image.random_saturation(images, lower=0.5, upper=1.5)
images = tf.image.random_hue(images, max_delta=0.2)
images = tf.image.random_contrast(images, lower=0.5, upper=1.5)
return images
def cifar_image_augmentation(images):
"""Image augmentation suitable for CIFAR-10/100.
As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).
Args:
images: a Tensor.
Returns:
Tensor of the same shape as images.
"""
images = tf.image.resize_image_with_crop_or_pad(images, 40, 40)
images = tf.random_crop(images, [32, 32, 3])
images = tf.image.random_flip_left_right(images)
return images
def random_shift(image, wsr=0.1, hsr=0.1):
"""Apply random horizontal and vertical shift to images.
This is the default data-augmentation strategy used on CIFAR in Glow.
Args:
image: a 3-D Tensor
wsr: Width shift range, as a float fraction of the width.
hsr: Height shift range, as a float fraction of the height.
Returns:
images: images translated by the provided wsr and hsr.
"""
height, width, _ = common_layers.shape_list(image)
width_range, height_range = wsr*width, hsr*height
height_translations = tf.random_uniform((1,), -height_range, height_range)
width_translations = tf.random_uniform((1,), -width_range, width_range)
translations = tf.concat((height_translations, width_translations), axis=0)
return tf.contrib.image.translate(image, translations=translations)
| yield {
"image/encoded": [enc_image],
"image/format": ["png"],
"image/class/label": [int(label)],
"image/height": [height],
"image/width": [width]
} | conditional_block |
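A minimal sketch of the example dictionary yielded above, using NumPy arrays and a placeholder encoder so it runs without TensorFlow; the images, labels, and the fake_png_encode helper are invented for illustration and mirror the generator's own width/height convention.
import numpy as np

def fake_png_encode(images):
    # Placeholder for encode_images_as_png: yields raw bytes instead of real PNG strings.
    for image in images:
        yield image.tobytes()

images = [np.random.randint(0, 256, size=(8, 8, 3), dtype=np.uint8) for _ in range(2)]
labels = [0, 1]
width, height, _ = images[0].shape
for enc_image, label in zip(fake_png_encode(images), labels):
    example = {
        "image/encoded": [enc_image],
        "image/format": ["png"],
        "image/class/label": [int(label)],
        "image/height": [height],
        "image/width": [width],
    }
    print(sorted(example.keys()))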
image_utils.py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes and utilities for image datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import metrics
import tensorflow as tf
def matplotlib_pyplot():
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
return plt
def image_to_tf_summary_value(image, tag):
"""Converts a NumPy image to a tf.Summary.Value object.
Args:
image: 3-D NumPy array.
tag: name for tf.Summary.Value for display in tensorboard.
Returns:
image_summary: A tf.Summary.Value object.
"""
curr_image = np.asarray(image, dtype=np.uint8)
height, width, n_channels = curr_image.shape
# If monochrome image, then reshape to [height, width]
if n_channels == 1:
curr_image = np.reshape(curr_image, [height, width])
s = io.BytesIO()
matplotlib_pyplot().imsave(s, curr_image, format="png")
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=height, width=width,
colorspace=n_channels)
return tf.Summary.Value(tag=tag, image=img_sum)
def convert_predictions_to_image_summaries(hook_args):
"""Optionally converts images from hooks_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if hook_args.decode_hparams.display_decoded_images is set; an empty list otherwise.
"""
decode_hparams = hook_args.decode_hparams
if not decode_hparams.display_decoded_images:
return []
predictions = hook_args.predictions[0]
# Display ten random inputs and outputs so that tensorboard does not hang.
all_summaries = []
rand_predictions = np.random.choice(predictions, size=10)
for ind, prediction in enumerate(rand_predictions):
output_summary = image_to_tf_summary_value(
prediction["outputs"], tag="%d_output" % ind)
input_summary = image_to_tf_summary_value(
prediction["inputs"], tag="%d_input" % ind)
all_summaries.append(input_summary)
all_summaries.append(output_summary)
return all_summaries
def resize_by_area(img, size):
"""image resize function used by quite a few image problems."""
return tf.to_int64(
tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA))
def make_multiscale(image, resolutions,
resize_method=tf.image.ResizeMethod.BICUBIC,
num_channels=3):
"""Returns list of scaled images, one for each resolution.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to.
resize_method: tf.image.ResizeMethod.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels].
"""
scaled_images = []
for height in resolutions:
scaled_image = tf.image.resize_images(
image,
size=[height, height], # assuming that height = width
method=resize_method)
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([height, height, num_channels])
scaled_images.append(scaled_image)
return scaled_images
def make_multiscale_dilated(image, resolutions, num_channels=3):
"""Returns list of scaled images, one for each resolution.
Resizes by skipping every nth pixel.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to. The function
assumes VALID padding, so the original image's height must be divisible
by each resolution's height to return the exact resolution size.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels] if resolutions properly
divide the original image's height; otherwise shape height and width is up
to valid skips.
"""
image_height = common_layers.shape_list(image)[0]
scaled_images = []
for height in resolutions:
dilation_rate = image_height // height # assuming height = width
scaled_image = image[::dilation_rate, ::dilation_rate]
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([None, None, num_channels])
scaled_images.append(scaled_image)
return scaled_images
class ImageProblem(problem.Problem):
"""Base class for problems with images."""
@property
def num_channels(self):
"""Number of color channels."""
return 3
@property
def vocab_size(self):
"""Number of pixel values."""
return 256
def example_reading_spec(self):
data_fields = {
"image/encoded": tf.FixedLenFeature((), tf.string),
"image/format": tf.FixedLenFeature((), tf.string),
}
data_items_to_decoders = {
"inputs":
tf.contrib.slim.tfexample_decoder.Image(
image_key="image/encoded",
format_key="image/format",
channels=self.num_channels),
}
return data_fields, data_items_to_decoders
def preprocess_example(self, example, mode, hparams):
if not self._was_reversed:
example["inputs"] = tf.image.per_image_standardization(example["inputs"])
return example
def eval_metrics(self):
eval_metrics = [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY
]
if self._was_reversed:
eval_metrics += [metrics.Metrics.IMAGE_SUMMARY]
return eval_metrics
@property
def decode_hooks(self):
return [convert_predictions_to_image_summaries]
class Image2ClassProblem(ImageProblem):
"""Base class for image classification problems."""
@property
def is_small(self):
raise NotImplementedError()
@property
def num_classes(self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
return 1
@property
def class_labels(self):
return ["ID_%d" % i for i in range(self.num_classes)]
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.ImageEncoder(channels=self.num_channels),
"targets": text_encoder.ClassLabelEncoder(self.class_labels)
}
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2ClassProblem, self).example_reading_spec())
data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64)
data_items_to_decoders[
"targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key)
return data_fields, data_items_to_decoders
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.CLASS_LABEL}
p.vocab_size = {"inputs": 256,
"targets": self.num_classes}
p.batch_size_multiplier = 4 if self.is_small else 256
p.loss_multiplier = 3.0 if self.is_small else 1.0
if self._was_reversed:
p.loss_multiplier = 1.0
p.input_space_id = problem.SpaceID.IMAGE
p.target_space_id = problem.SpaceID.IMAGE_LABEL
def generate_data(self, data_dir, tmp_dir, task_id=-1):
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True),
self.training_filepaths(data_dir, self.train_shards, shuffled=False),
self.generator(data_dir, tmp_dir, False),
self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
def encode_images_as_png(images):
"""Yield images encoded as pngs."""
if tf.executing_eagerly():
for image in images:
yield tf.image.encode_png(image).numpy()
else:
(height, width, channels) = images[0].shape
with tf.Graph().as_default():
image_t = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels))
encoded_image_t = tf.image.encode_png(image_t)
with tf.Session() as sess:
for image in images:
enc_string = sess.run(encoded_image_t, feed_dict={image_t: image})
yield enc_string
def image_generator(images, labels):
"""Generator for images that takes image and labels lists and creates pngs.
Args:
images: list of images given as [width x height x channels] numpy arrays.
labels: list of ints, same length as images.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as PNG,
* image/format: the string "png" representing image format,
* image/class/label: an integer representing the label,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a singleton list of the corresponding type.
Raises:
ValueError: if images is an empty list.
"""
if not images:
raise ValueError("Must provide some images for the generator.")
width, height, _ = images[0].shape
for (enc_image, label) in zip(encode_images_as_png(images), labels):
yield {
"image/encoded": [enc_image],
"image/format": ["png"],
"image/class/label": [int(label)],
"image/height": [height],
"image/width": [width]
}
class Image2TextProblem(ImageProblem):
"""Base class for image-to-text problems."""
@property
def is_character_level(self):
raise NotImplementedError()
@property
def vocab_problem(self):
raise NotImplementedError() # Not needed if self.is_character_level.
@property
def | (self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
raise NotImplementedError()
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2TextProblem, self).example_reading_spec())
data_fields[label_key] = tf.VarLenFeature(tf.int64)
data_items_to_decoders[
"targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key)
return data_fields, data_items_to_decoders
def feature_encoders(self, data_dir):
if self.is_character_level:
encoder = text_encoder.ByteTextEncoder()
else:
vocab_filename = os.path.join(
data_dir, self.vocab_problem.vocab_filename)
encoder = text_encoder.SubwordTextEncoder(vocab_filename)
input_encoder = text_encoder.ImageEncoder(channels=self.num_channels)
return {"inputs": input_encoder, "targets": encoder}
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.SYMBOL}
p.vocab_size = {"inputs": 256,
"targets": self._encoders["targets"].vocab_size}
p.batch_size_multiplier = 256
p.loss_multiplier = 1.0
p.input_space_id = problem.SpaceID.IMAGE
p.target_space_id = self.target_space_id
def generate_data(self, data_dir, tmp_dir, task_id=-1):
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True),
self.training_filepaths(data_dir, self.train_shards, shuffled=False),
self.generator(data_dir, tmp_dir, False),
self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
def image_augmentation(images, do_colors=False, crop_size=None):
"""Image augmentation: cropping, flipping, and color transforms."""
if crop_size is None:
crop_size = [299, 299]
images = tf.random_crop(images, crop_size + [3])
images = tf.image.random_flip_left_right(images)
if do_colors: # More augmentation, but might be slow.
images = tf.image.random_brightness(images, max_delta=32. / 255.)
images = tf.image.random_saturation(images, lower=0.5, upper=1.5)
images = tf.image.random_hue(images, max_delta=0.2)
images = tf.image.random_contrast(images, lower=0.5, upper=1.5)
return images
def cifar_image_augmentation(images):
"""Image augmentation suitable for CIFAR-10/100.
As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).
Args:
images: a Tensor.
Returns:
Tensor of the same shape as images.
"""
images = tf.image.resize_image_with_crop_or_pad(images, 40, 40)
images = tf.random_crop(images, [32, 32, 3])
images = tf.image.random_flip_left_right(images)
return images
def random_shift(image, wsr=0.1, hsr=0.1):
"""Apply random horizontal and vertical shift to images.
This is the default data-augmentation strategy used on CIFAR in Glow.
Args:
image: a 3-D Tensor
wsr: Width shift range, as a float fraction of the width.
hsr: Height shift range, as a float fraction of the height.
Returns:
images: images translated by the provided wsr and hsr.
"""
height, width, _ = common_layers.shape_list(image)
width_range, height_range = wsr*width, hsr*height
height_translations = tf.random_uniform((1,), -height_range, height_range)
width_translations = tf.random_uniform((1,), -width_range, width_range)
translations = tf.concat((height_translations, width_translations), axis=0)
return tf.contrib.image.translate(image, translations=translations)
| target_space_id | identifier_name |
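The dilated multiscale resize above reduces resolution by strided slicing rather than interpolation. A NumPy analogue of that slicing, with a made-up 8x8 image, is sketched below for illustration only; the real code operates on TensorFlow tensors.
import numpy as np

image = np.arange(8 * 8 * 3, dtype=np.uint8).reshape(8, 8, 3)
for height in (8, 4, 2):
    rate = image.shape[0] // height      # assumes height == width, as the Args note
    scaled = image[::rate, ::rate]       # keep every rate-th pixel in both dimensions
    print(height, scaled.shape)          # -> (8, 8, 3), (4, 4, 3), (2, 2, 3)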
image_utils.py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes and utilities for image datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import metrics
import tensorflow as tf
def matplotlib_pyplot():
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
return plt
def image_to_tf_summary_value(image, tag):
"""Converts a NumPy image to a tf.Summary.Value object.
Args:
image: 3-D NumPy array.
tag: name for tf.Summary.Value for display in tensorboard.
Returns:
image_summary: A tf.Summary.Value object.
"""
curr_image = np.asarray(image, dtype=np.uint8)
height, width, n_channels = curr_image.shape
# If monochrome image, then reshape to [height, width]
if n_channels == 1:
curr_image = np.reshape(curr_image, [height, width])
s = io.BytesIO()
matplotlib_pyplot().imsave(s, curr_image, format="png")
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=height, width=width,
colorspace=n_channels)
return tf.Summary.Value(tag=tag, image=img_sum)
def convert_predictions_to_image_summaries(hook_args):
"""Optionally converts images from hooks_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if hook_args.decode_hparams.display_decoded_images is set; an empty list otherwise.
"""
decode_hparams = hook_args.decode_hparams
if not decode_hparams.display_decoded_images:
return []
predictions = hook_args.predictions[0]
# Display ten random inputs and outputs so that tensorboard does not hang.
all_summaries = []
rand_predictions = np.random.choice(predictions, size=10)
for ind, prediction in enumerate(rand_predictions):
output_summary = image_to_tf_summary_value(
prediction["outputs"], tag="%d_output" % ind)
input_summary = image_to_tf_summary_value(
prediction["inputs"], tag="%d_input" % ind)
all_summaries.append(input_summary)
all_summaries.append(output_summary)
return all_summaries
def resize_by_area(img, size):
"""image resize function used by quite a few image problems."""
return tf.to_int64(
tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA))
def make_multiscale(image, resolutions,
resize_method=tf.image.ResizeMethod.BICUBIC,
num_channels=3):
"""Returns list of scaled images, one for each resolution.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to.
resize_method: tf.image.ResizeMethod.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels].
"""
scaled_images = []
for height in resolutions:
scaled_image = tf.image.resize_images(
image,
size=[height, height], # assuming that height = width
method=resize_method)
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([height, height, num_channels])
scaled_images.append(scaled_image)
return scaled_images
def make_multiscale_dilated(image, resolutions, num_channels=3):
"""Returns list of scaled images, one for each resolution.
Resizes by skipping every nth pixel.
Args: | assumes VALID padding, so the original image's height must be divisible
by each resolution's height to return the exact resolution size.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels] if resolutions properly
divide the original image's height; otherwise shape height and width is up
to valid skips.
"""
image_height = common_layers.shape_list(image)[0]
scaled_images = []
for height in resolutions:
dilation_rate = image_height // height # assuming height = width
scaled_image = image[::dilation_rate, ::dilation_rate]
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([None, None, num_channels])
scaled_images.append(scaled_image)
return scaled_images
class ImageProblem(problem.Problem):
"""Base class for problems with images."""
@property
def num_channels(self):
"""Number of color channels."""
return 3
@property
def vocab_size(self):
"""Number of pixel values."""
return 256
def example_reading_spec(self):
data_fields = {
"image/encoded": tf.FixedLenFeature((), tf.string),
"image/format": tf.FixedLenFeature((), tf.string),
}
data_items_to_decoders = {
"inputs":
tf.contrib.slim.tfexample_decoder.Image(
image_key="image/encoded",
format_key="image/format",
channels=self.num_channels),
}
return data_fields, data_items_to_decoders
def preprocess_example(self, example, mode, hparams):
if not self._was_reversed:
example["inputs"] = tf.image.per_image_standardization(example["inputs"])
return example
def eval_metrics(self):
eval_metrics = [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY
]
if self._was_reversed:
eval_metrics += [metrics.Metrics.IMAGE_SUMMARY]
return eval_metrics
@property
def decode_hooks(self):
return [convert_predictions_to_image_summaries]
class Image2ClassProblem(ImageProblem):
"""Base class for image classification problems."""
@property
def is_small(self):
raise NotImplementedError()
@property
def num_classes(self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
return 1
@property
def class_labels(self):
return ["ID_%d" % i for i in range(self.num_classes)]
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.ImageEncoder(channels=self.num_channels),
"targets": text_encoder.ClassLabelEncoder(self.class_labels)
}
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2ClassProblem, self).example_reading_spec())
data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64)
data_items_to_decoders[
"targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key)
return data_fields, data_items_to_decoders
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.CLASS_LABEL}
p.vocab_size = {"inputs": 256,
"targets": self.num_classes}
p.batch_size_multiplier = 4 if self.is_small else 256
p.loss_multiplier = 3.0 if self.is_small else 1.0
if self._was_reversed:
p.loss_multiplier = 1.0
p.input_space_id = problem.SpaceID.IMAGE
p.target_space_id = problem.SpaceID.IMAGE_LABEL
def generate_data(self, data_dir, tmp_dir, task_id=-1):
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True),
self.training_filepaths(data_dir, self.train_shards, shuffled=False),
self.generator(data_dir, tmp_dir, False),
self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
def encode_images_as_png(images):
"""Yield images encoded as pngs."""
if tf.executing_eagerly():
for image in images:
yield tf.image.encode_png(image).numpy()
else:
(height, width, channels) = images[0].shape
with tf.Graph().as_default():
image_t = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels))
encoded_image_t = tf.image.encode_png(image_t)
with tf.Session() as sess:
for image in images:
enc_string = sess.run(encoded_image_t, feed_dict={image_t: image})
yield enc_string
def image_generator(images, labels):
"""Generator for images that takes image and labels lists and creates pngs.
Args:
images: list of images given as [width x height x channels] numpy arrays.
labels: list of ints, same length as images.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as PNG,
* image/format: the string "png" representing image format,
* image/class/label: an integer representing the label,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a singleton list of the corresponding type.
Raises:
ValueError: if images is an empty list.
"""
if not images:
raise ValueError("Must provide some images for the generator.")
width, height, _ = images[0].shape
for (enc_image, label) in zip(encode_images_as_png(images), labels):
yield {
"image/encoded": [enc_image],
"image/format": ["png"],
"image/class/label": [int(label)],
"image/height": [height],
"image/width": [width]
}
class Image2TextProblem(ImageProblem):
"""Base class for image-to-text problems."""
@property
def is_character_level(self):
raise NotImplementedError()
@property
def vocab_problem(self):
raise NotImplementedError() # Not needed if self.is_character_level.
@property
def target_space_id(self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
raise NotImplementedError()
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2TextProblem, self).example_reading_spec())
data_fields[label_key] = tf.VarLenFeature(tf.int64)
data_items_to_decoders[
"targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key)
return data_fields, data_items_to_decoders
def feature_encoders(self, data_dir):
if self.is_character_level:
encoder = text_encoder.ByteTextEncoder()
else:
vocab_filename = os.path.join(
data_dir, self.vocab_problem.vocab_filename)
encoder = text_encoder.SubwordTextEncoder(vocab_filename)
input_encoder = text_encoder.ImageEncoder(channels=self.num_channels)
return {"inputs": input_encoder, "targets": encoder}
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.SYMBOL}
p.vocab_size = {"inputs": 256,
"targets": self._encoders["targets"].vocab_size}
p.batch_size_multiplier = 256
p.loss_multiplier = 1.0
p.input_space_id = problem.SpaceID.IMAGE
p.target_space_id = self.target_space_id
def generate_data(self, data_dir, tmp_dir, task_id=-1):
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True),
self.training_filepaths(data_dir, self.train_shards, shuffled=False),
self.generator(data_dir, tmp_dir, False),
self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
def image_augmentation(images, do_colors=False, crop_size=None):
"""Image augmentation: cropping, flipping, and color transforms."""
if crop_size is None:
crop_size = [299, 299]
images = tf.random_crop(images, crop_size + [3])
images = tf.image.random_flip_left_right(images)
if do_colors: # More augmentation, but might be slow.
images = tf.image.random_brightness(images, max_delta=32. / 255.)
images = tf.image.random_saturation(images, lower=0.5, upper=1.5)
images = tf.image.random_hue(images, max_delta=0.2)
images = tf.image.random_contrast(images, lower=0.5, upper=1.5)
return images
def cifar_image_augmentation(images):
"""Image augmentation suitable for CIFAR-10/100.
As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).
Args:
images: a Tensor.
Returns:
Tensor of the same shape as images.
"""
images = tf.image.resize_image_with_crop_or_pad(images, 40, 40)
images = tf.random_crop(images, [32, 32, 3])
images = tf.image.random_flip_left_right(images)
return images
def random_shift(image, wsr=0.1, hsr=0.1):
"""Apply random horizontal and vertical shift to images.
This is the default data-augmentation strategy used on CIFAR in Glow.
Args:
image: a 3-D Tensor
wsr: Width shift range, as a float fraction of the width.
hsr: Height shift range, as a float fraction of the height.
Returns:
images: images translated by the provided wsr and hsr.
"""
height, width, _ = common_layers.shape_list(image)
width_range, height_range = wsr*width, hsr*height
height_translations = tf.random_uniform((1,), -height_range, height_range)
width_translations = tf.random_uniform((1,), -width_range, width_range)
translations = tf.concat((height_translations, width_translations), axis=0)
return tf.contrib.image.translate(image, translations=translations) | image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to. The function | random_line_split |
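For the random_shift augmentation above, the translation offsets are drawn uniformly from ranges proportional to the image size. A plain-Python illustration of that arithmetic follows, assuming a 32x32 image and the default wsr = hsr = 0.1; all values are invented for the example.
import random

height, width = 32, 32
wsr, hsr = 0.1, 0.1
width_range, height_range = wsr * width, hsr * height   # 3.2 pixels of play in each direction
dy = random.uniform(-height_range, height_range)
dx = random.uniform(-width_range, width_range)
print(width_range, height_range, round(dy, 2), round(dx, 2))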
image_utils.py | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes and utilities for image datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import numpy as np
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.utils import metrics
import tensorflow as tf
def matplotlib_pyplot():
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("agg")
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
return plt
def image_to_tf_summary_value(image, tag):
"""Converts a NumPy image to a tf.Summary.Value object.
Args:
image: 3-D NumPy array.
tag: name for tf.Summary.Value for display in tensorboard.
Returns:
image_summary: A tf.Summary.Value object.
"""
curr_image = np.asarray(image, dtype=np.uint8)
height, width, n_channels = curr_image.shape
# If monochrome image, then reshape to [height, width]
if n_channels == 1:
curr_image = np.reshape(curr_image, [height, width])
s = io.BytesIO()
matplotlib_pyplot().imsave(s, curr_image, format="png")
img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(),
height=height, width=width,
colorspace=n_channels)
return tf.Summary.Value(tag=tag, image=img_sum)
def convert_predictions_to_image_summaries(hook_args):
"""Optionally converts images from hooks_args to image summaries.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
summaries: list of tf.Summary values if hook_args.decode_hparams.display_decoded_images is set; an empty list otherwise.
"""
decode_hparams = hook_args.decode_hparams
if not decode_hparams.display_decoded_images:
return []
predictions = hook_args.predictions[0]
# Display ten random inputs and outputs so that tensorboard does not hang.
all_summaries = []
rand_predictions = np.random.choice(predictions, size=10)
for ind, prediction in enumerate(rand_predictions):
output_summary = image_to_tf_summary_value(
prediction["outputs"], tag="%d_output" % ind)
input_summary = image_to_tf_summary_value(
prediction["inputs"], tag="%d_input" % ind)
all_summaries.append(input_summary)
all_summaries.append(output_summary)
return all_summaries
def resize_by_area(img, size):
"""image resize function used by quite a few image problems."""
return tf.to_int64(
tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA))
def make_multiscale(image, resolutions,
resize_method=tf.image.ResizeMethod.BICUBIC,
num_channels=3):
"""Returns list of scaled images, one for each resolution.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to.
resize_method: tf.image.ResizeMethod.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels].
"""
scaled_images = []
for height in resolutions:
scaled_image = tf.image.resize_images(
image,
size=[height, height], # assuming that height = width
method=resize_method)
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([height, height, num_channels])
scaled_images.append(scaled_image)
return scaled_images
def make_multiscale_dilated(image, resolutions, num_channels=3):
"""Returns list of scaled images, one for each resolution.
Resizes by skipping every nth pixel.
Args:
image: Tensor of shape [height, height, num_channels].
resolutions: List of heights that image's height is resized to. The function
assumes VALID padding, so the original image's height must be divisible
by each resolution's height to return the exact resolution size.
num_channels: Number of channels in image.
Returns:
List of Tensors, one for each resolution with shape given by
[resolutions[i], resolutions[i], num_channels] if resolutions properly
divide the original image's height; otherwise shape height and width is up
to valid skips.
"""
image_height = common_layers.shape_list(image)[0]
scaled_images = []
for height in resolutions:
dilation_rate = image_height // height # assuming height = width
scaled_image = image[::dilation_rate, ::dilation_rate]
scaled_image = tf.to_int64(scaled_image)
scaled_image.set_shape([None, None, num_channels])
scaled_images.append(scaled_image)
return scaled_images
class ImageProblem(problem.Problem):
"""Base class for problems with images."""
@property
def num_channels(self):
"""Number of color channels."""
return 3
@property
def vocab_size(self):
"""Number of pixel values."""
return 256
def example_reading_spec(self):
data_fields = {
"image/encoded": tf.FixedLenFeature((), tf.string),
"image/format": tf.FixedLenFeature((), tf.string),
}
data_items_to_decoders = {
"inputs":
tf.contrib.slim.tfexample_decoder.Image(
image_key="image/encoded",
format_key="image/format",
channels=self.num_channels),
}
return data_fields, data_items_to_decoders
def preprocess_example(self, example, mode, hparams):
if not self._was_reversed:
example["inputs"] = tf.image.per_image_standardization(example["inputs"])
return example
def eval_metrics(self):
eval_metrics = [
metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,
metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY
]
if self._was_reversed:
eval_metrics += [metrics.Metrics.IMAGE_SUMMARY]
return eval_metrics
@property
def decode_hooks(self):
return [convert_predictions_to_image_summaries]
class Image2ClassProblem(ImageProblem):
"""Base class for image classification problems."""
@property
def is_small(self):
raise NotImplementedError()
@property
def num_classes(self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
return 1
@property
def class_labels(self):
return ["ID_%d" % i for i in range(self.num_classes)]
def feature_encoders(self, data_dir):
del data_dir
return {
"inputs": text_encoder.ImageEncoder(channels=self.num_channels),
"targets": text_encoder.ClassLabelEncoder(self.class_labels)
}
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2ClassProblem, self).example_reading_spec())
data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64)
data_items_to_decoders[
"targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key)
return data_fields, data_items_to_decoders
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.CLASS_LABEL}
p.vocab_size = {"inputs": 256,
"targets": self.num_classes}
p.batch_size_multiplier = 4 if self.is_small else 256
p.loss_multiplier = 3.0 if self.is_small else 1.0
if self._was_reversed:
p.loss_multiplier = 1.0
p.input_space_id = problem.SpaceID.IMAGE
p.target_space_id = problem.SpaceID.IMAGE_LABEL
def generate_data(self, data_dir, tmp_dir, task_id=-1):
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True),
self.training_filepaths(data_dir, self.train_shards, shuffled=False),
self.generator(data_dir, tmp_dir, False),
self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
def encode_images_as_png(images):
|
def image_generator(images, labels):
"""Generator for images that takes image and labels lists and creates pngs.
Args:
images: list of images given as [width x height x channels] numpy arrays.
labels: list of ints, same length as images.
Yields:
A dictionary representing the images with the following fields:
* image/encoded: the string encoding the image as PNG,
* image/format: the string "png" representing image format,
* image/class/label: an integer representing the label,
* image/height: an integer representing the height,
* image/width: an integer representing the width.
Every field is actually a singleton list of the corresponding type.
Raises:
ValueError: if images is an empty list.
"""
if not images:
raise ValueError("Must provide some images for the generator.")
width, height, _ = images[0].shape
for (enc_image, label) in zip(encode_images_as_png(images), labels):
yield {
"image/encoded": [enc_image],
"image/format": ["png"],
"image/class/label": [int(label)],
"image/height": [height],
"image/width": [width]
}
class Image2TextProblem(ImageProblem):
"""Base class for image-to-text problems."""
@property
def is_character_level(self):
raise NotImplementedError()
@property
def vocab_problem(self):
raise NotImplementedError() # Not needed if self.is_character_level.
@property
def target_space_id(self):
raise NotImplementedError()
@property
def train_shards(self):
raise NotImplementedError()
@property
def dev_shards(self):
raise NotImplementedError()
def generator(self, data_dir, tmp_dir, is_training):
raise NotImplementedError()
def example_reading_spec(self):
label_key = "image/class/label"
data_fields, data_items_to_decoders = (
super(Image2TextProblem, self).example_reading_spec())
data_fields[label_key] = tf.VarLenFeature(tf.int64)
data_items_to_decoders[
"targets"] = tf.contrib.slim.tfexample_decoder.Tensor(label_key)
return data_fields, data_items_to_decoders
def feature_encoders(self, data_dir):
if self.is_character_level:
encoder = text_encoder.ByteTextEncoder()
else:
vocab_filename = os.path.join(
data_dir, self.vocab_problem.vocab_filename)
encoder = text_encoder.SubwordTextEncoder(vocab_filename)
input_encoder = text_encoder.ImageEncoder(channels=self.num_channels)
return {"inputs": input_encoder, "targets": encoder}
def hparams(self, defaults, unused_model_hparams):
p = defaults
p.modality = {"inputs": modalities.ModalityType.IMAGE,
"targets": modalities.ModalityType.SYMBOL}
p.vocab_size = {"inputs": 256,
"targets": self._encoders["targets"].vocab_size}
p.batch_size_multiplier = 256
p.loss_multiplier = 1.0
p.input_space_id = problem.SpaceID.IMAGE
p.target_space_id = self.target_space_id
def generate_data(self, data_dir, tmp_dir, task_id=-1):
generator_utils.generate_dataset_and_shuffle(
self.generator(data_dir, tmp_dir, True),
self.training_filepaths(data_dir, self.train_shards, shuffled=False),
self.generator(data_dir, tmp_dir, False),
self.dev_filepaths(data_dir, self.dev_shards, shuffled=False))
def image_augmentation(images, do_colors=False, crop_size=None):
"""Image augmentation: cropping, flipping, and color transforms."""
if crop_size is None:
crop_size = [299, 299]
images = tf.random_crop(images, crop_size + [3])
images = tf.image.random_flip_left_right(images)
if do_colors: # More augmentation, but might be slow.
images = tf.image.random_brightness(images, max_delta=32. / 255.)
images = tf.image.random_saturation(images, lower=0.5, upper=1.5)
images = tf.image.random_hue(images, max_delta=0.2)
images = tf.image.random_contrast(images, lower=0.5, upper=1.5)
return images
def cifar_image_augmentation(images):
"""Image augmentation suitable for CIFAR-10/100.
As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5).
Args:
images: a Tensor.
Returns:
Tensor of the same shape as images.
"""
images = tf.image.resize_image_with_crop_or_pad(images, 40, 40)
images = tf.random_crop(images, [32, 32, 3])
images = tf.image.random_flip_left_right(images)
return images
def random_shift(image, wsr=0.1, hsr=0.1):
"""Apply random horizontal and vertical shift to images.
This is the default data-augmentation strategy used on CIFAR in Glow.
Args:
image: a 3-D Tensor
wsr: Width shift range, as a float fraction of the width.
hsr: Height shift range, as a float fraction of the height.
Returns:
images: images translated by the provided wsr and hsr.
"""
height, width, _ = common_layers.shape_list(image)
width_range, height_range = wsr*width, hsr*height
height_translations = tf.random_uniform((1,), -height_range, height_range)
width_translations = tf.random_uniform((1,), -width_range, width_range)
translations = tf.concat((height_translations, width_translations), axis=0)
return tf.contrib.image.translate(image, translations=translations)
| """Yield images encoded as pngs."""
if tf.executing_eagerly():
for image in images:
yield tf.image.encode_png(image).numpy()
else:
(height, width, channels) = images[0].shape
with tf.Graph().as_default():
image_t = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels))
encoded_image_t = tf.image.encode_png(image_t)
with tf.Session() as sess:
for image in images:
enc_string = sess.run(encoded_image_t, feed_dict={image_t: image})
yield enc_string | identifier_body |
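The eager branch of encode_images_as_png above amounts to one tf.image.encode_png call per image. A minimal sketch of that branch, assuming a TensorFlow installation with eager execution enabled; the zero-valued images are made up for the example.
import numpy as np
import tensorflow as tf

images = [np.zeros((4, 4, 3), dtype=np.uint8) for _ in range(2)]
encoded = [tf.image.encode_png(image).numpy() for image in images]
# Each encoded string starts with the 8-byte PNG file signature.
print([enc[:8] for enc in encoded])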
congestion.go | /*
Package minq is a minimal implementation of QUIC, as documented at
https://quicwg.github.io/. Minq partly implements draft-04.
*/
package minq
import (
"math"
"time"
// "fmt"
)
// congestion control related constants
const (
kDefaultMss = 1460 // bytes
kInitalWindow = 10 * kDefaultMss
kMinimumWindow = 2 * kDefaultMss
kMaximumWindow = kInitalWindow
kLossReductionFactor = 0.5
)
// loss detection related constants
const (
kMaxTLPs = 2
kReorderingThreshold = 3
kTimeReorderingFraction = 0.125
kMinTLPTimeout = 10 * time.Millisecond
kMinRTOTimeout = 200 * time.Millisecond
kDelayedAckTimeout = 25 * time.Millisecond
kDefaultInitialRtt = 100 * time.Millisecond
)
type CongestionController interface {
onPacketSent(pn uint64, isAckOnly bool, sentBytes int)
onAckReceived(acks ackRanges, delay time.Duration)
bytesAllowedToSend() int
setLostPacketHandler(handler func(pn uint64))
rto() time.Duration
}
/*
* DUMMY congestion controller
*/
type CongestionControllerDummy struct {
}
func (cc *CongestionControllerDummy) onPacketSent(pn uint64, isAckOnly bool, sentBytes int) {
}
func (cc *CongestionControllerDummy) onAckReceived(acks ackRanges, delay time.Duration) {
}
func (cc *CongestionControllerDummy) bytesAllowedToSend() int {
/* return the maximum int value */
return int(^uint(0) >> 1)
}
func (cc *CongestionControllerDummy) setLostPacketHandler(handler func(pn uint64)) {
}
func (cc *CongestionControllerDummy) rto() time.Duration {
return kMinRTOTimeout
}
/*
* draft-ietf-quic-recovery congestion controller
*/
type CongestionControllerIetf struct {
// Congestion control related
bytesInFlight int
congestionWindow int
endOfRecovery uint64
sstresh int
// Loss detection related
lossDetectionAlarm int //TODO([email protected]) set this to the right type
handshakeCount int
tlpCount int
rtoCount int
largestSendBeforeRto uint64
timeOfLastSentPacket time.Time
largestSendPacket uint64
largestAckedPacket uint64
maxAckDelay time.Duration
minRtt time.Duration
// largestRtt time.Duration
smoothedRtt time.Duration
rttVar time.Duration
smoothedRttTcp time.Duration
rttVarTcp time.Duration
reorderingThreshold int
timeReorderingFraction float32
lossTime time.Time
sentPackets map[uint64]packetEntry
// others
lostPacketHandler func(pn uint64)
conn *Connection
}
type packetEntry struct {
pn uint64
txTime time.Time
bytes int
ackOnly bool
}
func (cc *CongestionControllerIetf) onPacketSent(pn uint64, isAckOnly bool, sentBytes int) {
cc.timeOfLastSentPacket = time.Now()
cc.largestSendPacket = pn
packetData := packetEntry{pn, time.Now(), 0, isAckOnly}
cc.conn.log(logTypeCongestion, "Packet send pn: %d len:%d ackonly: %v\n", pn, sentBytes, isAckOnly)
if !isAckOnly {
cc.onPacketSentCC(sentBytes)
packetData.bytes = sentBytes
cc.setLossDetectionAlarm()
}
cc.sentPackets[pn] = packetData
}
// acks is assumed to be a sorted list, where the largest packet numbers are at the beginning
func (cc *CongestionControllerIetf) onAckReceived(acks ackRanges, ackDelay time.Duration) {
// keep track of largest packet acked overall
if acks[0].lastPacket > cc.largestAckedPacket {
cc.largestAckedPacket = acks[0].lastPacket
}
// If the largest acked is newly acked update rtt
lastPacket, present := cc.sentPackets[acks[0].lastPacket]
if present {
latestRtt := time.Since(cc.sentPackets[acks[0].lastPacket].txTime)
cc.conn.log(logTypeCongestion, "latestRtt: %v, ackDelay: %v", latestRtt, ackDelay)
cc.updateRttTcp(latestRtt)
// Update the minRtt, but ignore ackDelay.
if latestRtt < cc.minRtt {
cc.minRtt = latestRtt
}
// Now reduce by ackDelay if it doesn't reduce the RTT below the minimum.
if latestRtt-cc.minRtt > ackDelay {
latestRtt -= ackDelay
// And update the maximum observed ACK delay.
if !lastPacket.ackOnly && ackDelay > cc.maxAckDelay {
cc.maxAckDelay = ackDelay
}
}
cc.updateRtt(latestRtt)
}
// find and process newly acked packets
for _, ackBlock := range acks {
for pn := ackBlock.lastPacket; pn > (ackBlock.lastPacket - ackBlock.count); pn-- {
cc.conn.log(logTypeCongestion, "Ack for pn %d received", pn)
_, present := cc.sentPackets[pn]
if present {
cc.conn.log(logTypeCongestion, "First ack for pn %d received", pn)
cc.onPacketAcked(pn)
}
}
}
cc.detectLostPackets()
cc.setLossDetectionAlarm()
}
func (cc *CongestionControllerIetf) setLostPacketHandler(handler func(pn uint64)) {
cc.lostPacketHandler = handler
}
func (cc *CongestionControllerIetf) updateRtt(latestRtt time.Duration) {
if cc.smoothedRtt == 0 {
cc.smoothedRtt = latestRtt
cc.rttVar = time.Duration(int64(latestRtt) / 2) | } else {
rttDelta := cc.smoothedRtt - latestRtt
if rttDelta < 0 {
rttDelta = -rttDelta
}
cc.rttVar = time.Duration(int64(cc.rttVar)*3/4 + int64(rttDelta)*1/4)
cc.smoothedRtt = time.Duration(int64(cc.smoothedRtt)*7/8 + int64(latestRtt)*1/8)
}
cc.conn.log(logTypeCongestion, "New RTT estimate: %v, variance: %v", cc.smoothedRtt, cc.rttVar)
}
func (cc *CongestionControllerIetf) updateRttTcp(latestRtt time.Duration) {
if cc.smoothedRttTcp == 0 {
cc.smoothedRttTcp = latestRtt
cc.rttVarTcp = time.Duration(int64(latestRtt) / 2)
} else {
rttDelta := cc.smoothedRttTcp - latestRtt
if rttDelta < 0 {
rttDelta = -rttDelta
}
cc.rttVarTcp = time.Duration(int64(cc.rttVarTcp)*3/4 + int64(rttDelta)*1/4)
cc.smoothedRttTcp = time.Duration(int64(cc.smoothedRttTcp)*7/8 + int64(latestRtt)*1/8)
}
cc.conn.log(logTypeCongestion, "New RTT(TCP) estimate: %v, variance: %v", cc.smoothedRttTcp, cc.rttVarTcp)
}
func (cc *CongestionControllerIetf) rto() time.Duration {
// max(SRTT + 4*RTTVAR + MaxAckDelay, minRTO)
rto := cc.smoothedRtt + 4*cc.rttVar + cc.maxAckDelay
if rto < kMinRTOTimeout {
return kMinRTOTimeout
}
return rto
}
func (cc *CongestionControllerIetf) onPacketAcked(pn uint64) {
cc.onPacketAckedCC(pn)
//TODO([email protected]) some RTO stuff here
delete(cc.sentPackets, pn)
}
func (cc *CongestionControllerIetf) setLossDetectionAlarm() {
//TODO([email protected])
}
func (cc *CongestionControllerIetf) onLossDetectionAlarm() {
//TODO([email protected])
}
func (cc *CongestionControllerIetf) detectLostPackets() {
var lostPackets []packetEntry
//TODO([email protected]) implement loss detection different from reorderingThreshold
for _, packet := range cc.sentPackets {
if (cc.largestAckedPacket > packet.pn) &&
(cc.largestAckedPacket-packet.pn > uint64(cc.reorderingThreshold)) {
lostPackets = append(lostPackets, packet)
}
}
if len(lostPackets) > 0 {
cc.onPacketsLost(lostPackets)
}
for _, packet := range lostPackets {
delete(cc.sentPackets, packet.pn)
}
}
func (cc *CongestionControllerIetf) onPacketSentCC(bytes_sent int) {
cc.bytesInFlight += bytes_sent
cc.conn.log(logTypeCongestion, "%d bytes added to bytesInFlight", bytes_sent)
}
func (cc *CongestionControllerIetf) onPacketAckedCC(pn uint64) {
cc.bytesInFlight -= cc.sentPackets[pn].bytes
cc.conn.log(logTypeCongestion, "%d bytes from packet %d removed from bytesInFlight", cc.sentPackets[pn].bytes, pn)
if pn < cc.endOfRecovery {
// Do not increase window size during recovery
return
}
if cc.congestionWindow < cc.sstresh {
// Slow start
cc.congestionWindow += cc.sentPackets[pn].bytes
cc.conn.log(logTypeCongestion, "PDV Slow Start: increasing window size with %d bytes to %d",
cc.sentPackets[pn].bytes, cc.congestionWindow)
} else {
// Congestion avoidance
cc.congestionWindow += kDefaultMss * cc.sentPackets[pn].bytes / cc.congestionWindow
cc.conn.log(logTypeCongestion, "PDV Congestion Avoidance: increasing window size to %d",
cc.congestionWindow)
}
}
func (cc *CongestionControllerIetf) onPacketsLost(packets []packetEntry) {
var largestLostPn uint64 = 0
for _, packet := range packets {
// First remove lost packets from bytesInFlight and inform the connection
// of the loss
cc.conn.log(logTypeCongestion, "Packet pn: %d len: %d is lost", packet.pn, packet.bytes)
cc.bytesInFlight -= packet.bytes
if cc.lostPacketHandler != nil {
cc.lostPacketHandler(packet.pn)
}
// and keep track of the largest lost packet
if packet.pn > largestLostPn {
largestLostPn = packet.pn
}
}
// Now start a new recovery epoch if the largest lost packet is larger than the
// end of the previous recovery epoch
if cc.endOfRecovery < largestLostPn {
cc.endOfRecovery = cc.largestSendPacket
cc.congestionWindow = int(float32(cc.congestionWindow) * kLossReductionFactor)
if kMinimumWindow > cc.congestionWindow {
cc.congestionWindow = kMinimumWindow
}
cc.sstresh = cc.congestionWindow
cc.conn.log(logTypeCongestion, "PDV Recovery started. Window size: %d, sstresh: %d, endOfRecovery %d",
cc.congestionWindow, cc.sstresh, cc.endOfRecovery)
}
}
func (cc *CongestionControllerIetf) bytesAllowedToSend() int {
cc.conn.log(logTypeCongestion, "Remaining congestion window size: %d", cc.congestionWindow-cc.bytesInFlight)
return cc.congestionWindow - cc.bytesInFlight
}
func newCongestionControllerIetf(conn *Connection) *CongestionControllerIetf {
return &CongestionControllerIetf{
0, // bytesInFlight
kInitalWindow, // congestionWindow
0, // endOfRecovery
int(^uint(0) >> 1), // sstresh
0, // lossDetectionAlarm
0, // handshakeCount
0, // tlpCount
0, // rtoCount
0, // largestSendBeforeRto
time.Unix(0, 0), // timeOfLastSentPacket
0, // largestSendPacket
0, // largestAckedPacket
0, // maxAckDelay
100 * time.Second, // minRtt
0, // smoothedRtt
0, // rttVar
0, // smoothedRttTcp
0, // rttVarTcp
kReorderingThreshold, // reorderingThreshold
math.MaxFloat32, // timeReorderingFraction
time.Unix(0, 0), // lossTime
make(map[uint64]packetEntry), // sentPackets
nil, // lostPacketHandler
conn, // conn
}
} | random_line_split |
|
congestion.go | /*
Package minq is a minimal implementation of QUIC, as documented at
https://quicwg.github.io/. Minq partly implements draft-04.
*/
package minq
import (
"math"
"time"
// "fmt"
)
// congestion control related constants
const (
kDefaultMss = 1460 // bytes
kInitalWindow = 10 * kDefaultMss
kMinimumWindow = 2 * kDefaultMss
kMaximumWindow = kInitalWindow
kLossReductionFactor = 0.5
)
// loss detection related constants
const (
kMaxTLPs = 2
kReorderingThreshold = 3
kTimeReorderingFraction = 0.125
kMinTLPTimeout = 10 * time.Millisecond
kMinRTOTimeout = 200 * time.Millisecond
kDelayedAckTimeout = 25 * time.Millisecond
kDefaultInitialRtt = 100 * time.Millisecond
)
type CongestionController interface {
onPacketSent(pn uint64, isAckOnly bool, sentBytes int)
onAckReceived(acks ackRanges, delay time.Duration)
bytesAllowedToSend() int
setLostPacketHandler(handler func(pn uint64))
rto() time.Duration
}
/*
* DUMMY congestion controller
*/
type CongestionControllerDummy struct {
}
func (cc *CongestionControllerDummy) onPacketSent(pn uint64, isAckOnly bool, sentBytes int) {
}
func (cc *CongestionControllerDummy) onAckReceived(acks ackRanges, delay time.Duration) {
}
func (cc *CongestionControllerDummy) bytesAllowedToSend() int {
/* return the maximum int value */
return int(^uint(0) >> 1)
}
func (cc *CongestionControllerDummy) setLostPacketHandler(handler func(pn uint64)) {
}
func (cc *CongestionControllerDummy) rto() time.Duration |
/*
* draft-ietf-quic-recovery congestion controller
*/
type CongestionControllerIetf struct {
// Congestion control related
bytesInFlight int
congestionWindow int
endOfRecovery uint64
sstresh int
// Loss detection related
lossDetectionAlarm int //TODO([email protected]) set this to the right type
handshakeCount int
tlpCount int
rtoCount int
largestSendBeforeRto uint64
timeOfLastSentPacket time.Time
largestSendPacket uint64
largestAckedPacket uint64
maxAckDelay time.Duration
minRtt time.Duration
// largestRtt time.Duration
smoothedRtt time.Duration
rttVar time.Duration
smoothedRttTcp time.Duration
rttVarTcp time.Duration
reorderingThreshold int
timeReorderingFraction float32
lossTime time.Time
sentPackets map[uint64]packetEntry
// others
lostPacketHandler func(pn uint64)
conn *Connection
}
type packetEntry struct {
pn uint64
txTime time.Time
bytes int
ackOnly bool
}
func (cc *CongestionControllerIetf) onPacketSent(pn uint64, isAckOnly bool, sentBytes int) {
cc.timeOfLastSentPacket = time.Now()
cc.largestSendPacket = pn
packetData := packetEntry{pn, time.Now(), 0, isAckOnly}
cc.conn.log(logTypeCongestion, "Packet send pn: %d len:%d ackonly: %v\n", pn, sentBytes, isAckOnly)
if !isAckOnly {
cc.onPacketSentCC(sentBytes)
packetData.bytes = sentBytes
cc.setLossDetectionAlarm()
}
cc.sentPackets[pn] = packetData
}
// acks is assumed to be a sorted list, where the largest packet numbers are at the beginning
func (cc *CongestionControllerIetf) onAckReceived(acks ackRanges, ackDelay time.Duration) {
// keep track of largest packet acked overall
if acks[0].lastPacket > cc.largestAckedPacket {
cc.largestAckedPacket = acks[0].lastPacket
}
// If the largest acked is newly acked update rtt
lastPacket, present := cc.sentPackets[acks[0].lastPacket]
if present {
latestRtt := time.Since(cc.sentPackets[acks[0].lastPacket].txTime)
cc.conn.log(logTypeCongestion, "latestRtt: %v, ackDelay: %v", latestRtt, ackDelay)
cc.updateRttTcp(latestRtt)
// Update the minRtt, but ignore ackDelay.
if latestRtt < cc.minRtt {
cc.minRtt = latestRtt
}
// Now reduce by ackDelay if it doesn't reduce the RTT below the minimum.
if latestRtt-cc.minRtt > ackDelay {
latestRtt -= ackDelay
// And update the maximum observed ACK delay.
if !lastPacket.ackOnly && ackDelay > cc.maxAckDelay {
cc.maxAckDelay = ackDelay
}
}
cc.updateRtt(latestRtt)
}
// find and process newly acked packets
for _, ackBlock := range acks {
for pn := ackBlock.lastPacket; pn > (ackBlock.lastPacket - ackBlock.count); pn-- {
cc.conn.log(logTypeCongestion, "Ack for pn %d received", pn)
_, present := cc.sentPackets[pn]
if present {
cc.conn.log(logTypeCongestion, "First ack for pn %d received", pn)
cc.onPacketAcked(pn)
}
}
}
cc.detectLostPackets()
cc.setLossDetectionAlarm()
}
func (cc *CongestionControllerIetf) setLostPacketHandler(handler func(pn uint64)) {
cc.lostPacketHandler = handler
}
func (cc *CongestionControllerIetf) updateRtt(latestRtt time.Duration) {
if cc.smoothedRtt == 0 {
cc.smoothedRtt = latestRtt
cc.rttVar = time.Duration(int64(latestRtt) / 2)
} else {
rttDelta := cc.smoothedRtt - latestRtt
if rttDelta < 0 {
rttDelta = -rttDelta
}
cc.rttVar = time.Duration(int64(cc.rttVar)*3/4 + int64(rttDelta)*1/4)
cc.smoothedRtt = time.Duration(int64(cc.smoothedRtt)*7/8 + int64(latestRtt)*1/8)
}
cc.conn.log(logTypeCongestion, "New RTT estimate: %v, variance: %v", cc.smoothedRtt, cc.rttVar)
}
func (cc *CongestionControllerIetf) updateRttTcp(latestRtt time.Duration) {
if cc.smoothedRttTcp == 0 {
cc.smoothedRttTcp = latestRtt
cc.rttVarTcp = time.Duration(int64(latestRtt) / 2)
} else {
rttDelta := cc.smoothedRttTcp - latestRtt
if rttDelta < 0 {
rttDelta = -rttDelta
}
cc.rttVarTcp = time.Duration(int64(cc.rttVarTcp)*3/4 + int64(rttDelta)*1/4)
cc.smoothedRttTcp = time.Duration(int64(cc.smoothedRttTcp)*7/8 + int64(latestRtt)*1/8)
}
cc.conn.log(logTypeCongestion, "New RTT(TCP) estimate: %v, variance: %v", cc.smoothedRttTcp, cc.rttVarTcp)
}
func (cc *CongestionControllerIetf) rto() time.Duration {
// max(SRTT + 4*RTTVAR + MaxAckDelay, minRTO)
rto := cc.smoothedRtt + 4*cc.rttVar + cc.maxAckDelay
if rto < kMinRTOTimeout {
return kMinRTOTimeout
}
return rto
}
func (cc *CongestionControllerIetf) onPacketAcked(pn uint64) {
cc.onPacketAckedCC(pn)
//TODO([email protected]) some RTO stuff here
delete(cc.sentPackets, pn)
}
func (cc *CongestionControllerIetf) setLossDetectionAlarm() {
//TODO([email protected])
}
func (cc *CongestionControllerIetf) onLossDetectionAlarm() {
//TODO([email protected])
}
func (cc *CongestionControllerIetf) detectLostPackets() {
var lostPackets []packetEntry
//TODO([email protected]) implement loss detection different from reorderingThreshold
for _, packet := range cc.sentPackets {
if (cc.largestAckedPacket > packet.pn) &&
(cc.largestAckedPacket-packet.pn > uint64(cc.reorderingThreshold)) {
lostPackets = append(lostPackets, packet)
}
}
if len(lostPackets) > 0 {
cc.onPacketsLost(lostPackets)
}
for _, packet := range lostPackets {
delete(cc.sentPackets, packet.pn)
}
}
func (cc *CongestionControllerIetf) onPacketSentCC(bytes_sent int) {
cc.bytesInFlight += bytes_sent
cc.conn.log(logTypeCongestion, "%d bytes added to bytesInFlight", bytes_sent)
}
func (cc *CongestionControllerIetf) onPacketAckedCC(pn uint64) {
cc.bytesInFlight -= cc.sentPackets[pn].bytes
cc.conn.log(logTypeCongestion, "%d bytes from packet %d removed from bytesInFlight", cc.sentPackets[pn].bytes, pn)
if pn < cc.endOfRecovery {
// Do not increase window size during recovery
return
}
if cc.congestionWindow < cc.sstresh {
// Slow start
cc.congestionWindow += cc.sentPackets[pn].bytes
cc.conn.log(logTypeCongestion, "PDV Slow Start: increasing window size with %d bytes to %d",
cc.sentPackets[pn].bytes, cc.congestionWindow)
} else {
// Congestion avoidance
cc.congestionWindow += kDefaultMss * cc.sentPackets[pn].bytes / cc.congestionWindow
cc.conn.log(logTypeCongestion, "PDV Congestion Avoidance: increasing window size to %d",
cc.congestionWindow)
}
}
func (cc *CongestionControllerIetf) onPacketsLost(packets []packetEntry) {
var largestLostPn uint64 = 0
for _, packet := range packets {
// First remove lost packets from bytesInFlight and inform the connection
// of the loss
cc.conn.log(logTypeCongestion, "Packet pn: %d len: %d is lost", packet.pn, packet.bytes)
cc.bytesInFlight -= packet.bytes
if cc.lostPacketHandler != nil {
cc.lostPacketHandler(packet.pn)
}
// and keep track of the largest lost packet
if packet.pn > largestLostPn {
largestLostPn = packet.pn
}
}
// Now start a new recovery epoch if the largest lost packet is larger than the
// end of the previous recovery epoch
if cc.endOfRecovery < largestLostPn {
cc.endOfRecovery = cc.largestSendPacket
cc.congestionWindow = int(float32(cc.congestionWindow) * kLossReductionFactor)
if kMinimumWindow > cc.congestionWindow {
cc.congestionWindow = kMinimumWindow
}
cc.sstresh = cc.congestionWindow
cc.conn.log(logTypeCongestion, "PDV Recovery started. Window size: %d, sstresh: %d, endOfRecovery %d",
cc.congestionWindow, cc.sstresh, cc.endOfRecovery)
}
}
func (cc *CongestionControllerIetf) bytesAllowedToSend() int {
cc.conn.log(logTypeCongestion, "Remaining congestion window size: %d", cc.congestionWindow-cc.bytesInFlight)
return cc.congestionWindow - cc.bytesInFlight
}
func newCongestionControllerIetf(conn *Connection) *CongestionControllerIetf {
return &CongestionControllerIetf{
0, // bytesInFlight
kInitalWindow, // congestionWindow
0, // endOfRecovery
int(^uint(0) >> 1), // sstresh
0, // lossDetectionAlarm
0, // handshakeCount
0, // tlpCount
0, // rtoCount
0, // largestSendBeforeRto
time.Unix(0, 0), // timeOfLastSentPacket
0, // largestSendPacket
0, // largestAckedPacket
0, // maxAckDelay
100 * time.Second, // minRtt
0, // smoothedRtt
0, // rttVar
0, // smoothedRttTcp
0, // rttVarTcp
kReorderingThreshold, // reorderingThreshold
math.MaxFloat32, // timeReorderingFraction
time.Unix(0, 0), // lossTime
make(map[uint64]packetEntry), // sentPackets
nil, // lostPacketHandler
conn, // conn
}
}
| {
return kMinRTOTimeout
} | identifier_body |
congestion.go | /*
Package minq is a minimal implementation of QUIC, as documented at
https://quicwg.github.io/. Minq partly implements draft-04.
*/
package minq
import (
"math"
"time"
// "fmt"
)
// congestion control related constants
const (
kDefaultMss = 1460 // bytes
kInitalWindow = 10 * kDefaultMss
kMinimumWindow = 2 * kDefaultMss
kMaximumWindow = kInitalWindow
kLossReductionFactor = 0.5
)
// loss dectection related constants
const (
kMaxTLPs = 2
kReorderingThreshold = 3
kTimeReorderingFraction = 0.125
kMinTLPTimeout = 10 * time.Millisecond
kMinRTOTimeout = 200 * time.Millisecond
kDelayedAckTimeout = 25 * time.Millisecond
kDefaultInitialRtt = 100 * time.Millisecond
)
type CongestionController interface {
onPacketSent(pn uint64, isAckOnly bool, sentBytes int)
onAckReceived(acks ackRanges, delay time.Duration)
bytesAllowedToSend() int
setLostPacketHandler(handler func(pn uint64))
rto() time.Duration
}
/*
* DUMMY congestion controller
*/
type CongestionControllerDummy struct {
}
func (cc *CongestionControllerDummy) onPacketSent(pn uint64, isAckOnly bool, sentBytes int) {
}
func (cc *CongestionControllerDummy) onAckReceived(acks ackRanges, delay time.Duration) {
}
func (cc *CongestionControllerDummy) bytesAllowedToSend() int {
/* return the the maximum int value */
return int(^uint(0) >> 1)
}
func (cc *CongestionControllerDummy) setLostPacketHandler(handler func(pn uint64)) {
}
func (cc *CongestionControllerDummy) rto() time.Duration {
return kMinRTOTimeout
}
/*
* draft-ietf-quic-recovery congestion controller
*/
type CongestionControllerIetf struct {
// Congestion control related
bytesInFlight int
congestionWindow int
endOfRecovery uint64
sstresh int
// Loss detection related
lossDetectionAlarm int //TODO([email protected]) set this to the right type
handshakeCount int
tlpCount int
rtoCount int
largestSendBeforeRto uint64
timeOfLastSentPacket time.Time
largestSendPacket uint64
largestAckedPacket uint64
maxAckDelay time.Duration
minRtt time.Duration
// largestRtt time.Duration
smoothedRtt time.Duration
rttVar time.Duration
smoothedRttTcp time.Duration
rttVarTcp time.Duration
reorderingThreshold int
timeReorderingFraction float32
lossTime time.Time
sentPackets map[uint64]packetEntry
// others
lostPacketHandler func(pn uint64)
conn *Connection
}
type packetEntry struct {
pn uint64
txTime time.Time
bytes int
ackOnly bool
}
func (cc *CongestionControllerIetf) onPacketSent(pn uint64, isAckOnly bool, sentBytes int) {
cc.timeOfLastSentPacket = time.Now()
cc.largestSendPacket = pn
packetData := packetEntry{pn, time.Now(), 0, isAckOnly}
cc.conn.log(logTypeCongestion, "Packet send pn: %d len:%d ackonly: %v\n", pn, sentBytes, isAckOnly)
if !isAckOnly {
cc.onPacketSentCC(sentBytes)
packetData.bytes = sentBytes
cc.setLossDetectionAlarm()
}
cc.sentPackets[pn] = packetData
}
// acks is received to be a sorted list, where the largest packet numbers are at the beginning
func (cc *CongestionControllerIetf) onAckReceived(acks ackRanges, ackDelay time.Duration) {
// keep track of largest packet acked overall
if acks[0].lastPacket > cc.largestAckedPacket {
cc.largestAckedPacket = acks[0].lastPacket
}
// If the largest acked is newly acked update rtt
lastPacket, present := cc.sentPackets[acks[0].lastPacket]
if present {
latestRtt := time.Since(cc.sentPackets[acks[0].lastPacket].txTime)
cc.conn.log(logTypeCongestion, "latestRtt: %v, ackDelay: %v", latestRtt, ackDelay)
cc.updateRttTcp(latestRtt)
// Update the minRtt, but ignore ackDelay.
if latestRtt < cc.minRtt {
cc.minRtt = latestRtt
}
// Now reduce by ackDelay if it doesn't reduce the RTT below the minimum.
if latestRtt-cc.minRtt > ackDelay {
latestRtt -= ackDelay
// And update the maximum observed ACK delay.
if !lastPacket.ackOnly && ackDelay > cc.maxAckDelay {
cc.maxAckDelay = ackDelay
}
}
cc.updateRtt(latestRtt)
}
// find and proccess newly acked packets
for _, ackBlock := range acks {
for pn := ackBlock.lastPacket; pn > (ackBlock.lastPacket - ackBlock.count); pn-- {
cc.conn.log(logTypeCongestion, "Ack for pn %d received", pn)
_, present := cc.sentPackets[pn]
if present {
cc.conn.log(logTypeCongestion, "First ack for pn %d received", pn)
cc.onPacketAcked(pn)
}
}
}
cc.detectLostPackets()
cc.setLossDetectionAlarm()
}
func (cc *CongestionControllerIetf) setLostPacketHandler(handler func(pn uint64)) {
cc.lostPacketHandler = handler
}
func (cc *CongestionControllerIetf) updateRtt(latestRtt time.Duration) {
if cc.smoothedRtt == 0 {
cc.smoothedRtt = latestRtt
cc.rttVar = time.Duration(int64(latestRtt) / 2)
} else {
rttDelta := cc.smoothedRtt - latestRtt
if rttDelta < 0 {
rttDelta = -rttDelta
}
cc.rttVar = time.Duration(int64(cc.rttVar)*3/4 + int64(rttDelta)*1/4)
cc.smoothedRtt = time.Duration(int64(cc.smoothedRtt)*7/8 + int64(latestRtt)*1/8)
}
cc.conn.log(logTypeCongestion, "New RTT estimate: %v, variance: %v", cc.smoothedRtt, cc.rttVar)
}
func (cc *CongestionControllerIetf) updateRttTcp(latestRtt time.Duration) {
if cc.smoothedRttTcp == 0 {
cc.smoothedRttTcp = latestRtt
cc.rttVarTcp = time.Duration(int64(latestRtt) / 2)
} else {
rttDelta := cc.smoothedRttTcp - latestRtt
if rttDelta < 0 {
rttDelta = -rttDelta
}
cc.rttVarTcp = time.Duration(int64(cc.rttVarTcp)*3/4 + int64(rttDelta)*3/4)
cc.smoothedRttTcp = time.Duration(int64(cc.smoothedRttTcp)*7/8 + int64(latestRtt)*1/8)
}
cc.conn.log(logTypeCongestion, "New RTT(TCP) estimate: %v, variance: %v", cc.smoothedRttTcp, cc.rttVarTcp)
}
func (cc *CongestionControllerIetf) rto() time.Duration {
// max(SRTT + 4*RTTVAR + MaxAckDelay, minRTO)
rto := cc.smoothedRtt + 4*cc.rttVar + cc.maxAckDelay
if rto < kMinRTOTimeout {
return kMinRTOTimeout
}
return rto
}
func (cc *CongestionControllerIetf) onPacketAcked(pn uint64) {
cc.onPacketAckedCC(pn)
//TODO([email protected]) some RTO stuff here
delete(cc.sentPackets, pn)
}
func (cc *CongestionControllerIetf) setLossDetectionAlarm() {
//TODO([email protected])
}
func (cc *CongestionControllerIetf) | () {
//TODO([email protected])
}
func (cc *CongestionControllerIetf) detectLostPackets() {
var lostPackets []packetEntry
//TODO([email protected]) implement loss detection different from reorderingThreshold
for _, packet := range cc.sentPackets {
if (cc.largestAckedPacket > packet.pn) &&
(cc.largestAckedPacket-packet.pn > uint64(cc.reorderingThreshold)) {
lostPackets = append(lostPackets, packet)
}
}
if len(lostPackets) > 0 {
cc.onPacketsLost(lostPackets)
}
for _, packet := range lostPackets {
delete(cc.sentPackets, packet.pn)
}
}
func (cc *CongestionControllerIetf) onPacketSentCC(bytes_sent int) {
cc.bytesInFlight += bytes_sent
cc.conn.log(logTypeCongestion, "%d bytes added to bytesInFlight", bytes_sent)
}
func (cc *CongestionControllerIetf) onPacketAckedCC(pn uint64) {
cc.bytesInFlight -= cc.sentPackets[pn].bytes
cc.conn.log(logTypeCongestion, "%d bytes from packet %d removed from bytesInFlight", cc.sentPackets[pn].bytes, pn)
if pn < cc.endOfRecovery {
// Do not increase window size during recovery
return
}
if cc.congestionWindow < cc.sstresh {
// Slow start
cc.congestionWindow += cc.sentPackets[pn].bytes
cc.conn.log(logTypeCongestion, "PDV Slow Start: increasing window size with %d bytes to %d",
cc.sentPackets[pn].bytes, cc.congestionWindow)
} else {
// Congestion avoidance
cc.congestionWindow += kDefaultMss * cc.sentPackets[pn].bytes / cc.congestionWindow
cc.conn.log(logTypeCongestion, "PDV Congestion Avoidance: increasing window size to %d",
cc.congestionWindow)
}
}
func (cc *CongestionControllerIetf) onPacketsLost(packets []packetEntry) {
var largestLostPn uint64 = 0
for _, packet := range packets {
// First remove lost packets from bytesInFlight and inform the connection
// of the loss
cc.conn.log(logTypeCongestion, "Packet pn: %d len: %d is lost", packet.pn, packet.bytes)
cc.bytesInFlight -= packet.bytes
if cc.lostPacketHandler != nil {
cc.lostPacketHandler(packet.pn)
}
// and keep track of the largest lost packet
if packet.pn > largestLostPn {
largestLostPn = packet.pn
}
}
// Now start a new recovery epoch if the largest lost packet is larger than the
// end of the previous recovery epoch
if cc.endOfRecovery < largestLostPn {
cc.endOfRecovery = cc.largestSendPacket
cc.congestionWindow = int(float32(cc.congestionWindow) * kLossReductionFactor)
if kMinimumWindow > cc.congestionWindow {
cc.congestionWindow = kMinimumWindow
}
cc.sstresh = cc.congestionWindow
cc.conn.log(logTypeCongestion, "PDV Recovery started. Window size: %d, sstresh: %d, endOfRecovery %d",
cc.congestionWindow, cc.sstresh, cc.endOfRecovery)
}
}
func (cc *CongestionControllerIetf) bytesAllowedToSend() int {
cc.conn.log(logTypeCongestion, "Remaining congestion window size: %d", cc.congestionWindow-cc.bytesInFlight)
return cc.congestionWindow - cc.bytesInFlight
}
func newCongestionControllerIetf(conn *Connection) *CongestionControllerIetf {
return &CongestionControllerIetf{
0, // bytesInFlight
kInitalWindow, // congestionWindow
0, // endOfRecovery
int(^uint(0) >> 1), // sstresh
0, // lossDetectionAlarm
0, // handshakeCount
0, // tlpCount
0, // rtoCount
0, // largestSendBeforeRto
time.Unix(0, 0), // timeOfLastSentPacket
0, // largestSendPacket
0, // largestAckedPacket
0, // maxAckDelay
100 * time.Second, // minRtt
0, // smoothedRtt
0, // rttVar
0, // smoothedRttTcp
0, // rttVarTcp
kReorderingThreshold, // reorderingThreshold
math.MaxFloat32, // timeReorderingFraction
time.Unix(0, 0), // lossTime
make(map[uint64]packetEntry), // sentPackets
nil, // lostPacketHandler
conn, // conn
}
}
| onLossDetectionAlarm | identifier_name |
congestion.go | /*
Package minq is a minimal implementation of QUIC, as documented at
https://quicwg.github.io/. Minq partly implements draft-04.
*/
package minq
import (
"math"
"time"
// "fmt"
)
// congestion control related constants
const (
kDefaultMss = 1460 // bytes
kInitalWindow = 10 * kDefaultMss
kMinimumWindow = 2 * kDefaultMss
kMaximumWindow = kInitalWindow
kLossReductionFactor = 0.5
)
// loss dectection related constants
const (
kMaxTLPs = 2
kReorderingThreshold = 3
kTimeReorderingFraction = 0.125
kMinTLPTimeout = 10 * time.Millisecond
kMinRTOTimeout = 200 * time.Millisecond
kDelayedAckTimeout = 25 * time.Millisecond
kDefaultInitialRtt = 100 * time.Millisecond
)
type CongestionController interface {
onPacketSent(pn uint64, isAckOnly bool, sentBytes int)
onAckReceived(acks ackRanges, delay time.Duration)
bytesAllowedToSend() int
setLostPacketHandler(handler func(pn uint64))
rto() time.Duration
}
/*
* DUMMY congestion controller
*/
type CongestionControllerDummy struct {
}
func (cc *CongestionControllerDummy) onPacketSent(pn uint64, isAckOnly bool, sentBytes int) {
}
func (cc *CongestionControllerDummy) onAckReceived(acks ackRanges, delay time.Duration) {
}
func (cc *CongestionControllerDummy) bytesAllowedToSend() int {
/* return the the maximum int value */
return int(^uint(0) >> 1)
}
func (cc *CongestionControllerDummy) setLostPacketHandler(handler func(pn uint64)) {
}
func (cc *CongestionControllerDummy) rto() time.Duration {
return kMinRTOTimeout
}
/*
* draft-ietf-quic-recovery congestion controller
*/
type CongestionControllerIetf struct {
// Congestion control related
bytesInFlight int
congestionWindow int
endOfRecovery uint64
sstresh int
// Loss detection related
lossDetectionAlarm int //TODO([email protected]) set this to the right type
handshakeCount int
tlpCount int
rtoCount int
largestSendBeforeRto uint64
timeOfLastSentPacket time.Time
largestSendPacket uint64
largestAckedPacket uint64
maxAckDelay time.Duration
minRtt time.Duration
// largestRtt time.Duration
smoothedRtt time.Duration
rttVar time.Duration
smoothedRttTcp time.Duration
rttVarTcp time.Duration
reorderingThreshold int
timeReorderingFraction float32
lossTime time.Time
sentPackets map[uint64]packetEntry
// others
lostPacketHandler func(pn uint64)
conn *Connection
}
type packetEntry struct {
pn uint64
txTime time.Time
bytes int
ackOnly bool
}
func (cc *CongestionControllerIetf) onPacketSent(pn uint64, isAckOnly bool, sentBytes int) {
cc.timeOfLastSentPacket = time.Now()
cc.largestSendPacket = pn
packetData := packetEntry{pn, time.Now(), 0, isAckOnly}
cc.conn.log(logTypeCongestion, "Packet send pn: %d len:%d ackonly: %v\n", pn, sentBytes, isAckOnly)
if !isAckOnly {
cc.onPacketSentCC(sentBytes)
packetData.bytes = sentBytes
cc.setLossDetectionAlarm()
}
cc.sentPackets[pn] = packetData
}
// acks is received to be a sorted list, where the largest packet numbers are at the beginning
func (cc *CongestionControllerIetf) onAckReceived(acks ackRanges, ackDelay time.Duration) {
// keep track of largest packet acked overall
if acks[0].lastPacket > cc.largestAckedPacket {
cc.largestAckedPacket = acks[0].lastPacket
}
// If the largest acked is newly acked update rtt
lastPacket, present := cc.sentPackets[acks[0].lastPacket]
if present {
latestRtt := time.Since(cc.sentPackets[acks[0].lastPacket].txTime)
cc.conn.log(logTypeCongestion, "latestRtt: %v, ackDelay: %v", latestRtt, ackDelay)
cc.updateRttTcp(latestRtt)
// Update the minRtt, but ignore ackDelay.
if latestRtt < cc.minRtt {
cc.minRtt = latestRtt
}
// Now reduce by ackDelay if it doesn't reduce the RTT below the minimum.
if latestRtt-cc.minRtt > ackDelay {
latestRtt -= ackDelay
// And update the maximum observed ACK delay.
if !lastPacket.ackOnly && ackDelay > cc.maxAckDelay {
cc.maxAckDelay = ackDelay
}
}
cc.updateRtt(latestRtt)
}
// find and proccess newly acked packets
for _, ackBlock := range acks {
for pn := ackBlock.lastPacket; pn > (ackBlock.lastPacket - ackBlock.count); pn-- {
cc.conn.log(logTypeCongestion, "Ack for pn %d received", pn)
_, present := cc.sentPackets[pn]
if present {
cc.conn.log(logTypeCongestion, "First ack for pn %d received", pn)
cc.onPacketAcked(pn)
}
}
}
cc.detectLostPackets()
cc.setLossDetectionAlarm()
}
func (cc *CongestionControllerIetf) setLostPacketHandler(handler func(pn uint64)) {
cc.lostPacketHandler = handler
}
func (cc *CongestionControllerIetf) updateRtt(latestRtt time.Duration) {
if cc.smoothedRtt == 0 | else {
rttDelta := cc.smoothedRtt - latestRtt
if rttDelta < 0 {
rttDelta = -rttDelta
}
cc.rttVar = time.Duration(int64(cc.rttVar)*3/4 + int64(rttDelta)*1/4)
cc.smoothedRtt = time.Duration(int64(cc.smoothedRtt)*7/8 + int64(latestRtt)*1/8)
}
cc.conn.log(logTypeCongestion, "New RTT estimate: %v, variance: %v", cc.smoothedRtt, cc.rttVar)
}
func (cc *CongestionControllerIetf) updateRttTcp(latestRtt time.Duration) {
if cc.smoothedRttTcp == 0 {
cc.smoothedRttTcp = latestRtt
cc.rttVarTcp = time.Duration(int64(latestRtt) / 2)
} else {
rttDelta := cc.smoothedRttTcp - latestRtt
if rttDelta < 0 {
rttDelta = -rttDelta
}
cc.rttVarTcp = time.Duration(int64(cc.rttVarTcp)*3/4 + int64(rttDelta)*3/4)
cc.smoothedRttTcp = time.Duration(int64(cc.smoothedRttTcp)*7/8 + int64(latestRtt)*1/8)
}
cc.conn.log(logTypeCongestion, "New RTT(TCP) estimate: %v, variance: %v", cc.smoothedRttTcp, cc.rttVarTcp)
}
func (cc *CongestionControllerIetf) rto() time.Duration {
// max(SRTT + 4*RTTVAR + MaxAckDelay, minRTO)
rto := cc.smoothedRtt + 4*cc.rttVar + cc.maxAckDelay
if rto < kMinRTOTimeout {
return kMinRTOTimeout
}
return rto
}
func (cc *CongestionControllerIetf) onPacketAcked(pn uint64) {
cc.onPacketAckedCC(pn)
//TODO([email protected]) some RTO stuff here
delete(cc.sentPackets, pn)
}
func (cc *CongestionControllerIetf) setLossDetectionAlarm() {
//TODO([email protected])
}
func (cc *CongestionControllerIetf) onLossDetectionAlarm() {
//TODO([email protected])
}
func (cc *CongestionControllerIetf) detectLostPackets() {
var lostPackets []packetEntry
//TODO([email protected]) implement loss detection different from reorderingThreshold
for _, packet := range cc.sentPackets {
if (cc.largestAckedPacket > packet.pn) &&
(cc.largestAckedPacket-packet.pn > uint64(cc.reorderingThreshold)) {
lostPackets = append(lostPackets, packet)
}
}
if len(lostPackets) > 0 {
cc.onPacketsLost(lostPackets)
}
for _, packet := range lostPackets {
delete(cc.sentPackets, packet.pn)
}
}
func (cc *CongestionControllerIetf) onPacketSentCC(bytes_sent int) {
cc.bytesInFlight += bytes_sent
cc.conn.log(logTypeCongestion, "%d bytes added to bytesInFlight", bytes_sent)
}
func (cc *CongestionControllerIetf) onPacketAckedCC(pn uint64) {
cc.bytesInFlight -= cc.sentPackets[pn].bytes
cc.conn.log(logTypeCongestion, "%d bytes from packet %d removed from bytesInFlight", cc.sentPackets[pn].bytes, pn)
if pn < cc.endOfRecovery {
// Do not increase window size during recovery
return
}
if cc.congestionWindow < cc.sstresh {
// Slow start
cc.congestionWindow += cc.sentPackets[pn].bytes
cc.conn.log(logTypeCongestion, "PDV Slow Start: increasing window size with %d bytes to %d",
cc.sentPackets[pn].bytes, cc.congestionWindow)
} else {
// Congestion avoidance
cc.congestionWindow += kDefaultMss * cc.sentPackets[pn].bytes / cc.congestionWindow
cc.conn.log(logTypeCongestion, "PDV Congestion Avoidance: increasing window size to %d",
cc.congestionWindow)
}
}
func (cc *CongestionControllerIetf) onPacketsLost(packets []packetEntry) {
var largestLostPn uint64 = 0
for _, packet := range packets {
// First remove lost packets from bytesInFlight and inform the connection
// of the loss
cc.conn.log(logTypeCongestion, "Packet pn: %d len: %d is lost", packet.pn, packet.bytes)
cc.bytesInFlight -= packet.bytes
if cc.lostPacketHandler != nil {
cc.lostPacketHandler(packet.pn)
}
// and keep track of the largest lost packet
if packet.pn > largestLostPn {
largestLostPn = packet.pn
}
}
// Now start a new recovery epoch if the largest lost packet is larger than the
// end of the previous recovery epoch
if cc.endOfRecovery < largestLostPn {
cc.endOfRecovery = cc.largestSendPacket
cc.congestionWindow = int(float32(cc.congestionWindow) * kLossReductionFactor)
if kMinimumWindow > cc.congestionWindow {
cc.congestionWindow = kMinimumWindow
}
cc.sstresh = cc.congestionWindow
cc.conn.log(logTypeCongestion, "PDV Recovery started. Window size: %d, sstresh: %d, endOfRecovery %d",
cc.congestionWindow, cc.sstresh, cc.endOfRecovery)
}
}
func (cc *CongestionControllerIetf) bytesAllowedToSend() int {
cc.conn.log(logTypeCongestion, "Remaining congestion window size: %d", cc.congestionWindow-cc.bytesInFlight)
return cc.congestionWindow - cc.bytesInFlight
}
func newCongestionControllerIetf(conn *Connection) *CongestionControllerIetf {
return &CongestionControllerIetf{
0, // bytesInFlight
kInitalWindow, // congestionWindow
0, // endOfRecovery
int(^uint(0) >> 1), // sstresh
0, // lossDetectionAlarm
0, // handshakeCount
0, // tlpCount
0, // rtoCount
0, // largestSendBeforeRto
time.Unix(0, 0), // timeOfLastSentPacket
0, // largestSendPacket
0, // largestAckedPacket
0, // maxAckDelay
100 * time.Second, // minRtt
0, // smoothedRtt
0, // rttVar
0, // smoothedRttTcp
0, // rttVarTcp
kReorderingThreshold, // reorderingThreshold
math.MaxFloat32, // timeReorderingFraction
time.Unix(0, 0), // lossTime
make(map[uint64]packetEntry), // sentPackets
nil, // lostPacketHandler
conn, // conn
}
}
| {
cc.smoothedRtt = latestRtt
cc.rttVar = time.Duration(int64(latestRtt) / 2)
} | conditional_block |
Leanote4MD.py | #!/usr/bin/env python
#encoding: utf8
#
# author: goodbest <[email protected]>
# github: github.com/goodbest
import requests
import json
import os
import sys
from datetime import datetime
import dateutil.parser
from dateutil import tz
from PIL import Image
from StringIO import StringIO
from requests_toolbelt import SSLAdapter
import ssl
from requests.packages import urllib3
urllib3.disable_warnings()
def is_ok(myjson):
try:
json_object = json.loads(myjson)
except ValueError, e:
return False
if 'Ok' in json_object:
if json_object['Ok']:
return True
else:
print json_object['Msg']
return False
else:
return True
def req_get(url, param = '', type = 'json', token = True):
if token:
if param:
param.update({'token': leanote_token})
else:
param={'token': leanote_token}
s = requests.Session()
if leanote_host.startswith('https'):
s.mount('https://', SSLAdapter(ssl.PROTOCOL_TLSv1))
r = s.get(leanote_host + '/api/' + url, params = param, verify=False)
if r.status_code == requests.codes.ok:
if type=='json':
if is_ok(r.text):
rj = json.loads(r.text)
# if 'Msg' in rj:
# rj=rj['Msg']
return rj
else:
print '[Err] requests to url %s fail' %(r.url)
return None
elif type=='image':
i = Image.open(StringIO(r.content))
return i
else:
print '[Err] connect to url %s fail, error code %d ' %(r.url, r.status_cde)
return None
def req_post(url, param = '', type = 'json', token = True):
if token:
if param:
param.update({'token': leanote_token})
else:
param={'token': leanote_token}
s = requests.Session()
if leanote_host.startswith('https'):
s.mount('https://', SSLAdapter(ssl.PROTOCOL_TLSv1))
r = s.post(leanote_host + '/api/' + url, data = param, verify=False)
if r.status_code == requests.codes.ok:
if type=='json':
if is_ok(r.text):
rj = json.loads(r.text)
# if 'Msg' in rj:
# rj=rj['Msg']
return rj
else:
print '[Err] requests to url %s fail' %(r.url)
return None
else:
print '[Err] connect to url %s fail, error code %d ' %(r.url, r.status_cde)
return None
#ret leanote_token
def login(email, pwd):
param = {
'email': email,
'pwd': pwd,
}
r = req_get('auth/login', param, token=False)
if r:
print 'Login success! Welcome %s (%s)' %(r['Username'], r['Email'])
return r['Token']
else:
print 'Login fail! Start again.'
exit()
def logout():
return req_get('auth/logout')
#ret dict(notebookId: type.Notebook}
def getNotebooks(includeTrash = False):
r = req_get('notebook/getNotebooks')
if r:
if includeTrash:
return {notebook['NotebookId'] : notebook for notebook in r}
else:
return {notebook['NotebookId'] : notebook for notebook in r if not notebook['IsDeleted']}
else:
return none
#ret [type.Note], which contains noteId, and note meta data
def getNotesMeta(notebookId):
param = {
'notebookId': notebookId,
} | def getNoteDetail(noteId):
param = {
'noteId': noteId,
}
return req_get('note/getNoteAndContent', param)
def getImage(fileId):
param = {
'fileId': fileId,
}
return req_get('file/getImage', param, type = 'image')
def addNotebook(title='Import', parentId='', seq=-1):
param = {
'title': title,
'parentNotebookId': parentId,
'seq' : seq
}
return req_post('notebook/addNotebook', param)
def addNote(NotebookId, Title, Content, Tags=[], IsMarkdown = True, Abstract= '', Files=[]):
param = {
'NotebookId': NotebookId,
'Title': Title,
'Content': Content,
'Tags[]': Tags,
'IsMarkdown': IsMarkdown,
'Abstract': Abstract,
#'Files' : seq
}
return req_post('note/addNote', param)
def readFromFile(filename):
import yaml
with open (filename) as file:
file_meta = ''
file_content = ''
meta_flag=False
for line in file:
#print line
if meta_flag:
file_content += line
else:
if line.find('---')>-1:
meta_flag = True
else:
file_meta += line
#print meta
if not meta_flag:
file_content = file_meta
file_meta = ''
if meta_flag:
meta = yaml.load(file_meta)
else:
meta = {}
return file_content, meta
def saveToFile(notes, noteBooks, path = '.'):
unique_noteTitle = set()
for note in notes:
if note['Title'] == '':
filename = note['NoteId']
else:
filename = note['Title']
if filename in unique_noteTitle:
filename='%s_%s' %(filename, note['NoteId'])
else:
unique_noteTitle.add(filename)
if note['IsMarkdown']:
filename += '.md'
else:
filename += '.txt'
try:
with open(path + '/' + filename, 'w') as file:
print 'write file: %s' %filename
file.write('title: %s\n' %note['Title'].encode('utf-8'))
date = dateutil.parser.parse(note['CreatedTime'])
file.write('date: %s\n' %datetime.strftime(date.astimezone(local_zone), '%Y/%m/%d %H:%M:%S'))
date = dateutil.parser.parse(note['UpdatedTime'])
file.write('updated: %s\n' %datetime.strftime(date.astimezone(local_zone), '%Y/%m/%d %H:%M:%S'))
if note['Tags']:
if len(note['Tags']) == 1:
if note['Tags'][0]:
file.write('tags:\n')
for tag in note['Tags']:
file.write('- %s\n' %tag.encode('utf-8'))
category = []
current_notebook = note['NotebookId']
category.append(noteBooks[current_notebook]['Title'])
while noteBooks[current_notebook]['ParentNotebookId'] != '':
category.append(noteBooks[noteBooks[current_notebook]['ParentNotebookId']]['Title'])
current_notebook = noteBooks[current_notebook]['ParentNotebookId']
file.write('categories:\n')
category.reverse()
for cat in category:
file.write('- %s\n' %cat.encode('utf-8'))
file.write('---\n')
file.write('%s' %note['Content'].encode('utf-8'))
file.close()
if note['Files']:
if len(note['Files']) > 0:
for attach in note['Files']:
if not attach['IsAttach']:
i = getImage(attach['FileId'])
print 'saving its image: %s.%s' %(attach['FileId'], i.format)
i.save(attach['FileId'] + '.' + i.format)
except Exception as e:
logging.exception(e)
print "error: ", filename
def LeanoteExportToMD(path = '.'):
print 'Reading your notebooks...'
noteBooks = getNotebooks()
#get not deleted notes list
notes=[]
for notebook in noteBooks.values():
if not notebook['IsDeleted']:
notesMeta = getNotesMeta(notebook['NotebookId'])
for noteMeta in notesMeta:
if not noteMeta['IsTrash']:
note = getNoteDetail(noteMeta['NoteId'])
notes.append(note)
print 'found %d notes' %len(notes)
#write file
saveToFile(notes, noteBooks, path = path)
logout()
print 'all done, bye~'
def LeanoteImportFromMD(path = '.'):
filelist = os.listdir(path)
filelist = [file for file in filelist if file.find('.md')>-1 or file.find('.txt')>-1]
importedNotebookTitleMapID = {}
ret = addNotebook(title='imported_note', parentId='', seq=-1)
if ret:
print 'imporing into a new notebook: %s' %ret['Title']
importedNotebookTitleMapID['import'] = ret['NotebookId']
for filename in filelist:
content, meta = readFromFile(path + '/' + filename)
parentTitle='import'
currentTitle=''
if not meta.get('categories'):
categories=['import']
else:
categories= meta.get('categories')
for cat in categories:
currentTitle=cat
if currentTitle in importedNotebookTitleMapID.keys():
parentTitle=currentTitle
else:
ret = addNotebook(title = currentTitle, parentId = importedNotebookTitleMapID[parentTitle])
importedNotebookTitleMapID[currentTitle] = ret['NotebookId']
parentTitle=currentTitle
if not meta.get('title'):
meta['title'] = filename.replace('.md','').replace('.txt','')
importedNote = addNote(NotebookId=importedNotebookTitleMapID[currentTitle], Title=meta.get('title'), Content=content, Tags=meta.get('tags', []), Abstract='')
if importedNote:
print 'imported %s' %filename
logout()
print 'all done, bye~'
if __name__ == '__main__':
choice = raw_input("Enter your choice: (import or export) ")
leanote_host = raw_input("Enter your host: (default is http://leanote.com) ")
if not leanote_host:
leanote_host = 'https://leanote.com' #使用http://leanote.com会报503错误
leanote_email = raw_input('Enter your email: ')
leanote_password = raw_input('Enter your password: ')
path = raw_input("Enter your save path: (default is current dir) ")
if not path:
path = '.'
# leanote_host='http://leanote.com'
# leanote_email='[email protected]'
# leanote_password='abc123'
# path = '.'
print 'Connecting to %s' %leanote_host
leanote_token = login(leanote_email, leanote_password)
local_zone=tz.tzlocal()
if choice == 'import':
LeanoteImportFromMD(path)
exit()
elif choice == 'export':
LeanoteExportToMD(path)
exit()
else:
print 'command format: \npython Leanote4MD.py import\npython Leanote4MD.py export' | return req_get('note/getNotes', param)
#ret type.NoteContent | random_line_split |
Leanote4MD.py | #!/usr/bin/env python
#encoding: utf8
#
# author: goodbest <[email protected]>
# github: github.com/goodbest
import requests
import json
import os
import sys
from datetime import datetime
import dateutil.parser
from dateutil import tz
from PIL import Image
from StringIO import StringIO
from requests_toolbelt import SSLAdapter
import ssl
from requests.packages import urllib3
urllib3.disable_warnings()
def is_ok(myjson):
try:
json_object = json.loads(myjson)
except ValueError, e:
return False
if 'Ok' in json_object:
if json_object['Ok']:
return True
else:
print json_object['Msg']
return False
else:
return True
def req_get(url, param = '', type = 'json', token = True):
if token:
if param:
param.update({'token': leanote_token})
else:
param={'token': leanote_token}
s = requests.Session()
if leanote_host.startswith('https'):
s.mount('https://', SSLAdapter(ssl.PROTOCOL_TLSv1))
r = s.get(leanote_host + '/api/' + url, params = param, verify=False)
if r.status_code == requests.codes.ok:
if type=='json':
if is_ok(r.text):
rj = json.loads(r.text)
# if 'Msg' in rj:
# rj=rj['Msg']
return rj
else:
print '[Err] requests to url %s fail' %(r.url)
return None
elif type=='image':
i = Image.open(StringIO(r.content))
return i
else:
print '[Err] connect to url %s fail, error code %d ' %(r.url, r.status_cde)
return None
def req_post(url, param = '', type = 'json', token = True):
if token:
if param:
param.update({'token': leanote_token})
else:
param={'token': leanote_token}
s = requests.Session()
if leanote_host.startswith('https'):
s.mount('https://', SSLAdapter(ssl.PROTOCOL_TLSv1))
r = s.post(leanote_host + '/api/' + url, data = param, verify=False)
if r.status_code == requests.codes.ok:
if type=='json':
if is_ok(r.text):
rj = json.loads(r.text)
# if 'Msg' in rj:
# rj=rj['Msg']
return rj
else:
print '[Err] requests to url %s fail' %(r.url)
return None
else:
print '[Err] connect to url %s fail, error code %d ' %(r.url, r.status_cde)
return None
#ret leanote_token
def login(email, pwd):
param = {
'email': email,
'pwd': pwd,
}
r = req_get('auth/login', param, token=False)
if r:
print 'Login success! Welcome %s (%s)' %(r['Username'], r['Email'])
return r['Token']
else:
print 'Login fail! Start again.'
exit()
def logout():
return req_get('auth/logout')
#ret dict(notebookId: type.Notebook}
def getNotebooks(includeTrash = False):
r = req_get('notebook/getNotebooks')
if r:
if includeTrash:
return {notebook['NotebookId'] : notebook for notebook in r}
else:
return {notebook['NotebookId'] : notebook for notebook in r if not notebook['IsDeleted']}
else:
return none
#ret [type.Note], which contains noteId, and note meta data
def getNotesMeta(notebookId):
param = {
'notebookId': notebookId,
}
return req_get('note/getNotes', param)
#ret type.NoteContent
def getNoteDetail(noteId):
param = {
'noteId': noteId,
}
return req_get('note/getNoteAndContent', param)
def getImage(fileId):
param = {
'fileId': fileId,
}
return req_get('file/getImage', param, type = 'image')
def addNotebook(title='Import', parentId='', seq=-1):
param = {
'title': title,
'parentNotebookId': parentId,
'seq' : seq
}
return req_post('notebook/addNotebook', param)
def addNote(NotebookId, Title, Content, Tags=[], IsMarkdown = True, Abstract= '', Files=[]):
param = {
'NotebookId': NotebookId,
'Title': Title,
'Content': Content,
'Tags[]': Tags,
'IsMarkdown': IsMarkdown,
'Abstract': Abstract,
#'Files' : seq
}
return req_post('note/addNote', param)
def readFromFile(filename):
import yaml
with open (filename) as file:
file_meta = ''
file_content = ''
meta_flag=False
for line in file:
#print line
|
if not meta_flag:
file_content = file_meta
file_meta = ''
if meta_flag:
meta = yaml.load(file_meta)
else:
meta = {}
return file_content, meta
def saveToFile(notes, noteBooks, path = '.'):
unique_noteTitle = set()
for note in notes:
if note['Title'] == '':
filename = note['NoteId']
else:
filename = note['Title']
if filename in unique_noteTitle:
filename='%s_%s' %(filename, note['NoteId'])
else:
unique_noteTitle.add(filename)
if note['IsMarkdown']:
filename += '.md'
else:
filename += '.txt'
try:
with open(path + '/' + filename, 'w') as file:
print 'write file: %s' %filename
file.write('title: %s\n' %note['Title'].encode('utf-8'))
date = dateutil.parser.parse(note['CreatedTime'])
file.write('date: %s\n' %datetime.strftime(date.astimezone(local_zone), '%Y/%m/%d %H:%M:%S'))
date = dateutil.parser.parse(note['UpdatedTime'])
file.write('updated: %s\n' %datetime.strftime(date.astimezone(local_zone), '%Y/%m/%d %H:%M:%S'))
if note['Tags']:
if len(note['Tags']) == 1:
if note['Tags'][0]:
file.write('tags:\n')
for tag in note['Tags']:
file.write('- %s\n' %tag.encode('utf-8'))
category = []
current_notebook = note['NotebookId']
category.append(noteBooks[current_notebook]['Title'])
while noteBooks[current_notebook]['ParentNotebookId'] != '':
category.append(noteBooks[noteBooks[current_notebook]['ParentNotebookId']]['Title'])
current_notebook = noteBooks[current_notebook]['ParentNotebookId']
file.write('categories:\n')
category.reverse()
for cat in category:
file.write('- %s\n' %cat.encode('utf-8'))
file.write('---\n')
file.write('%s' %note['Content'].encode('utf-8'))
file.close()
if note['Files']:
if len(note['Files']) > 0:
for attach in note['Files']:
if not attach['IsAttach']:
i = getImage(attach['FileId'])
print 'saving its image: %s.%s' %(attach['FileId'], i.format)
i.save(attach['FileId'] + '.' + i.format)
except Exception as e:
logging.exception(e)
print "error: ", filename
def LeanoteExportToMD(path = '.'):
print 'Reading your notebooks...'
noteBooks = getNotebooks()
#get not deleted notes list
notes=[]
for notebook in noteBooks.values():
if not notebook['IsDeleted']:
notesMeta = getNotesMeta(notebook['NotebookId'])
for noteMeta in notesMeta:
if not noteMeta['IsTrash']:
note = getNoteDetail(noteMeta['NoteId'])
notes.append(note)
print 'found %d notes' %len(notes)
#write file
saveToFile(notes, noteBooks, path = path)
logout()
print 'all done, bye~'
def LeanoteImportFromMD(path = '.'):
filelist = os.listdir(path)
filelist = [file for file in filelist if file.find('.md')>-1 or file.find('.txt')>-1]
importedNotebookTitleMapID = {}
ret = addNotebook(title='imported_note', parentId='', seq=-1)
if ret:
print 'imporing into a new notebook: %s' %ret['Title']
importedNotebookTitleMapID['import'] = ret['NotebookId']
for filename in filelist:
content, meta = readFromFile(path + '/' + filename)
parentTitle='import'
currentTitle=''
if not meta.get('categories'):
categories=['import']
else:
categories= meta.get('categories')
for cat in categories:
currentTitle=cat
if currentTitle in importedNotebookTitleMapID.keys():
parentTitle=currentTitle
else:
ret = addNotebook(title = currentTitle, parentId = importedNotebookTitleMapID[parentTitle])
importedNotebookTitleMapID[currentTitle] = ret['NotebookId']
parentTitle=currentTitle
if not meta.get('title'):
meta['title'] = filename.replace('.md','').replace('.txt','')
importedNote = addNote(NotebookId=importedNotebookTitleMapID[currentTitle], Title=meta.get('title'), Content=content, Tags=meta.get('tags', []), Abstract='')
if importedNote:
print 'imported %s' %filename
logout()
print 'all done, bye~'
if __name__ == '__main__':
choice = raw_input("Enter your choice: (import or export) ")
leanote_host = raw_input("Enter your host: (default is http://leanote.com) ")
if not leanote_host:
leanote_host = 'https://leanote.com' #使用http://leanote.com会报503错误
leanote_email = raw_input('Enter your email: ')
leanote_password = raw_input('Enter your password: ')
path = raw_input("Enter your save path: (default is current dir) ")
if not path:
path = '.'
# leanote_host='http://leanote.com'
# leanote_email='[email protected]'
# leanote_password='abc123'
# path = '.'
print 'Connecting to %s' %leanote_host
leanote_token = login(leanote_email, leanote_password)
local_zone=tz.tzlocal()
if choice == 'import':
LeanoteImportFromMD(path)
exit()
elif choice == 'export':
LeanoteExportToMD(path)
exit()
else:
print 'command format: \npython Leanote4MD.py import\npython Leanote4MD.py export'
| if meta_flag:
file_content += line
else:
if line.find('---')>-1:
meta_flag = True
else:
file_meta += line
#print meta | conditional_block |
Leanote4MD.py | #!/usr/bin/env python
#encoding: utf8
#
# author: goodbest <[email protected]>
# github: github.com/goodbest
import requests
import json
import os
import sys
from datetime import datetime
import dateutil.parser
from dateutil import tz
from PIL import Image
from StringIO import StringIO
from requests_toolbelt import SSLAdapter
import ssl
from requests.packages import urllib3
urllib3.disable_warnings()
def is_ok(myjson):
try:
json_object = json.loads(myjson)
except ValueError, e:
return False
if 'Ok' in json_object:
if json_object['Ok']:
return True
else:
print json_object['Msg']
return False
else:
return True
def req_get(url, param = '', type = 'json', token = True):
if token:
if param:
param.update({'token': leanote_token})
else:
param={'token': leanote_token}
s = requests.Session()
if leanote_host.startswith('https'):
s.mount('https://', SSLAdapter(ssl.PROTOCOL_TLSv1))
r = s.get(leanote_host + '/api/' + url, params = param, verify=False)
if r.status_code == requests.codes.ok:
if type=='json':
if is_ok(r.text):
rj = json.loads(r.text)
# if 'Msg' in rj:
# rj=rj['Msg']
return rj
else:
print '[Err] requests to url %s fail' %(r.url)
return None
elif type=='image':
i = Image.open(StringIO(r.content))
return i
else:
print '[Err] connect to url %s fail, error code %d ' %(r.url, r.status_cde)
return None
def req_post(url, param = '', type = 'json', token = True):
if token:
if param:
param.update({'token': leanote_token})
else:
param={'token': leanote_token}
s = requests.Session()
if leanote_host.startswith('https'):
s.mount('https://', SSLAdapter(ssl.PROTOCOL_TLSv1))
r = s.post(leanote_host + '/api/' + url, data = param, verify=False)
if r.status_code == requests.codes.ok:
if type=='json':
if is_ok(r.text):
rj = json.loads(r.text)
# if 'Msg' in rj:
# rj=rj['Msg']
return rj
else:
print '[Err] requests to url %s fail' %(r.url)
return None
else:
print '[Err] connect to url %s fail, error code %d ' %(r.url, r.status_cde)
return None
#ret leanote_token
def login(email, pwd):
param = {
'email': email,
'pwd': pwd,
}
r = req_get('auth/login', param, token=False)
if r:
print 'Login success! Welcome %s (%s)' %(r['Username'], r['Email'])
return r['Token']
else:
print 'Login fail! Start again.'
exit()
def logout():
return req_get('auth/logout')
#ret dict(notebookId: type.Notebook}
def getNotebooks(includeTrash = False):
r = req_get('notebook/getNotebooks')
if r:
if includeTrash:
return {notebook['NotebookId'] : notebook for notebook in r}
else:
return {notebook['NotebookId'] : notebook for notebook in r if not notebook['IsDeleted']}
else:
return none
#ret [type.Note], which contains noteId, and note meta data
def getNotesMeta(notebookId):
param = {
'notebookId': notebookId,
}
return req_get('note/getNotes', param)
#ret type.NoteContent
def getNoteDetail(noteId):
param = {
'noteId': noteId,
}
return req_get('note/getNoteAndContent', param)
def getImage(fileId):
param = {
'fileId': fileId,
}
return req_get('file/getImage', param, type = 'image')
def addNotebook(title='Import', parentId='', seq=-1):
|
def addNote(NotebookId, Title, Content, Tags=[], IsMarkdown = True, Abstract= '', Files=[]):
param = {
'NotebookId': NotebookId,
'Title': Title,
'Content': Content,
'Tags[]': Tags,
'IsMarkdown': IsMarkdown,
'Abstract': Abstract,
#'Files' : seq
}
return req_post('note/addNote', param)
def readFromFile(filename):
import yaml
with open (filename) as file:
file_meta = ''
file_content = ''
meta_flag=False
for line in file:
#print line
if meta_flag:
file_content += line
else:
if line.find('---')>-1:
meta_flag = True
else:
file_meta += line
#print meta
if not meta_flag:
file_content = file_meta
file_meta = ''
if meta_flag:
meta = yaml.load(file_meta)
else:
meta = {}
return file_content, meta
def saveToFile(notes, noteBooks, path = '.'):
unique_noteTitle = set()
for note in notes:
if note['Title'] == '':
filename = note['NoteId']
else:
filename = note['Title']
if filename in unique_noteTitle:
filename='%s_%s' %(filename, note['NoteId'])
else:
unique_noteTitle.add(filename)
if note['IsMarkdown']:
filename += '.md'
else:
filename += '.txt'
try:
with open(path + '/' + filename, 'w') as file:
print 'write file: %s' %filename
file.write('title: %s\n' %note['Title'].encode('utf-8'))
date = dateutil.parser.parse(note['CreatedTime'])
file.write('date: %s\n' %datetime.strftime(date.astimezone(local_zone), '%Y/%m/%d %H:%M:%S'))
date = dateutil.parser.parse(note['UpdatedTime'])
file.write('updated: %s\n' %datetime.strftime(date.astimezone(local_zone), '%Y/%m/%d %H:%M:%S'))
if note['Tags']:
if len(note['Tags']) == 1:
if note['Tags'][0]:
file.write('tags:\n')
for tag in note['Tags']:
file.write('- %s\n' %tag.encode('utf-8'))
category = []
current_notebook = note['NotebookId']
category.append(noteBooks[current_notebook]['Title'])
while noteBooks[current_notebook]['ParentNotebookId'] != '':
category.append(noteBooks[noteBooks[current_notebook]['ParentNotebookId']]['Title'])
current_notebook = noteBooks[current_notebook]['ParentNotebookId']
file.write('categories:\n')
category.reverse()
for cat in category:
file.write('- %s\n' %cat.encode('utf-8'))
file.write('---\n')
file.write('%s' %note['Content'].encode('utf-8'))
file.close()
if note['Files']:
if len(note['Files']) > 0:
for attach in note['Files']:
if not attach['IsAttach']:
i = getImage(attach['FileId'])
print 'saving its image: %s.%s' %(attach['FileId'], i.format)
i.save(attach['FileId'] + '.' + i.format)
except Exception as e:
logging.exception(e)
print "error: ", filename
def LeanoteExportToMD(path = '.'):
print 'Reading your notebooks...'
noteBooks = getNotebooks()
#get not deleted notes list
notes=[]
for notebook in noteBooks.values():
if not notebook['IsDeleted']:
notesMeta = getNotesMeta(notebook['NotebookId'])
for noteMeta in notesMeta:
if not noteMeta['IsTrash']:
note = getNoteDetail(noteMeta['NoteId'])
notes.append(note)
print 'found %d notes' %len(notes)
#write file
saveToFile(notes, noteBooks, path = path)
logout()
print 'all done, bye~'
def LeanoteImportFromMD(path = '.'):
filelist = os.listdir(path)
filelist = [file for file in filelist if file.find('.md')>-1 or file.find('.txt')>-1]
importedNotebookTitleMapID = {}
ret = addNotebook(title='imported_note', parentId='', seq=-1)
if ret:
print 'imporing into a new notebook: %s' %ret['Title']
importedNotebookTitleMapID['import'] = ret['NotebookId']
for filename in filelist:
content, meta = readFromFile(path + '/' + filename)
parentTitle='import'
currentTitle=''
if not meta.get('categories'):
categories=['import']
else:
categories= meta.get('categories')
for cat in categories:
currentTitle=cat
if currentTitle in importedNotebookTitleMapID.keys():
parentTitle=currentTitle
else:
ret = addNotebook(title = currentTitle, parentId = importedNotebookTitleMapID[parentTitle])
importedNotebookTitleMapID[currentTitle] = ret['NotebookId']
parentTitle=currentTitle
if not meta.get('title'):
meta['title'] = filename.replace('.md','').replace('.txt','')
importedNote = addNote(NotebookId=importedNotebookTitleMapID[currentTitle], Title=meta.get('title'), Content=content, Tags=meta.get('tags', []), Abstract='')
if importedNote:
print 'imported %s' %filename
logout()
print 'all done, bye~'
if __name__ == '__main__':
choice = raw_input("Enter your choice: (import or export) ")
leanote_host = raw_input("Enter your host: (default is http://leanote.com) ")
if not leanote_host:
leanote_host = 'https://leanote.com' #使用http://leanote.com会报503错误
leanote_email = raw_input('Enter your email: ')
leanote_password = raw_input('Enter your password: ')
path = raw_input("Enter your save path: (default is current dir) ")
if not path:
path = '.'
# leanote_host='http://leanote.com'
# leanote_email='[email protected]'
# leanote_password='abc123'
# path = '.'
print 'Connecting to %s' %leanote_host
leanote_token = login(leanote_email, leanote_password)
local_zone=tz.tzlocal()
if choice == 'import':
LeanoteImportFromMD(path)
exit()
elif choice == 'export':
LeanoteExportToMD(path)
exit()
else:
print 'command format: \npython Leanote4MD.py import\npython Leanote4MD.py export'
| param = {
'title': title,
'parentNotebookId': parentId,
'seq' : seq
}
return req_post('notebook/addNotebook', param) | identifier_body |
Leanote4MD.py | #!/usr/bin/env python
#encoding: utf8
#
# author: goodbest <[email protected]>
# github: github.com/goodbest
import requests
import json
import os
import sys
from datetime import datetime
import dateutil.parser
from dateutil import tz
from PIL import Image
from StringIO import StringIO
from requests_toolbelt import SSLAdapter
import ssl
from requests.packages import urllib3
urllib3.disable_warnings()
def is_ok(myjson):
try:
json_object = json.loads(myjson)
except ValueError, e:
return False
if 'Ok' in json_object:
if json_object['Ok']:
return True
else:
print json_object['Msg']
return False
else:
return True
def req_get(url, param = '', type = 'json', token = True):
if token:
if param:
param.update({'token': leanote_token})
else:
param={'token': leanote_token}
s = requests.Session()
if leanote_host.startswith('https'):
s.mount('https://', SSLAdapter(ssl.PROTOCOL_TLSv1))
r = s.get(leanote_host + '/api/' + url, params = param, verify=False)
if r.status_code == requests.codes.ok:
if type=='json':
if is_ok(r.text):
rj = json.loads(r.text)
# if 'Msg' in rj:
# rj=rj['Msg']
return rj
else:
print '[Err] requests to url %s fail' %(r.url)
return None
elif type=='image':
i = Image.open(StringIO(r.content))
return i
else:
print '[Err] connect to url %s fail, error code %d ' %(r.url, r.status_cde)
return None
def req_post(url, param = '', type = 'json', token = True):
if token:
if param:
param.update({'token': leanote_token})
else:
param={'token': leanote_token}
s = requests.Session()
if leanote_host.startswith('https'):
s.mount('https://', SSLAdapter(ssl.PROTOCOL_TLSv1))
r = s.post(leanote_host + '/api/' + url, data = param, verify=False)
if r.status_code == requests.codes.ok:
if type=='json':
if is_ok(r.text):
rj = json.loads(r.text)
# if 'Msg' in rj:
# rj=rj['Msg']
return rj
else:
print '[Err] requests to url %s fail' %(r.url)
return None
else:
print '[Err] connect to url %s fail, error code %d ' %(r.url, r.status_cde)
return None
#ret leanote_token
def login(email, pwd):
param = {
'email': email,
'pwd': pwd,
}
r = req_get('auth/login', param, token=False)
if r:
print 'Login success! Welcome %s (%s)' %(r['Username'], r['Email'])
return r['Token']
else:
print 'Login fail! Start again.'
exit()
def logout():
return req_get('auth/logout')
#ret dict(notebookId: type.Notebook}
def getNotebooks(includeTrash = False):
r = req_get('notebook/getNotebooks')
if r:
if includeTrash:
return {notebook['NotebookId'] : notebook for notebook in r}
else:
return {notebook['NotebookId'] : notebook for notebook in r if not notebook['IsDeleted']}
else:
return none
#ret [type.Note], which contains noteId, and note meta data
def getNotesMeta(notebookId):
param = {
'notebookId': notebookId,
}
return req_get('note/getNotes', param)
#ret type.NoteContent
def getNoteDetail(noteId):
param = {
'noteId': noteId,
}
return req_get('note/getNoteAndContent', param)
def | (fileId):
param = {
'fileId': fileId,
}
return req_get('file/getImage', param, type = 'image')
def addNotebook(title='Import', parentId='', seq=-1):
param = {
'title': title,
'parentNotebookId': parentId,
'seq' : seq
}
return req_post('notebook/addNotebook', param)
def addNote(NotebookId, Title, Content, Tags=[], IsMarkdown = True, Abstract= '', Files=[]):
param = {
'NotebookId': NotebookId,
'Title': Title,
'Content': Content,
'Tags[]': Tags,
'IsMarkdown': IsMarkdown,
'Abstract': Abstract,
#'Files' : seq
}
return req_post('note/addNote', param)
def readFromFile(filename):
import yaml
with open (filename) as file:
file_meta = ''
file_content = ''
meta_flag=False
for line in file:
#print line
if meta_flag:
file_content += line
else:
if line.find('---')>-1:
meta_flag = True
else:
file_meta += line
#print meta
if not meta_flag:
file_content = file_meta
file_meta = ''
if meta_flag:
meta = yaml.load(file_meta)
else:
meta = {}
return file_content, meta
def saveToFile(notes, noteBooks, path = '.'):
unique_noteTitle = set()
for note in notes:
if note['Title'] == '':
filename = note['NoteId']
else:
filename = note['Title']
if filename in unique_noteTitle:
filename='%s_%s' %(filename, note['NoteId'])
else:
unique_noteTitle.add(filename)
if note['IsMarkdown']:
filename += '.md'
else:
filename += '.txt'
try:
with open(path + '/' + filename, 'w') as file:
print 'write file: %s' %filename
file.write('title: %s\n' %note['Title'].encode('utf-8'))
date = dateutil.parser.parse(note['CreatedTime'])
file.write('date: %s\n' %datetime.strftime(date.astimezone(local_zone), '%Y/%m/%d %H:%M:%S'))
date = dateutil.parser.parse(note['UpdatedTime'])
file.write('updated: %s\n' %datetime.strftime(date.astimezone(local_zone), '%Y/%m/%d %H:%M:%S'))
if note['Tags']:
if len(note['Tags']) == 1:
if note['Tags'][0]:
file.write('tags:\n')
for tag in note['Tags']:
file.write('- %s\n' %tag.encode('utf-8'))
category = []
current_notebook = note['NotebookId']
category.append(noteBooks[current_notebook]['Title'])
while noteBooks[current_notebook]['ParentNotebookId'] != '':
category.append(noteBooks[noteBooks[current_notebook]['ParentNotebookId']]['Title'])
current_notebook = noteBooks[current_notebook]['ParentNotebookId']
file.write('categories:\n')
category.reverse()
for cat in category:
file.write('- %s\n' %cat.encode('utf-8'))
file.write('---\n')
file.write('%s' %note['Content'].encode('utf-8'))
file.close()
if note['Files']:
if len(note['Files']) > 0:
for attach in note['Files']:
if not attach['IsAttach']:
i = getImage(attach['FileId'])
print 'saving its image: %s.%s' %(attach['FileId'], i.format)
i.save(attach['FileId'] + '.' + i.format)
except Exception as e:
logging.exception(e)
print "error: ", filename
def LeanoteExportToMD(path = '.'):
print 'Reading your notebooks...'
noteBooks = getNotebooks()
#get not deleted notes list
notes=[]
for notebook in noteBooks.values():
if not notebook['IsDeleted']:
notesMeta = getNotesMeta(notebook['NotebookId'])
for noteMeta in notesMeta:
if not noteMeta['IsTrash']:
note = getNoteDetail(noteMeta['NoteId'])
notes.append(note)
print 'found %d notes' %len(notes)
#write file
saveToFile(notes, noteBooks, path = path)
logout()
print 'all done, bye~'
def LeanoteImportFromMD(path = '.'):
filelist = os.listdir(path)
filelist = [file for file in filelist if file.find('.md')>-1 or file.find('.txt')>-1]
importedNotebookTitleMapID = {}
ret = addNotebook(title='imported_note', parentId='', seq=-1)
if ret:
print 'imporing into a new notebook: %s' %ret['Title']
importedNotebookTitleMapID['import'] = ret['NotebookId']
for filename in filelist:
content, meta = readFromFile(path + '/' + filename)
parentTitle='import'
currentTitle=''
if not meta.get('categories'):
categories=['import']
else:
categories= meta.get('categories')
for cat in categories:
currentTitle=cat
if currentTitle in importedNotebookTitleMapID.keys():
parentTitle=currentTitle
else:
ret = addNotebook(title = currentTitle, parentId = importedNotebookTitleMapID[parentTitle])
importedNotebookTitleMapID[currentTitle] = ret['NotebookId']
parentTitle=currentTitle
if not meta.get('title'):
meta['title'] = filename.replace('.md','').replace('.txt','')
importedNote = addNote(NotebookId=importedNotebookTitleMapID[currentTitle], Title=meta.get('title'), Content=content, Tags=meta.get('tags', []), Abstract='')
if importedNote:
print 'imported %s' %filename
logout()
print 'all done, bye~'
if __name__ == '__main__':
choice = raw_input("Enter your choice: (import or export) ")
leanote_host = raw_input("Enter your host: (default is http://leanote.com) ")
if not leanote_host:
leanote_host = 'https://leanote.com' #使用http://leanote.com会报503错误
leanote_email = raw_input('Enter your email: ')
leanote_password = raw_input('Enter your password: ')
path = raw_input("Enter your save path: (default is current dir) ")
if not path:
path = '.'
# leanote_host='http://leanote.com'
# leanote_email='[email protected]'
# leanote_password='abc123'
# path = '.'
print 'Connecting to %s' %leanote_host
leanote_token = login(leanote_email, leanote_password)
local_zone=tz.tzlocal()
if choice == 'import':
LeanoteImportFromMD(path)
exit()
elif choice == 'export':
LeanoteExportToMD(path)
exit()
else:
print 'command format: \npython Leanote4MD.py import\npython Leanote4MD.py export'
| getImage | identifier_name |
AddDoctorForm.js | import { ToastProvider, useToasts } from 'react-toast-notifications'
import LoaderComponent from "./LoaderComponent"
import Select from "../Select"
import React, { useRef, useState, useEffect } from "react"
import { is_positive_whole_number, get_url_params } from "../../utils/common_utilities"
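// AddDoctorForm: controlled form for adding or editing a doctor profile
// (profile-image upload, basic details, speciality/services selection).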
const AddDoctorForm = (props) => {
console.log(props,"props in AddDoctor form")
const myRef = useRef(null)
const executeScroll = () => scrollToRef(myRef)
const scrollToRef = (ref) => window.scrollTo(0, ref.current.offsetTop)
useEffect(() => {
console.log(myRef,"myref")
executeScroll(myRef)
console.log('mount it!');
}, [])
const { addToast } = useToasts()
const doctorImageRef = useRef()
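// Result of the image-upload request: persist the uploaded image on success,
// show a toast either way, then reset the loading flag and clear the result.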
if(!!props.uploadRet){
if(!!props.uploadRet.success){
console.log(props.uploadRet,"props.uploadRet")
props.setImage(props.uploadRet.data)
addToast(props.uploadRet.message, {appearance: 'success', autoDismiss:true})
}else{
addToast(props.uploadRet.message, {appearance: 'error', autoDismiss:true})
}
props.loadingImageOff()
props.uploadRetClr()
}
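// Result of the add/update-doctor request: sync the returned doctor list into
// user info on success, then refresh user details and clear the request state.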
if(!!props.addDoctorRet){
if(!!props.addDoctorRet.success){
addToast(props.addDoctorRet.message, {appearance: 'success', autoDismiss:true})
console.log(props.addDoctorRet,"props.addDoctorRet")
props.set_user_info({
...props.prof_data,
doctors:[...props.addDoctorRet.data.doctors]
})
props.clear_data()
}else{
addToast(props.addDoctorRet.message, {appearance: 'error', autoDismiss:true})
}
props.getUserDetails()
props.addDoctorClr()
props.addDoctorLoadingOff()
}
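// Validate required fields (a single-space speciality counts as unselected) and
// submit the doctor payload; doctorId is included when editing an existing doctor.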
const submitdetails = () => {
if(props.name === '' || props.department === '' ||props.designation==='' || props.experience==="" || props.education==="" || props.specialitie_chosen===" " || (props.services_chosen.length===0) ){
addToast("Enter all the details",{ appearance: 'error', autoDismiss:true })
}else if(!!!props.doctorProfileImage){
addToast("Please provide a profile image",{ appearance: 'error', autoDismiss:true })
}else{
props.submitdetails({
name:props.name,
designation:props.designation,
department:props.department,
experience:props.experience,
education:props.education,
services_chosen:props.services_chosen,
specialitie_chosen:props.specialitie_chosen,
doctorProfileImage:props.doctorProfileImage,
doctorId:get_url_params('id')?get_url_params('id'):undefined
})
}
}
const handleImageClick = ()=>{
let element = document.getElementById('doctorImageInput')
element.click()
}
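// Read the selected file, enforce the 2MB size limit, and pass it to the upload action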
const handleUploadImage = (e) => {
e.preventDefault();
e.stopPropagation()
var reader = new FileReader();
var file = e.target.files[0];
if(!!file){
if (file.size > 2 * 1024 * 1024) {
addToast('File size should be less than 2MB', {appearance: 'error', autoDismiss:true})
} else {
props.upload({ file: file, field: 'file' })
reader.onloadend = () => {
reader.readAsDataURL(file);
}
}
}else{
addToast('No File Found', {appearance: 'error', autoDismiss:true})
}
}
console.log(props,"props in AddDoctorForm")
return (
<React.Fragment>
<div className="profile_secti">
<div style={{height:'10px',width:'10px',position:'absolute',top:'-10px'}} ref={myRef}></div>
{props.addDoctorLoading && <LoaderComponent />}
<h5 className="pfo_im">Profile Image</h5>
<div className="row">
<div className="col-lg-2 col-md-4 image_wrapper_add_doctor position-relative">
{props.laodingImage && <LoaderComponent />}
<input
style={{display:'none'}}
id="doctorImageInput"
type="file" accept="image/jpe ,image/png, image/jpeg"
onChange ={(e)=>handleUploadImage(e)}
ref = {doctorImageRef}
/>
<img src={!!props.doctorProfileImage?props.doctorProfileImage:'/account.svg'} className="accout"/>
<img onClick={(e)=>handleImageClick(e)} src="/camera.svg" className=" profile_camera_rish cursor-pointer" />
</div>
<div className="col-lg-3">
<h6 className="fil_nm">{!!props.doctorImageName?props.doctorImageName:'File Name'}</h6>
<button onClick={(e)=>handleImageClick(e)} className="upld common-button">Upload</button>
</div>
</div>
<form class="shake" role="form" method="post" id="contactForm" name="contact-form" data-toggle="validator">
<div class="form-group label-floating">
<label class="control-label control_label_ris" for="name">Name</label>
<input class="form-control no_padding_ris btm_in_bdr" value= {props.name} onChange={props.handleChange} id="name" type="text" name="name" required data-error="Please enter your name" />
<div class="help-block with-errors"></div>
</div>
<div class="form-group label-floating">
<label class="control-label control_label_ris" for="education">Education Qualification</label>
<input class="form-control no_padding_ris btm_in_bdr" value= {props.education} onChange={props.handleChange} id="educationqua" type="education" name="education" required data-error="Please enter your education qulification" />
<div class="help-block with-errors"></div>
</div>
<div class="form-group label-floating">
<label class="control-label control_label_ris" for="education">Department</label>
<input class="form-control no_padding_ris btm_in_bdr" value= {props.department} onChange={props.handleChange} id="department" name="department" required data-error="Please enter your education qulification" />
<div class="help-block with-errors"></div>
</div>
<div class="form-group label-floating">
<label class="control-label control_label_ris">Designation</label>
<input class="form-control no_padding_ris btm_in_bdr" id="msg_Designation" value= {props.designation} onChange={props.handleChange} type="text" name="designation" required data-error="Please enter your message Designation" />
<div class="help-block with-errors"></div>
</div>
<div className="row form-group label-floating">
<div class="col-lg-6 col-12">
<Select
options = {props.specialities}
handleChange = {props.handleSelectChange}
value = {props.specialitie_chosen}
multiple ={false}
name = "specialitie_chosen"
label = "Speciality"
placeholder = "Choose Spectiality"
/>
</div>
<div class="col-lg-6 col-12">
<Select
options = {props.services}
handleChange = {props.handleSelectChange}
value = {props.services_chosen}
name = "services_chosen"
label = "Service"
placeholder = "Choose Servives"
/>
</div>
</div>
<div class="form-group label-floating">
<label for="message" class="control-label control_label_ris">Experience</label>
<input class="form-control no_padding_ris btm_in_bdr" value= {props.experience} onChange={props.handleChange} id="msg_Experience" type="number" name="experience" required data-error="Please enter your message Experience" />
<div class="help-block with-errors"></div>
</div>
</form>
</div>
<div className="time_she">
<h3 className="abaily text-center">Availability</h3>
<div className="row text-center">
<div className="col-lg-2"><h4>All</h4></div>
<div className="col-lg-4"><h4>From - To</h4></div>
<div className="col-lg-4"><h4>From - To</h4></div>
<div className="col-lg-2"><h4>Closed</h4></div>
</div>
{props.slots.map((item,i)=>(
<div className="row text-center">
<div className="col-lg-2"><p className="m">{item.day.charAt(0).toUpperCase()}</p></div>
<div className="col-lg-4"><p><span onClick={()=>props.slotClicked(item.slots.morning,'morning','from', item)} className="time_bor cursor-pointer">{props.timeToString(item.slots.morning.from)}</span><span onClick={()=>props.slotClicked(item.slots.morning,'morning','to', item)} className="time_bor cursor-pointer">{props.timeToString(item.slots.morning.to)}</span></p></div>
<div className="col-lg-4"><p><span onClick={()=>props.slotClicked(item.slots.evening,'evening','from', item)} className="time_bor cursor-pointer">{props.timeToString(item.slots.evening.from)}</span><span onClick={()=>props.slotClicked(item.slots.evening,'evening','to', item)} className="time_bor cursor-pointer">{props.timeToString(item.slots.evening.to)}</span></p></div>
<div className="col-lg-2">
<div
onClick = {(e)=>props.handleCloseDay(item,i,e)}
className='circul_rund'>
<label className={item.closed?'green-background ':''} for="checkbox"></label></div></div>
{/* <div className="col-lg-2">
<div
onClick = {(e)=>props.handleCloseDay(item,i,e)}
className='round'>
<label className={item.closed?'green-background ':''} for="checkbox"></label></div></div> */}
</div>
))}
<div className="consul_fee">
<div className="cdcd_sfd">
{props.editConsultFlag && <span onClick={()=>props.submitConsultaion()} className="edi_intr hover_underline margin-5">Save</span> }
<span onClick={()=>props.toggleSubmitConultation()} className="edi_intr hover_underline margin-5">{props.editConsultFlag?"Cancel":'Edit'}</span>
</div>
<div className="row">
<div className="col-lg-8"><h2 className="fee_cun_ch">Consultation Fee</h2></div>
<div className="col-lg-4">
<h2 className="fee_ru">
₹
{!!props.editConsultFlag?<input value={props.consultationFee} onChange={(e)=>{
if(is_positive_whole_number(e.target.value)){
props.handleChange(e)
}
}}
name="consultationFee"
className="no_brdr_input consultaion_input"
type="number"
/>:props.consultationFee}
</h2>
</div>
</div>
<div className="time_clo text-center">
<button onClick={()=>submitdetails()} className="common-button">Submit</button>
</div>
</div>
</div>
</React.Fragment>
)
}
export default AddDoctorForm | {
addToast(props.uploadRet.message, {appearance: 'success', autoDismiss:true})
} | conditional_block |
AddDoctorForm.js | import { ToastProvider, useToasts } from 'react-toast-notifications'
import LoaderComponent from "./LoaderComponent"
import Select from "../Select"
import React, { useRef, useState, useEffect } from "react"
import { is_positive_whole_number, get_url_params } from "../../utils/common_utilities"
const AddDoctorForm= (props) => {
console.log(props,"props in AddDoctor form")
const myRef = useRef(null)
const executeScroll = () => scrollToRef(myRef)
const scrollToRef = (ref) => window.scrollTo(0, ref.current.offsetTop)
useEffect(() => {
console.log(myRef,"myref")
executeScroll(myRef)
console.log('mount it!');
}, [])
const { addToast } = useToasts()
const doctorImageRef = useRef()
if(!!props.uploadRet){
if(!!props.uploadRet.success){
console.log(props.uploadRet,"props.uploadRet")
props.setImage(props.uploadRet.data)
addToast(props.uploadRet.message, {appearance: 'success', autoDismiss:true})
}else{
addToast(props.uploadRet.message, {appearance: 'success', autoDismiss:true})
}
props.loadingImageOff()
props.uploadRetClr()
}
if(!!props.addDoctorRet){
if(!!props.addDoctorRet.success){
addToast(props.addDoctorRet.message, {appearance: 'success', autoDismiss:true})
console.log(props.addDoctorRet,"props.addDoctorRet")
props.set_user_info({
...props.prof_data,
doctors:[...props.addDoctorRet.data.doctors]
})
props.clear_data()
}else{
addToast(props.addDoctorRet.message, {appearance: 'error', autoDismiss:true})
}
props.getUserDetails()
props.addDoctorClr()
props.addDoctorLoadingOff()
}
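// Validate the text fields and profile image before forwarding the doctor details to the parent submit handler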
const submitdetails = () => {
if(props.name === '' || props.department === '' ||props.designation==='' || props.experience==="" || props.education==="" || props.specialitie_chosen===" " || (props.services_chosen.length===0) ){
addToast("Enter all the details",{ appearance: 'error', autoDismiss:true })
}else if(!!!props.doctorProfileImage){
addToast("Please provide a profile image",{ appearance: 'error', autoDismiss:true })
}else{
props.submitdetails({
name:props.name,
designation:props.designation,
department:props.department,
experience:props.experience,
education:props.education,
services_chosen:props.services_chosen,
specialitie_chosen:props.specialitie_chosen,
doctorProfileImage:props.doctorProfileImage,
doctorId:get_url_params('id')?get_url_params('id'):undefined
})
}
}
const handleImageClick = ()=>{
let element = document.getElementById('doctorImageInput')
element.click()
}
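// Read the selected file, enforce the 2MB size limit, and pass it to the upload action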
const handleUploadImage = (e) => {
e.preventDefault();
e.stopPropagation()
var reader = new FileReader();
var file = e.target.files[0];
if(!!file){
if (file.size > 2 * 1024 * 1024) {
addToast('File size should be less than 2MB', {appearance: 'error', autoDismiss:true})
} else {
props.upload({ file: file, field: 'file' })
reader.onloadend = () => {
reader.readAsDataURL(file);
}
}
}else{
addToast('No File Found', {appearance: 'error', autoDismiss:true})
}
}
console.log(props,"props in AddDoctorForm")
return (
<React.Fragment>
<div className="profile_secti">
<div style={{height:'10px',width:'10px',position:'absolute',top:'-10px'}} ref={myRef}></div>
{props.addDoctorLoading && <LoaderComponent />}
<h5 className="pfo_im">Profile Image</h5>
<div className="row">
<div className="col-lg-2 col-md-4 image_wrapper_add_doctor position-relative">
{props.laodingImage && <LoaderComponent />}
<input
style={{display:'none'}}
id="doctorImageInput"
type="file" accept="image/jpe ,image/png, image/jpeg"
onChange ={(e)=>handleUploadImage(e)}
ref = {doctorImageRef}
/>
<img src={!!props.doctorProfileImage?props.doctorProfileImage:'/account.svg'} className="accout"/>
<img onClick={(e)=>handleImageClick(e)} src="/camera.svg" className=" profile_camera_rish cursor-pointer" />
</div>
<div className="col-lg-3">
<h6 className="fil_nm">{!!props.doctorImageName?props.doctorImageName:'File Name'}</h6>
<button onClick={(e)=>handleImageClick(e)} className="upld common-button">Upload</button>
</div>
</div>
<form class="shake" role="form" method="post" id="contactForm" name="contact-form" data-toggle="validator">
<div class="form-group label-floating">
<label class="control-label control_label_ris" for="name">Name</label>
<input class="form-control no_padding_ris btm_in_bdr" value= {props.name} onChange={props.handleChange} id="name" type="text" name="name" required data-error="Please enter your name" />
<div class="help-block with-errors"></div>
</div>
<div class="form-group label-floating">
<label class="control-label control_label_ris" for="education">Education Qualification</label>
<input class="form-control no_padding_ris btm_in_bdr" value= {props.education} onChange={props.handleChange} id="educationqua" type="education" name="education" required data-error="Please enter your education qulification" />
<div class="help-block with-errors"></div>
</div>
<div class="form-group label-floating">
<label class="control-label control_label_ris" for="education">Department</label>
<input class="form-control no_padding_ris btm_in_bdr" value= {props.department} onChange={props.handleChange} id="department" name="department" required data-error="Please enter your education qulification" />
<div class="help-block with-errors"></div>
</div>
<div class="form-group label-floating">
<label class="control-label control_label_ris">Designation</label>
<input class="form-control no_padding_ris btm_in_bdr" id="msg_Designation" value= {props.designation} onChange={props.handleChange} type="text" name="designation" required data-error="Please enter your message Designation" />
<div class="help-block with-errors"></div>
</div>
<div className="row form-group label-floating">
<div class="col-lg-6 col-12">
<Select
options = {props.specialities}
handleChange = {props.handleSelectChange}
value = {props.specialitie_chosen}
multiple ={false}
name = "specialitie_chosen"
label = "Speciality"
placeholder = "Choose Spectiality"
/>
</div>
<div class="col-lg-6 col-12">
<Select
options = {props.services}
handleChange = {props.handleSelectChange}
value = {props.services_chosen}
name = "services_chosen"
label = "Service"
placeholder = "Choose Servives"
/>
</div>
</div>
<div class="form-group label-floating">
<label for="message" class="control-label control_label_ris">Experience</label>
<input class="form-control no_padding_ris btm_in_bdr" value= {props.experience} onChange={props.handleChange} id="msg_Experience" type="number" name="experience" required data-error="Please enter your message Experience" />
<div class="help-block with-errors"></div>
</div>
</form>
</div>
<div className="time_she">
<h3 className="abaily text-center">Availability</h3>
<div className="row text-center">
<div className="col-lg-2"><h4>All</h4></div>
<div className="col-lg-4"><h4>From - To</h4></div>
<div className="col-lg-4"><h4>From - To</h4></div>
<div className="col-lg-2"><h4>Closed</h4></div>
</div>
{props.slots.map((item,i)=>(
<div className="row text-center">
<div className="col-lg-2"><p className="m">{item.day.charAt(0).toUpperCase()}</p></div>
<div className="col-lg-4"><p><span onClick={()=>props.slotClicked(item.slots.morning,'morning','from', item)} className="time_bor cursor-pointer">{props.timeToString(item.slots.morning.from)}</span><span onClick={()=>props.slotClicked(item.slots.morning,'morning','to', item)} className="time_bor cursor-pointer">{props.timeToString(item.slots.morning.to)}</span></p></div>
<div className="col-lg-4"><p><span onClick={()=>props.slotClicked(item.slots.evening,'evening','from', item)} className="time_bor cursor-pointer">{props.timeToString(item.slots.evening.from)}</span><span onClick={()=>props.slotClicked(item.slots.evening,'evening','to', item)} className="time_bor cursor-pointer">{props.timeToString(item.slots.evening.to)}</span></p></div>
<div className="col-lg-2"> | <div
onClick = {(e)=>props.handleCloseDay(item,i,e)}
className='circul_rund'>
<label className={item.closed?'green-background ':''} for="checkbox"></label></div></div>
{/* <div className="col-lg-2">
<div
onClick = {(e)=>props.handleCloseDay(item,i,e)}
className='round'>
<label className={item.closed?'green-background ':''} for="checkbox"></label></div></div> */}
</div>
))}
<div className="consul_fee">
<div className="cdcd_sfd">
{props.editConsultFlag && <span onClick={()=>props.submitConsultaion()} className="edi_intr hover_underline margin-5">Save</span> }
<span onClick={()=>props.toggleSubmitConultation()} className="edi_intr hover_underline margin-5">{props.editConsultFlag?"Cancel":'Edit'}</span>
</div>
<div className="row">
<div className="col-lg-8"><h2 className="fee_cun_ch">Consultation Fee</h2></div>
<div className="col-lg-4">
<h2 className="fee_ru">
₹
{!!props.editConsultFlag?<input value={props.consultationFee} onChange={(e)=>{
if(is_positive_whole_number(e.target.value)){
props.handleChange(e)
}
}}
name="consultationFee"
className="no_brdr_input consultaion_input"
type="number"
/>:props.consultationFee}
</h2>
</div>
</div>
<div className="time_clo text-center">
<button onClick={()=>submitdetails()} className="common-button">Submit</button>
</div>
</div>
</div>
</React.Fragment>
)
}
export default AddDoctorForm | random_line_split |
|
io_file_browser_search.py | # file_browser_search.py Copyright (C) 2012, Jakub Zolcik
#
# Adds a search field to the File Browser so files can be found by name
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "File Browser Search",
"author": "Jakub Zolcik",
"version": (0, 1, 1),
"blender": (2, 6, 2),
"api": 35622,
"location": "File Browser",
"description": "Allows You to find files in File Browser by name.",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/User:Sftd/Extensions:2.6/Py/Scripts/Import-Export/File_Browser_Search",
"tracker_url": "http://projects.blender.org/tracker/?func=detail&aid=30386&group_id=153&atid=467",
"category": "Import-Export"}
"""
Usage:
Launches in File Browser
"""
import bpy
import os
import re
class FilteredFileItem(bpy.types.PropertyGroup):
name = bpy.props.StringProperty(name="File name", default="")
dname = bpy.props.StringProperty(name="Display name", default="")
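# Build a regex from the typed filter and collect matching file names into filtered_files_prop (capped at 100 matches and 100 directories).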
def fileSearch(self, context):
# print("file")
filter = context.window_manager.filtered_search_prop
directory = context.window_manager.last_directory_prop
filecol = context.window_manager.filtered_files_prop
for fname in filecol:
filecol.remove(0)
if filter == "":
return None
pattern = ""
special = ('\\', '.', '^', '$', '*', '+', '?', '{', '}', '[', ']', '|', '(', ')')
for c in special:
filter = filter.replace(c, '\\' + c)
if ('*' in filter):
filter = filter.replace('\*', '.*')
pattern = ('^' + filter.lower() + '$')
else:
if(len(filter) < 3):
pattern = ('^' + filter.lower() + r".*\..*" + '$')
else:
pattern = ('^' + r".*" + filter.lower() + r".*\..*" + '$')
prog = re.compile(pattern)
maxf = 100
cf = 0
dlen = len(directory)
maxd = 100
cd = 0
if context.window_manager.file_searchtree:
for path, dirs, files in os.walk(directory):
cd += 1
if cd > maxd:
break
for filename in files:
filename = (os.path.join(path, filename))[dlen:]
# rfilename = (os.path.join(path, filename))[dlen:]
if prog.match(filename.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
# p.name = rfilename
p.name = filename
if context.blend_data.scenes[0].file_hideextensions:
ind = filename.rfind(".")
if ind > -1:
filename = filename[0:ind]
p.dname = filename
cf += 1
if(cf >= maxf):
break
if(cf >= maxf):
break
else:
filesList = os.listdir(directory)
for filename in filesList:
if prog.match(filename.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
p.name = filename
if context.blend_data.scenes[0].file_hideextensions:
ind = filename.rfind(".")
if ind > -1:
filename = filename[0:ind]
p.dname = filename
return None
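# Map a datablock type name (e.g. "Mesh", "Material") to the list of datablock names of that type inside the .blend file.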
def blendDataFromFile(file, part):
with bpy.data.libraries.load(file) as (data_from, data_to):
if (part == "Action"):
return data_from.actions
elif part == "Armature":
return data_from.armatures
elif part == "Brush":
return data_from.brushes
elif part == "Camera":
return data_from.cameras
elif part == "Curve":
return data_from.curves
elif part == "Font":
return data_from.fonts
elif part == "Group":
return data_from.groups
elif part == "Image":
return data_from.images
elif part == "Lamp":
return data_from.lamps
elif part == "Lattice":
return data_from.lattices
elif part == "Library":
return data_from.libraries
elif part == "FreestyleLineStyle":
return data_from.linestyles
elif part == "Mask":
return data_from.masks
elif part == "Material":
return data_from.materials
elif part == "Mesh":
return data_from.meshes
elif part == "NodeTree":
return data_from.node_groups
elif part == "Object":
return data_from.objects
elif part == "Particle":
return data_from.particles
elif part == "Scene":
return data_from.scenes
elif part == "Screen":
return data_from.screens
elif part == "Script":
return data_from.scripts
elif part == "Sound":
return data_from.sounds
elif part == "Speaker":
return data_from.speakers
elif part == "Text":
return data_from.texts
elif part == "Texture":
return data_from.textures
elif part == "World":
return data_from.worlds
else:
return None
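# Link/append mode: search datablock names inside the .blend file the File Browser is currently looking into.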
def notFileSearch(self, context):
# print("not file")
filter = context.window_manager.filtered_search_prop
directory = context.window_manager.last_directory_prop
filecol = context.window_manager.filtered_files_prop
for fname in filecol:
filecol.remove(0)
if filter == "":
return None
ind_e = directory.find(".blend")
if(ind_e == -1):
return None
ind_e = ind_e + 6
file = directory[0:ind_e]
part = directory[ind_e + 1:-1]
if (part == ""):
return None
data = None
data = blendDataFromFile(file, part)
pattern = ""
if(len(filter) < 3):
pattern = (filter.lower() + r".*")
else:
pattern = (r".*" + filter.lower() + r".*")
prog = re.compile(pattern)
for name in data:
if prog.match(name.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
p.name = name
p.dname = name
return None
def filteredSearchFunc(self, context):
if(context.active_operator.bl_idname == "WM_OT_link_append"):
return notFileSearch(self, context)
else: |
class FilteredFileSelectOperator(bpy.types.Operator):
bl_idname = "file.filtered_file_select"
bl_label = "Select File"
fname = bpy.props.StringProperty()
fexec = bpy.props.BoolProperty()
def execute(self, context):
context.space_data.params.filename = self.fname
if self.fexec:
bpy.ops.file.execute('INVOKE_DEFAULT')
return {'FINISHED'}
class FilteredSearchPanel(bpy.types.Panel):
bl_idname = "FILE_PT_filteredsearch"
bl_label = "Search:"
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'CHANNELS'
@classmethod
def poll(cls, context):
return (context.space_data.params is not None)
def draw(self, context):
layout = self.layout
directory = context.space_data.params.directory
if context.window_manager.last_directory_prop != directory:
context.window_manager.last_directory_prop = directory
filteredSearchFunc(self, context)
layout.prop(context.window_manager, "filtered_search_prop", "")
box = layout.box()
length = len(context.window_manager.filtered_files_prop)
incolumn = int(length / context.blend_data.scenes[0].file_columnsnumber)
r = length % context.blend_data.scenes[0].file_columnsnumber
row = box.row()
col = row.column()
it = 0
tr = 0
for f in context.window_manager.filtered_files_prop:
op = col.operator("file.filtered_file_select", text=f.dname, emboss=False)
op.fname = f.name
op.fexec = context.blend_data.scenes[0].file_autoexecute
it += 1
if tr < r:
if it % (incolumn + 1) == 0:
tr += 1
if(it < length):
col = row.column()
else:
if (it - tr) % incolumn == 0:
if(it < length):
col = row.column()
layout.prop(context.blend_data.scenes[0], "file_autoexecute")
layout.prop(context.window_manager, "file_searchtree")
layout.prop(context.blend_data.scenes[0], "file_hideextensions")
layout.prop(context.blend_data.scenes[0], "file_columnsnumber")
def register():
bpy.utils.register_module(__name__)
bpy.types.WindowManager.filtered_search_prop = bpy.props.StringProperty(update=filteredSearchFunc)
bpy.types.WindowManager.last_directory_prop = bpy.props.StringProperty()
bpy.types.Scene.file_autoexecute = bpy.props.BoolProperty(name="Open Automatically", default=True)
bpy.types.Scene.file_hideextensions = bpy.props.BoolProperty(name="Hide Extensions", update=filteredSearchFunc)
bpy.types.WindowManager.file_searchtree = bpy.props.BoolProperty(name="Search Subdirectories", update=filteredSearchFunc)
bpy.types.Scene.file_columnsnumber = bpy.props.IntProperty(name="Number of Columns", default=2, min=1, max=15, update=filteredSearchFunc)
bpy.types.WindowManager.filtered_files_prop = bpy.props.CollectionProperty(type=FilteredFileItem)
def unregister():
del bpy.types.WindowManager.filtered_search_prop
del bpy.types.WindowManager.last_directory_prop
del bpy.types.Scene.file_autoexecute
del bpy.types.WindowManager.filtered_files_prop
del bpy.types.WindowManager.file_searchtree
del bpy.types.Scene.file_hideextensions
del bpy.types.Scene.file_columnsnumber
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register() | return fileSearch(self, context) | random_line_split |
io_file_browser_search.py | # file_browser_search.py Copyright (C) 2012, Jakub Zolcik
#
# Adds a search field to the File Browser so files can be found by name
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "File Browser Search",
"author": "Jakub Zolcik",
"version": (0, 1, 1),
"blender": (2, 6, 2),
"api": 35622,
"location": "File Browser",
"description": "Allows You to find files in File Browser by name.",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/User:Sftd/Extensions:2.6/Py/Scripts/Import-Export/File_Browser_Search",
"tracker_url": "http://projects.blender.org/tracker/?func=detail&aid=30386&group_id=153&atid=467",
"category": "Import-Export"}
"""
Usage:
Launches in File Browser
"""
import bpy
import os
import re
class FilteredFileItem(bpy.types.PropertyGroup):
name = bpy.props.StringProperty(name="File name", default="")
dname = bpy.props.StringProperty(name="Display name", default="")
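# Build a regex from the typed filter and collect matching file names into filtered_files_prop (capped at 100 matches and 100 directories).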
def fileSearch(self, context):
# print("file")
filter = context.window_manager.filtered_search_prop
directory = context.window_manager.last_directory_prop
filecol = context.window_manager.filtered_files_prop
for fname in filecol:
filecol.remove(0)
if filter == "":
return None
pattern = ""
special = ('\\', '.', '^', '$', '*', '+', '?', '{', '}', '[', ']', '|', '(', ')')
for c in special:
filter = filter.replace(c, '\\' + c)
if ('*' in filter):
filter = filter.replace('\*', '.*')
pattern = ('^' + filter.lower() + '$')
else:
if(len(filter) < 3):
pattern = ('^' + filter.lower() + r".*\..*" + '$')
else:
pattern = ('^' + r".*" + filter.lower() + r".*\..*" + '$')
prog = re.compile(pattern)
maxf = 100
cf = 0
dlen = len(directory)
maxd = 100
cd = 0
if context.window_manager.file_searchtree:
for path, dirs, files in os.walk(directory):
cd += 1
if cd > maxd:
break
for filename in files:
filename = (os.path.join(path, filename))[dlen:]
# rfilename = (os.path.join(path, filename))[dlen:]
if prog.match(filename.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
# p.name = rfilename
p.name = filename
if context.blend_data.scenes[0].file_hideextensions:
ind = filename.rfind(".")
if ind > -1:
filename = filename[0:ind]
p.dname = filename
cf += 1
if(cf >= maxf):
break
if(cf >= maxf):
break
else:
filesList = os.listdir(directory)
for filename in filesList:
if prog.match(filename.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
p.name = filename
if context.blend_data.scenes[0].file_hideextensions:
ind = filename.rfind(".")
if ind > -1:
filename = filename[0:ind]
p.dname = filename
return None
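# Map a datablock type name (e.g. "Mesh", "Material") to the list of datablock names of that type inside the .blend file.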
def blendDataFromFile(file, part):
with bpy.data.libraries.load(file) as (data_from, data_to):
if (part == "Action"):
return data_from.actions
elif part == "Armature":
return data_from.armatures
elif part == "Brush":
return data_from.brushes
elif part == "Camera":
return data_from.cameras
elif part == "Curve":
return data_from.curves
elif part == "Font":
return data_from.fonts
elif part == "Group":
return data_from.groups
elif part == "Image":
return data_from.images
elif part == "Lamp":
return data_from.lamps
elif part == "Lattice":
return data_from.lattices
elif part == "Library":
return data_from.libraries
elif part == "FreestyleLineStyle":
return data_from.linestyles
elif part == "Mask":
return data_from.masks
elif part == "Material":
return data_from.materials
elif part == "Mesh":
return data_from.meshes
elif part == "NodeTree":
return data_from.node_groups
elif part == "Object":
return data_from.objects
elif part == "Particle":
return data_from.particles
elif part == "Scene":
return data_from.scenes
elif part == "Screen":
return data_from.screens
elif part == "Script":
return data_from.scripts
elif part == "Sound":
return data_from.sounds
elif part == "Speaker":
return data_from.speakers
elif part == "Text":
return data_from.texts
elif part == "Texture":
return data_from.textures
elif part == "World":
return data_from.worlds
else:
return None
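# Link/append mode: search datablock names inside the .blend file the File Browser is currently looking into.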
def notFileSearch(self, context):
# print("not file")
filter = context.window_manager.filtered_search_prop
directory = context.window_manager.last_directory_prop
filecol = context.window_manager.filtered_files_prop
for fname in filecol:
filecol.remove(0)
if filter == "":
return None
ind_e = directory.find(".blend")
if(ind_e == -1):
return None
ind_e = ind_e + 6
file = directory[0:ind_e]
part = directory[ind_e + 1:-1]
if (part == ""):
return None
data = None
data = blendDataFromFile(file, part)
pattern = ""
if(len(filter) < 3):
pattern = (filter.lower() + r".*")
else:
pattern = (r".*" + filter.lower() + r".*")
prog = re.compile(pattern)
for name in data:
if prog.match(name.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
p.name = name
p.dname = name
return None
def filteredSearchFunc(self, context):
if(context.active_operator.bl_idname == "WM_OT_link_append"):
return notFileSearch(self, context)
else:
return fileSearch(self, context)
class | (bpy.types.Operator):
bl_idname = "file.filtered_file_select"
bl_label = "Select File"
fname = bpy.props.StringProperty()
fexec = bpy.props.BoolProperty()
def execute(self, context):
context.space_data.params.filename = self.fname
if self.fexec:
bpy.ops.file.execute('INVOKE_DEFAULT')
return {'FINISHED'}
class FilteredSearchPanel(bpy.types.Panel):
bl_idname = "FILE_PT_filteredsearch"
bl_label = "Search:"
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'CHANNELS'
@classmethod
def poll(cls, context):
return (context.space_data.params is not None)
def draw(self, context):
layout = self.layout
directory = context.space_data.params.directory
if context.window_manager.last_directory_prop != directory:
context.window_manager.last_directory_prop = directory
filteredSearchFunc(self, context)
layout.prop(context.window_manager, "filtered_search_prop", "")
box = layout.box()
length = len(context.window_manager.filtered_files_prop)
incolumn = int(length / context.blend_data.scenes[0].file_columnsnumber)
r = length % context.blend_data.scenes[0].file_columnsnumber
row = box.row()
col = row.column()
it = 0
tr = 0
for f in context.window_manager.filtered_files_prop:
op = col.operator("file.filtered_file_select", text=f.dname, emboss=False)
op.fname = f.name
op.fexec = context.blend_data.scenes[0].file_autoexecute
it += 1
if tr < r:
if it % (incolumn + 1) == 0:
tr += 1
if(it < length):
col = row.column()
else:
if (it - tr) % incolumn == 0:
if(it < length):
col = row.column()
layout.prop(context.blend_data.scenes[0], "file_autoexecute")
layout.prop(context.window_manager, "file_searchtree")
layout.prop(context.blend_data.scenes[0], "file_hideextensions")
layout.prop(context.blend_data.scenes[0], "file_columnsnumber")
def register():
bpy.utils.register_module(__name__)
bpy.types.WindowManager.filtered_search_prop = bpy.props.StringProperty(update=filteredSearchFunc)
bpy.types.WindowManager.last_directory_prop = bpy.props.StringProperty()
bpy.types.Scene.file_autoexecute = bpy.props.BoolProperty(name="Open Automatically", default=True)
bpy.types.Scene.file_hideextensions = bpy.props.BoolProperty(name="Hide Extensions", update=filteredSearchFunc)
bpy.types.WindowManager.file_searchtree = bpy.props.BoolProperty(name="Search Subdirectories", update=filteredSearchFunc)
bpy.types.Scene.file_columnsnumber = bpy.props.IntProperty(name="Number of Columns", default=2, min=1, max=15, update=filteredSearchFunc)
bpy.types.WindowManager.filtered_files_prop = bpy.props.CollectionProperty(type=FilteredFileItem)
def unregister():
del bpy.types.WindowManager.filtered_search_prop
del bpy.types.WindowManager.last_directory_prop
del bpy.types.Scene.file_autoexecute
del bpy.types.WindowManager.filtered_files_prop
del bpy.types.WindowManager.file_searchtree
del bpy.types.Scene.file_hideextensions
del bpy.types.Scene.file_columnsnumber
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
| FilteredFileSelectOperator | identifier_name |
io_file_browser_search.py | # file_browser_search.py Copyright (C) 2012, Jakub Zolcik
#
# Adds a search field to the File Browser so files can be found by name
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "File Browser Search",
"author": "Jakub Zolcik",
"version": (0, 1, 1),
"blender": (2, 6, 2),
"api": 35622,
"location": "File Browser",
"description": "Allows You to find files in File Browser by name.",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/User:Sftd/Extensions:2.6/Py/Scripts/Import-Export/File_Browser_Search",
"tracker_url": "http://projects.blender.org/tracker/?func=detail&aid=30386&group_id=153&atid=467",
"category": "Import-Export"}
"""
Usage:
Launches in File Browser
"""
import bpy
import os
import re
class FilteredFileItem(bpy.types.PropertyGroup):
name = bpy.props.StringProperty(name="File name", default="")
dname = bpy.props.StringProperty(name="Display name", default="")
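# Build a regex from the typed filter and collect matching file names into filtered_files_prop (capped at 100 matches and 100 directories).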
def fileSearch(self, context):
# print("file")
filter = context.window_manager.filtered_search_prop
directory = context.window_manager.last_directory_prop
filecol = context.window_manager.filtered_files_prop
for fname in filecol:
filecol.remove(0)
if filter == "":
return None
pattern = ""
special = ('\\', '.', '^', '$', '*', '+', '?', '{', '}', '[', ']', '|', '(', ')')
for c in special:
filter = filter.replace(c, '\\' + c)
if ('*' in filter):
filter = filter.replace('\*', '.*')
pattern = ('^' + filter.lower() + '$')
else:
if(len(filter) < 3):
pattern = ('^' + filter.lower() + r".*\..*" + '$')
else:
pattern = ('^' + r".*" + filter.lower() + r".*\..*" + '$')
prog = re.compile(pattern)
maxf = 100
cf = 0
dlen = len(directory)
maxd = 100
cd = 0
if context.window_manager.file_searchtree:
for path, dirs, files in os.walk(directory):
cd += 1
if cd > maxd:
break
for filename in files:
filename = (os.path.join(path, filename))[dlen:]
# rfilename = (os.path.join(path, filename))[dlen:]
if prog.match(filename.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
# p.name = rfilename
p.name = filename
if context.blend_data.scenes[0].file_hideextensions:
ind = filename.rfind(".")
if ind > -1:
filename = filename[0:ind]
p.dname = filename
cf += 1
if(cf >= maxf):
break
if(cf >= maxf):
break
else:
filesList = os.listdir(directory)
for filename in filesList:
if prog.match(filename.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
p.name = filename
if context.blend_data.scenes[0].file_hideextensions:
ind = filename.rfind(".")
if ind > -1:
filename = filename[0:ind]
p.dname = filename
return None
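# Map a datablock type name (e.g. "Mesh", "Material") to the list of datablock names of that type inside the .blend file.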
def blendDataFromFile(file, part):
with bpy.data.libraries.load(file) as (data_from, data_to):
if (part == "Action"):
return data_from.actions
elif part == "Armature":
return data_from.armatures
elif part == "Brush":
return data_from.brushes
elif part == "Camera":
return data_from.cameras
elif part == "Curve":
return data_from.curves
elif part == "Font":
return data_from.fonts
elif part == "Group":
return data_from.groups
elif part == "Image":
return data_from.images
elif part == "Lamp":
return data_from.lamps
elif part == "Lattice":
return data_from.lattices
elif part == "Library":
return data_from.libraries
elif part == "FreestyleLineStyle":
return data_from.linestyles
elif part == "Mask":
return data_from.masks
elif part == "Material":
return data_from.materials
elif part == "Mesh":
return data_from.meshes
elif part == "NodeTree":
return data_from.node_groups
elif part == "Object":
return data_from.objects
elif part == "Particle":
return data_from.particles
elif part == "Scene":
return data_from.scenes
elif part == "Screen":
return data_from.screens
elif part == "Script":
return data_from.scripts
elif part == "Sound":
return data_from.sounds
elif part == "Speaker":
return data_from.speakers
elif part == "Text":
return data_from.texts
elif part == "Texture":
return data_from.textures
elif part == "World":
return data_from.worlds
else:
return None
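# Link/append mode: search datablock names inside the .blend file the File Browser is currently looking into.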
def notFileSearch(self, context):
# print("not file")
filter = context.window_manager.filtered_search_prop
directory = context.window_manager.last_directory_prop
filecol = context.window_manager.filtered_files_prop
for fname in filecol:
filecol.remove(0)
if filter == "":
return None
ind_e = directory.find(".blend")
if(ind_e == -1):
return None
ind_e = ind_e + 6
file = directory[0:ind_e]
part = directory[ind_e + 1:-1]
if (part == ""):
return None
data = None
data = blendDataFromFile(file, part)
pattern = ""
if(len(filter) < 3):
pattern = (filter.lower() + r".*")
else:
pattern = (r".*" + filter.lower() + r".*")
prog = re.compile(pattern)
for name in data:
if prog.match(name.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
p.name = name
p.dname = name
return None
def filteredSearchFunc(self, context):
if(context.active_operator.bl_idname == "WM_OT_link_append"):
return notFileSearch(self, context)
else:
return fileSearch(self, context)
class FilteredFileSelectOperator(bpy.types.Operator):
bl_idname = "file.filtered_file_select"
bl_label = "Select File"
fname = bpy.props.StringProperty()
fexec = bpy.props.BoolProperty()
def execute(self, context):
context.space_data.params.filename = self.fname
if self.fexec:
bpy.ops.file.execute('INVOKE_DEFAULT')
return {'FINISHED'}
class FilteredSearchPanel(bpy.types.Panel):
bl_idname = "FILE_PT_filteredsearch"
bl_label = "Search:"
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'CHANNELS'
@classmethod
def poll(cls, context):
return (context.space_data.params is not None)
def draw(self, context):
layout = self.layout
directory = context.space_data.params.directory
if context.window_manager.last_directory_prop != directory:
context.window_manager.last_directory_prop = directory
filteredSearchFunc(self, context)
layout.prop(context.window_manager, "filtered_search_prop", "")
box = layout.box()
length = len(context.window_manager.filtered_files_prop)
incolumn = int(length / context.blend_data.scenes[0].file_columnsnumber)
r = length % context.blend_data.scenes[0].file_columnsnumber
row = box.row()
col = row.column()
it = 0
tr = 0
for f in context.window_manager.filtered_files_prop:
op = col.operator("file.filtered_file_select", text=f.dname, emboss=False)
op.fname = f.name
op.fexec = context.blend_data.scenes[0].file_autoexecute
it += 1
if tr < r:
if it % (incolumn + 1) == 0:
tr += 1
if(it < length):
col = row.column()
else:
if (it - tr) % incolumn == 0:
if(it < length):
col = row.column()
layout.prop(context.blend_data.scenes[0], "file_autoexecute")
layout.prop(context.window_manager, "file_searchtree")
layout.prop(context.blend_data.scenes[0], "file_hideextensions")
layout.prop(context.blend_data.scenes[0], "file_columnsnumber")
def register():
|
def unregister():
del bpy.types.WindowManager.filtered_search_prop
del bpy.types.WindowManager.last_directory_prop
del bpy.types.Scene.file_autoexecute
del bpy.types.WindowManager.filtered_files_prop
del bpy.types.WindowManager.file_searchtree
del bpy.types.Scene.file_hideextensions
del bpy.types.Scene.file_columnsnumber
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
| bpy.utils.register_module(__name__)
bpy.types.WindowManager.filtered_search_prop = bpy.props.StringProperty(update=filteredSearchFunc)
bpy.types.WindowManager.last_directory_prop = bpy.props.StringProperty()
bpy.types.Scene.file_autoexecute = bpy.props.BoolProperty(name="Open Automatically", default=True)
bpy.types.Scene.file_hideextensions = bpy.props.BoolProperty(name="Hide Extensions", update=filteredSearchFunc)
bpy.types.WindowManager.file_searchtree = bpy.props.BoolProperty(name="Search Subdirectories", update=filteredSearchFunc)
bpy.types.Scene.file_columnsnumber = bpy.props.IntProperty(name="Number of Columns", default=2, min=1, max=15, update=filteredSearchFunc)
bpy.types.WindowManager.filtered_files_prop = bpy.props.CollectionProperty(type=FilteredFileItem) | identifier_body |
io_file_browser_search.py | # file_browser_search.py Copyright (C) 2012, Jakub Zolcik
#
# Adds a search field to the File Browser so files can be found by name
#
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "File Browser Search",
"author": "Jakub Zolcik",
"version": (0, 1, 1),
"blender": (2, 6, 2),
"api": 35622,
"location": "File Browser",
"description": "Allows You to find files in File Browser by name.",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/User:Sftd/Extensions:2.6/Py/Scripts/Import-Export/File_Browser_Search",
"tracker_url": "http://projects.blender.org/tracker/?func=detail&aid=30386&group_id=153&atid=467",
"category": "Import-Export"}
"""
Usage:
Launches in File Browser
"""
import bpy
import os
import re
class FilteredFileItem(bpy.types.PropertyGroup):
name = bpy.props.StringProperty(name="File name", default="")
dname = bpy.props.StringProperty(name="Display name", default="")
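# Build a regex from the typed filter and collect matching file names into filtered_files_prop (capped at 100 matches and 100 directories).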
def fileSearch(self, context):
# print("file")
filter = context.window_manager.filtered_search_prop
directory = context.window_manager.last_directory_prop
filecol = context.window_manager.filtered_files_prop
for fname in filecol:
filecol.remove(0)
if filter == "":
return None
pattern = ""
special = ('\\', '.', '^', '$', '*', '+', '?', '{', '}', '[', ']', '|', '(', ')')
for c in special:
filter = filter.replace(c, '\\' + c)
if ('*' in filter):
filter = filter.replace('\*', '.*')
pattern = ('^' + filter.lower() + '$')
else:
if(len(filter) < 3):
pattern = ('^' + filter.lower() + r".*\..*" + '$')
else:
pattern = ('^' + r".*" + filter.lower() + r".*\..*" + '$')
prog = re.compile(pattern)
maxf = 100
cf = 0
dlen = len(directory)
maxd = 100
cd = 0
if context.window_manager.file_searchtree:
for path, dirs, files in os.walk(directory):
cd += 1
if cd > maxd:
break
for filename in files:
filename = (os.path.join(path, filename))[dlen:]
# rfilename = (os.path.join(path, filename))[dlen:]
if prog.match(filename.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
# p.name = rfilename
p.name = filename
if context.blend_data.scenes[0].file_hideextensions:
ind = filename.rfind(".")
if ind > -1:
filename = filename[0:ind]
p.dname = filename
cf += 1
if(cf >= maxf):
|
if(cf >= maxf):
break
else:
filesList = os.listdir(directory)
for filename in filesList:
if prog.match(filename.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
p.name = filename
if context.blend_data.scenes[0].file_hideextensions:
ind = filename.rfind(".")
if ind > -1:
filename = filename[0:ind]
p.dname = filename
return None
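# Map a datablock type name (e.g. "Mesh", "Material") to the list of datablock names of that type inside the .blend file.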
def blendDataFromFile(file, part):
with bpy.data.libraries.load(file) as (data_from, data_to):
if (part == "Action"):
return data_from.actions
elif part == "Armature":
return data_from.armatures
elif part == "Brush":
return data_from.brushes
elif part == "Camera":
return data_from.cameras
elif part == "Curve":
return data_from.curves
elif part == "Font":
return data_from.fonts
elif part == "Group":
return data_from.groups
elif part == "Image":
return data_from.images
elif part == "Lamp":
return data_from.lamps
elif part == "Lattice":
return data_from.lattices
elif part == "Library":
return data_from.libraries
elif part == "FreestyleLineStyle":
return data_from.linestyles
elif part == "Mask":
return data_from.masks
elif part == "Material":
return data_from.materials
elif part == "Mesh":
return data_from.meshes
elif part == "NodeTree":
return data_from.node_groups
elif part == "Object":
return data_from.objects
elif part == "Particle":
return data_from.particles
elif part == "Scene":
return data_from.scenes
elif part == "Screen":
return data_from.screens
elif part == "Script":
return data_from.scripts
elif part == "Sound":
return data_from.sounds
elif part == "Speaker":
return data_from.speakers
elif part == "Text":
return data_from.texts
elif part == "Texture":
return data_from.textures
elif part == "World":
return data_from.worlds
else:
return None
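# Link/append mode: search datablock names inside the .blend file the File Browser is currently looking into.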
def notFileSearch(self, context):
# print("not file")
filter = context.window_manager.filtered_search_prop
directory = context.window_manager.last_directory_prop
filecol = context.window_manager.filtered_files_prop
for fname in filecol:
filecol.remove(0)
if filter == "":
return None
ind_e = directory.find(".blend")
if(ind_e == -1):
return None
ind_e = ind_e + 6
file = directory[0:ind_e]
part = directory[ind_e + 1:-1]
if (part == ""):
return None
data = None
data = blendDataFromFile(file, part)
pattern = ""
if(len(filter) < 3):
pattern = (filter.lower() + r".*")
else:
pattern = (r".*" + filter.lower() + r".*")
prog = re.compile(pattern)
for name in data:
if prog.match(name.lower()) != None:
p = context.window_manager.filtered_files_prop.add()
p.name = name
p.dname = name
return None
def filteredSearchFunc(self, context):
if(context.active_operator.bl_idname == "WM_OT_link_append"):
return notFileSearch(self, context)
else:
return fileSearch(self, context)
class FilteredFileSelectOperator(bpy.types.Operator):
bl_idname = "file.filtered_file_select"
bl_label = "Select File"
fname = bpy.props.StringProperty()
fexec = bpy.props.BoolProperty()
def execute(self, context):
context.space_data.params.filename = self.fname
if self.fexec:
bpy.ops.file.execute('INVOKE_DEFAULT')
return {'FINISHED'}
class FilteredSearchPanel(bpy.types.Panel):
bl_idname = "FILE_PT_filteredsearch"
bl_label = "Search:"
bl_space_type = 'FILE_BROWSER'
bl_region_type = 'CHANNELS'
@classmethod
def poll(cls, context):
return (context.space_data.params is not None)
def draw(self, context):
layout = self.layout
directory = context.space_data.params.directory
if context.window_manager.last_directory_prop != directory:
context.window_manager.last_directory_prop = directory
filteredSearchFunc(self, context)
layout.prop(context.window_manager, "filtered_search_prop", "")
box = layout.box()
length = len(context.window_manager.filtered_files_prop)
incolumn = int(length / context.blend_data.scenes[0].file_columnsnumber)
r = length % context.blend_data.scenes[0].file_columnsnumber
row = box.row()
col = row.column()
it = 0
tr = 0
for f in context.window_manager.filtered_files_prop:
op = col.operator("file.filtered_file_select", text=f.dname, emboss=False)
op.fname = f.name
op.fexec = context.blend_data.scenes[0].file_autoexecute
it += 1
if tr < r:
if it % (incolumn + 1) == 0:
tr += 1
if(it < length):
col = row.column()
else:
if (it - tr) % incolumn == 0:
if(it < length):
col = row.column()
layout.prop(context.blend_data.scenes[0], "file_autoexecute")
layout.prop(context.window_manager, "file_searchtree")
layout.prop(context.blend_data.scenes[0], "file_hideextensions")
layout.prop(context.blend_data.scenes[0], "file_columnsnumber")
def register():
bpy.utils.register_module(__name__)
bpy.types.WindowManager.filtered_search_prop = bpy.props.StringProperty(update=filteredSearchFunc)
bpy.types.WindowManager.last_directory_prop = bpy.props.StringProperty()
bpy.types.Scene.file_autoexecute = bpy.props.BoolProperty(name="Open Automatically", default=True)
bpy.types.Scene.file_hideextensions = bpy.props.BoolProperty(name="Hide Extensions", update=filteredSearchFunc)
bpy.types.WindowManager.file_searchtree = bpy.props.BoolProperty(name="Search Subdirectories", update=filteredSearchFunc)
bpy.types.Scene.file_columnsnumber = bpy.props.IntProperty(name="Number of Columns", default=2, min=1, max=15, update=filteredSearchFunc)
bpy.types.WindowManager.filtered_files_prop = bpy.props.CollectionProperty(type=FilteredFileItem)
def unregister():
del bpy.types.WindowManager.filtered_search_prop
del bpy.types.WindowManager.last_directory_prop
del bpy.types.Scene.file_autoexecute
del bpy.types.WindowManager.filtered_files_prop
del bpy.types.WindowManager.file_searchtree
del bpy.types.Scene.file_hideextensions
del bpy.types.Scene.file_columnsnumber
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
| break | conditional_block |
node_dir_ops.go | package fusefrontend
import (
"context"
"fmt"
"io"
"runtime"
"syscall"
"golang.org/x/sys/unix"
"github.com/hanwen/go-fuse/v2/fs"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/rfjakob/gocryptfs/v2/internal/configfile"
"github.com/rfjakob/gocryptfs/v2/internal/cryptocore"
"github.com/rfjakob/gocryptfs/v2/internal/nametransform"
"github.com/rfjakob/gocryptfs/v2/internal/syscallcompat"
"github.com/rfjakob/gocryptfs/v2/internal/tlog"
)
const dsStoreName = ".DS_Store"
// haveDsstore returns true if one of the entries in "names" is ".DS_Store".
func haveDsstore(entries []fuse.DirEntry) bool {
for _, e := range entries {
if e.Name == dsStoreName {
return true
}
}
return false
}
// mkdirWithIv - create a new directory and corresponding diriv file. dirfd
// should be a handle to the parent directory, cName is the name of the new
// directory and mode specifies the access permissions to use.
// If DeterministicNames is set, the diriv file is NOT created.
func (n *Node) mkdirWithIv(dirfd int, cName string, mode uint32, context *fuse.Context) error {
rn := n.rootNode()
if rn.args.DeterministicNames {
return syscallcompat.MkdiratUser(dirfd, cName, mode, context)
}
// Between the creation of the directory and the creation of gocryptfs.diriv
// the directory is inconsistent. Take the lock to prevent other readers
// from seeing it.
rn.dirIVLock.Lock()
defer rn.dirIVLock.Unlock()
err := syscallcompat.MkdiratUser(dirfd, cName, mode, context)
if err != nil {
return err
}
dirfd2, err := syscallcompat.Openat(dirfd, cName, syscall.O_DIRECTORY|syscall.O_NOFOLLOW|syscallcompat.O_PATH, 0)
if err == nil {
// Create gocryptfs.diriv
err = nametransform.WriteDirIVAt(dirfd2)
syscall.Close(dirfd2)
}
if err != nil {
// Delete inconsistent directory (missing gocryptfs.diriv!)
err2 := syscallcompat.Unlinkat(dirfd, cName, unix.AT_REMOVEDIR)
if err2 != nil {
tlog.Warn.Printf("mkdirWithIv: rollback failed: %v", err2)
}
}
return err
}
// Mkdir - FUSE call. Create a directory at "newPath" with permissions "mode".
//
// Symlink-safe through use of Mkdirat().
func (n *Node) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) {
dirfd, cName, errno := n.prepareAtSyscall(name)
if errno != 0 {
return nil, errno
}
defer syscall.Close(dirfd)
rn := n.rootNode()
var context *fuse.Context
if rn.args.PreserveOwner {
context = toFuseCtx(ctx)
}
var st syscall.Stat_t
if rn.args.PlaintextNames {
err := syscallcompat.MkdiratUser(dirfd, cName, mode, context)
if err != nil {
return nil, fs.ToErrno(err)
}
var ust unix.Stat_t
err = syscallcompat.Fstatat(dirfd, cName, &ust, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
return nil, fs.ToErrno(err)
}
st = syscallcompat.Unix2syscall(ust)
// Create child node & return
ch := n.newChild(ctx, &st, out)
return ch, 0
}
// We need write and execute permissions to create gocryptfs.diriv.
// Also, we need read permissions to open the directory (to avoid
// race-conditions between getting and setting the mode).
origMode := mode
mode = mode | 0700
// Handle long file name
if nametransform.IsLongContent(cName) {
// Create ".name"
err := rn.nameTransform.WriteLongNameAt(dirfd, cName, name)
if err != nil {
return nil, fs.ToErrno(err)
}
// Create directory & rollback .name file on error
err = rn.mkdirWithIv(dirfd, cName, mode, context)
if err != nil {
nametransform.DeleteLongNameAt(dirfd, cName)
return nil, fs.ToErrno(err)
}
} else {
err := rn.mkdirWithIv(dirfd, cName, mode, context)
if err != nil {
return nil, fs.ToErrno(err)
}
}
// Fill `st`
fd, err := syscallcompat.Openat(dirfd, cName,
syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Openat failed: %v", cName, err)
return nil, fs.ToErrno(err)
}
defer syscall.Close(fd)
err = syscall.Fstat(fd, &st)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Fstat failed: %v", cName, err)
return nil, fs.ToErrno(err)
}
// Fix permissions
if origMode != mode {
// Preserve SGID bit if it was set due to inheritance.
origMode = uint32(st.Mode&^0777) | origMode
err = syscall.Fchmod(fd, origMode)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Fchmod %#o -> %#o failed: %v", cName, mode, origMode, err)
}
}
// Create child node & return
ch := n.newChild(ctx, &st, out)
return ch, 0
}
// Readdir - FUSE call.
//
// This function is symlink-safe through use of openBackingDir() and
// ReadDirIVAt().
func (n *Node) Readdir(ctx context.Context) (fs.DirStream, syscall.Errno) {
parentDirFd, cDirName, errno := n.prepareAtSyscallMyself()
if errno != 0 {
return nil, errno
}
defer syscall.Close(parentDirFd)
// Read ciphertext directory
fd, err := syscallcompat.Openat(parentDirFd, cDirName, syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
return nil, fs.ToErrno(err)
}
defer syscall.Close(fd)
cipherEntries, specialEntries, err := syscallcompat.GetdentsSpecial(fd)
if err != nil {
return nil, fs.ToErrno(err)
}
// Get DirIV (stays nil if PlaintextNames is used)
var cachedIV []byte
rn := n.rootNode()
if !rn.args.PlaintextNames {
// Read the DirIV from disk
cachedIV, err = rn.nameTransform.ReadDirIVAt(fd)
if err != nil {
tlog.Warn.Printf("OpenDir %q: could not read %s: %v", cDirName, nametransform.DirIVFilename, err)
return nil, syscall.EIO
}
}
// Decrypted directory entries
var plain []fuse.DirEntry
// Add "." and ".."
plain = append(plain, specialEntries...)
// Filter and decrypt filenames
for i := range cipherEntries {
cName := cipherEntries[i].Name
if n.IsRoot() && cName == configfile.ConfDefaultName {
// silently ignore "gocryptfs.conf" in the top level dir
continue
}
if rn.args.PlaintextNames {
plain = append(plain, cipherEntries[i])
continue
}
if !rn.args.DeterministicNames && cName == nametransform.DirIVFilename {
// silently ignore "gocryptfs.diriv" everywhere if dirIV is enabled
continue
}
// Handle long file name
isLong := nametransform.LongNameNone
if rn.args.LongNames {
isLong = nametransform.NameType(cName)
}
if isLong == nametransform.LongNameContent {
cNameLong, err := nametransform.ReadLongNameAt(fd, cName)
if err != nil {
tlog.Warn.Printf("OpenDir %q: invalid entry %q: Could not read .name: %v",
cDirName, cName, err)
rn.reportMitigatedCorruption(cName)
continue
}
cName = cNameLong
} else if isLong == nametransform.LongNameFilename {
// ignore "gocryptfs.longname.*.name"
continue
}
name, err := rn.nameTransform.DecryptName(cName, cachedIV)
if err != nil {
tlog.Warn.Printf("OpenDir %q: invalid entry %q: %v",
cDirName, cName, err)
rn.reportMitigatedCorruption(cName)
continue
}
// Override the ciphertext name with the plaintext name but reuse the rest
// of the structure
cipherEntries[i].Name = name
plain = append(plain, cipherEntries[i])
}
return fs.NewListDirStream(plain), 0
}
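// Note on the loop above: gocryptfs.diriv is read once per Readdir and reused
// as cachedIV for every DecryptName call, so a directory listing costs one
// diriv read plus one name decryption per entry (plus one extra read for each
// long-name ".name" file).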
// Rmdir - FUSE call.
//
// Symlink-safe through Unlinkat() + AT_REMOVEDIR.
func (n *Node) Rmdir(ctx context.Context, name string) (code syscall.Errno) {
rn := n.rootNode()
parentDirFd, cName, errno := n.prepareAtSyscall(name)
if errno != 0 |
defer syscall.Close(parentDirFd)
if rn.args.PlaintextNames {
// Unlinkat with AT_REMOVEDIR is equivalent to Rmdir
err := unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
return fs.ToErrno(err)
}
if rn.args.DeterministicNames {
if err := unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR); err != nil {
return fs.ToErrno(err)
}
if nametransform.IsLongContent(cName) {
nametransform.DeleteLongNameAt(parentDirFd, cName)
}
return 0
}
// Unless we are running as root, we need read, write and execute permissions
// to handle gocryptfs.diriv.
permWorkaround := false
var origMode uint32
if !rn.args.PreserveOwner {
var st unix.Stat_t
err := syscallcompat.Fstatat(parentDirFd, cName, &st, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
return fs.ToErrno(err)
}
if st.Mode&0700 != 0700 {
tlog.Debug.Printf("Rmdir: permWorkaround")
permWorkaround = true
// This cast is needed on Darwin, where st.Mode is uint16.
origMode = uint32(st.Mode)
err = syscallcompat.FchmodatNofollow(parentDirFd, cName, origMode|0700)
if err != nil {
tlog.Debug.Printf("Rmdir: permWorkaround: chmod failed: %v", err)
return fs.ToErrno(err)
}
}
}
dirfd, err := syscallcompat.Openat(parentDirFd, cName,
syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
tlog.Debug.Printf("Rmdir: Open: %v", err)
return fs.ToErrno(err)
}
defer syscall.Close(dirfd)
// Undo the chmod if removing the directory failed. This must run before
// closing dirfd, so defer it after (defer is LIFO).
if permWorkaround {
defer func() {
if code != 0 {
err = unix.Fchmod(dirfd, origMode)
if err != nil {
tlog.Warn.Printf("Rmdir: permWorkaround: rollback failed: %v", err)
}
}
}()
}
retry:
// Check directory contents
children, err := syscallcompat.Getdents(dirfd)
if err == io.EOF {
// The directory is empty
tlog.Warn.Printf("Rmdir: %q: %s is missing", cName, nametransform.DirIVFilename)
err = unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
return fs.ToErrno(err)
}
if err != nil {
tlog.Warn.Printf("Rmdir: Getdents: %v", err)
return fs.ToErrno(err)
}
// MacOS sprinkles .DS_Store files everywhere. This is hard to avoid for
// users, so handle it transparently here.
if runtime.GOOS == "darwin" && len(children) <= 2 && haveDsstore(children) {
err = unix.Unlinkat(dirfd, dsStoreName, 0)
if err != nil {
tlog.Warn.Printf("Rmdir: failed to delete blocking file %q: %v", dsStoreName, err)
return fs.ToErrno(err)
}
tlog.Warn.Printf("Rmdir: had to delete blocking file %q", dsStoreName)
goto retry
}
// If the directory is not empty besides gocryptfs.diriv, do not even
// attempt the dance around gocryptfs.diriv.
if len(children) > 1 {
return fs.ToErrno(syscall.ENOTEMPTY)
}
// Move "gocryptfs.diriv" to the parent dir as "gocryptfs.diriv.rmdir.XYZ"
tmpName := fmt.Sprintf("%s.rmdir.%d", nametransform.DirIVFilename, cryptocore.RandUint64())
tlog.Debug.Printf("Rmdir: Renaming %s to %s", nametransform.DirIVFilename, tmpName)
// The directory is in an inconsistent state between rename and rmdir.
// Protect against concurrent readers.
rn.dirIVLock.Lock()
defer rn.dirIVLock.Unlock()
err = syscallcompat.Renameat(dirfd, nametransform.DirIVFilename,
parentDirFd, tmpName)
if err != nil {
tlog.Warn.Printf("Rmdir: Renaming %s to %s failed: %v",
nametransform.DirIVFilename, tmpName, err)
return fs.ToErrno(err)
}
// Actual Rmdir
err = syscallcompat.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
if err != nil {
// This can happen if another file in the directory was created in the
// meantime, undo the rename
err2 := syscallcompat.Renameat(parentDirFd, tmpName,
dirfd, nametransform.DirIVFilename)
if err2 != nil {
tlog.Warn.Printf("Rmdir: Rename rollback failed: %v", err2)
}
return fs.ToErrno(err)
}
// Delete "gocryptfs.diriv.rmdir.XYZ"
err = syscallcompat.Unlinkat(parentDirFd, tmpName, 0)
if err != nil {
tlog.Warn.Printf("Rmdir: Could not clean up %s: %v", tmpName, err)
}
// Delete .name file
if nametransform.IsLongContent(cName) {
nametransform.DeleteLongNameAt(parentDirFd, cName)
}
return 0
}
// Opendir is a FUSE call to check if the directory can be opened.
func (n *Node) Opendir(ctx context.Context) (errno syscall.Errno) {
dirfd, cName, errno := n.prepareAtSyscallMyself()
if errno != 0 {
return
}
defer syscall.Close(dirfd)
// Open backing directory
fd, err := syscallcompat.Openat(dirfd, cName, syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
return fs.ToErrno(err)
}
syscall.Close(fd)
return 0
}
| {
return errno
} | conditional_block |
node_dir_ops.go | package fusefrontend
import (
"context"
"fmt"
"io"
"runtime"
"syscall"
"golang.org/x/sys/unix"
"github.com/hanwen/go-fuse/v2/fs"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/rfjakob/gocryptfs/v2/internal/configfile"
"github.com/rfjakob/gocryptfs/v2/internal/cryptocore"
"github.com/rfjakob/gocryptfs/v2/internal/nametransform"
"github.com/rfjakob/gocryptfs/v2/internal/syscallcompat"
"github.com/rfjakob/gocryptfs/v2/internal/tlog"
)
const dsStoreName = ".DS_Store"
// haveDsstore returns true if one of the entries is ".DS_Store".
func haveDsstore(entries []fuse.DirEntry) bool {
for _, e := range entries {
if e.Name == dsStoreName {
return true
}
}
return false
}
// mkdirWithIv - create a new directory and corresponding diriv file. dirfd
// should be a handle to the parent directory, cName is the name of the new
// directory and mode specifies the access permissions to use.
// If DeterministicNames is set, the diriv file is NOT created.
func (n *Node) mkdirWithIv(dirfd int, cName string, mode uint32, context *fuse.Context) error {
rn := n.rootNode()
if rn.args.DeterministicNames {
return syscallcompat.MkdiratUser(dirfd, cName, mode, context)
}
// Between the creation of the directory and the creation of gocryptfs.diriv
// the directory is inconsistent. Take the lock to prevent other readers
// from seeing it.
rn.dirIVLock.Lock()
defer rn.dirIVLock.Unlock()
err := syscallcompat.MkdiratUser(dirfd, cName, mode, context)
if err != nil {
return err
}
dirfd2, err := syscallcompat.Openat(dirfd, cName, syscall.O_DIRECTORY|syscall.O_NOFOLLOW|syscallcompat.O_PATH, 0)
if err == nil {
// Create gocryptfs.diriv
err = nametransform.WriteDirIVAt(dirfd2)
syscall.Close(dirfd2)
}
if err != nil {
// Delete inconsistent directory (missing gocryptfs.diriv!)
err2 := syscallcompat.Unlinkat(dirfd, cName, unix.AT_REMOVEDIR)
if err2 != nil {
tlog.Warn.Printf("mkdirWithIv: rollback failed: %v", err2)
}
}
return err
}
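// Rough on-disk result of a successful mkdirWithIv (sketch; the authoritative
// file format lives in the nametransform package):
//
//	parentDir/
//	└── cName/                <- newly created encrypted directory
//	    └── gocryptfs.diriv   <- per-directory IV used for name encryption
//
// With DeterministicNames set, no gocryptfs.diriv is written at all.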
// Mkdir - FUSE call. Create a directory at "newPath" with permissions "mode".
//
// Symlink-safe through use of Mkdirat().
func (n *Node) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) {
dirfd, cName, errno := n.prepareAtSyscall(name)
if errno != 0 {
return nil, errno
}
defer syscall.Close(dirfd)
rn := n.rootNode()
var context *fuse.Context
if rn.args.PreserveOwner {
context = toFuseCtx(ctx)
}
var st syscall.Stat_t
if rn.args.PlaintextNames {
err := syscallcompat.MkdiratUser(dirfd, cName, mode, context)
if err != nil {
return nil, fs.ToErrno(err)
}
var ust unix.Stat_t
err = syscallcompat.Fstatat(dirfd, cName, &ust, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
return nil, fs.ToErrno(err)
}
st = syscallcompat.Unix2syscall(ust)
// Create child node & return
ch := n.newChild(ctx, &st, out)
return ch, 0
}
// We need write and execute permissions to create gocryptfs.diriv.
// Also, we need read permissions to open the directory (to avoid
// race-conditions between getting and setting the mode).
origMode := mode
mode = mode | 0700
// Handle long file name
if nametransform.IsLongContent(cName) {
// Create ".name"
err := rn.nameTransform.WriteLongNameAt(dirfd, cName, name)
if err != nil {
return nil, fs.ToErrno(err)
}
// Create directory & rollback .name file on error
err = rn.mkdirWithIv(dirfd, cName, mode, context)
if err != nil {
nametransform.DeleteLongNameAt(dirfd, cName)
return nil, fs.ToErrno(err)
}
} else {
err := rn.mkdirWithIv(dirfd, cName, mode, context)
if err != nil {
return nil, fs.ToErrno(err)
}
}
// Fill `st`
fd, err := syscallcompat.Openat(dirfd, cName,
syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Openat failed: %v", cName, err)
return nil, fs.ToErrno(err)
}
defer syscall.Close(fd)
err = syscall.Fstat(fd, &st)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Fstat failed: %v", cName, err)
return nil, fs.ToErrno(err)
}
// Fix permissions
if origMode != mode {
// Preserve SGID bit if it was set due to inheritance.
origMode = uint32(st.Mode&^0777) | origMode
err = syscall.Fchmod(fd, origMode)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Fchmod %#o -> %#o failed: %v", cName, mode, origMode, err)
}
}
// Create child node & return
ch := n.newChild(ctx, &st, out)
return ch, 0
}
// Readdir - FUSE call.
//
// This function is symlink-safe through use of openBackingDir() and
// ReadDirIVAt().
func (n *Node) Readdir(ctx context.Context) (fs.DirStream, syscall.Errno) {
parentDirFd, cDirName, errno := n.prepareAtSyscallMyself()
if errno != 0 {
return nil, errno
}
defer syscall.Close(parentDirFd)
// Read ciphertext directory
fd, err := syscallcompat.Openat(parentDirFd, cDirName, syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
return nil, fs.ToErrno(err)
}
defer syscall.Close(fd)
cipherEntries, specialEntries, err := syscallcompat.GetdentsSpecial(fd)
if err != nil {
return nil, fs.ToErrno(err)
}
// Get DirIV (stays nil if PlaintextNames is used)
var cachedIV []byte
rn := n.rootNode()
if !rn.args.PlaintextNames {
// Read the DirIV from disk
cachedIV, err = rn.nameTransform.ReadDirIVAt(fd)
if err != nil {
tlog.Warn.Printf("OpenDir %q: could not read %s: %v", cDirName, nametransform.DirIVFilename, err)
return nil, syscall.EIO
}
}
// Decrypted directory entries
var plain []fuse.DirEntry
// Add "." and ".."
plain = append(plain, specialEntries...)
// Filter and decrypt filenames
for i := range cipherEntries {
cName := cipherEntries[i].Name
if n.IsRoot() && cName == configfile.ConfDefaultName {
// silently ignore "gocryptfs.conf" in the top level dir
continue
}
if rn.args.PlaintextNames {
plain = append(plain, cipherEntries[i])
continue
}
if !rn.args.DeterministicNames && cName == nametransform.DirIVFilename {
// silently ignore "gocryptfs.diriv" everywhere if dirIV is enabled
continue
}
// Handle long file name
isLong := nametransform.LongNameNone
if rn.args.LongNames {
isLong = nametransform.NameType(cName)
}
if isLong == nametransform.LongNameContent {
cNameLong, err := nametransform.ReadLongNameAt(fd, cName)
if err != nil {
tlog.Warn.Printf("OpenDir %q: invalid entry %q: Could not read .name: %v",
cDirName, cName, err)
rn.reportMitigatedCorruption(cName)
continue
}
cName = cNameLong
} else if isLong == nametransform.LongNameFilename {
// ignore "gocryptfs.longname.*.name"
continue
}
name, err := rn.nameTransform.DecryptName(cName, cachedIV)
if err != nil {
tlog.Warn.Printf("OpenDir %q: invalid entry %q: %v",
cDirName, cName, err)
rn.reportMitigatedCorruption(cName)
continue
}
// Override the ciphertext name with the plaintext name but reuse the rest
// of the structure
cipherEntries[i].Name = name
plain = append(plain, cipherEntries[i])
}
return fs.NewListDirStream(plain), 0
}
// Rmdir - FUSE call.
//
// Symlink-safe through Unlinkat() + AT_REMOVEDIR.
func (n *Node) Rmdir(ctx context.Context, name string) (code syscall.Errno) |
// Opendir is a FUSE call to check if the directory can be opened.
func (n *Node) Opendir(ctx context.Context) (errno syscall.Errno) {
dirfd, cName, errno := n.prepareAtSyscallMyself()
if errno != 0 {
return
}
defer syscall.Close(dirfd)
// Open backing directory
fd, err := syscallcompat.Openat(dirfd, cName, syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
return fs.ToErrno(err)
}
syscall.Close(fd)
return 0
}
| {
rn := n.rootNode()
parentDirFd, cName, errno := n.prepareAtSyscall(name)
if errno != 0 {
return errno
}
defer syscall.Close(parentDirFd)
if rn.args.PlaintextNames {
// Unlinkat with AT_REMOVEDIR is equivalent to Rmdir
err := unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
return fs.ToErrno(err)
}
if rn.args.DeterministicNames {
if err := unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR); err != nil {
return fs.ToErrno(err)
}
if nametransform.IsLongContent(cName) {
nametransform.DeleteLongNameAt(parentDirFd, cName)
}
return 0
}
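// The fast paths above cover PlaintextNames and DeterministicNames, where the
// directory holds no gocryptfs.diriv and a plain Unlinkat(AT_REMOVEDIR) is
// enough. Everything below handles the classic layout, where the diriv file
// must be moved out of the way first.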
// Unless we are running as root, we need read, write and execute permissions
// to handle gocryptfs.diriv.
permWorkaround := false
var origMode uint32
if !rn.args.PreserveOwner {
var st unix.Stat_t
err := syscallcompat.Fstatat(parentDirFd, cName, &st, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
return fs.ToErrno(err)
}
if st.Mode&0700 != 0700 {
tlog.Debug.Printf("Rmdir: permWorkaround")
permWorkaround = true
// This cast is needed on Darwin, where st.Mode is uint16.
origMode = uint32(st.Mode)
err = syscallcompat.FchmodatNofollow(parentDirFd, cName, origMode|0700)
if err != nil {
tlog.Debug.Printf("Rmdir: permWorkaround: chmod failed: %v", err)
return fs.ToErrno(err)
}
}
}
dirfd, err := syscallcompat.Openat(parentDirFd, cName,
syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
tlog.Debug.Printf("Rmdir: Open: %v", err)
return fs.ToErrno(err)
}
defer syscall.Close(dirfd)
// Undo the chmod if removing the directory failed. This must run before
// closing dirfd, so defer it after (defer is LIFO).
if permWorkaround {
defer func() {
if code != 0 {
err = unix.Fchmod(dirfd, origMode)
if err != nil {
tlog.Warn.Printf("Rmdir: permWorkaround: rollback failed: %v", err)
}
}
}()
}
retry:
// Check directory contents
children, err := syscallcompat.Getdents(dirfd)
if err == io.EOF {
// The directory is empty
tlog.Warn.Printf("Rmdir: %q: %s is missing", cName, nametransform.DirIVFilename)
err = unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
return fs.ToErrno(err)
}
if err != nil {
tlog.Warn.Printf("Rmdir: Getdents: %v", err)
return fs.ToErrno(err)
}
// MacOS sprinkles .DS_Store files everywhere. This is hard to avoid for
// users, so handle it transparently here.
if runtime.GOOS == "darwin" && len(children) <= 2 && haveDsstore(children) {
err = unix.Unlinkat(dirfd, dsStoreName, 0)
if err != nil {
tlog.Warn.Printf("Rmdir: failed to delete blocking file %q: %v", dsStoreName, err)
return fs.ToErrno(err)
}
tlog.Warn.Printf("Rmdir: had to delete blocking file %q", dsStoreName)
goto retry
}
// If the directory is not empty besides gocryptfs.diriv, do not even
// attempt the dance around gocryptfs.diriv.
if len(children) > 1 {
return fs.ToErrno(syscall.ENOTEMPTY)
}
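// At this point the only remaining entry should be gocryptfs.diriv, which
// rmdir(2) would reject as "directory not empty". Work around it in three
// steps: (1) rename gocryptfs.diriv into the parent under a unique temporary
// name, (2) Unlinkat(AT_REMOVEDIR) the now-empty directory, (3) delete the
// temporary file, or rename it back if step 2 fails. dirIVLock is held so
// concurrent readers never see the directory without its IV.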
// Move "gocryptfs.diriv" to the parent dir as "gocryptfs.diriv.rmdir.XYZ"
tmpName := fmt.Sprintf("%s.rmdir.%d", nametransform.DirIVFilename, cryptocore.RandUint64())
tlog.Debug.Printf("Rmdir: Renaming %s to %s", nametransform.DirIVFilename, tmpName)
// The directory is in an inconsistent state between rename and rmdir.
// Protect against concurrent readers.
rn.dirIVLock.Lock()
defer rn.dirIVLock.Unlock()
err = syscallcompat.Renameat(dirfd, nametransform.DirIVFilename,
parentDirFd, tmpName)
if err != nil {
tlog.Warn.Printf("Rmdir: Renaming %s to %s failed: %v",
nametransform.DirIVFilename, tmpName, err)
return fs.ToErrno(err)
}
// Actual Rmdir
err = syscallcompat.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
if err != nil {
// This can happen if another file in the directory was created in the
// meantime, undo the rename
err2 := syscallcompat.Renameat(parentDirFd, tmpName,
dirfd, nametransform.DirIVFilename)
if err2 != nil {
tlog.Warn.Printf("Rmdir: Rename rollback failed: %v", err2)
}
return fs.ToErrno(err)
}
// Delete "gocryptfs.diriv.rmdir.XYZ"
err = syscallcompat.Unlinkat(parentDirFd, tmpName, 0)
if err != nil {
tlog.Warn.Printf("Rmdir: Could not clean up %s: %v", tmpName, err)
}
// Delete .name file
if nametransform.IsLongContent(cName) {
nametransform.DeleteLongNameAt(parentDirFd, cName)
}
return 0
} | identifier_body |
node_dir_ops.go | package fusefrontend
import (
"context"
"fmt"
"io"
"runtime"
"syscall"
"golang.org/x/sys/unix"
"github.com/hanwen/go-fuse/v2/fs"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/rfjakob/gocryptfs/v2/internal/configfile"
"github.com/rfjakob/gocryptfs/v2/internal/cryptocore"
"github.com/rfjakob/gocryptfs/v2/internal/nametransform"
"github.com/rfjakob/gocryptfs/v2/internal/syscallcompat"
"github.com/rfjakob/gocryptfs/v2/internal/tlog"
)
const dsStoreName = ".DS_Store"
// haveDsstore returns true if one of the entries is ".DS_Store".
func haveDsstore(entries []fuse.DirEntry) bool {
for _, e := range entries {
if e.Name == dsStoreName {
return true
}
}
return false
}
// mkdirWithIv - create a new directory and corresponding diriv file. dirfd
// should be a handle to the parent directory, cName is the name of the new
// directory and mode specifies the access permissions to use.
// If DeterministicNames is set, the diriv file is NOT created.
func (n *Node) | (dirfd int, cName string, mode uint32, context *fuse.Context) error {
rn := n.rootNode()
if rn.args.DeterministicNames {
return syscallcompat.MkdiratUser(dirfd, cName, mode, context)
}
// Between the creation of the directory and the creation of gocryptfs.diriv
// the directory is inconsistent. Take the lock to prevent other readers
// from seeing it.
rn.dirIVLock.Lock()
defer rn.dirIVLock.Unlock()
err := syscallcompat.MkdiratUser(dirfd, cName, mode, context)
if err != nil {
return err
}
dirfd2, err := syscallcompat.Openat(dirfd, cName, syscall.O_DIRECTORY|syscall.O_NOFOLLOW|syscallcompat.O_PATH, 0)
if err == nil {
// Create gocryptfs.diriv
err = nametransform.WriteDirIVAt(dirfd2)
syscall.Close(dirfd2)
}
if err != nil {
// Delete inconsistent directory (missing gocryptfs.diriv!)
err2 := syscallcompat.Unlinkat(dirfd, cName, unix.AT_REMOVEDIR)
if err2 != nil {
tlog.Warn.Printf("mkdirWithIv: rollback failed: %v", err2)
}
}
return err
}
// Mkdir - FUSE call. Create a directory at "newPath" with permissions "mode".
//
// Symlink-safe through use of Mkdirat().
func (n *Node) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) {
dirfd, cName, errno := n.prepareAtSyscall(name)
if errno != 0 {
return nil, errno
}
defer syscall.Close(dirfd)
rn := n.rootNode()
var context *fuse.Context
if rn.args.PreserveOwner {
context = toFuseCtx(ctx)
}
var st syscall.Stat_t
if rn.args.PlaintextNames {
err := syscallcompat.MkdiratUser(dirfd, cName, mode, context)
if err != nil {
return nil, fs.ToErrno(err)
}
var ust unix.Stat_t
err = syscallcompat.Fstatat(dirfd, cName, &ust, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
return nil, fs.ToErrno(err)
}
st = syscallcompat.Unix2syscall(ust)
// Create child node & return
ch := n.newChild(ctx, &st, out)
return ch, 0
}
// We need write and execute permissions to create gocryptfs.diriv.
// Also, we need read permissions to open the directory (to avoid
// race-conditions between getting and setting the mode).
origMode := mode
mode = mode | 0700
// Handle long file name
if nametransform.IsLongContent(cName) {
// Create ".name"
err := rn.nameTransform.WriteLongNameAt(dirfd, cName, name)
if err != nil {
return nil, fs.ToErrno(err)
}
// Create directory & rollback .name file on error
err = rn.mkdirWithIv(dirfd, cName, mode, context)
if err != nil {
nametransform.DeleteLongNameAt(dirfd, cName)
return nil, fs.ToErrno(err)
}
} else {
err := rn.mkdirWithIv(dirfd, cName, mode, context)
if err != nil {
return nil, fs.ToErrno(err)
}
}
// Fill `st`
fd, err := syscallcompat.Openat(dirfd, cName,
syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Openat failed: %v", cName, err)
return nil, fs.ToErrno(err)
}
defer syscall.Close(fd)
err = syscall.Fstat(fd, &st)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Fstat failed: %v", cName, err)
return nil, fs.ToErrno(err)
}
// Fix permissions
if origMode != mode {
// Preserve SGID bit if it was set due to inheritance.
origMode = uint32(st.Mode&^0777) | origMode
err = syscall.Fchmod(fd, origMode)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Fchmod %#o -> %#o failed: %v", cName, mode, origMode, err)
}
}
// Create child node & return
ch := n.newChild(ctx, &st, out)
return ch, 0
}
// Readdir - FUSE call.
//
// This function is symlink-safe through use of openBackingDir() and
// ReadDirIVAt().
func (n *Node) Readdir(ctx context.Context) (fs.DirStream, syscall.Errno) {
parentDirFd, cDirName, errno := n.prepareAtSyscallMyself()
if errno != 0 {
return nil, errno
}
defer syscall.Close(parentDirFd)
// Read ciphertext directory
fd, err := syscallcompat.Openat(parentDirFd, cDirName, syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
return nil, fs.ToErrno(err)
}
defer syscall.Close(fd)
cipherEntries, specialEntries, err := syscallcompat.GetdentsSpecial(fd)
if err != nil {
return nil, fs.ToErrno(err)
}
// Get DirIV (stays nil if PlaintextNames is used)
var cachedIV []byte
rn := n.rootNode()
if !rn.args.PlaintextNames {
// Read the DirIV from disk
cachedIV, err = rn.nameTransform.ReadDirIVAt(fd)
if err != nil {
tlog.Warn.Printf("OpenDir %q: could not read %s: %v", cDirName, nametransform.DirIVFilename, err)
return nil, syscall.EIO
}
}
// Decrypted directory entries
var plain []fuse.DirEntry
// Add "." and ".."
plain = append(plain, specialEntries...)
// Filter and decrypt filenames
for i := range cipherEntries {
cName := cipherEntries[i].Name
if n.IsRoot() && cName == configfile.ConfDefaultName {
// silently ignore "gocryptfs.conf" in the top level dir
continue
}
if rn.args.PlaintextNames {
plain = append(plain, cipherEntries[i])
continue
}
if !rn.args.DeterministicNames && cName == nametransform.DirIVFilename {
// silently ignore "gocryptfs.diriv" everywhere if dirIV is enabled
continue
}
// Handle long file name
isLong := nametransform.LongNameNone
if rn.args.LongNames {
isLong = nametransform.NameType(cName)
}
if isLong == nametransform.LongNameContent {
cNameLong, err := nametransform.ReadLongNameAt(fd, cName)
if err != nil {
tlog.Warn.Printf("OpenDir %q: invalid entry %q: Could not read .name: %v",
cDirName, cName, err)
rn.reportMitigatedCorruption(cName)
continue
}
cName = cNameLong
} else if isLong == nametransform.LongNameFilename {
// ignore "gocryptfs.longname.*.name"
continue
}
name, err := rn.nameTransform.DecryptName(cName, cachedIV)
if err != nil {
tlog.Warn.Printf("OpenDir %q: invalid entry %q: %v",
cDirName, cName, err)
rn.reportMitigatedCorruption(cName)
continue
}
// Override the ciphertext name with the plaintext name but reuse the rest
// of the structure
cipherEntries[i].Name = name
plain = append(plain, cipherEntries[i])
}
return fs.NewListDirStream(plain), 0
}
// Rmdir - FUSE call.
//
// Symlink-safe through Unlinkat() + AT_REMOVEDIR.
func (n *Node) Rmdir(ctx context.Context, name string) (code syscall.Errno) {
rn := n.rootNode()
parentDirFd, cName, errno := n.prepareAtSyscall(name)
if errno != 0 {
return errno
}
defer syscall.Close(parentDirFd)
if rn.args.PlaintextNames {
// Unlinkat with AT_REMOVEDIR is equivalent to Rmdir
err := unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
return fs.ToErrno(err)
}
if rn.args.DeterministicNames {
if err := unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR); err != nil {
return fs.ToErrno(err)
}
if nametransform.IsLongContent(cName) {
nametransform.DeleteLongNameAt(parentDirFd, cName)
}
return 0
}
// Unless we are running as root, we need read, write and execute permissions
// to handle gocryptfs.diriv.
permWorkaround := false
var origMode uint32
if !rn.args.PreserveOwner {
var st unix.Stat_t
err := syscallcompat.Fstatat(parentDirFd, cName, &st, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
return fs.ToErrno(err)
}
if st.Mode&0700 != 0700 {
tlog.Debug.Printf("Rmdir: permWorkaround")
permWorkaround = true
// This cast is needed on Darwin, where st.Mode is uint16.
origMode = uint32(st.Mode)
err = syscallcompat.FchmodatNofollow(parentDirFd, cName, origMode|0700)
if err != nil {
tlog.Debug.Printf("Rmdir: permWorkaround: chmod failed: %v", err)
return fs.ToErrno(err)
}
}
}
dirfd, err := syscallcompat.Openat(parentDirFd, cName,
syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
tlog.Debug.Printf("Rmdir: Open: %v", err)
return fs.ToErrno(err)
}
defer syscall.Close(dirfd)
// Undo the chmod if removing the directory failed. This must run before
// closing dirfd, so defer it after (defer is LIFO).
if permWorkaround {
defer func() {
if code != 0 {
err = unix.Fchmod(dirfd, origMode)
if err != nil {
tlog.Warn.Printf("Rmdir: permWorkaround: rollback failed: %v", err)
}
}
}()
}
retry:
// Check directory contents
children, err := syscallcompat.Getdents(dirfd)
if err == io.EOF {
// The directory is empty
tlog.Warn.Printf("Rmdir: %q: %s is missing", cName, nametransform.DirIVFilename)
err = unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
return fs.ToErrno(err)
}
if err != nil {
tlog.Warn.Printf("Rmdir: Getdents: %v", err)
return fs.ToErrno(err)
}
// MacOS sprinkles .DS_Store files everywhere. This is hard to avoid for
// users, so handle it transparently here.
if runtime.GOOS == "darwin" && len(children) <= 2 && haveDsstore(children) {
err = unix.Unlinkat(dirfd, dsStoreName, 0)
if err != nil {
tlog.Warn.Printf("Rmdir: failed to delete blocking file %q: %v", dsStoreName, err)
return fs.ToErrno(err)
}
tlog.Warn.Printf("Rmdir: had to delete blocking file %q", dsStoreName)
goto retry
}
// If the directory is not empty besides gocryptfs.diriv, do not even
// attempt the dance around gocryptfs.diriv.
if len(children) > 1 {
return fs.ToErrno(syscall.ENOTEMPTY)
}
// Move "gocryptfs.diriv" to the parent dir as "gocryptfs.diriv.rmdir.XYZ"
tmpName := fmt.Sprintf("%s.rmdir.%d", nametransform.DirIVFilename, cryptocore.RandUint64())
tlog.Debug.Printf("Rmdir: Renaming %s to %s", nametransform.DirIVFilename, tmpName)
// The directory is in an inconsistent state between rename and rmdir.
// Protect against concurrent readers.
rn.dirIVLock.Lock()
defer rn.dirIVLock.Unlock()
err = syscallcompat.Renameat(dirfd, nametransform.DirIVFilename,
parentDirFd, tmpName)
if err != nil {
tlog.Warn.Printf("Rmdir: Renaming %s to %s failed: %v",
nametransform.DirIVFilename, tmpName, err)
return fs.ToErrno(err)
}
// Actual Rmdir
err = syscallcompat.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
if err != nil {
// This can happen if another file in the directory was created in the
// meantime, undo the rename
err2 := syscallcompat.Renameat(parentDirFd, tmpName,
dirfd, nametransform.DirIVFilename)
if err2 != nil {
tlog.Warn.Printf("Rmdir: Rename rollback failed: %v", err2)
}
return fs.ToErrno(err)
}
// Delete "gocryptfs.diriv.rmdir.XYZ"
err = syscallcompat.Unlinkat(parentDirFd, tmpName, 0)
if err != nil {
tlog.Warn.Printf("Rmdir: Could not clean up %s: %v", tmpName, err)
}
// Delete .name file
if nametransform.IsLongContent(cName) {
nametransform.DeleteLongNameAt(parentDirFd, cName)
}
return 0
}
// Opendir is a FUSE call to check if the directory can be opened.
func (n *Node) Opendir(ctx context.Context) (errno syscall.Errno) {
dirfd, cName, errno := n.prepareAtSyscallMyself()
if errno != 0 {
return
}
defer syscall.Close(dirfd)
// Open backing directory
fd, err := syscallcompat.Openat(dirfd, cName, syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
return fs.ToErrno(err)
}
syscall.Close(fd)
return 0
}
| mkdirWithIv | identifier_name |
node_dir_ops.go | package fusefrontend
import (
"context"
"fmt"
"io"
"runtime"
"syscall"
"golang.org/x/sys/unix"
"github.com/hanwen/go-fuse/v2/fs"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/rfjakob/gocryptfs/v2/internal/configfile"
"github.com/rfjakob/gocryptfs/v2/internal/cryptocore"
"github.com/rfjakob/gocryptfs/v2/internal/nametransform"
"github.com/rfjakob/gocryptfs/v2/internal/syscallcompat"
"github.com/rfjakob/gocryptfs/v2/internal/tlog"
)
const dsStoreName = ".DS_Store"
// haveDsstore returns true if one of the entries is ".DS_Store".
func haveDsstore(entries []fuse.DirEntry) bool {
for _, e := range entries {
if e.Name == dsStoreName {
return true
}
}
return false
}
// mkdirWithIv - create a new directory and corresponding diriv file. dirfd
// should be a handle to the parent directory, cName is the name of the new
// directory and mode specifies the access permissions to use.
// If DeterministicNames is set, the diriv file is NOT created.
func (n *Node) mkdirWithIv(dirfd int, cName string, mode uint32, context *fuse.Context) error {
rn := n.rootNode()
if rn.args.DeterministicNames {
return syscallcompat.MkdiratUser(dirfd, cName, mode, context)
}
// Between the creation of the directory and the creation of gocryptfs.diriv
// the directory is inconsistent. Take the lock to prevent other readers
// from seeing it.
rn.dirIVLock.Lock()
defer rn.dirIVLock.Unlock()
err := syscallcompat.MkdiratUser(dirfd, cName, mode, context)
if err != nil {
return err
}
dirfd2, err := syscallcompat.Openat(dirfd, cName, syscall.O_DIRECTORY|syscall.O_NOFOLLOW|syscallcompat.O_PATH, 0)
if err == nil {
// Create gocryptfs.diriv
err = nametransform.WriteDirIVAt(dirfd2)
syscall.Close(dirfd2)
}
if err != nil { | // Delete inconsistent directory (missing gocryptfs.diriv!)
err2 := syscallcompat.Unlinkat(dirfd, cName, unix.AT_REMOVEDIR)
if err2 != nil {
tlog.Warn.Printf("mkdirWithIv: rollback failed: %v", err2)
}
}
return err
}
// Mkdir - FUSE call. Create a directory at "newPath" with permissions "mode".
//
// Symlink-safe through use of Mkdirat().
func (n *Node) Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*fs.Inode, syscall.Errno) {
dirfd, cName, errno := n.prepareAtSyscall(name)
if errno != 0 {
return nil, errno
}
defer syscall.Close(dirfd)
rn := n.rootNode()
var context *fuse.Context
if rn.args.PreserveOwner {
context = toFuseCtx(ctx)
}
var st syscall.Stat_t
if rn.args.PlaintextNames {
err := syscallcompat.MkdiratUser(dirfd, cName, mode, context)
if err != nil {
return nil, fs.ToErrno(err)
}
var ust unix.Stat_t
err = syscallcompat.Fstatat(dirfd, cName, &ust, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
return nil, fs.ToErrno(err)
}
st = syscallcompat.Unix2syscall(ust)
// Create child node & return
ch := n.newChild(ctx, &st, out)
return ch, 0
}
// We need write and execute permissions to create gocryptfs.diriv.
// Also, we need read permissions to open the directory (to avoid
// race-conditions between getting and setting the mode).
origMode := mode
mode = mode | 0700
// Handle long file name
if nametransform.IsLongContent(cName) {
// Create ".name"
err := rn.nameTransform.WriteLongNameAt(dirfd, cName, name)
if err != nil {
return nil, fs.ToErrno(err)
}
// Create directory & rollback .name file on error
err = rn.mkdirWithIv(dirfd, cName, mode, context)
if err != nil {
nametransform.DeleteLongNameAt(dirfd, cName)
return nil, fs.ToErrno(err)
}
} else {
err := rn.mkdirWithIv(dirfd, cName, mode, context)
if err != nil {
return nil, fs.ToErrno(err)
}
}
// Fill `st`
fd, err := syscallcompat.Openat(dirfd, cName,
syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Openat failed: %v", cName, err)
return nil, fs.ToErrno(err)
}
defer syscall.Close(fd)
err = syscall.Fstat(fd, &st)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Fstat failed: %v", cName, err)
return nil, fs.ToErrno(err)
}
// Fix permissions
if origMode != mode {
// Preserve SGID bit if it was set due to inheritance.
origMode = uint32(st.Mode&^0777) | origMode
err = syscall.Fchmod(fd, origMode)
if err != nil {
tlog.Warn.Printf("Mkdir %q: Fchmod %#o -> %#o failed: %v", cName, mode, origMode, err)
}
}
// Create child node & return
ch := n.newChild(ctx, &st, out)
return ch, 0
}
// Readdir - FUSE call.
//
// This function is symlink-safe through use of openBackingDir() and
// ReadDirIVAt().
func (n *Node) Readdir(ctx context.Context) (fs.DirStream, syscall.Errno) {
parentDirFd, cDirName, errno := n.prepareAtSyscallMyself()
if errno != 0 {
return nil, errno
}
defer syscall.Close(parentDirFd)
// Read ciphertext directory
fd, err := syscallcompat.Openat(parentDirFd, cDirName, syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
return nil, fs.ToErrno(err)
}
defer syscall.Close(fd)
cipherEntries, specialEntries, err := syscallcompat.GetdentsSpecial(fd)
if err != nil {
return nil, fs.ToErrno(err)
}
// Get DirIV (stays nil if PlaintextNames is used)
var cachedIV []byte
rn := n.rootNode()
if !rn.args.PlaintextNames {
// Read the DirIV from disk
cachedIV, err = rn.nameTransform.ReadDirIVAt(fd)
if err != nil {
tlog.Warn.Printf("OpenDir %q: could not read %s: %v", cDirName, nametransform.DirIVFilename, err)
return nil, syscall.EIO
}
}
// Decrypted directory entries
var plain []fuse.DirEntry
// Add "." and ".."
plain = append(plain, specialEntries...)
// Filter and decrypt filenames
for i := range cipherEntries {
cName := cipherEntries[i].Name
if n.IsRoot() && cName == configfile.ConfDefaultName {
// silently ignore "gocryptfs.conf" in the top level dir
continue
}
if rn.args.PlaintextNames {
plain = append(plain, cipherEntries[i])
continue
}
if !rn.args.DeterministicNames && cName == nametransform.DirIVFilename {
// silently ignore "gocryptfs.diriv" everywhere if dirIV is enabled
continue
}
// Handle long file name
isLong := nametransform.LongNameNone
if rn.args.LongNames {
isLong = nametransform.NameType(cName)
}
if isLong == nametransform.LongNameContent {
cNameLong, err := nametransform.ReadLongNameAt(fd, cName)
if err != nil {
tlog.Warn.Printf("OpenDir %q: invalid entry %q: Could not read .name: %v",
cDirName, cName, err)
rn.reportMitigatedCorruption(cName)
continue
}
cName = cNameLong
} else if isLong == nametransform.LongNameFilename {
// ignore "gocryptfs.longname.*.name"
continue
}
name, err := rn.nameTransform.DecryptName(cName, cachedIV)
if err != nil {
tlog.Warn.Printf("OpenDir %q: invalid entry %q: %v",
cDirName, cName, err)
rn.reportMitigatedCorruption(cName)
continue
}
// Override the ciphertext name with the plaintext name but reuse the rest
// of the structure
cipherEntries[i].Name = name
plain = append(plain, cipherEntries[i])
}
return fs.NewListDirStream(plain), 0
}
// Rmdir - FUSE call.
//
// Symlink-safe through Unlinkat() + AT_REMOVEDIR.
func (n *Node) Rmdir(ctx context.Context, name string) (code syscall.Errno) {
rn := n.rootNode()
parentDirFd, cName, errno := n.prepareAtSyscall(name)
if errno != 0 {
return errno
}
defer syscall.Close(parentDirFd)
if rn.args.PlaintextNames {
// Unlinkat with AT_REMOVEDIR is equivalent to Rmdir
err := unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
return fs.ToErrno(err)
}
if rn.args.DeterministicNames {
if err := unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR); err != nil {
return fs.ToErrno(err)
}
if nametransform.IsLongContent(cName) {
nametransform.DeleteLongNameAt(parentDirFd, cName)
}
return 0
}
// Unless we are running as root, we need read, write and execute permissions
// to handle gocryptfs.diriv.
permWorkaround := false
var origMode uint32
if !rn.args.PreserveOwner {
var st unix.Stat_t
err := syscallcompat.Fstatat(parentDirFd, cName, &st, unix.AT_SYMLINK_NOFOLLOW)
if err != nil {
return fs.ToErrno(err)
}
if st.Mode&0700 != 0700 {
tlog.Debug.Printf("Rmdir: permWorkaround")
permWorkaround = true
// This cast is needed on Darwin, where st.Mode is uint16.
origMode = uint32(st.Mode)
err = syscallcompat.FchmodatNofollow(parentDirFd, cName, origMode|0700)
if err != nil {
tlog.Debug.Printf("Rmdir: permWorkaround: chmod failed: %v", err)
return fs.ToErrno(err)
}
}
}
dirfd, err := syscallcompat.Openat(parentDirFd, cName,
syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
tlog.Debug.Printf("Rmdir: Open: %v", err)
return fs.ToErrno(err)
}
defer syscall.Close(dirfd)
// Undo the chmod if removing the directory failed. This must run before
// closing dirfd, so defer it after (defer is LIFO).
if permWorkaround {
defer func() {
if code != 0 {
err = unix.Fchmod(dirfd, origMode)
if err != nil {
tlog.Warn.Printf("Rmdir: permWorkaround: rollback failed: %v", err)
}
}
}()
}
retry:
// Check directory contents
children, err := syscallcompat.Getdents(dirfd)
if err == io.EOF {
// The directory is empty
tlog.Warn.Printf("Rmdir: %q: %s is missing", cName, nametransform.DirIVFilename)
err = unix.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
return fs.ToErrno(err)
}
if err != nil {
tlog.Warn.Printf("Rmdir: Getdents: %v", err)
return fs.ToErrno(err)
}
// MacOS sprinkles .DS_Store files everywhere. This is hard to avoid for
// users, so handle it transparently here.
if runtime.GOOS == "darwin" && len(children) <= 2 && haveDsstore(children) {
err = unix.Unlinkat(dirfd, dsStoreName, 0)
if err != nil {
tlog.Warn.Printf("Rmdir: failed to delete blocking file %q: %v", dsStoreName, err)
return fs.ToErrno(err)
}
tlog.Warn.Printf("Rmdir: had to delete blocking file %q", dsStoreName)
goto retry
}
// If the directory is not empty besides gocryptfs.diriv, do not even
// attempt the dance around gocryptfs.diriv.
if len(children) > 1 {
return fs.ToErrno(syscall.ENOTEMPTY)
}
// Move "gocryptfs.diriv" to the parent dir as "gocryptfs.diriv.rmdir.XYZ"
tmpName := fmt.Sprintf("%s.rmdir.%d", nametransform.DirIVFilename, cryptocore.RandUint64())
tlog.Debug.Printf("Rmdir: Renaming %s to %s", nametransform.DirIVFilename, tmpName)
// The directory is in an inconsistent state between rename and rmdir.
// Protect against concurrent readers.
rn.dirIVLock.Lock()
defer rn.dirIVLock.Unlock()
err = syscallcompat.Renameat(dirfd, nametransform.DirIVFilename,
parentDirFd, tmpName)
if err != nil {
tlog.Warn.Printf("Rmdir: Renaming %s to %s failed: %v",
nametransform.DirIVFilename, tmpName, err)
return fs.ToErrno(err)
}
// Actual Rmdir
err = syscallcompat.Unlinkat(parentDirFd, cName, unix.AT_REMOVEDIR)
if err != nil {
// This can happen if another file in the directory was created in the
// meantime, undo the rename
err2 := syscallcompat.Renameat(parentDirFd, tmpName,
dirfd, nametransform.DirIVFilename)
if err2 != nil {
tlog.Warn.Printf("Rmdir: Rename rollback failed: %v", err2)
}
return fs.ToErrno(err)
}
// Delete "gocryptfs.diriv.rmdir.XYZ"
err = syscallcompat.Unlinkat(parentDirFd, tmpName, 0)
if err != nil {
tlog.Warn.Printf("Rmdir: Could not clean up %s: %v", tmpName, err)
}
// Delete .name file
if nametransform.IsLongContent(cName) {
nametransform.DeleteLongNameAt(parentDirFd, cName)
}
return 0
}
// Opendir is a FUSE call to check if the directory can be opened.
func (n *Node) Opendir(ctx context.Context) (errno syscall.Errno) {
dirfd, cName, errno := n.prepareAtSyscallMyself()
if errno != 0 {
return
}
defer syscall.Close(dirfd)
// Open backing directory
fd, err := syscallcompat.Openat(dirfd, cName, syscall.O_RDONLY|syscall.O_DIRECTORY|syscall.O_NOFOLLOW, 0)
if err != nil {
return fs.ToErrno(err)
}
syscall.Close(fd)
return 0
} | random_line_split |
|
engine.py | #!/usr/bin/env pypy
import random
import math
import argparse
import cPickle as pickle
import logging
import os
import sys
import re
import colorsys
import bisect
import operator
from xml.dom.minidom import parse
InkscapePath = "/Applications/Inkscape.app/Contents/Resources/bin/inkscape"
try:
import Image
import ImageDraw
except ImportError:
from PIL import Image
from PIL import ImageDraw
# local
from sfgen import *
sys.modules["curves"] = curves
def avg_stdev(data):
avg = sum(data) / float(len(data))
stdev = math.sqrt(sum((x - avg) ** 2 for x in data) / float(len(data)))
return (avg, stdev)
class CrystalEnvironment(dict):
def __init__(self, curves=None, **kw):
self.curves = curves
self._init_defaults()
self.update(**kw)
self.set_factory_settings()
def set_factory_settings(self):
self.factory_settings = self.copy()
def __getattr__(self, name):
if name not in self:
raise AttributeError("no such thing brah: %s" % name)
return self[name]
def __getnewargs__(self):
return ()
def __getstate__(self):
return (self.curves, self.factory_settings, dict(self))
def __setstate__(self, state):
if type(state) == dict:
self.update(state)
self.curves = None
self.set_factory_settings()
else:
self.curves = state[0]
self.factory_settings = state[1]
self.update(state[2])
def step(self, x):
if self.curves == None:
return
for key in self.curves:
self[key] = self.curves[key][x]
@classmethod
def build_env(cls, name, steps, min_gamma=0.45, max_gamma=0.85):
curves = {
"beta": (1.3, 2),
"theta": (0.01, 0.04),
"alpha": (0.02, 0.1),
"kappa": (0.001, 0.01),
"mu": (0.01, 0.1),
"upilson": (0.00001, 0.0001),
"sigma": (0.00001, 0.000001),
}
cs = CurveSet(name, steps, curves)
cs.run_graph()
env = {key: cs[key][0] for key in curves}
env["gamma"] = random.random() * (max_gamma - min_gamma) + min_gamma
return CrystalEnvironment(curves=cs, **env)
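# Note (assumption): CurveSet comes from sfgen/curves via "from sfgen import *".
# Each (lo, hi) pair above is handed to it together with `steps`, and step()
# later indexes curves[key][iteration], so every parameter is expected to vary
# within its range over the course of a run; see sfgen for the actual curve
# shapes produced by run_graph().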
def get_default(self, key):
return self.factory_settings[key]
def randomize(self):
for key in self:
if key == "sigma":
continue
if key == "gamma":
self[key] += 1.0 / random.randint(100, 1000)
else:
self[key] += random.choice([1.0, -1.0]) / random.randint(100, 1000)
self.set_factory_settings()
def _init_defaults(self):
# (3a)
# "A boundary site with 1 or 2 attached neighbors needs boundary mass at least beta to join the crystal
# This is the case when the local mesoscopic geometry near x corresponds to a tip or flat spot of the crystal.
# (Distinguishing the two cases turns out to be of minor significance.) In our simulations, beta is typically
# between about 1.05 and 3. We assume beta > 1 since 1 is the basic threshold of the case to follow next.
self["beta"] = 1.3
# (3b)
# "A boundary site with 3 attached neighbors joins the crystal if either it has boundary mass >= 1,
# or it has diffusive mass < theta in its neighborhood and it has boundary mass >= alpha"
self["theta"] = 0.025
self["alpha"] = 0.08
# (2)
# "Proportion kappa of the diffusive mass at each boundary site crystallizes.
# The remainder (proportion 1 - kappa) becomes boundary mass."
self["kappa"] = 0.003
# (4)
# "Proportion mu of the boundary mass and proportion upsilon of the crystal mass at each boundary site become diffusive mass.
# Melting represents mass flow at the boundary from ice and quasi-liquid back to vapor, reverse
# effects from the freezing of step ii. Typically mu is small and upsilon extremely small."
self["mu"] = 0.07
self["upsilon"] = 0.00005
# (5)
# "The diffusive mass at each site undergoes an independent random perturbation of proportion sigma"
self["sigma"] = 0.00001
# initial diffusion
self["gamma"] = 0.5
def _init_special(self):
pass
# class RenderMovie(object) moved to movie.py
# 2018-0212
# class LatticeReplay(object) moved to movie.py
# 2018-0212
class CrystalLattice(object):
LogHeader = ["dm", "cm", "bm", "acnt", "bcnt", "width", "beta", "theta", "alpha", "kappa", "mu", "upsilon"]
def __init__(self, size, environment=None, celltype=None, max_steps=0, margin=None, curves=None, datalog=False, debug=False):
self.size = size
if environment == None:
environment = CrystalEnvironment()
self.environment = environment
self.datalog = None
self.celllog = None
if datalog:
self.datalog = []
self.celllog = []
if celltype == None:
celltype = SnowflakeCell
self.debug = debug
self.celltype = celltype
self.iteration = 1
assert margin > 0 and margin <= 1.0
self.margin = margin
self.curves = curves
self.max_steps = max_steps
self._init_cells()
def __setstate__(self, state):
# 0.1->0.2 format changes
if "radius" in state:
state["size"] = state["radius"]
del state["radius"]
if "angle" in state:
del state["angle"]
self.__dict__.update(state)
def save_lattice(self, fn):
msg = "Saving %s..." % fn
log(msg)
f = open(fn, 'wb')
pickle.dump(self, f, protocol=-1)
@classmethod
def load_lattice(cls, fn):
msg = "Loading %s..." % fn
log(msg)
f = open(fn, 'rb')
obj = pickle.load(f)
for cell in obj.cells:
cell.lattice = obj
cell.env = obj.environment
cell.update_boundary()
return obj
def get_neighbors(self, xy):
(x, y) = xy
nlist = [(x, y + 1), (x, y - 1), (x - 1, y), (x + 1, y), (x - 1, y - 1), (x + 1, y + 1)]
nlist = map(self._cell_index, filter(self._xy_ok, nlist))
res = tuple([self.cells[nidx] for nidx in nlist if self.cells[nidx] != None])
return res
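# The six offsets above encode a hexagonal neighborhood on a square array
# (rhombus/axial coordinates): the four edge neighbors plus the two diagonals
# along the (-1,-1)/(+1,+1) axis, so every interior cell has exactly six
# neighbors, as on a hex lattice.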
def reality_check(self):
for cell in self.cells:
cell.reality_check()
def _init_cells(self):
self.cells = [None] * (self.size * self.size)
for x in range(self.size):
for y in range(self.size):
xy = (x, y)
cell = self.celltype(xy, self)
idx = self._cell_index(xy)
self.cells[idx] = cell
self.reality_check()
center_pt = self._cell_index((self.size / 2, self.size / 2))
self.cells[center_pt].attach(1)
# fun experiments
#self.cells[center_pt+4].attach(1)
#self.cells[center_pt-4].attach(1)
def _xy_ok(self, xy):
(x, y) = xy
return (x >= 0 and x < self.size and y >= 0 and y < self.size)
def _cell_index(self, xy):
(x, y) = xy
return int(round(y * self.size + x))
def _cell_xy(self, idx):
y = idx / self.size
x = idx % self.size
return (x, y)
def adjust_humidity(self, val):
val = abs(val)
for cell in self.cells:
if cell.attached or cell.boundary:
continue
cell.diffusive_mass += val * self.environment.sigma
# only mutate the cells outside our margin
#if self.xy_to_polar(cell.xy)[1] > (self.size * self.margin):
# we use the same coef as the noise coef
#cell.diffusive_mass += val * self.environment.sigma
def log_status(self):
if self.datalog == None:
return
row = []
#row.append(self.iteration)
dm = [cell.diffusive_mass for cell in self.cells if cell]
row.append(sum(dm))
cm = [cell.crystal_mass for cell in self.cells if cell]
row.append(sum(cm))
bm = [cell.boundary_mass for cell in self.cells if cell]
row.append(sum(bm))
acnt = len([cell for cell in self.cells if cell and cell.attached])
row.append(acnt)
bcnt = len([cell for cell in self.cells if cell and cell.boundary])
row.append(bcnt)
d = self.snowflake_radius()
row.append(d)
row.append(self.environment.beta)
row.append(self.environment.theta)
row.append(self.environment.alpha)
row.append(self.environment.kappa)
row.append(self.environment.mu)
row.append(self.environment.upsilon)
#row.append(self.environment.sigma)
#row.append(self.environment.gamma)
self.datalog.append(row)
# log the cells
self.celllog.append((self.iteration, dm, cm))
def write_log(self):
self.write_datalog()
self.write_celllog()
def write_datalog(self):
if self.datalog == None:
return
logfn = "datalog.csv"
msg = "Saving runtime data to %s" % logfn
log(msg)
f = open(logfn, 'w')
txt = ''
txt += str.join(',', self.LogHeader) + '\n'
for row in self.datalog:
txt += str.join(',', map(str, row)) + '\n'
f.write(txt)
def write_celllog(self):
if not self.celllog:
return
logfn = "cell_log_%d.pickle" % self.iteration
f = open(logfn, 'wb')
pickle.dump(self.celllog, f, protocol=-1)
self.celllog = []
def print_status(self):
dm = sum([cell.diffusive_mass for cell in self.cells if cell])
cm = sum([cell.crystal_mass for cell in self.cells if cell])
bm = sum([cell.boundary_mass for cell in self.cells if cell])
acnt = len([cell for cell in self.cells if cell and cell.attached])
bcnt = len([cell for cell in self.cells if cell and cell.boundary])
#msg = "Step #%d, %d attached, %d boundary, %.2f dM, %.2f bM, %.2f cM, tot %.2f M" % (self.iteration, acnt, bcnt, dm, bm, cm, dm + cm + bm)
d = self.snowflake_radius()
msg = "Step #%d/%dp (%.2f%% scl), %d/%d (%.2f%%), %.2f dM, %.2f bM, %.2f cM, tot %.2f M" % (self.iteration, d, (float(d * 2 * X_SCALE_FACTOR) / self.iteration) * 100, acnt, bcnt, (float(bcnt) / acnt) * 100, dm, bm, cm, dm + cm + bm)
log(msg)
def step(self):
self.log_status()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_one()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_two()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_three()
# run curves
self.iteration += 1
self.environment.step(self.iteration)
def translate_xy(self, xy):
(x, y) = xy
x = int(round(x * X_SCALE_FACTOR))
return (x, y)
def polar_to_xy(self, args):
(angle, distance) = args
half = self.size / 2.0
angle = math.radians(angle)
y = int(round(half - (math.sin(angle) * distance)))
x = int(round(half + (math.cos(angle) * distance)))
return (x, y)
def xy_to_polar(self, args):
(x, y) = args
half = self.size / 2.0
x -= half
y += half
angle = math.degrees(math.atan2(y, x))
distance = math.hypot(x, y)
return (angle, distance)
def snowflake_radius(self, angle=135):
# we cast a ray on the 135 degree axis
radius = 0
half = self.size / 2.0
while radius < half:
radius += 1
xy = self.polar_to_xy((angle, radius))
cell = self.cells[self._cell_index(xy)]
if cell.attached or cell.boundary:
continue
return radius
# uhh
return int(round(half))
def crop_snowflake(self, margin=None):
def scale(val):
return int(round(X_SCALE_FACTOR * val))
if margin == None:
margin = 15
half = self.size / 2
radius = scale(self.snowflake_radius())
distance = min(radius + margin, half)
half_s = scale(half)
distance_s = scale(distance)
box = (half_s - distance, half - distance, half_s + distance, half + distance)
return box
def headroom(self, margin=None):
if self.max_steps and self.iteration >= self.max_steps:
return False
if margin == None:
margin = self.margin
assert margin > 0 and margin <= 1
cutoff = int(round(margin * (self.size / 2.0)))
radius = self.snowflake_radius()
if radius > cutoff:
return False
return True
def grow(self):
while True:
if self.debug:
self.print_status()
self.step()
if self.iteration % 50 == 0:
self.write_celllog()
if not self.debug:
self.print_status()
if not self.headroom():
break
if self.debug:
self.print_status()
def save_image(self, fn, **kw):
import sfgen
r = sfgen.RenderSnowflake(self)
r.save_image(fn, **kw)
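# Example driver (sketch; runner.py normally does this, and log(), CurveSet
# and X_SCALE_FACTOR are assumed to come from sfgen):
#   env = CrystalEnvironment.build_env("demo", steps=10000)
#   lattice = CrystalLattice(500, environment=env, max_steps=10000,
#                            margin=0.85, datalog=True)
#   lattice.grow()
#   lattice.write_log()
#   lattice.save_image("snowflake.png")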
class SnowflakeCell(object):
def __init__(self, xy, lattice):
self.xy = xy
self.lattice = lattice
self.env = lattice.environment
self.diffusive_mass = self.env.gamma
self.boundary_mass = 0.0
self.crystal_mass = 0.0
self.attached = False
self.age = 0
self.boundary = 0
self.attached_neighbors = []
self.__neighbors = None
def __getstate__(self):
return (self.xy, self.diffusive_mass, self.boundary_mass, self.crystal_mass, self.attached, self.age)
def __setstate__(self, state):
self.xy = state[0]
self.diffusive_mass = state[1]
self.boundary_mass = state[2]
self.crystal_mass = state[3]
self.attached = state[4]
# 0.2 -> 0.3
try:
self.age = state[5]
except IndexError:
self.age = 0
self.__neighbors = None
self.lattice = None
self.env = None
def reality_check(self):
assert len(self.neighbors)
for neighbor in self.neighbors:
assert self in neighbor.neighbors, "%s not in %s" % (str(self), str(neighbor.neighbors))
def __repr__(self):
return "(%d,%d)" % self.xy
@property
def neighbors(self):
if self.__neighbors == None:
self.__neighbors = self.lattice.get_neighbors(self.xy)
return self.__neighbors
#@property
#def attached_neighbors(self):
# return [cell for cell in self.neighbors if cell.attached]
#@property
#def boundary(self):
# return (not self.attached) and any([cell.attached for cell in self.neighbors])
def update_boundary(self):
self.boundary = (not self.attached) and any([cell.attached for cell in self.neighbors])
def step_one(self):
self.update_boundary()
if self.boundary:
self.attached_neighbors = [cell for cell in self.neighbors if cell.attached]
self._next_dm = self.diffusion_calc()
def step_two(self):
self.diffusive_mass = self._next_dm
self.attachment_flag = self.attached
self.freezing_step()
self.attachment_flag = self.attachment_step()
self.melting_step()
def step_three(self):
if self.boundary and self.attachment_flag:
self.attach()
self.noise_step()
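# diffusion_calc below is the discrete diffusion step: the next diffusive mass
# is the mean of this cell's mass and its neighbors', where attached (frozen)
# neighbors act as reflecting boundaries by contributing this cell's own mass
# instead of their own.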
def diffusion_calc(self):
next_dm = self.diffusive_mass | if cell.attached:
next_dm += self.diffusive_mass
else:
next_dm += cell.diffusive_mass
return float(next_dm) / (len(self.neighbors) + 1)
def attach(self, offset=0.0):
self.crystal_mass = self.boundary_mass + self.crystal_mass + offset
self.boundary_mass = 0
self.attached = True
def freezing_step(self):
if not self.boundary:
return
self.boundary_mass += (1 - self.env.kappa) * self.diffusive_mass
self.crystal_mass += (self.env.kappa * self.diffusive_mass)
self.diffusive_mass = 0
def attachment_step(self):
if not self.boundary:
return False
attach_count = len(self.attached_neighbors)
if attach_count <= 2:
if self.boundary_mass > self.env.beta:
return True
elif attach_count == 3:
if self.boundary_mass >= 1:
return True
else:
summed_diffusion = self.diffusive_mass
for cell in self.neighbors:
summed_diffusion += cell.diffusive_mass
if summed_diffusion < self.env.theta and self.boundary_mass >= self.env.alpha:
return True
elif attach_count >= 4:
return True
return False
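# The branches above mirror the rules quoted in CrystalEnvironment._init_defaults:
# 1-2 attached neighbors -> attach only when boundary_mass > beta (rule 3a);
# exactly 3 -> attach when boundary_mass >= 1, or when local diffusive mass is
# below theta and boundary_mass >= alpha (rule 3b); 4 or more -> always attach.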
def melting_step(self):
if not self.boundary:
return
self.diffusive_mass += self.env.mu * self.boundary_mass + self.env.upsilon * self.crystal_mass
self.boundary_mass = (1 - self.env.mu) * self.boundary_mass
self.crystal_mass = (1 - self.env.upsilon) * self.crystal_mass
def noise_step(self):
if (self.boundary or self.attached):
return
if random.random() >= .5:
self.diffusive_mass = (1 - self.env.sigma) * self.diffusive_mass
else:
self.diffusive_mass = (1 + self.env.sigma) * self.diffusive_mass
# def check_basecut() moved to render.py
# 2018-0212
# def merge_svg() moved to render.py
# 2018-0212
# def potrace() moved to render.py
# 2018-0212
# laser cutter pipeline moved to render.py
# 2018-0212
# 3d pipeline moved to render.py
# 2018-0212
# SNOWFLAKE_DEFAULTS moved to snowflake.py
# 2018-0212
# def run() moved to runner.py
# 2018-0212 | if self.attached:
return next_dm
self.age += 1
for cell in self.neighbors: | random_line_split |
engine.py | #!/usr/bin/env pypy
import random
import math
import argparse
import cPickle as pickle
import logging
import os
import sys
import re
import colorsys
import bisect
import operator
from xml.dom.minidom import parse
InkscapePath = "/Applications/Inkscape.app/Contents/Resources/bin/inkscape"
try:
import Image
import ImageDraw
except ImportError:
from PIL import Image
from PIL import ImageDraw
# local
from sfgen import *
sys.modules["curves"] = curves
def avg_stdev(data):
avg = sum(data) / float(len(data))
stdev = math.sqrt(sum((x - avg) ** 2 for x in data) / float(len(data)))
return (avg, stdev)
class CrystalEnvironment(dict):
def __init__(self, curves=None, **kw):
self.curves = curves
self._init_defaults()
self.update(**kw)
self.set_factory_settings()
def set_factory_settings(self):
self.factory_settings = self.copy()
def __getattr__(self, name):
if name not in self:
raise AttributeError("no such thing brah: %s" % name)
return self[name]
def __getnewargs__(self):
return ()
def __getstate__(self):
return (self.curves, self.factory_settings, dict(self))
def __setstate__(self, state):
if type(state) == dict:
self.update(state)
self.curves = None
self.set_factory_settings()
else:
self.curves = state[0]
self.factory_settings = state[1]
self.update(state[2])
def step(self, x):
if self.curves == None:
return
for key in self.curves:
self[key] = self.curves[key][x]
@classmethod
def build_env(cls, name, steps, min_gamma=0.45, max_gamma=0.85):
curves = {
"beta": (1.3, 2),
"theta": (0.01, 0.04),
"alpha": (0.02, 0.1),
"kappa": (0.001, 0.01),
"mu": (0.01, 0.1),
"upilson": (0.00001, 0.0001),
"sigma": (0.00001, 0.000001),
}
cs = CurveSet(name, steps, curves)
cs.run_graph()
env = {key: cs[key][0] for key in curves}
env["gamma"] = random.random() * (max_gamma - min_gamma) + min_gamma
return CrystalEnvironment(curves=cs, **env)
def get_default(self, key):
return self.factory_settings[key]
def randomize(self):
for key in self:
if key == "sigma":
continue
if key == "gamma":
self[key] += 1.0 / random.randint(100, 1000)
else:
self[key] += random.choice([1.0, -1.0]) / random.randint(100, 1000)
self.set_factory_settings()
def _init_defaults(self):
# (3a)
# "A boundary site with 1 or 2 attached neighbors needs boundary mass at least beta to join the crystal
# This is the case when the local mesoscopic geometry near x corresponds to a tip or flat spot of the crystal.
# (Distinguishing the two cases turns out to be of minor significance.) In our simulations, beta is typically
# between about 1.05 and 3. We assume beta > 1 since 1 is the basic threshold of the case to follow next.
self["beta"] = 1.3
# (3b)
# "A boundary site with 3 attached neighbors joins the crystal if either it has boundary mass >= 1,
# or it has diffusive mass < theta in its neighborhood and it has boundary mass >= alpha"
self["theta"] = 0.025
self["alpha"] = 0.08
# (2)
# "Proportion kappa of the diffusive mass at each boundary site crystallizes.
# The remainder (proportion 1 - kappa) becomes boundary mass."
self["kappa"] = 0.003
# (4)
# "Proportion mu of the boundary mass and proportion upsilon of the crystal mass at each boundary site become diffusive mass.
# Melting represents mass flow at the boundary from ice and quasi-liquid back to vapor, reverse
# effects from the freezing of step ii. Typically mu is small and upsilon extremely small."
self["mu"] = 0.07
self["upsilon"] = 0.00005
# (5)
# "The diffusive mass at each site undergoes an independent random perturbation of proportion sigma"
self["sigma"] = 0.00001
# initial diffusion
self["gamma"] = 0.5
def _init_special(self):
pass
# class RenderMovie(object) moved to movie.py
# 2018-0212
# class LatticeReplay(object) moved to movie.py
# 2018-0212
class CrystalLattice(object):
LogHeader = ["dm", "cm", "bm", "acnt", "bcnt", "width", "beta", "theta", "alpha", "kappa", "mu", "upsilon"]
def __init__(self, size, environment=None, celltype=None, max_steps=0, margin=None, curves=None, datalog=False, debug=False):
self.size = size
if environment == None:
environment = CrystalEnvironment()
self.environment = environment
self.datalog = None
self.celllog = None
if datalog:
self.datalog = []
self.celllog = []
if celltype == None:
celltype = SnowflakeCell
self.debug = debug
self.celltype = celltype
self.iteration = 1
assert margin > 0 and margin <= 1.0
self.margin = margin
self.curves = curves
self.max_steps = max_steps
self._init_cells()
def __setstate__(self, state):
# 0.1->0.2 format changes
if "radius" in state:
state["size"] = state["radius"]
del state["radius"]
if "angle" in state:
del state["angle"]
self.__dict__.update(state)
def save_lattice(self, fn):
msg = "Saving %s..." % fn
log(msg)
f = open(fn, 'wb')
pickle.dump(self, f, protocol=-1)
@classmethod
def load_lattice(cls, fn):
msg = "Loading %s..." % fn
log(msg)
f = open(fn, 'rb')
obj = pickle.load(f)
for cell in obj.cells:
cell.lattice = obj
cell.env = obj.environment
cell.update_boundary()
return obj
def get_neighbors(self, xy):
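        # Six axial offsets give the neighbours of a hexagonal lattice stored
        # in a square array; sites outside the grid are filtered out, so edge
        # cells simply have fewer neighbours.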
(x, y) = xy
nlist = [(x, y + 1), (x, y - 1), (x - 1, y), (x + 1, y), (x - 1, y - 1), (x + 1, y + 1)]
nlist = map(self._cell_index, filter(self._xy_ok, nlist))
res = tuple([self.cells[nidx] for nidx in nlist if self.cells[nidx] != None])
return res
def reality_check(self):
for cell in self.cells:
cell.reality_check()
def _init_cells(self):
self.cells = [None] * (self.size * self.size)
for x in range(self.size):
for y in range(self.size):
xy = (x, y)
cell = self.celltype(xy, self)
idx = self._cell_index(xy)
self.cells[idx] = cell
self.reality_check()
center_pt = self._cell_index((self.size / 2, self.size / 2))
self.cells[center_pt].attach(1)
# fun experiments
#self.cells[center_pt+4].attach(1)
#self.cells[center_pt-4].attach(1)
def _xy_ok(self, xy):
(x, y) = xy
return (x >= 0 and x < self.size and y >= 0 and y < self.size)
def _cell_index(self, xy):
(x, y) = xy
return int(round(y * self.size + x))
def _cell_xy(self, idx):
y = idx / self.size
x = idx % self.size
return (x, y)
def adjust_humidity(self, val):
val = abs(val)
for cell in self.cells:
if cell.attached or cell.boundary:
continue
cell.diffusive_mass += val * self.environment.sigma
# only mutate the cells outside our margin
#if self.xy_to_polar(cell.xy)[1] > (self.size * self.margin):
# we use the same coef as the noise coef
#cell.diffusive_mass += val * self.environment.sigma
def log_status(self):
if self.datalog == None:
return
row = []
#row.append(self.iteration)
dm = [cell.diffusive_mass for cell in self.cells if cell]
row.append(sum(dm))
cm = [cell.crystal_mass for cell in self.cells if cell]
row.append(sum(cm))
bm = [cell.boundary_mass for cell in self.cells if cell]
row.append(sum(bm))
acnt = len([cell for cell in self.cells if cell and cell.attached])
row.append(acnt)
bcnt = len([cell for cell in self.cells if cell and cell.boundary])
row.append(bcnt)
d = self.snowflake_radius()
row.append(d)
row.append(self.environment.beta)
row.append(self.environment.theta)
row.append(self.environment.alpha)
row.append(self.environment.kappa)
row.append(self.environment.mu)
row.append(self.environment.upsilon)
#row.append(self.environment.sigma)
#row.append(self.environment.gamma)
self.datalog.append(row)
# log the cells
self.celllog.append((self.iteration, dm, cm))
def write_log(self):
self.write_datalog()
self.write_celllog()
def write_datalog(self):
if self.datalog == None:
return
logfn = "datalog.csv"
msg = "Saving runtime data to %s" % logfn
log(msg)
f = open(logfn, 'w')
txt = ''
txt += str.join(',', self.LogHeader) + '\n'
for row in self.datalog:
txt += str.join(',', map(str, row)) + '\n'
f.write(txt)
def write_celllog(self):
if not self.celllog:
return
logfn = "cell_log_%d.pickle" % self.iteration
f = open(logfn, 'wb')
pickle.dump(self.celllog, f, protocol=-1)
self.celllog = []
def print_status(self):
dm = sum([cell.diffusive_mass for cell in self.cells if cell])
cm = sum([cell.crystal_mass for cell in self.cells if cell])
bm = sum([cell.boundary_mass for cell in self.cells if cell])
acnt = len([cell for cell in self.cells if cell and cell.attached])
bcnt = len([cell for cell in self.cells if cell and cell.boundary])
#msg = "Step #%d, %d attached, %d boundary, %.2f dM, %.2f bM, %.2f cM, tot %.2f M" % (self.iteration, acnt, bcnt, dm, bm, cm, dm + cm + bm)
d = self.snowflake_radius()
msg = "Step #%d/%dp (%.2f%% scl), %d/%d (%.2f%%), %.2f dM, %.2f bM, %.2f cM, tot %.2f M" % (self.iteration, d, (float(d * 2 * X_SCALE_FACTOR) / self.iteration) * 100, acnt, bcnt, (float(bcnt) / acnt) * 100, dm, bm, cm, dm + cm + bm)
log(msg)
def step(self):
self.log_status()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_one()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_two()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_three()
# run curves
self.iteration += 1
self.environment.step(self.iteration)
def translate_xy(self, xy):
(x, y) = xy
x = int(round(x * X_SCALE_FACTOR))
return (x, y)
def polar_to_xy(self, args):
(angle, distance) = args
half = self.size / 2.0
angle = math.radians(angle)
y = int(round(half - (math.sin(angle) * distance)))
x = int(round(half + (math.cos(angle) * distance)))
return (x, y)
def xy_to_polar(self, args):
(x, y) = args
half = self.size / 2.0
x -= half
y += half
angle = math.degrees(math.atan2(y, x))
distance = math.hypot(x, y)
return (angle, distance)
def snowflake_radius(self, angle=135):
        # we cast a ray on the 135 degree axis
radius = 0
half = self.size / 2.0
while radius < half:
radius += 1
xy = self.polar_to_xy((angle, radius))
cell = self.cells[self._cell_index(xy)]
if cell.attached or cell.boundary:
continue
return radius
# uhh
return int(round(half))
def crop_snowflake(self, margin=None):
def scale(val):
return int(round(X_SCALE_FACTOR * val))
if margin == None:
margin = 15
half = self.size / 2
radius = scale(self.snowflake_radius())
distance = min(radius + margin, half)
half_s = scale(half)
distance_s = scale(distance)
box = (half_s - distance, half - distance, half_s + distance, half + distance)
return box
def headroom(self, margin=None):
if self.max_steps and self.iteration >= self.max_steps:
return False
if margin == None:
margin = self.margin
assert margin > 0 and margin <= 1
cutoff = int(round(margin * (self.size / 2.0)))
radius = self.snowflake_radius()
if radius > cutoff:
return False
return True
def grow(self):
while True:
if self.debug:
self.print_status()
self.step()
if self.iteration % 50 == 0:
self.write_celllog()
if not self.debug:
self.print_status()
if not self.headroom():
break
if self.debug:
self.print_status()
def save_image(self, fn, **kw):
import sfgen
r = sfgen.RenderSnowflake(self)
r.save_image(fn, **kw)
class SnowflakeCell(object):
def __init__(self, xy, lattice):
self.xy = xy
self.lattice = lattice
self.env = lattice.environment
self.diffusive_mass = self.env.gamma
self.boundary_mass = 0.0
self.crystal_mass = 0.0
self.attached = False
self.age = 0
self.boundary = 0
self.attached_neighbors = []
self.__neighbors = None
def __getstate__(self):
return (self.xy, self.diffusive_mass, self.boundary_mass, self.crystal_mass, self.attached, self.age)
def __setstate__(self, state):
self.xy = state[0]
self.diffusive_mass = state[1]
self.boundary_mass = state[2]
self.crystal_mass = state[3]
self.attached = state[4]
# 0.2 -> 0.3
try:
self.age = state[5]
except IndexError:
self.age = 0
self.__neighbors = None
self.lattice = None
self.env = None
def reality_check(self):
assert len(self.neighbors)
for neighbor in self.neighbors:
assert self in neighbor.neighbors, "%s not in %s" % (str(self), str(neighbor.neighbors))
def __repr__(self):
return "(%d,%d)" % self.xy
@property
def neighbors(self):
if self.__neighbors == None:
|
return self.__neighbors
#@property
#def attached_neighbors(self):
# return [cell for cell in self.neighbors if cell.attached]
#@property
#def boundary(self):
# return (not self.attached) and any([cell.attached for cell in self.neighbors])
def update_boundary(self):
self.boundary = (not self.attached) and any([cell.attached for cell in self.neighbors])
def step_one(self):
self.update_boundary()
if self.boundary:
self.attached_neighbors = [cell for cell in self.neighbors if cell.attached]
self._next_dm = self.diffusion_calc()
def step_two(self):
self.diffusive_mass = self._next_dm
self.attachment_flag = self.attached
self.freezing_step()
self.attachment_flag = self.attachment_step()
self.melting_step()
def step_three(self):
if self.boundary and self.attachment_flag:
self.attach()
self.noise_step()
def diffusion_calc(self):
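        # Diffusion rule: the new diffusive mass is the average of this cell
        # and its neighbours; attached neighbours contribute the cell's own
        # value back (a reflecting boundary), so vapour does not diffuse into
        # the crystal.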
next_dm = self.diffusive_mass
if self.attached:
return next_dm
self.age += 1
for cell in self.neighbors:
if cell.attached:
next_dm += self.diffusive_mass
else:
next_dm += cell.diffusive_mass
return float(next_dm) / (len(self.neighbors) + 1)
def attach(self, offset=0.0):
self.crystal_mass = self.boundary_mass + self.crystal_mass + offset
self.boundary_mass = 0
self.attached = True
def freezing_step(self):
if not self.boundary:
return
self.boundary_mass += (1 - self.env.kappa) * self.diffusive_mass
self.crystal_mass += (self.env.kappa * self.diffusive_mass)
self.diffusive_mass = 0
def attachment_step(self):
if not self.boundary:
return False
attach_count = len(self.attached_neighbors)
if attach_count <= 2:
if self.boundary_mass > self.env.beta:
return True
elif attach_count == 3:
if self.boundary_mass >= 1:
return True
else:
summed_diffusion = self.diffusive_mass
for cell in self.neighbors:
summed_diffusion += cell.diffusive_mass
if summed_diffusion < self.env.theta and self.boundary_mass >= self.env.alpha:
return True
elif attach_count >= 4:
return True
return False
def melting_step(self):
if not self.boundary:
return
self.diffusive_mass += self.env.mu * self.boundary_mass + self.env.upsilon * self.crystal_mass
self.boundary_mass = (1 - self.env.mu) * self.boundary_mass
self.crystal_mass = (1 - self.env.upsilon) * self.crystal_mass
def noise_step(self):
if (self.boundary or self.attached):
return
if random.random() >= .5:
self.diffusive_mass = (1 - self.env.sigma) * self.diffusive_mass
else:
self.diffusive_mass = (1 + self.env.sigma) * self.diffusive_mass
# def check_basecut() moved to render.py
# 2018-0212
# def merge_svg() moved to render.py
# 2018-0212
# def potrace() moved to render.py
# 2018-0212
# laser cutter pipeline moved to render.py
# 2018-0212
# 3d pipeline moved to render.py
# 2018-0212
# SNOWFLAKE_DEFAULTS moved to snowflake.py
# 2018-0212
# def run() moved to runner.py
# 2018-0212
| self.__neighbors = self.lattice.get_neighbors(self.xy) | conditional_block |
engine.py | #!/usr/bin/env pypy
import random
import math
import argparse
import cPickle as pickle
import logging
import os
import sys
import re
import colorsys
import bisect
import operator
from xml.dom.minidom import parse
InkscapePath = "/Applications/Inkscape.app/Contents/Resources/bin/inkscape"
try:
import Image
import ImageDraw
except ImportError:
from PIL import Image
from PIL import ImageDraw
# local
from sfgen import *
sys.modules["curves"] = curves
def avg_stdev(data):
avg = sum(data) / float(len(data))
stdev = math.sqrt(sum((x - avg) ** 2 for x in data) / float(len(data)))
return (avg, stdev)
class CrystalEnvironment(dict):
def __init__(self, curves=None, **kw):
self.curves = curves
self._init_defaults()
self.update(**kw)
self.set_factory_settings()
def set_factory_settings(self):
self.factory_settings = self.copy()
def __getattr__(self, name):
if name not in self:
            raise AttributeError("no such thing brah: %s" % name)
return self[name]
def __getnewargs__(self):
return ()
def __getstate__(self):
return (self.curves, self.factory_settings, dict(self))
def __setstate__(self, state):
|
def step(self, x):
if self.curves == None:
return
for key in self.curves:
self[key] = self.curves[key][x]
@classmethod
def build_env(self, name, steps, min_gamma=0.45, max_gamma=0.85):
curves = {
"beta": (1.3, 2),
"theta": (0.01, 0.04),
"alpha": (0.02, 0.1),
"kappa": (0.001, 0.01),
"mu": (0.01, 0.1),
"upilson": (0.00001, 0.0001),
"sigma": (0.00001, 0.000001),
}
cs = CurveSet(name, steps, curves)
cs.run_graph()
env = {key: cs[key][0] for key in curves}
env["gamma"] = random.random() * (max_gamma - min_gamma) + min_gamma
return CrystalEnvironment(curves=cs, **env)
def get_default(self, key):
return self.factory_settings[key]
def randomize(self):
for key in self:
if key == "sigma":
continue
if key == "gamma":
self[key] += 1.0 / random.randint(100, 1000)
else:
self[key] += random.choice([1.0, -1.0]) / random.randint(100, 1000)
self.set_factory_settings()
def _init_defaults(self):
# (3a)
# "A boundary site with 1 or 2 attached neighbors needs boundary mass at least beta to join the crystal
# This is the case when the local mesoscopic geometry near x corresponds to a tip or flat spot of the crystal.
# (Distinguishing the two cases turns out to be of minor significance.) In our simulations, beta is typically
# between about 1.05 and 3. We assume beta > 1 since 1 is the basic threshold of the case to follow next.
self["beta"] = 1.3
# (3b)
# "A boundary site with 3 attached neighbors joins the crystal if either it has boundary mass >= 1,
# or it has diffusive mass < theta in its neighborhood and it has boundary mass >= alpha"
self["theta"] = 0.025
self["alpha"] = 0.08
# (2)
# "Proportion kappa of the diffusive mass at each boundary site crystallizes.
# The remainder (proportion 1 - kappa) becomes boundary mass."
self["kappa"] = 0.003
# (4)
# "Proportion mu of the boundary mass and proportion upsilon of the crystal mass at each boundary site become diffusive mass.
# Melting represents mass flow at the boundary from ice and quasi-liquid back to vapor, reverse
# effects from the freezing of step ii. Typically mu is small and upsilon extremely small."
self["mu"] = 0.07
self["upsilon"] = 0.00005
# (5)
# "The diffusive mass at each site undergoes an independent random perturbation of proportion sigma"
self["sigma"] = 0.00001
# initial diffusion
self["gamma"] = 0.5
def _init_special(self):
pass
# class RenderMovie(object) moved to movie.py
# 2018-0212
# class LatticeReplay(object) moved to movie.py
# 2018-0212
class CrystalLattice(object):
LogHeader = ["dm", "cm", "bm", "acnt", "bcnt", "width", "beta", "theta", "alpha", "kappa", "mu", "upsilon"]
def __init__(self, size, environment=None, celltype=None, max_steps=0, margin=None, curves=None, datalog=False, debug=False):
self.size = size
if environment == None:
environment = CrystalEnvironment()
self.environment = environment
self.datalog = None
self.celllog = None
if datalog:
self.datalog = []
self.celllog = []
if celltype == None:
celltype = SnowflakeCell
self.debug = debug
self.celltype = celltype
self.iteration = 1
assert margin > 0 and margin <= 1.0
self.margin = margin
self.curves = curves
self.max_steps = max_steps
self._init_cells()
def __setstate__(self, state):
# 0.1->0.2 format changes
if "radius" in state:
state["size"] = state["radius"]
del state["radius"]
if "angle" in state:
del state["angle"]
self.__dict__.update(state)
def save_lattice(self, fn):
msg = "Saving %s..." % fn
log(msg)
f = open(fn, 'wb')
pickle.dump(self, f, protocol=-1)
@classmethod
def load_lattice(cls, fn):
msg = "Loading %s..." % fn
log(msg)
f = open(fn, 'rb')
obj = pickle.load(f)
for cell in obj.cells:
cell.lattice = obj
cell.env = obj.environment
cell.update_boundary()
return obj
def get_neighbors(self, xy):
(x, y) = xy
nlist = [(x, y + 1), (x, y - 1), (x - 1, y), (x + 1, y), (x - 1, y - 1), (x + 1, y + 1)]
nlist = map(self._cell_index, filter(self._xy_ok, nlist))
res = tuple([self.cells[nidx] for nidx in nlist if self.cells[nidx] != None])
return res
def reality_check(self):
for cell in self.cells:
cell.reality_check()
def _init_cells(self):
self.cells = [None] * (self.size * self.size)
for x in range(self.size):
for y in range(self.size):
xy = (x, y)
cell = self.celltype(xy, self)
idx = self._cell_index(xy)
self.cells[idx] = cell
self.reality_check()
center_pt = self._cell_index((self.size / 2, self.size / 2))
self.cells[center_pt].attach(1)
# fun experiments
#self.cells[center_pt+4].attach(1)
#self.cells[center_pt-4].attach(1)
def _xy_ok(self, xy):
(x, y) = xy
return (x >= 0 and x < self.size and y >= 0 and y < self.size)
def _cell_index(self, xy):
(x, y) = xy
return int(round(y * self.size + x))
def _cell_xy(self, idx):
y = idx / self.size
x = idx % self.size
return (x, y)
def adjust_humidity(self, val):
val = abs(val)
for cell in self.cells:
if cell.attached or cell.boundary:
continue
cell.diffusive_mass += val * self.environment.sigma
# only mutate the cells outside our margin
#if self.xy_to_polar(cell.xy)[1] > (self.size * self.margin):
# we use the same coef as the noise coef
#cell.diffusive_mass += val * self.environment.sigma
def log_status(self):
if self.datalog == None:
return
row = []
#row.append(self.iteration)
dm = [cell.diffusive_mass for cell in self.cells if cell]
row.append(sum(dm))
cm = [cell.crystal_mass for cell in self.cells if cell]
row.append(sum(cm))
bm = [cell.boundary_mass for cell in self.cells if cell]
row.append(sum(bm))
acnt = len([cell for cell in self.cells if cell and cell.attached])
row.append(acnt)
bcnt = len([cell for cell in self.cells if cell and cell.boundary])
row.append(bcnt)
d = self.snowflake_radius()
row.append(d)
row.append(self.environment.beta)
row.append(self.environment.theta)
row.append(self.environment.alpha)
row.append(self.environment.kappa)
row.append(self.environment.mu)
row.append(self.environment.upsilon)
#row.append(self.environment.sigma)
#row.append(self.environment.gamma)
self.datalog.append(row)
# log the cells
self.celllog.append((self.iteration, dm, cm))
def write_log(self):
self.write_datalog()
self.write_celllog()
def write_datalog(self):
if self.datalog == None:
return
logfn = "datalog.csv"
msg = "Saving runtime data to %s" % logfn
log(msg)
f = open(logfn, 'w')
txt = ''
txt += str.join(',', self.LogHeader) + '\n'
for row in self.datalog:
txt += str.join(',', map(str, row)) + '\n'
f.write(txt)
def write_celllog(self):
if not self.celllog:
return
logfn = "cell_log_%d.pickle" % self.iteration
f = open(logfn, 'wb')
pickle.dump(self.celllog, f, protocol=-1)
self.celllog = []
def print_status(self):
dm = sum([cell.diffusive_mass for cell in self.cells if cell])
cm = sum([cell.crystal_mass for cell in self.cells if cell])
bm = sum([cell.boundary_mass for cell in self.cells if cell])
acnt = len([cell for cell in self.cells if cell and cell.attached])
bcnt = len([cell for cell in self.cells if cell and cell.boundary])
#msg = "Step #%d, %d attached, %d boundary, %.2f dM, %.2f bM, %.2f cM, tot %.2f M" % (self.iteration, acnt, bcnt, dm, bm, cm, dm + cm + bm)
d = self.snowflake_radius()
msg = "Step #%d/%dp (%.2f%% scl), %d/%d (%.2f%%), %.2f dM, %.2f bM, %.2f cM, tot %.2f M" % (self.iteration, d, (float(d * 2 * X_SCALE_FACTOR) / self.iteration) * 100, acnt, bcnt, (float(bcnt) / acnt) * 100, dm, bm, cm, dm + cm + bm)
log(msg)
def step(self):
self.log_status()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_one()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_two()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_three()
# run curves
self.iteration += 1
self.environment.step(self.iteration)
def translate_xy(self, xy):
(x, y) = xy
x = int(round(x * X_SCALE_FACTOR))
return (x, y)
def polar_to_xy(self, args):
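        # Convert (angle in degrees, distance from the centre) into array
        # coordinates; y grows downwards, hence the subtraction of the sine
        # term.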
(angle, distance) = args
half = self.size / 2.0
angle = math.radians(angle)
y = int(round(half - (math.sin(angle) * distance)))
x = int(round(half + (math.cos(angle) * distance)))
return (x, y)
def xy_to_polar(self, args):
(x, y) = args
half = self.size / 2.0
x -= half
y += half
angle = math.degrees(math.atan2(y, x))
distance = math.hypot(x, y)
return (angle, distance)
def snowflake_radius(self, angle=135):
        # we cast a ray on the 135 degree axis
radius = 0
half = self.size / 2.0
while radius < half:
radius += 1
xy = self.polar_to_xy((angle, radius))
cell = self.cells[self._cell_index(xy)]
if cell.attached or cell.boundary:
continue
return radius
# uhh
return int(round(half))
def crop_snowflake(self, margin=None):
def scale(val):
return int(round(X_SCALE_FACTOR * val))
if margin == None:
margin = 15
half = self.size / 2
radius = scale(self.snowflake_radius())
distance = min(radius + margin, half)
half_s = scale(half)
distance_s = scale(distance)
box = (half_s - distance, half - distance, half_s + distance, half + distance)
return box
def headroom(self, margin=None):
if self.max_steps and self.iteration >= self.max_steps:
return False
if margin == None:
margin = self.margin
assert margin > 0 and margin <= 1
cutoff = int(round(margin * (self.size / 2.0)))
radius = self.snowflake_radius()
if radius > cutoff:
return False
return True
def grow(self):
while True:
if self.debug:
self.print_status()
self.step()
if self.iteration % 50 == 0:
self.write_celllog()
if not self.debug:
self.print_status()
if not self.headroom():
break
if self.debug:
self.print_status()
def save_image(self, fn, **kw):
import sfgen
r = sfgen.RenderSnowflake(self)
r.save_image(fn, **kw)
class SnowflakeCell(object):
def __init__(self, xy, lattice):
self.xy = xy
self.lattice = lattice
self.env = lattice.environment
self.diffusive_mass = self.env.gamma
self.boundary_mass = 0.0
self.crystal_mass = 0.0
self.attached = False
self.age = 0
self.boundary = 0
self.attached_neighbors = []
self.__neighbors = None
def __getstate__(self):
return (self.xy, self.diffusive_mass, self.boundary_mass, self.crystal_mass, self.attached, self.age)
def __setstate__(self, state):
self.xy = state[0]
self.diffusive_mass = state[1]
self.boundary_mass = state[2]
self.crystal_mass = state[3]
self.attached = state[4]
# 0.2 -> 0.3
try:
self.age = state[5]
except IndexError:
self.age = 0
self.__neighbors = None
self.lattice = None
self.env = None
def reality_check(self):
assert len(self.neighbors)
for neighbor in self.neighbors:
assert self in neighbor.neighbors, "%s not in %s" % (str(self), str(neighbor.neighbors))
def __repr__(self):
return "(%d,%d)" % self.xy
@property
def neighbors(self):
if self.__neighbors == None:
self.__neighbors = self.lattice.get_neighbors(self.xy)
return self.__neighbors
#@property
#def attached_neighbors(self):
# return [cell for cell in self.neighbors if cell.attached]
#@property
#def boundary(self):
# return (not self.attached) and any([cell.attached for cell in self.neighbors])
def update_boundary(self):
self.boundary = (not self.attached) and any([cell.attached for cell in self.neighbors])
def step_one(self):
self.update_boundary()
if self.boundary:
self.attached_neighbors = [cell for cell in self.neighbors if cell.attached]
self._next_dm = self.diffusion_calc()
def step_two(self):
self.diffusive_mass = self._next_dm
self.attachment_flag = self.attached
self.freezing_step()
self.attachment_flag = self.attachment_step()
self.melting_step()
def step_three(self):
if self.boundary and self.attachment_flag:
self.attach()
self.noise_step()
def diffusion_calc(self):
next_dm = self.diffusive_mass
if self.attached:
return next_dm
self.age += 1
for cell in self.neighbors:
if cell.attached:
next_dm += self.diffusive_mass
else:
next_dm += cell.diffusive_mass
return float(next_dm) / (len(self.neighbors) + 1)
def attach(self, offset=0.0):
self.crystal_mass = self.boundary_mass + self.crystal_mass + offset
self.boundary_mass = 0
self.attached = True
def freezing_step(self):
if not self.boundary:
return
self.boundary_mass += (1 - self.env.kappa) * self.diffusive_mass
self.crystal_mass += (self.env.kappa * self.diffusive_mass)
self.diffusive_mass = 0
def attachment_step(self):
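        # Attachment rules: with 1-2 attached neighbours (a tip or flat spot)
        # the site needs boundary mass above beta; with 3 it attaches if its
        # boundary mass is >= 1, or if the local diffusive mass (cell plus
        # neighbours) is below theta and its boundary mass is >= alpha; with
        # 4 or more attached neighbours it always attaches.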
if not self.boundary:
return False
attach_count = len(self.attached_neighbors)
if attach_count <= 2:
if self.boundary_mass > self.env.beta:
return True
elif attach_count == 3:
if self.boundary_mass >= 1:
return True
else:
summed_diffusion = self.diffusive_mass
for cell in self.neighbors:
summed_diffusion += cell.diffusive_mass
if summed_diffusion < self.env.theta and self.boundary_mass >= self.env.alpha:
return True
elif attach_count >= 4:
return True
return False
def melting_step(self):
if not self.boundary:
return
self.diffusive_mass += self.env.mu * self.boundary_mass + self.env.upsilon * self.crystal_mass
self.boundary_mass = (1 - self.env.mu) * self.boundary_mass
self.crystal_mass = (1 - self.env.upsilon) * self.crystal_mass
def noise_step(self):
if (self.boundary or self.attached):
return
if random.random() >= .5:
self.diffusive_mass = (1 - self.env.sigma) * self.diffusive_mass
else:
self.diffusive_mass = (1 + self.env.sigma) * self.diffusive_mass
# def check_basecut() moved to render.py
# 2018-0212
# def merge_svg() moved to render.py
# 2018-0212
# def potrace() moved to render.py
# 2018-0212
# laser cutter pipeline moved to render.py
# 2018-0212
# 3d pipeline moved to render.py
# 2018-0212
# SNOWFLAKE_DEFAULTS moved to snowflake.py
# 2018-0212
# def run() moved to runner.py
# 2018-0212
| if type(state) == dict:
self.update(state)
self.curves = None
self.set_factory_settings()
else:
self.curves = state[0]
self.factory_settings = state[1]
self.update(state[2]) | identifier_body |
engine.py | #!/usr/bin/env pypy
import random
import math
import argparse
import cPickle as pickle
import logging
import os
import sys
import re
import colorsys
import bisect
import operator
from xml.dom.minidom import parse
InkscapePath = "/Applications/Inkscape.app/Contents/Resources/bin/inkscape"
try:
import Image
import ImageDraw
except ImportError:
from PIL import Image
from PIL import ImageDraw
# local
from sfgen import *
sys.modules["curves"] = curves
def avg_stdev(data):
avg = sum(data) / float(len(data))
stdev = math.sqrt(sum((x - avg) ** 2 for x in data) / float(len(data)))
return (avg, stdev)
class CrystalEnvironment(dict):
def __init__(self, curves=None, **kw):
self.curves = curves
self._init_defaults()
self.update(**kw)
self.set_factory_settings()
def set_factory_settings(self):
self.factory_settings = self.copy()
def __getattr__(self, name):
if name not in self:
            raise AttributeError("no such thing brah: %s" % name)
return self[name]
def __getnewargs__(self):
return ()
def __getstate__(self):
return (self.curves, self.factory_settings, dict(self))
def __setstate__(self, state):
if type(state) == dict:
self.update(state)
self.curves = None
self.set_factory_settings()
else:
self.curves = state[0]
self.factory_settings = state[1]
self.update(state[2])
def step(self, x):
if self.curves == None:
return
for key in self.curves:
self[key] = self.curves[key][x]
@classmethod
def build_env(self, name, steps, min_gamma=0.45, max_gamma=0.85):
curves = {
"beta": (1.3, 2),
"theta": (0.01, 0.04),
"alpha": (0.02, 0.1),
"kappa": (0.001, 0.01),
"mu": (0.01, 0.1),
"upilson": (0.00001, 0.0001),
"sigma": (0.00001, 0.000001),
}
cs = CurveSet(name, steps, curves)
cs.run_graph()
env = {key: cs[key][0] for key in curves}
env["gamma"] = random.random() * (max_gamma - min_gamma) + min_gamma
return CrystalEnvironment(curves=cs, **env)
def get_default(self, key):
return self.factory_settings[key]
def randomize(self):
for key in self:
if key == "sigma":
continue
if key == "gamma":
self[key] += 1.0 / random.randint(100, 1000)
else:
self[key] += random.choice([1.0, -1.0]) / random.randint(100, 1000)
self.set_factory_settings()
def _init_defaults(self):
# (3a)
# "A boundary site with 1 or 2 attached neighbors needs boundary mass at least beta to join the crystal
# This is the case when the local mesoscopic geometry near x corresponds to a tip or flat spot of the crystal.
# (Distinguishing the two cases turns out to be of minor significance.) In our simulations, beta is typically
# between about 1.05 and 3. We assume beta > 1 since 1 is the basic threshold of the case to follow next.
self["beta"] = 1.3
# (3b)
# "A boundary site with 3 attached neighbors joins the crystal if either it has boundary mass >= 1,
# or it has diffusive mass < theta in its neighborhood and it has boundary mass >= alpha"
self["theta"] = 0.025
self["alpha"] = 0.08
# (2)
# "Proportion kappa of the diffusive mass at each boundary site crystallizes.
# The remainder (proportion 1 - kappa) becomes boundary mass."
self["kappa"] = 0.003
# (4)
# "Proportion mu of the boundary mass and proportion upsilon of the crystal mass at each boundary site become diffusive mass.
# Melting represents mass flow at the boundary from ice and quasi-liquid back to vapor, reverse
# effects from the freezing of step ii. Typically mu is small and upsilon extremely small."
self["mu"] = 0.07
self["upsilon"] = 0.00005
# (5)
# "The diffusive mass at each site undergoes an independent random perturbation of proportion sigma"
self["sigma"] = 0.00001
# initial diffusion
self["gamma"] = 0.5
def _init_special(self):
pass
# class RenderMovie(object) moved to movie.py
# 2018-0212
# class LatticeReplay(object) moved to movie.py
# 2018-0212
class CrystalLattice(object):
LogHeader = ["dm", "cm", "bm", "acnt", "bcnt", "width", "beta", "theta", "alpha", "kappa", "mu", "upsilon"]
def __init__(self, size, environment=None, celltype=None, max_steps=0, margin=None, curves=None, datalog=False, debug=False):
self.size = size
if environment == None:
environment = CrystalEnvironment()
self.environment = environment
self.datalog = None
self.celllog = None
if datalog:
self.datalog = []
self.celllog = []
if celltype == None:
celltype = SnowflakeCell
self.debug = debug
self.celltype = celltype
self.iteration = 1
assert margin > 0 and margin <= 1.0
self.margin = margin
self.curves = curves
self.max_steps = max_steps
self._init_cells()
def __setstate__(self, state):
# 0.1->0.2 format changes
if "radius" in state:
state["size"] = state["radius"]
del state["radius"]
if "angle" in state:
del state["angle"]
self.__dict__.update(state)
def save_lattice(self, fn):
msg = "Saving %s..." % fn
log(msg)
f = open(fn, 'wb')
pickle.dump(self, f, protocol=-1)
@classmethod
def load_lattice(cls, fn):
msg = "Loading %s..." % fn
log(msg)
f = open(fn, 'rb')
obj = pickle.load(f)
for cell in obj.cells:
cell.lattice = obj
cell.env = obj.environment
cell.update_boundary()
return obj
def get_neighbors(self, xy):
(x, y) = xy
nlist = [(x, y + 1), (x, y - 1), (x - 1, y), (x + 1, y), (x - 1, y - 1), (x + 1, y + 1)]
nlist = map(self._cell_index, filter(self._xy_ok, nlist))
res = tuple([self.cells[nidx] for nidx in nlist if self.cells[nidx] != None])
return res
def reality_check(self):
for cell in self.cells:
cell.reality_check()
def _init_cells(self):
self.cells = [None] * (self.size * self.size)
for x in range(self.size):
for y in range(self.size):
xy = (x, y)
cell = self.celltype(xy, self)
idx = self._cell_index(xy)
self.cells[idx] = cell
self.reality_check()
center_pt = self._cell_index((self.size / 2, self.size / 2))
self.cells[center_pt].attach(1)
# fun experiments
#self.cells[center_pt+4].attach(1)
#self.cells[center_pt-4].attach(1)
def _xy_ok(self, xy):
(x, y) = xy
return (x >= 0 and x < self.size and y >= 0 and y < self.size)
def _cell_index(self, xy):
(x, y) = xy
return int(round(y * self.size + x))
def _cell_xy(self, idx):
y = idx / self.size
x = idx % self.size
return (x, y)
def adjust_humidity(self, val):
val = abs(val)
for cell in self.cells:
if cell.attached or cell.boundary:
continue
cell.diffusive_mass += val * self.environment.sigma
# only mutate the cells outside our margin
#if self.xy_to_polar(cell.xy)[1] > (self.size * self.margin):
# we use the same coef as the noise coef
#cell.diffusive_mass += val * self.environment.sigma
def log_status(self):
if self.datalog == None:
return
row = []
#row.append(self.iteration)
dm = [cell.diffusive_mass for cell in self.cells if cell]
row.append(sum(dm))
cm = [cell.crystal_mass for cell in self.cells if cell]
row.append(sum(cm))
bm = [cell.boundary_mass for cell in self.cells if cell]
row.append(sum(bm))
acnt = len([cell for cell in self.cells if cell and cell.attached])
row.append(acnt)
bcnt = len([cell for cell in self.cells if cell and cell.boundary])
row.append(bcnt)
d = self.snowflake_radius()
row.append(d)
row.append(self.environment.beta)
row.append(self.environment.theta)
row.append(self.environment.alpha)
row.append(self.environment.kappa)
row.append(self.environment.mu)
row.append(self.environment.upsilon)
#row.append(self.environment.sigma)
#row.append(self.environment.gamma)
self.datalog.append(row)
# log the cells
self.celllog.append((self.iteration, dm, cm))
def write_log(self):
self.write_datalog()
self.write_celllog()
def write_datalog(self):
if self.datalog == None:
return
logfn = "datalog.csv"
msg = "Saving runtime data to %s" % logfn
log(msg)
f = open(logfn, 'w')
txt = ''
txt += str.join(',', self.LogHeader) + '\n'
for row in self.datalog:
txt += str.join(',', map(str, row)) + '\n'
f.write(txt)
def write_celllog(self):
if not self.celllog:
return
logfn = "cell_log_%d.pickle" % self.iteration
f = open(logfn, 'wb')
pickle.dump(self.celllog, f, protocol=-1)
self.celllog = []
def print_status(self):
dm = sum([cell.diffusive_mass for cell in self.cells if cell])
cm = sum([cell.crystal_mass for cell in self.cells if cell])
bm = sum([cell.boundary_mass for cell in self.cells if cell])
acnt = len([cell for cell in self.cells if cell and cell.attached])
bcnt = len([cell for cell in self.cells if cell and cell.boundary])
#msg = "Step #%d, %d attached, %d boundary, %.2f dM, %.2f bM, %.2f cM, tot %.2f M" % (self.iteration, acnt, bcnt, dm, bm, cm, dm + cm + bm)
d = self.snowflake_radius()
msg = "Step #%d/%dp (%.2f%% scl), %d/%d (%.2f%%), %.2f dM, %.2f bM, %.2f cM, tot %.2f M" % (self.iteration, d, (float(d * 2 * X_SCALE_FACTOR) / self.iteration) * 100, acnt, bcnt, (float(bcnt) / acnt) * 100, dm, bm, cm, dm + cm + bm)
log(msg)
def step(self):
self.log_status()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_one()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_two()
for cell in self.cells:
if cell == None or cell.attached:
continue
cell.step_three()
# run curves
self.iteration += 1
self.environment.step(self.iteration)
def translate_xy(self, xy):
(x, y) = xy
x = int(round(x * X_SCALE_FACTOR))
return (x, y)
def polar_to_xy(self, args):
(angle, distance) = args
half = self.size / 2.0
angle = math.radians(angle)
y = int(round(half - (math.sin(angle) * distance)))
x = int(round(half + (math.cos(angle) * distance)))
return (x, y)
def xy_to_polar(self, args):
(x, y) = args
half = self.size / 2.0
x -= half
y += half
angle = math.degrees(math.atan2(y, x))
distance = math.hypot(x, y)
return (angle, distance)
def snowflake_radius(self, angle=135):
        # we cast a ray on the 135 degree axis
radius = 0
half = self.size / 2.0
while radius < half:
radius += 1
xy = self.polar_to_xy((angle, radius))
cell = self.cells[self._cell_index(xy)]
if cell.attached or cell.boundary:
continue
return radius
# uhh
return int(round(half))
def crop_snowflake(self, margin=None):
def scale(val):
return int(round(X_SCALE_FACTOR * val))
if margin == None:
margin = 15
half = self.size / 2
radius = scale(self.snowflake_radius())
distance = min(radius + margin, half)
half_s = scale(half)
distance_s = scale(distance)
box = (half_s - distance, half - distance, half_s + distance, half + distance)
return box
def headroom(self, margin=None):
if self.max_steps and self.iteration >= self.max_steps:
return False
if margin == None:
margin = self.margin
assert margin > 0 and margin <= 1
cutoff = int(round(margin * (self.size / 2.0)))
radius = self.snowflake_radius()
if radius > cutoff:
return False
return True
def grow(self):
while True:
if self.debug:
self.print_status()
self.step()
if self.iteration % 50 == 0:
self.write_celllog()
if not self.debug:
self.print_status()
if not self.headroom():
break
if self.debug:
self.print_status()
def save_image(self, fn, **kw):
import sfgen
r = sfgen.RenderSnowflake(self)
r.save_image(fn, **kw)
class SnowflakeCell(object):
def __init__(self, xy, lattice):
self.xy = xy
self.lattice = lattice
self.env = lattice.environment
self.diffusive_mass = self.env.gamma
self.boundary_mass = 0.0
self.crystal_mass = 0.0
self.attached = False
self.age = 0
self.boundary = 0
self.attached_neighbors = []
self.__neighbors = None
def __getstate__(self):
return (self.xy, self.diffusive_mass, self.boundary_mass, self.crystal_mass, self.attached, self.age)
def __setstate__(self, state):
self.xy = state[0]
self.diffusive_mass = state[1]
self.boundary_mass = state[2]
self.crystal_mass = state[3]
self.attached = state[4]
# 0.2 -> 0.3
try:
self.age = state[5]
except IndexError:
self.age = 0
self.__neighbors = None
self.lattice = None
self.env = None
def reality_check(self):
assert len(self.neighbors)
for neighbor in self.neighbors:
assert self in neighbor.neighbors, "%s not in %s" % (str(self), str(neighbor.neighbors))
def __repr__(self):
return "(%d,%d)" % self.xy
@property
def neighbors(self):
if self.__neighbors == None:
self.__neighbors = self.lattice.get_neighbors(self.xy)
return self.__neighbors
#@property
#def attached_neighbors(self):
# return [cell for cell in self.neighbors if cell.attached]
#@property
#def boundary(self):
# return (not self.attached) and any([cell.attached for cell in self.neighbors])
def update_boundary(self):
self.boundary = (not self.attached) and any([cell.attached for cell in self.neighbors])
def step_one(self):
self.update_boundary()
if self.boundary:
self.attached_neighbors = [cell for cell in self.neighbors if cell.attached]
self._next_dm = self.diffusion_calc()
def step_two(self):
self.diffusive_mass = self._next_dm
self.attachment_flag = self.attached
self.freezing_step()
self.attachment_flag = self.attachment_step()
self.melting_step()
def step_three(self):
if self.boundary and self.attachment_flag:
self.attach()
self.noise_step()
def diffusion_calc(self):
next_dm = self.diffusive_mass
if self.attached:
return next_dm
self.age += 1
for cell in self.neighbors:
if cell.attached:
next_dm += self.diffusive_mass
else:
next_dm += cell.diffusive_mass
return float(next_dm) / (len(self.neighbors) + 1)
def attach(self, offset=0.0):
self.crystal_mass = self.boundary_mass + self.crystal_mass + offset
self.boundary_mass = 0
self.attached = True
def freezing_step(self):
if not self.boundary:
return
self.boundary_mass += (1 - self.env.kappa) * self.diffusive_mass
self.crystal_mass += (self.env.kappa * self.diffusive_mass)
self.diffusive_mass = 0
def | (self):
if not self.boundary:
return False
attach_count = len(self.attached_neighbors)
if attach_count <= 2:
if self.boundary_mass > self.env.beta:
return True
elif attach_count == 3:
if self.boundary_mass >= 1:
return True
else:
summed_diffusion = self.diffusive_mass
for cell in self.neighbors:
summed_diffusion += cell.diffusive_mass
if summed_diffusion < self.env.theta and self.boundary_mass >= self.env.alpha:
return True
elif attach_count >= 4:
return True
return False
def melting_step(self):
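        # Melting rule: a proportion mu of the boundary mass and upsilon of
        # the crystal mass at each boundary site return to diffusive mass.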
if not self.boundary:
return
self.diffusive_mass += self.env.mu * self.boundary_mass + self.env.upsilon * self.crystal_mass
self.boundary_mass = (1 - self.env.mu) * self.boundary_mass
self.crystal_mass = (1 - self.env.upsilon) * self.crystal_mass
def noise_step(self):
if (self.boundary or self.attached):
return
if random.random() >= .5:
self.diffusive_mass = (1 - self.env.sigma) * self.diffusive_mass
else:
self.diffusive_mass = (1 + self.env.sigma) * self.diffusive_mass
# def check_basecut() moved to render.py
# 2018-0212
# def merge_svg() moved to render.py
# 2018-0212
# def potrace() moved to render.py
# 2018-0212
# laser cutter pipeline moved to render.py
# 2018-0212
# 3d pipeline moved to render.py
# 2018-0212
# SNOWFLAKE_DEFAULTS moved to snowflake.py
# 2018-0212
# def run() moved to runner.py
# 2018-0212
| attachment_step | identifier_name |
rcparser.py | __author__="Adam Walker"
import sys, os, shlex
import win32con
import commctrl
_controlMap = {"DEFPUSHBUTTON":0x80,
"PUSHBUTTON":0x80,
"Button":0x80,
"GROUPBOX":0x80,
"Static":0x82,
"CTEXT":0x82,
"RTEXT":0x82,
"LTEXT":0x82,
"LISTBOX":0x83,
"SCROLLBAR":0x84,
"COMBOBOX":0x85,
"EDITTEXT":0x81,
}
_addDefaults = {"EDITTEXT":win32con.WS_BORDER,
"GROUPBOX":win32con.BS_GROUPBOX,
"LTEXT":win32con.SS_LEFT,
"DEFPUSHBUTTON":win32con.BS_DEFPUSHBUTTON,
"CTEXT":win32con.SS_CENTER,
"RTEXT":win32con.SS_RIGHT}
defaultControlStyle = win32con.WS_CHILD | win32con.WS_VISIBLE
class DialogDef:
name = ""
id = 0
style = 0
styleEx = None
caption = ""
font = "MS Sans Serif"
fontSize = 8
x = 0
y = 0
w = 0
h = 0
template = None
def __init__(self, n, i):
self.name = n
self.id = i
self.styles = []
self.stylesEx = []
self.controls = []
def createDialogTemplate(self):
t = None
self.template = [[self.caption, (self.x,self.y,self.w,self.h), self.style, self.styleEx, (self.fontSize, self.font)]]
for control in self.controls:
self.template.append(control.createDialogTemplate())
return self.template
class ControlDef:
id = ""
controlType = ""
subType = ""
idNum = 0
style = defaultControlStyle
label = ""
x = 0
y = 0
w = 0
h = 0
def __init__(self):
self.styles = []
def toString(self):
s = "<Control id:"+self.id+" controlType:"+self.controlType+" subType:"+self.subType\
+" idNum:"+str(self.idNum)+" style:"+str(self.style)+" styles:"+str(self.styles)+" label:"+self.label\
+" x:"+str(self.x)+" y:"+str(self.y)+" w:"+str(self.w)+" h:"+str(self.h)+">"
return s
def createDialogTemplate(self):
ct = self.controlType
if "CONTROL"==ct:
ct = self.subType
if ct in _addDefaults:
self.style |= _addDefaults[ct]
if ct in _controlMap:
ct = _controlMap[ct]
t = [ct, self.label, self.idNum, (self.x, self.y, self.w, self.h), self.style]
return t
class gt_str(str):
"""Change a string to a gettext version of itself."""
def __repr__(self):
if len(self) > 0:
return "_(" + super(gt_str, self).__repr__() + ")"
else:
return super(gt_str, self).__repr__()
class RCParser:
next_id = 1001
dialogs = {}
_dialogs = {}
debugEnabled = False
token = ""
def __init__(self):
self.ids = {"IDOK":1, "IDCANCEL":2, "IDC_STATIC": -1}
self.names = {1:"IDOK", 2:"IDCANCEL", -1:"IDC_STATIC"}
self.bitmaps = {}
self.gettexted = False
def debug(self, *args):
if self.debugEnabled:
print(args)
def | (self):
self.token = self.lex.get_token()
self.debug("getToken returns:", self.token)
if self.token=="":
self.token = None
return self.token
def getCommaToken(self):
tok = self.getToken()
assert tok == ",", "Token '%s' should be a comma!" % tok
def loadDialogs(self, rcFileName):
"""
RCParser.loadDialogs(rcFileName) -> None
        Load the dialog information into the parser. Dialog definitions can then be accessed
using the "dialogs" dictionary member (name->DialogDef). The "ids" member contains the dictionary of id->name.
The "names" member contains the dictionary of name->id
"""
hFileName = rcFileName[:-2]+"h"
if not os.path.exists(hFileName):
hFileName = os.path.join(os.path.dirname(__file__),
os.path.basename(hFileName))
try:
h = open(hFileName, "rU")
self.parseH(h)
h.close()
except IOError:
print("No .h file. ignoring.")
f = open(rcFileName)
self.open(f)
self.getToken()
while self.token!=None:
self.parse()
self.getToken()
f.close()
def open(self, file):
self.lex = shlex.shlex(file)
self.lex.commenters = "//#"
def parseH(self, file):
lex = shlex.shlex(file)
lex.commenters = "//"
token = " "
while token is not None:
token = lex.get_token()
if token == "" or token is None:
token = None
else:
if token=='define':
n = lex.get_token()
i = int(lex.get_token())
self.ids[n] = i
if i in self.names:
if not n.startswith("_APS_"):
print("Duplicate id",i,"for",n,"is", self.names[i])
else:
self.names[i] = n
if self.next_id<=i:
self.next_id = i+1
def parse(self):
deep = 0
if self.token == None:
            return  # no more tokens
elif "BEGIN" == self.token:
deep = 1
while deep!=0:
self.getToken()
if "BEGIN" == self.token:
deep += 1
elif "END" == self.token:
deep -= 1
elif "IDD_" == self.token[:4]:
possibleDlgName = self.token
self.getToken()
if "DIALOG" == self.token or "DIALOGEX" == self.token:
self.dialog(possibleDlgName)
elif "IDB_" == self.token[:4]:
possibleBitmap = self.token
self.getToken()
if "BITMAP" == self.token:
self.getToken()
if self.token=="MOVEABLE":
self.getToken() # PURE
self.getToken() # bmpname
bmf = self.token[1:-1] # quotes
self.bitmaps[possibleBitmap] = bmf
print("BITMAP", possibleBitmap, bmf)
def addId(self, id_name):
if id_name in self.ids:
id = self.ids[id_name]
else:
id = self.next_id
self.next_id += 1
self.ids[id_name] = id
self.names[id] = id_name
return id
def lang(self):
while self.token[0:4]=="LANG" or self.token[0:7]=="SUBLANG" or self.token==',':
self.getToken();
def dialog(self, name):
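        # Parse one DIALOG/DIALOGEX statement: skip any extra qualifiers,
        # read the x, y, width and height fields, then handle the optional
        # STYLE, EXSTYLE, CAPTION and FONT lines before the BEGIN/END control
        # block.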
dlg = DialogDef(name,self.addId(name))
assert len(dlg.controls)==0
self._dialogs[name] = dlg
extras = []
self.getToken()
while not self.token.isdigit():
self.debug("extra", self.token)
extras.append(self.token)
self.getToken()
dlg.x = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.y = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.w = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.h = int(self.token)
self.getToken()
while not (self.token==None or self.token=="" or self.token=="END"):
if self.token=="STYLE":
self.dialogStyle(dlg)
elif self.token=="EXSTYLE":
self.dialogExStyle(dlg)
elif self.token=="CAPTION":
self.dialogCaption(dlg)
elif self.token=="FONT":
self.dialogFont(dlg)
elif self.token=="BEGIN":
self.controls(dlg)
else:
break
self.dialogs[name] = dlg.createDialogTemplate()
def dialogStyle(self, dlg):
dlg.style, dlg.styles = self.styles( [], win32con.WS_VISIBLE | win32con.DS_SETFONT)
def dialogExStyle(self, dlg):
self.getToken()
dlg.styleEx, dlg.stylesEx = self.styles( [], 0)
def styles(self, defaults, defaultStyle):
list = defaults
style = defaultStyle
if "STYLE"==self.token:
self.getToken()
i = 0
Not = False
while ((i%2==1 and ("|"==self.token or "NOT"==self.token)) or (i%2==0)) and not self.token==None:
Not = False;
if "NOT"==self.token:
Not = True
self.getToken()
i += 1
if self.token!="|":
if self.token in win32con.__dict__:
value = getattr(win32con,self.token)
else:
if self.token in commctrl.__dict__:
value = getattr(commctrl,self.token)
else:
value = 0
if Not:
list.append("NOT "+self.token)
self.debug("styles add Not",self.token, value)
style &= ~value
else:
list.append(self.token)
self.debug("styles add", self.token, value)
style |= value
self.getToken()
self.debug("style is ",style)
return style, list
def dialogCaption(self, dlg):
if "CAPTION"==self.token:
self.getToken()
self.token = self.token[1:-1]
self.debug("Caption is:",self.token)
if self.gettexted:
dlg.caption = gt_str(self.token)
else:
dlg.caption = self.token
self.getToken()
def dialogFont(self, dlg):
if "FONT"==self.token:
self.getToken()
dlg.fontSize = int(self.token)
self.getCommaToken()
self.getToken() # Font name
dlg.font = self.token[1:-1] # it's quoted
self.getToken()
while "BEGIN"!=self.token:
self.getToken()
def controls(self, dlg):
if self.token=="BEGIN": self.getToken()
while self.token!="END":
control = ControlDef()
control.controlType = self.token;
self.getToken()
if self.token[0:1]=='"':
if self.gettexted:
control.label = gt_str(self.token[1:-1])
else:
control.label = self.token[1:-1]
self.getCommaToken()
self.getToken()
elif self.token.isdigit():
control.label = self.token
self.getCommaToken()
self.getToken()
if self.token=='-':
if self.getToken() != '1':
raise RuntimeError("Negative literal in rc script (other than -1) - don't know what to do")
self.token = "IDC_STATIC"
control.id = self.token
control.idNum = self.addId(control.id)
self.getCommaToken()
if control.controlType == "CONTROL":
self.getToken()
control.subType = self.token[1:-1]
self.getCommaToken()
self.getToken()
control.style, control.styles = self.styles([], defaultControlStyle)
control.x = int(self.getToken())
self.getCommaToken()
control.y = int(self.getToken())
self.getCommaToken()
control.w = int(self.getToken())
self.getCommaToken()
self.getToken()
control.h = int(self.token)
self.getToken()
if self.token==",":
self.getToken()
control.style, control.styles = self.styles([], defaultControlStyle)
dlg.controls.append(control)
def ParseDialogs(rc_file, gettexted=False):
rcp = RCParser()
rcp.gettexted = gettexted
try:
rcp.loadDialogs(rc_file)
except:
lex = getattr(rcp, "lex", None)
if lex:
print("ERROR parsing dialogs at line", lex.lineno)
print("Next 10 tokens are:")
for i in range(10):
print(lex.get_token(), end=' ')
print()
raise
return rcp
if __name__=='__main__':
rc_file = os.path.join(os.path.dirname(__file__), "dialogs.rc")
d = ParseDialogs(rc_file)
import pprint
for id, ddef in list(d.dialogs.items()):
print("Dialog %s (%d controls)" % (id, len(ddef)))
pprint.pprint(ddef)
print()
| getToken | identifier_name |
rcparser.py | __author__="Adam Walker"
import sys, os, shlex
import win32con
import commctrl
_controlMap = {"DEFPUSHBUTTON":0x80,
"PUSHBUTTON":0x80,
"Button":0x80,
"GROUPBOX":0x80,
"Static":0x82,
"CTEXT":0x82,
"RTEXT":0x82,
"LTEXT":0x82,
"LISTBOX":0x83,
"SCROLLBAR":0x84,
"COMBOBOX":0x85,
"EDITTEXT":0x81,
}
_addDefaults = {"EDITTEXT":win32con.WS_BORDER,
"GROUPBOX":win32con.BS_GROUPBOX,
"LTEXT":win32con.SS_LEFT,
"DEFPUSHBUTTON":win32con.BS_DEFPUSHBUTTON,
"CTEXT":win32con.SS_CENTER,
"RTEXT":win32con.SS_RIGHT}
defaultControlStyle = win32con.WS_CHILD | win32con.WS_VISIBLE
class DialogDef:
name = ""
id = 0
style = 0
styleEx = None
caption = ""
font = "MS Sans Serif"
fontSize = 8
x = 0
y = 0
w = 0
h = 0
template = None
def __init__(self, n, i):
self.name = n
self.id = i
self.styles = []
self.stylesEx = []
self.controls = []
def createDialogTemplate(self):
t = None
self.template = [[self.caption, (self.x,self.y,self.w,self.h), self.style, self.styleEx, (self.fontSize, self.font)]]
for control in self.controls:
self.template.append(control.createDialogTemplate())
return self.template
class ControlDef:
id = ""
controlType = ""
subType = ""
idNum = 0
style = defaultControlStyle
label = ""
x = 0
y = 0
w = 0
h = 0
def __init__(self):
self.styles = []
def toString(self):
s = "<Control id:"+self.id+" controlType:"+self.controlType+" subType:"+self.subType\
+" idNum:"+str(self.idNum)+" style:"+str(self.style)+" styles:"+str(self.styles)+" label:"+self.label\
+" x:"+str(self.x)+" y:"+str(self.y)+" w:"+str(self.w)+" h:"+str(self.h)+">"
return s
def createDialogTemplate(self):
ct = self.controlType
if "CONTROL"==ct:
ct = self.subType
if ct in _addDefaults:
self.style |= _addDefaults[ct]
if ct in _controlMap:
ct = _controlMap[ct]
t = [ct, self.label, self.idNum, (self.x, self.y, self.w, self.h), self.style]
return t
class gt_str(str):
"""Change a string to a gettext version of itself."""
def __repr__(self):
if len(self) > 0:
return "_(" + super(gt_str, self).__repr__() + ")"
else:
return super(gt_str, self).__repr__()
class RCParser:
next_id = 1001
dialogs = {}
_dialogs = {}
debugEnabled = False
token = ""
def __init__(self):
self.ids = {"IDOK":1, "IDCANCEL":2, "IDC_STATIC": -1}
self.names = {1:"IDOK", 2:"IDCANCEL", -1:"IDC_STATIC"}
self.bitmaps = {}
self.gettexted = False
def debug(self, *args):
if self.debugEnabled:
print(args)
def getToken(self):
self.token = self.lex.get_token()
self.debug("getToken returns:", self.token)
if self.token=="":
self.token = None
return self.token
def getCommaToken(self):
tok = self.getToken()
assert tok == ",", "Token '%s' should be a comma!" % tok
def loadDialogs(self, rcFileName):
"""
RCParser.loadDialogs(rcFileName) -> None
        Load the dialog information into the parser. Dialog definitions can then be accessed
using the "dialogs" dictionary member (name->DialogDef). The "ids" member contains the dictionary of id->name.
The "names" member contains the dictionary of name->id
"""
hFileName = rcFileName[:-2]+"h"
if not os.path.exists(hFileName):
hFileName = os.path.join(os.path.dirname(__file__),
os.path.basename(hFileName))
try:
h = open(hFileName, "rU")
self.parseH(h)
h.close()
except IOError:
print("No .h file. ignoring.")
f = open(rcFileName)
self.open(f)
self.getToken()
while self.token!=None:
self.parse()
self.getToken()
f.close()
def open(self, file):
self.lex = shlex.shlex(file)
self.lex.commenters = "//#"
def parseH(self, file):
lex = shlex.shlex(file)
lex.commenters = "//"
token = " "
while token is not None:
token = lex.get_token()
if token == "" or token is None:
token = None
else:
if token=='define':
n = lex.get_token()
i = int(lex.get_token())
self.ids[n] = i
if i in self.names:
if not n.startswith("_APS_"):
print("Duplicate id",i,"for",n,"is", self.names[i])
else:
self.names[i] = n
if self.next_id<=i:
self.next_id = i+1
def parse(self):
deep = 0
if self.token == None:
            return  # no more tokens
elif "BEGIN" == self.token:
deep = 1
while deep!=0:
self.getToken()
if "BEGIN" == self.token:
deep += 1
elif "END" == self.token:
deep -= 1
elif "IDD_" == self.token[:4]:
possibleDlgName = self.token
self.getToken()
if "DIALOG" == self.token or "DIALOGEX" == self.token:
self.dialog(possibleDlgName)
elif "IDB_" == self.token[:4]:
possibleBitmap = self.token
self.getToken()
if "BITMAP" == self.token:
self.getToken()
if self.token=="MOVEABLE":
self.getToken() # PURE
self.getToken() # bmpname
bmf = self.token[1:-1] # quotes
self.bitmaps[possibleBitmap] = bmf
print("BITMAP", possibleBitmap, bmf)
def addId(self, id_name):
if id_name in self.ids:
id = self.ids[id_name]
else:
id = self.next_id
self.next_id += 1
self.ids[id_name] = id
self.names[id] = id_name
return id
def lang(self):
while self.token[0:4]=="LANG" or self.token[0:7]=="SUBLANG" or self.token==',':
self.getToken();
def dialog(self, name):
dlg = DialogDef(name,self.addId(name))
assert len(dlg.controls)==0
self._dialogs[name] = dlg
extras = []
self.getToken()
while not self.token.isdigit():
self.debug("extra", self.token)
extras.append(self.token)
self.getToken()
dlg.x = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.y = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.w = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.h = int(self.token)
self.getToken()
while not (self.token==None or self.token=="" or self.token=="END"):
|
self.dialogs[name] = dlg.createDialogTemplate()
def dialogStyle(self, dlg):
dlg.style, dlg.styles = self.styles( [], win32con.WS_VISIBLE | win32con.DS_SETFONT)
def dialogExStyle(self, dlg):
self.getToken()
dlg.styleEx, dlg.stylesEx = self.styles( [], 0)
def styles(self, defaults, defaultStyle):
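        # OR together the style flags named in the script, looking each token
        # up in win32con and then commctrl; a "NOT <FLAG>" prefix clears the
        # bit instead of setting it, and unknown names contribute 0.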
list = defaults
style = defaultStyle
if "STYLE"==self.token:
self.getToken()
i = 0
Not = False
while ((i%2==1 and ("|"==self.token or "NOT"==self.token)) or (i%2==0)) and not self.token==None:
Not = False;
if "NOT"==self.token:
Not = True
self.getToken()
i += 1
if self.token!="|":
if self.token in win32con.__dict__:
value = getattr(win32con,self.token)
else:
if self.token in commctrl.__dict__:
value = getattr(commctrl,self.token)
else:
value = 0
if Not:
list.append("NOT "+self.token)
self.debug("styles add Not",self.token, value)
style &= ~value
else:
list.append(self.token)
self.debug("styles add", self.token, value)
style |= value
self.getToken()
self.debug("style is ",style)
return style, list
def dialogCaption(self, dlg):
if "CAPTION"==self.token:
self.getToken()
self.token = self.token[1:-1]
self.debug("Caption is:",self.token)
if self.gettexted:
dlg.caption = gt_str(self.token)
else:
dlg.caption = self.token
self.getToken()
def dialogFont(self, dlg):
if "FONT"==self.token:
self.getToken()
dlg.fontSize = int(self.token)
self.getCommaToken()
self.getToken() # Font name
dlg.font = self.token[1:-1] # it's quoted
self.getToken()
while "BEGIN"!=self.token:
self.getToken()
def controls(self, dlg):
if self.token=="BEGIN": self.getToken()
while self.token!="END":
control = ControlDef()
control.controlType = self.token;
self.getToken()
if self.token[0:1]=='"':
if self.gettexted:
control.label = gt_str(self.token[1:-1])
else:
control.label = self.token[1:-1]
self.getCommaToken()
self.getToken()
elif self.token.isdigit():
control.label = self.token
self.getCommaToken()
self.getToken()
if self.token=='-':
if self.getToken() != '1':
raise RuntimeError("Negative literal in rc script (other than -1) - don't know what to do")
self.token = "IDC_STATIC"
control.id = self.token
control.idNum = self.addId(control.id)
self.getCommaToken()
if control.controlType == "CONTROL":
self.getToken()
control.subType = self.token[1:-1]
self.getCommaToken()
self.getToken()
control.style, control.styles = self.styles([], defaultControlStyle)
control.x = int(self.getToken())
self.getCommaToken()
control.y = int(self.getToken())
self.getCommaToken()
control.w = int(self.getToken())
self.getCommaToken()
self.getToken()
control.h = int(self.token)
self.getToken()
if self.token==",":
self.getToken()
control.style, control.styles = self.styles([], defaultControlStyle)
dlg.controls.append(control)
def ParseDialogs(rc_file, gettexted=False):
rcp = RCParser()
rcp.gettexted = gettexted
try:
rcp.loadDialogs(rc_file)
except:
lex = getattr(rcp, "lex", None)
if lex:
print("ERROR parsing dialogs at line", lex.lineno)
print("Next 10 tokens are:")
for i in range(10):
print(lex.get_token(), end=' ')
print()
raise
return rcp
if __name__=='__main__':
rc_file = os.path.join(os.path.dirname(__file__), "dialogs.rc")
d = ParseDialogs(rc_file)
import pprint
for id, ddef in list(d.dialogs.items()):
print("Dialog %s (%d controls)" % (id, len(ddef)))
pprint.pprint(ddef)
print()
| if self.token=="STYLE":
self.dialogStyle(dlg)
elif self.token=="EXSTYLE":
self.dialogExStyle(dlg)
elif self.token=="CAPTION":
self.dialogCaption(dlg)
elif self.token=="FONT":
self.dialogFont(dlg)
elif self.token=="BEGIN":
self.controls(dlg)
else:
break | conditional_block |
rcparser.py | __author__="Adam Walker"
import sys, os, shlex
import win32con
import commctrl
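# Maps RC control keywords to the predefined dialog window-class ordinals
# (0x80 Button, 0x81 Edit, 0x82 Static, 0x83 ListBox, 0x84 ScrollBar, 0x85 ComboBox).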
_controlMap = {"DEFPUSHBUTTON":0x80,
"PUSHBUTTON":0x80,
"Button":0x80,
"GROUPBOX":0x80,
"Static":0x82,
"CTEXT":0x82,
"RTEXT":0x82,
"LTEXT":0x82,
"LISTBOX":0x83,
"SCROLLBAR":0x84,
"COMBOBOX":0x85,
"EDITTEXT":0x81,
}
_addDefaults = {"EDITTEXT":win32con.WS_BORDER,
"GROUPBOX":win32con.BS_GROUPBOX,
"LTEXT":win32con.SS_LEFT,
"DEFPUSHBUTTON":win32con.BS_DEFPUSHBUTTON,
"CTEXT":win32con.SS_CENTER,
"RTEXT":win32con.SS_RIGHT}
defaultControlStyle = win32con.WS_CHILD | win32con.WS_VISIBLE
class DialogDef:
name = ""
id = 0
style = 0
styleEx = None
caption = ""
font = "MS Sans Serif"
fontSize = 8
x = 0
y = 0
w = 0
h = 0
template = None
def __init__(self, n, i):
self.name = n
self.id = i
self.styles = []
self.stylesEx = []
self.controls = []
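    # Builds the nested-list dialog template (the list form accepted by win32ui.CreateDialogIndirect):
    # [[caption, (x, y, w, h), style, exstyle, (pointsize, fontname)], control_entry, ...]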
def createDialogTemplate(self):
t = None
self.template = [[self.caption, (self.x,self.y,self.w,self.h), self.style, self.styleEx, (self.fontSize, self.font)]]
for control in self.controls:
self.template.append(control.createDialogTemplate())
return self.template
class ControlDef:
id = ""
controlType = ""
subType = ""
idNum = 0
style = defaultControlStyle
label = ""
x = 0
y = 0
w = 0
h = 0
def __init__(self):
self.styles = []
def toString(self):
s = "<Control id:"+self.id+" controlType:"+self.controlType+" subType:"+self.subType\
+" idNum:"+str(self.idNum)+" style:"+str(self.style)+" styles:"+str(self.styles)+" label:"+self.label\
+" x:"+str(self.x)+" y:"+str(self.y)+" w:"+str(self.w)+" h:"+str(self.h)+">"
return s
def createDialogTemplate(self):
ct = self.controlType
if "CONTROL"==ct:
ct = self.subType
if ct in _addDefaults:
self.style |= _addDefaults[ct]
if ct in _controlMap:
ct = _controlMap[ct]
t = [ct, self.label, self.idNum, (self.x, self.y, self.w, self.h), self.style]
return t
class gt_str(str):
"""Change a string to a gettext version of itself."""
def __repr__(self):
if len(self) > 0:
return "_(" + super(gt_str, self).__repr__() + ")"
else:
return super(gt_str, self).__repr__()
class RCParser:
next_id = 1001
dialogs = {}
_dialogs = {}
debugEnabled = False
token = ""
def __init__(self):
self.ids = {"IDOK":1, "IDCANCEL":2, "IDC_STATIC": -1}
self.names = {1:"IDOK", 2:"IDCANCEL", -1:"IDC_STATIC"}
self.bitmaps = {}
self.gettexted = False
def debug(self, *args):
if self.debugEnabled:
print(args)
def getToken(self):
self.token = self.lex.get_token()
self.debug("getToken returns:", self.token)
if self.token=="":
self.token = None
return self.token
def getCommaToken(self):
tok = self.getToken()
assert tok == ",", "Token '%s' should be a comma!" % tok
def loadDialogs(self, rcFileName):
"""
RCParser.loadDialogs(rcFileName) -> None
        Load the dialog information into the parser. Dialog Definitions can then be accessed
using the "dialogs" dictionary member (name->DialogDef). The "ids" member contains the dictionary of id->name.
The "names" member contains the dictionary of name->id
"""
hFileName = rcFileName[:-2]+"h"
if not os.path.exists(hFileName):
hFileName = os.path.join(os.path.dirname(__file__),
os.path.basename(hFileName))
try:
h = open(hFileName, "rU")
self.parseH(h)
h.close()
except IOError:
print("No .h file. ignoring.")
f = open(rcFileName)
self.open(f)
self.getToken()
while self.token!=None:
self.parse()
self.getToken()
f.close()
def open(self, file):
self.lex = shlex.shlex(file)
self.lex.commenters = "//#"
def parseH(self, file):
lex = shlex.shlex(file)
lex.commenters = "//"
token = " "
while token is not None:
token = lex.get_token()
if token == "" or token is None:
token = None
else:
if token=='define':
n = lex.get_token()
i = int(lex.get_token())
self.ids[n] = i
if i in self.names:
if not n.startswith("_APS_"):
print("Duplicate id",i,"for",n,"is", self.names[i])
else:
self.names[i] = n
if self.next_id<=i:
self.next_id = i+1
def parse(self):
deep = 0
if self.token == None:
            more = None
elif "BEGIN" == self.token:
deep = 1
while deep!=0:
self.getToken()
if "BEGIN" == self.token:
deep += 1
elif "END" == self.token:
deep -= 1
elif "IDD_" == self.token[:4]:
possibleDlgName = self.token
self.getToken()
if "DIALOG" == self.token or "DIALOGEX" == self.token:
self.dialog(possibleDlgName)
elif "IDB_" == self.token[:4]:
possibleBitmap = self.token
self.getToken()
if "BITMAP" == self.token:
self.getToken()
if self.token=="MOVEABLE":
self.getToken() # PURE
self.getToken() # bmpname
bmf = self.token[1:-1] # quotes
self.bitmaps[possibleBitmap] = bmf
print("BITMAP", possibleBitmap, bmf)
def addId(self, id_name):
if id_name in self.ids:
id = self.ids[id_name]
else:
id = self.next_id
self.next_id += 1
self.ids[id_name] = id
self.names[id] = id_name
return id
def lang(self):
while self.token[0:4]=="LANG" or self.token[0:7]=="SUBLANG" or self.token==',':
self.getToken();
def dialog(self, name):
dlg = DialogDef(name,self.addId(name))
assert len(dlg.controls)==0
self._dialogs[name] = dlg
extras = []
self.getToken()
while not self.token.isdigit():
self.debug("extra", self.token)
extras.append(self.token)
self.getToken()
dlg.x = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.y = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.w = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.h = int(self.token)
self.getToken()
while not (self.token==None or self.token=="" or self.token=="END"):
if self.token=="STYLE":
self.dialogStyle(dlg)
elif self.token=="EXSTYLE":
self.dialogExStyle(dlg)
elif self.token=="CAPTION":
self.dialogCaption(dlg)
elif self.token=="FONT":
self.dialogFont(dlg)
elif self.token=="BEGIN":
self.controls(dlg)
else:
break
self.dialogs[name] = dlg.createDialogTemplate()
def dialogStyle(self, dlg):
dlg.style, dlg.styles = self.styles( [], win32con.WS_VISIBLE | win32con.DS_SETFONT)
def dialogExStyle(self, dlg):
self.getToken()
dlg.styleEx, dlg.stylesEx = self.styles( [], 0)
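    # Parses a "STYLE A | NOT B | ..." clause: each named constant is looked up in win32con/commctrl
    # and OR-ed into the running bitmask, or masked out again when prefixed with NOT.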
def styles(self, defaults, defaultStyle):
list = defaults
style = defaultStyle
if "STYLE"==self.token:
self.getToken()
i = 0
Not = False
while ((i%2==1 and ("|"==self.token or "NOT"==self.token)) or (i%2==0)) and not self.token==None:
Not = False;
if "NOT"==self.token:
Not = True
self.getToken()
i += 1
if self.token!="|":
if self.token in win32con.__dict__:
value = getattr(win32con,self.token)
else:
if self.token in commctrl.__dict__:
value = getattr(commctrl,self.token)
else:
value = 0
if Not:
list.append("NOT "+self.token)
self.debug("styles add Not",self.token, value)
style &= ~value
else:
list.append(self.token)
self.debug("styles add", self.token, value)
style |= value
self.getToken()
self.debug("style is ",style)
return style, list
def dialogCaption(self, dlg):
|
def dialogFont(self, dlg):
if "FONT"==self.token:
self.getToken()
dlg.fontSize = int(self.token)
self.getCommaToken()
self.getToken() # Font name
dlg.font = self.token[1:-1] # it's quoted
self.getToken()
while "BEGIN"!=self.token:
self.getToken()
def controls(self, dlg):
if self.token=="BEGIN": self.getToken()
while self.token!="END":
control = ControlDef()
control.controlType = self.token;
self.getToken()
if self.token[0:1]=='"':
if self.gettexted:
control.label = gt_str(self.token[1:-1])
else:
control.label = self.token[1:-1]
self.getCommaToken()
self.getToken()
elif self.token.isdigit():
control.label = self.token
self.getCommaToken()
self.getToken()
if self.token=='-':
if self.getToken() != '1':
raise RuntimeError("Negative literal in rc script (other than -1) - don't know what to do")
self.token = "IDC_STATIC"
control.id = self.token
control.idNum = self.addId(control.id)
self.getCommaToken()
if control.controlType == "CONTROL":
self.getToken()
control.subType = self.token[1:-1]
self.getCommaToken()
self.getToken()
control.style, control.styles = self.styles([], defaultControlStyle)
control.x = int(self.getToken())
self.getCommaToken()
control.y = int(self.getToken())
self.getCommaToken()
control.w = int(self.getToken())
self.getCommaToken()
self.getToken()
control.h = int(self.token)
self.getToken()
if self.token==",":
self.getToken()
control.style, control.styles = self.styles([], defaultControlStyle)
dlg.controls.append(control)
def ParseDialogs(rc_file, gettexted=False):
rcp = RCParser()
rcp.gettexted = gettexted
try:
rcp.loadDialogs(rc_file)
except:
lex = getattr(rcp, "lex", None)
if lex:
print("ERROR parsing dialogs at line", lex.lineno)
print("Next 10 tokens are:")
for i in range(10):
print(lex.get_token(), end=' ')
print()
raise
return rcp
if __name__=='__main__':
rc_file = os.path.join(os.path.dirname(__file__), "dialogs.rc")
d = ParseDialogs(rc_file)
import pprint
for id, ddef in list(d.dialogs.items()):
print("Dialog %s (%d controls)" % (id, len(ddef)))
pprint.pprint(ddef)
print()
| if "CAPTION"==self.token:
self.getToken()
self.token = self.token[1:-1]
self.debug("Caption is:",self.token)
if self.gettexted:
dlg.caption = gt_str(self.token)
else:
dlg.caption = self.token
self.getToken() | identifier_body |
rcparser.py | __author__="Adam Walker"
import sys, os, shlex
import win32con
import commctrl
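# Maps RC control keywords to the predefined dialog window-class ordinals
# (0x80 Button, 0x81 Edit, 0x82 Static, 0x83 ListBox, 0x84 ScrollBar, 0x85 ComboBox).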
_controlMap = {"DEFPUSHBUTTON":0x80,
"PUSHBUTTON":0x80,
"Button":0x80,
"GROUPBOX":0x80,
"Static":0x82,
"CTEXT":0x82,
"RTEXT":0x82,
"LTEXT":0x82,
"LISTBOX":0x83,
"SCROLLBAR":0x84,
"COMBOBOX":0x85,
"EDITTEXT":0x81,
}
_addDefaults = {"EDITTEXT":win32con.WS_BORDER,
"GROUPBOX":win32con.BS_GROUPBOX,
"LTEXT":win32con.SS_LEFT,
"DEFPUSHBUTTON":win32con.BS_DEFPUSHBUTTON,
"CTEXT":win32con.SS_CENTER,
"RTEXT":win32con.SS_RIGHT}
defaultControlStyle = win32con.WS_CHILD | win32con.WS_VISIBLE
class DialogDef:
name = ""
id = 0
style = 0
styleEx = None
caption = ""
font = "MS Sans Serif"
fontSize = 8
x = 0
y = 0
w = 0
h = 0
template = None
def __init__(self, n, i):
self.name = n
self.id = i
self.styles = []
self.stylesEx = []
self.controls = []
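    # Builds the nested-list dialog template (the list form accepted by win32ui.CreateDialogIndirect):
    # [[caption, (x, y, w, h), style, exstyle, (pointsize, fontname)], control_entry, ...]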
def createDialogTemplate(self):
t = None
self.template = [[self.caption, (self.x,self.y,self.w,self.h), self.style, self.styleEx, (self.fontSize, self.font)]]
for control in self.controls:
self.template.append(control.createDialogTemplate())
return self.template
class ControlDef:
id = ""
controlType = ""
subType = "" | label = ""
x = 0
y = 0
w = 0
h = 0
def __init__(self):
self.styles = []
def toString(self):
s = "<Control id:"+self.id+" controlType:"+self.controlType+" subType:"+self.subType\
+" idNum:"+str(self.idNum)+" style:"+str(self.style)+" styles:"+str(self.styles)+" label:"+self.label\
+" x:"+str(self.x)+" y:"+str(self.y)+" w:"+str(self.w)+" h:"+str(self.h)+">"
return s
def createDialogTemplate(self):
ct = self.controlType
if "CONTROL"==ct:
ct = self.subType
if ct in _addDefaults:
self.style |= _addDefaults[ct]
if ct in _controlMap:
ct = _controlMap[ct]
t = [ct, self.label, self.idNum, (self.x, self.y, self.w, self.h), self.style]
return t
class gt_str(str):
"""Change a string to a gettext version of itself."""
def __repr__(self):
if len(self) > 0:
return "_(" + super(gt_str, self).__repr__() + ")"
else:
return super(gt_str, self).__repr__()
class RCParser:
next_id = 1001
dialogs = {}
_dialogs = {}
debugEnabled = False
token = ""
def __init__(self):
self.ids = {"IDOK":1, "IDCANCEL":2, "IDC_STATIC": -1}
self.names = {1:"IDOK", 2:"IDCANCEL", -1:"IDC_STATIC"}
self.bitmaps = {}
self.gettexted = False
def debug(self, *args):
if self.debugEnabled:
print(args)
def getToken(self):
self.token = self.lex.get_token()
self.debug("getToken returns:", self.token)
if self.token=="":
self.token = None
return self.token
def getCommaToken(self):
tok = self.getToken()
assert tok == ",", "Token '%s' should be a comma!" % tok
def loadDialogs(self, rcFileName):
"""
RCParser.loadDialogs(rcFileName) -> None
        Load the dialog information into the parser. Dialog Definitions can then be accessed
using the "dialogs" dictionary member (name->DialogDef). The "ids" member contains the dictionary of id->name.
The "names" member contains the dictionary of name->id
"""
hFileName = rcFileName[:-2]+"h"
if not os.path.exists(hFileName):
hFileName = os.path.join(os.path.dirname(__file__),
os.path.basename(hFileName))
try:
h = open(hFileName, "rU")
self.parseH(h)
h.close()
except IOError:
print("No .h file. ignoring.")
f = open(rcFileName)
self.open(f)
self.getToken()
while self.token!=None:
self.parse()
self.getToken()
f.close()
def open(self, file):
self.lex = shlex.shlex(file)
self.lex.commenters = "//#"
def parseH(self, file):
lex = shlex.shlex(file)
lex.commenters = "//"
token = " "
while token is not None:
token = lex.get_token()
if token == "" or token is None:
token = None
else:
if token=='define':
n = lex.get_token()
i = int(lex.get_token())
self.ids[n] = i
if i in self.names:
if not n.startswith("_APS_"):
print("Duplicate id",i,"for",n,"is", self.names[i])
else:
self.names[i] = n
if self.next_id<=i:
self.next_id = i+1
def parse(self):
deep = 0
if self.token == None:
            more = None
elif "BEGIN" == self.token:
deep = 1
while deep!=0:
self.getToken()
if "BEGIN" == self.token:
deep += 1
elif "END" == self.token:
deep -= 1
elif "IDD_" == self.token[:4]:
possibleDlgName = self.token
self.getToken()
if "DIALOG" == self.token or "DIALOGEX" == self.token:
self.dialog(possibleDlgName)
elif "IDB_" == self.token[:4]:
possibleBitmap = self.token
self.getToken()
if "BITMAP" == self.token:
self.getToken()
if self.token=="MOVEABLE":
self.getToken() # PURE
self.getToken() # bmpname
bmf = self.token[1:-1] # quotes
self.bitmaps[possibleBitmap] = bmf
print("BITMAP", possibleBitmap, bmf)
def addId(self, id_name):
if id_name in self.ids:
id = self.ids[id_name]
else:
id = self.next_id
self.next_id += 1
self.ids[id_name] = id
self.names[id] = id_name
return id
def lang(self):
while self.token[0:4]=="LANG" or self.token[0:7]=="SUBLANG" or self.token==',':
self.getToken();
def dialog(self, name):
dlg = DialogDef(name,self.addId(name))
assert len(dlg.controls)==0
self._dialogs[name] = dlg
extras = []
self.getToken()
while not self.token.isdigit():
self.debug("extra", self.token)
extras.append(self.token)
self.getToken()
dlg.x = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.y = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.w = int(self.token)
self.getCommaToken()
self.getToken() # number
dlg.h = int(self.token)
self.getToken()
while not (self.token==None or self.token=="" or self.token=="END"):
if self.token=="STYLE":
self.dialogStyle(dlg)
elif self.token=="EXSTYLE":
self.dialogExStyle(dlg)
elif self.token=="CAPTION":
self.dialogCaption(dlg)
elif self.token=="FONT":
self.dialogFont(dlg)
elif self.token=="BEGIN":
self.controls(dlg)
else:
break
self.dialogs[name] = dlg.createDialogTemplate()
def dialogStyle(self, dlg):
dlg.style, dlg.styles = self.styles( [], win32con.WS_VISIBLE | win32con.DS_SETFONT)
def dialogExStyle(self, dlg):
self.getToken()
dlg.styleEx, dlg.stylesEx = self.styles( [], 0)
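    # Parses a "STYLE A | NOT B | ..." clause: each named constant is looked up in win32con/commctrl
    # and OR-ed into the running bitmask, or masked out again when prefixed with NOT.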
def styles(self, defaults, defaultStyle):
list = defaults
style = defaultStyle
if "STYLE"==self.token:
self.getToken()
i = 0
Not = False
while ((i%2==1 and ("|"==self.token or "NOT"==self.token)) or (i%2==0)) and not self.token==None:
Not = False;
if "NOT"==self.token:
Not = True
self.getToken()
i += 1
if self.token!="|":
if self.token in win32con.__dict__:
value = getattr(win32con,self.token)
else:
if self.token in commctrl.__dict__:
value = getattr(commctrl,self.token)
else:
value = 0
if Not:
list.append("NOT "+self.token)
self.debug("styles add Not",self.token, value)
style &= ~value
else:
list.append(self.token)
self.debug("styles add", self.token, value)
style |= value
self.getToken()
self.debug("style is ",style)
return style, list
def dialogCaption(self, dlg):
if "CAPTION"==self.token:
self.getToken()
self.token = self.token[1:-1]
self.debug("Caption is:",self.token)
if self.gettexted:
dlg.caption = gt_str(self.token)
else:
dlg.caption = self.token
self.getToken()
def dialogFont(self, dlg):
if "FONT"==self.token:
self.getToken()
dlg.fontSize = int(self.token)
self.getCommaToken()
self.getToken() # Font name
dlg.font = self.token[1:-1] # it's quoted
self.getToken()
while "BEGIN"!=self.token:
self.getToken()
def controls(self, dlg):
if self.token=="BEGIN": self.getToken()
while self.token!="END":
control = ControlDef()
control.controlType = self.token;
self.getToken()
if self.token[0:1]=='"':
if self.gettexted:
control.label = gt_str(self.token[1:-1])
else:
control.label = self.token[1:-1]
self.getCommaToken()
self.getToken()
elif self.token.isdigit():
control.label = self.token
self.getCommaToken()
self.getToken()
if self.token=='-':
if self.getToken() != '1':
raise RuntimeError("Negative literal in rc script (other than -1) - don't know what to do")
self.token = "IDC_STATIC"
control.id = self.token
control.idNum = self.addId(control.id)
self.getCommaToken()
if control.controlType == "CONTROL":
self.getToken()
control.subType = self.token[1:-1]
self.getCommaToken()
self.getToken()
control.style, control.styles = self.styles([], defaultControlStyle)
control.x = int(self.getToken())
self.getCommaToken()
control.y = int(self.getToken())
self.getCommaToken()
control.w = int(self.getToken())
self.getCommaToken()
self.getToken()
control.h = int(self.token)
self.getToken()
if self.token==",":
self.getToken()
control.style, control.styles = self.styles([], defaultControlStyle)
dlg.controls.append(control)
def ParseDialogs(rc_file, gettexted=False):
rcp = RCParser()
rcp.gettexted = gettexted
try:
rcp.loadDialogs(rc_file)
except:
lex = getattr(rcp, "lex", None)
if lex:
print("ERROR parsing dialogs at line", lex.lineno)
print("Next 10 tokens are:")
for i in range(10):
print(lex.get_token(), end=' ')
print()
raise
return rcp
if __name__=='__main__':
rc_file = os.path.join(os.path.dirname(__file__), "dialogs.rc")
d = ParseDialogs(rc_file)
import pprint
for id, ddef in list(d.dialogs.items()):
print("Dialog %s (%d controls)" % (id, len(ddef)))
pprint.pprint(ddef)
print() | idNum = 0
style = defaultControlStyle | random_line_split |
ann_interface.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
2017.1.24---
Added time-stamp handling
2017.1.4----
Handle the audio as raw data
Keep it separate from the annotations
2016.12.12----
Use 20161210_proc3p.json; try mirroring the data
2016.12.9----
Annotation interface for three-person dialogue
Annotations are made by hand
Synchronized the slider bar with the table
2016.10.19----
Interface for annotating interaction data
"""
import sys
import os.path
import math
import json
import time
import copy
import numpy as np
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
import rospy
from visualization_msgs.msg import MarkerArray
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
from std_msgs.msg import ColorRGBA
import data_proc2
from std_msgs.msg import Float64MultiArray
class ANNOTATION(QtGui.QWidget):
def __init__(self):
super(ANNOTATION, self).__init__()
        # Initialize the UI
self.initUI()
        # Initialize the ROS node and publishers
rospy.init_node('annotation_interface2', anonymous=True)
self.mpub = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=10)
#self.ppub = rospy.Publisher('joint_diff', PointStamped, queue_size=10)
self.speakpub = rospy.Publisher('/speaks', Float64MultiArray, queue_size=10)
        # rviz color settings (not finalized)
self.carray = []
clist = [[1, 0, 0, 1], [0, 1, 0, 1], [1, 1, 0, 1], [1, 0.5, 0, 1]]
for c in clist:
color = ColorRGBA()
color.r, color.g, color.b, color.a = c[0], c[1], c[2], c[3]
self.carray.append(color)
# set extra data param
self.dim_x = 72
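        # llist holds joint-index chains; set_vizmsg_line walks each chain to draw the skeleton line segments in rviz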
self.llist = [[0, 1, 10, 2, 3, 11], [10, 4, 5, 6], [10, 7, 8, 9]]
self.input_joints = []
self.input_speaks = []
self.anno_dim = 2
self.r, self.c = 0, 0
def initUI(self):
        # Helpers for creating button objects
def boxBtnObj(name, func, maxlen=30):
box = QtGui.QHBoxLayout()
btn = btnObj(name, func, maxlen=maxlen)
box.addWidget(btn)
return box
def btnObj(name, func, maxlen=30):
btn = QtGui.QPushButton(name)
btn.setMaximumWidth(maxlen)
btn.clicked.connect(func)
return btn
grid = QtGui.QGridLayout()
form = QtGui.QFormLayout()
#frame size
self.frmSizeBox = QtGui.QLineEdit()
self.frmSizeBox.setText('-1')
self.frmSizeBox.setFixedWidth(100)
form.addRow('size', self.frmSizeBox)
        # File input box
self.txtSepFile = QtGui.QLineEdit()
btnSepFile = btnObj("...", self.chooseDbFile, maxlen=40)
btnRawInput = btnObj("raw", self.inputData, maxlen=60)
btnProcedInput = btnObj("proced", self.inputProcedData, maxlen=60)
boxSepFile = QtGui.QHBoxLayout()
boxSepFile.addWidget(self.txtSepFile)
boxSepFile.addWidget(btnSepFile)
boxSepFile.addWidget(btnRawInput)
boxSepFile.addWidget(btnProcedInput)
form.addRow('input', boxSepFile)
        # File output box
self.txtOutputFile = QtGui.QLineEdit()
self.txtOutputFile.setText('test_proc.json')
btnOutputFile = btnObj("save", self.outputData, maxlen=100)
boxOutputFile = QtGui.QHBoxLayout()
boxOutputFile.addWidget(self.txtOutputFile)
boxOutputFile.addWidget(btnOutputFile)
form.addRow('output', boxOutputFile)
#data cut
cutRange = QtGui.QHBoxLayout()
self.cutStart = QtGui.QLineEdit()
self.cutStart.setText('0')
self.cutEnd = QtGui.QLineEdit()
self.cutEnd.setText('0')
btnCutRange = btnObj("exec", self.cutRangeData, maxlen=60)
cutRange.addWidget(self.cutStart)
cutRange.addWidget(self.cutEnd)
cutRange.addWidget(btnCutRange)
form.addRow('trimming', cutRange)
# check range
checkRange = QtGui.QHBoxLayout()
self.checkUser = QtGui.QLineEdit()
self.checkUser.setText('0')
self.checkSt = QtGui.QLineEdit()
self.checkSt.setText('0')
self.checkEd = QtGui.QLineEdit()
self.checkEd.setText('0')
btnRangeCheck = btnObj("check", self.checkRangeData, maxlen=60)
checkRange.addWidget(self.checkUser)
checkRange.addWidget(self.checkSt)
checkRange.addWidget(self.checkEd)
checkRange.addWidget(btnRangeCheck)
form.addRow('U/S/E', checkRange)
# direct
boxDirect = boxBtnObj("d", self.directJoints, maxlen=20)
form.addRow('direct', boxDirect)
# Reset
boxResetAnno = boxBtnObj("e", self.resetAnno, maxlen=20)
form.addRow('reset', boxResetAnno)
# Reverse
boxReverse = boxBtnObj("e", self.reverseData, maxlen=20)
form.addRow('reverse', boxReverse)
# Time Line
boxSld = QtGui.QHBoxLayout()
lcd = QtGui.QLCDNumber(self)
self.sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.sld.valueChanged.connect(lcd.display)
self.sld.valueChanged.connect(self.sliderChange)
boxSld.addWidget(lcd)
boxSld.addWidget(self.sld)
        # Initialize the table
        # (the horizontal axis is user2's time)
self.table = QtGui.QTableWidget(self)
self.table.setColumnCount(0)
#self.table.setHorizontalHeaderLabels("use_2 time")
jItem = QtGui.QTableWidgetItem(str(0))
self.table.setHorizontalHeaderItem(0, jItem)
        # Update the graph when an item is clicked
self.table.itemClicked.connect(self.clickUpdateTable)
#self.table.itemActivated.connect(self.activatedUpdateTable)
self.table.setItem(0, 0, QtGui.QTableWidgetItem(1))
#self.itemSelectionChanged.connect(self.selection_changed)
#self.tableSlider = self.table.verticalScrollBar()
boxTable = QtGui.QHBoxLayout()
boxTable.addWidget(self.table)
        # Layout
grid.addLayout(form,1,0)
grid.addLayout(boxSld,2,0)
grid.addLayout(boxTable,3,0)
self.setLayout(grid)
self.resize(400,100)
self.setWindowTitle("cca window")
self.show()
def chooseDbFile(self):
dialog = QtGui.QFileDialog()
dialog.setFileMode(QtGui.QFileDialog.ExistingFile)
if dialog.exec_():
fileNames = dialog.selectedFiles()
for f in fileNames:
self.txtSepFile.setText(f)
return
return self.txtSepFile.setText('')
# reset
def resetAnno(self):
print "reset Data before:",self.edited_joints.shape, self.edited_annos.shape
self.edited_joints = copy.deepcopy(self.input_joints)
self.edited_speaks = copy.deepcopy(self.input_speaks)
#datalen = self.edited_speaks.shape[1]
self.edited_annos = copy.deepcopy(self.input_annos) #np.zeros((datalen, self.anno_dim)) #(1000, 2)
self.edited_flags = copy.deepcopy(self.input_flags)
self.sld.setMaximum(self.edited_speaks.shape[0]-1) #
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
print "reset Data after:",self.edited_joints.shape, self.edited_speaks.shape, self.edited_annos.shape
def speakNorm(self, data, upper=300, lower=80):
        # Clamp to the upper bound
        c_data=data if data <upper else upper
        # Clamp to the lower bound
        c_data=c_data if c_data>lower else lower
        # Rescale into the 0-1 range
return (c_data-lower)/float(upper-lower)
def convDecibel(self, data):
return 20*math.log10(data)
#decibel
def speakDecibelNorm(self, data):
decibel = self.convDecibel(data)
return self.speakNorm(decibel, upper=70, lower=30)
def updateTable(self, data, anno, times):
#plt.plot(data)
#plt.show()
th = 0#float(self.ThesholdBox.text())
        if(len(data)==0):
            print "No Data! Push exec button..."
            return
d_row, d_col = data.shape #(1000, 2)
        add_ann = anno.shape[1] # number of annotation channels
#print add_ann
add_flag = 1
        # t_row: time axis, t_col: number of data dimensions, add_ann: annotation channels to add
t_col = d_col+add_ann+add_flag
t_row = d_row
#print "t, d, a",t_col,d_col,add_ann
self.table.clear()
font = QtGui.QFont()
font.setFamily(u"DejaVu Sans")
font.setPointSize(5)
self.table.horizontalHeader().setFont(font)
self.table.verticalHeader().setFont(font)
        self.table.setColumnCount(t_row) # table columns = time frames (t_row)
        self.table.setRowCount(t_col) # table rows = data dims + annotations + flag (t_col)
# print "t:", t_row, t_col
#self.table.setRowCount(data.shape[0])
#self.table.setColumnCount(ann_num)
        # Set the axis header values
#for i in range(len(times)):
# jItem = QtGui.QTableWidgetItem(str(i))
# self.table.setHorizontalHeaderItem(i, jItem)
hor = True
for i in range(t_col):
iItem = QtGui.QTableWidgetItem(str(i))
self.table.setVerticalHeaderItem(i, iItem)
self.table.verticalHeaderItem(i).setToolTip(str(i))
            # To show the time stamps on the axis, use the line below instead
#self.table.verticalHeaderItem(i).setToolTip(str(times[i]))
for j in range(t_row):
if hor == True:
jItem = QtGui.QTableWidgetItem(str(j))
self.table.setHorizontalHeaderItem(j, jItem)
self.table.horizontalHeaderItem(j).setToolTip(str(times[j]))
#print "%.10f"%times[j]
                if i < d_col: # visualize the (speak) data
                    # case where the audio data is RMS
                    # set_data: range is 0-1
                    set_data = data[j][i] #self.speakDecibelNorm(data[j][i])
                    # crude ON/OFF thresholding
                    #set_data = 1 if set_data > 0.3 else 0
                    #self.edited_speaks[j][i] = set_data
                    # temporary offset
                    #set_data = data_proc2.speakNorm(set_data, upper=0.75, lower=0.25)
                    #self.edited_speaks[j][i] = set_data
                    # rescale color_data to the 0-255 range
#color_data=int(set_data*255)
color_data = set_data
color_data = 1 if color_data > 0.5 else 0
color_data=int(color_data*255)
#print color_data
#print "at",color_data
color = [255-color_data]*3
                elif i >= d_col and i < t_col-add_flag: # visualize the annotations
#print i
"""
set_data = anno[j][i-d_col] #anno(1000, 2)
if set_data == 0:
color = [255, 255, 255]
else:
color = [0, 0, 0]
"""
set_data = anno[j][i-d_col] #anno(1000, 2)
set_data = 0 if set_data < 0 else set_data
set_data = 1 if set_data > 1 else set_data
color_data=int(set_data*255)
color = [255-color_data]*3
else: #flag
set_data = self.edited_flags[j][0]
if set_data == 0:
color = [255, 255, 255]
else:
color = [0, 0, 0]
self.table.setItem(i, j, QtGui.QTableWidgetItem())
self.table.item(i, j).setBackground(QtGui.QColor(color[0],color[1],color[2]))
self.table.item(i, j).setToolTip(str(set_data))
hor = False
self.table.setVisible(False)
self.table.resizeRowsToContents()
self.table.resizeColumnsToContents()
self.table.setVisible(True)
    # Called when the table is clicked
def clickUpdateTable(self, cItem):
self.tip = float(cItem.toolTip())
self.r = cItem.row()
self.c = cItem.column()
print "r:",self.r,", c:",self.c, ". tip:",self.tip
set_data = 0 if self.tip == 1 else 1
speak_dim = self.edited_speaks.shape[1]
anno_dim = self.edited_annos.shape[1]
if self.r < speak_dim:
self.edited_speaks[self.c][self.r] = set_data
elif self.r >= speak_dim and self.r < speak_dim+anno_dim:
self.edited_annos[self.c][self.r-speak_dim] = set_data
else:
self.edited_flags[self.c][0] = set_data
#indexes = self.table.selectedIndexes()
#print indexes
color = [[255, 255, 255], [0, 0, 0]]
self.table.setItem(self.r, self.c, QtGui.QTableWidgetItem())
self.table.item(self.r, self.c).setBackground(QtGui.QColor(color[set_data][0],color[set_data][1],color[set_data][2]))
self.table.item(self.r, self.c).setToolTip(str(set_data))
        # Visualize the joints
# self.vizJoint(self.c)
def checkRangeData(self):
set_row = int(self.checkUser.text()) #0~4 User1=2, User2=3
start = int(self.checkSt.text())
end = int(self.checkEd.text())
color = [[255, 255, 255], [0, 0, 0]]
table_offset = self.edited_speaks.shape[1]
        # Which user to modify
user = set_row - table_offset
#print user, table_offset
if user < 0:
print "Not Change!"
return
print "Change [User:", user, ", Start:",start,", end:",end,"]"
for i in range(start, end):
get_data = self.edited_annos[i][user]
set_data = 0 if get_data == 1 else 1
self.table.setItem(set_row, i, QtGui.QTableWidgetItem())
self.table.item(set_row, i).setBackground(QtGui.QColor(color[set_data][0],color[set_data][1],color[set_data][2]))
self.table.item(set_row, i).setToolTip(str(set_data))
self.edited_annos[i][user] = set_data
def sliderChange(self, timeline):
if len(self.input_joints)==0:
print "now no data:", timeline
return
#self.table.selectColum(timeline)
#self.table.verticalScrollBar().setValue(timeline)
self.table.setCurrentCell(self.r, timeline)
self.pubViz(timeline)
"""
def activatedUpdateTable(self, cItem):
row = cItem.row()
col = cItem.column()
print "row:", row,", col:", col#, ". tip:",self.tip
"""
    # Load raw data
def inputData(self):
filesize = int(self.frmSizeBox.text())
print "Input raw data:", filesize
self.fname = [str(self.txtSepFile.text())]
input_data = data_proc2.load_persons(self.fname, annobool=False, datalen=filesize)
#print input_data["times"][0]
        # If you want to hand-tweak the annotations:
#input_data[2] = data_proc.proc_anno(input_data[0], input_data[2], use_vote=True, use_speak=False)
self.loadInputData(input_data)
    # Load preprocessed data
def inputProcedData(self):
filesize = int(self.frmSizeBox.text())
print "Input proced data", filesize
self.fname = [str(self.txtSepFile.text())]
input_data = data_proc2.load_proced_data_flag(self.fname, datalen=filesize)
#print input_data["times"]
self.loadInputData(input_data)
def loadInputData(self, input_data):
self.input_joints = input_data["joints"]
self.input_speaks = input_data["speaks"]
datalen=self.input_speaks.shape[0]
if input_data.has_key("annos"):
self.input_annos = input_data["annos"]
            # For now, edit only user1 (0124_persons is handled separately)
#self.input_annos[:,:1] = np.zeros((self.input_annos.shape[0],1))
"""
user_joint = self.input_joints[:,:36]
user_anno = self.input_annos[:,:1]
calc_user_anno = data_proc2.proc_anno(user_joint, user_anno, use_vote=True, use_anno=False, threshold=-0.2)
self.input_annos[:,:1] = calc_user_anno
print "now_persons mode"
user_joint = self.input_joints[:,36:]
user_anno = self.input_annos[:,1:]
calc_user_anno = data_proc2.proc_anno(user_joint, user_anno, use_vote=True, use_anno=False, threshold=-0.2)
self.input_annos[:,1:] = calc_user_anno
"""
#print "now model mode"
self.input_annos[:,:] = np.round(self.input_annos[:,:])
else:
self.input_annos = np.zeros((datalen, self.anno_dim))#(1000,2)
if input_data.has_key("flags"):
print "load flags"
self.input_flags = input_data["flags"]
else:
print "create flags"
self.input_flags = np.zeros((datalen, 1))
if input_data.has_key("times"):
print "load times"
self.input_times = input_data["times"]
else:
print "create times"
self.input_times = np.zeros((datalen, 1))
"""
diff_times = []
for t in range(len(self.input_times)):
if t == 0:
diff_times.append(0)
else:
diff_times.append(self.input_times[t]-self.input_times[t-1])
fps = np.round(1/float(np.mean(diff_times)))
plt.plot(self.input_annos[:,0], label="person", color="red")
plt.plot(self.input_annos[:,1], label="robot", color="green")
plt.xlabel("Frame (fps:" | im(0,len(self.input_annos))
plt.ylim(-0.2, 1.2)
plt.legend()
plt.show()
"""
print "joints shape:", self.input_joints.shape
print "speaks shape:", self.input_speaks.shape
print "annos shape:", self.input_annos.shape
print "flags shape:", self.input_flags.shape
print "times shape:", self.input_times.shape
#for j in range(len(self.input_times)):
# print j,"%.10f"%self.input_times[j]
self.timediff(self.input_times)
        self.edited_joints = copy.deepcopy(self.input_joints) # copied as an np.array
        self.edited_speaks = copy.deepcopy(self.input_speaks) # speaks are not edited (for now)
self.edited_annos = copy.deepcopy(self.input_annos)
self.edited_flags = copy.deepcopy(self.input_flags)
self.edited_times = copy.deepcopy(self.input_times)
#self.updateTable(self.input_joints)
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
        # Set the slider's maximum value
self.sld.setMaximum(datalen - 1)
print "end"
def outputData(self):
name_json = str(self.txtOutputFile.text())
keys = ["joints", "speaks", "annos", "flags", "times"]
data = [self.edited_joints, self.edited_speaks, self.edited_annos, self.edited_flags, self.edited_times]
"""
if len(keys) != len(data):
print "Save false! keys len:"
"""
data_proc2.save_data(name_json, keys, data)
def timediff(self, times):
fpss = []
for i in range(len(times)-1):
fps = 1/(times[i+1]-times[i])
fpss.append(fps)
#print i, fps
fpss = np.array(fpss)
#print "mean:",np.mean(fpss),",std:",np.std(fpss)
def cutRangeData(self):
start = int(self.cutStart.text())
end = int(self.cutEnd.text())
print "cut data:",start,"-",end
self.edited_joints = self.edited_joints[start:end]
self.edited_speaks = self.edited_speaks[start:end]
self.edited_annos = self.edited_annos[start:end]
self.edited_flags = self.edited_flags[start:end]
self.edited_times = self.edited_times[start:end]
print "joints shape:", self.edited_joints.shape
print "speaks shape:", self.edited_speaks.shape
print "annos shape:", self.edited_annos.shape
print "flags shape:", self.edited_flags.shape
print "times shape:", self.edited_times.shape
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
        # Set the slider's maximum value
self.sld.setMaximum(self.edited_joints.shape[0]-1)
print "end"
def directJoints(self):
print "direct joints"
size, dim = self.edited_joints.shape
user1_nose_y_idx = 35+33+2
pair = 1
pair_stack = 0
offset_y = 0.03
for i in range(size):
anno = self.edited_annos[i]
            if anno[1] == 0: # if this user is a listener
if anno[0] == 1 and anno[2] == 0:
#print i,":user0",self.input_joints[i][user1_nose_y_idx]
self.edited_joints[i][user1_nose_y_idx] += offset_y
pair = 0
pair_stack = 0
#print self.input_joints[i][user1_nose_y_idx]
elif anno[0] == 0 and anno[2] == 1:
#print i,":user2",self.input_joints[i][user1_nose_y_idx]
self.edited_joints[i][user1_nose_y_idx] -= offset_y
pair = 2
pair_stack = 0
#print self.input_joints[i][user1_nose_y_idx]
                else: # nobody is speaking
pair_stack += 1
            else: # if speaking: decide which way to face
if pair == 0:
self.edited_joints[i][user1_nose_y_idx] += offset_y
elif pair == 2:
self.edited_joints[i][user1_nose_y_idx] -= offset_y
    # Mirror the data and append it
def reverseData(self):
        # Flip only the y direction
def reverse_y(joints):
#(10000, 36)
rev = np.array([[1,-1,1]*12]*joints.shape[0])
return joints*rev
print "reverseData before:",self.edited_joints.shape, self.edited_annos.shape
datalen = self.edited_annos.shape[0]
if self.anno_dim == 2:
j_r = np.hstack((self.edited_joints[:,36:], self.edited_joints[:,:36]))
self.edited_joints = np.vstack((self.edited_joints, j_r))
s_r = np.hstack((self.edited_speaks[:,1].reshape(datalen,1), self.edited_speaks[:,0].reshape(datalen,1)))
self.edited_speaks = np.vstack((self.edited_speaks, s_r))
a_r = np.hstack((self.edited_annos[:,1].reshape(datalen,1), self.edited_annos[:,0].reshape(datalen,1)))
self.edited_annos = np.vstack((self.edited_annos, a_r))
self.edited_flags = np.vstack((self.edited_flags, self.edited_flags))
self.edited_times = np.append(self.edited_times, self.edited_times)
else:
print "bad shape"
#print self.edited_times.shape
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
        # Set the slider's maximum value
self.sld.setMaximum(self.edited_joints.shape[0]-1)
print "reverseData after:",self.edited_joints.shape,self.edited_speaks.shape,self.edited_annos.shape
def rviz_obj(self, obj_id, obj_ns, obj_type, obj_size, obj_color=[0, 0, 0, 0], obj_life=0):
obj = Marker()
obj.header.frame_id, obj.header.stamp = "camera_link", rospy.Time.now()
obj.ns, obj.action, obj.type = str(obj_ns), 0, obj_type
obj.scale.x, obj.scale.y, obj.scale.z = obj_size[0], obj_size[1], obj_size[2]
obj.color = obj_color
obj.lifetime = rospy.Duration.from_sec(obj_life)
obj.pose.orientation.w = 1.0
return obj
def set_point(self, pos, addx=0, addy=0, addz=0, rotate=False):
pt = Point()
if rotate == True:
pt.x, pt.y, pt.z = -1*pos[0]+addx, -1*pos[1]+addy, pos[2]+addz
else:
pt.x, pt.y, pt.z = pos[0]+addx, pos[1]+addy, pos[2]+addz
return pt
def set_vizmsg_point(self, u, data, color, psize, ofs, addx=0, addy=0, rotate=False):
pmsg = self.rviz_obj(u, 'p'+str(u), 7, [psize, psize, psize], color, 0)
points = []
for p in range(data.shape[1]/ofs):
points.append(self.set_point([data[0, p*ofs],
data[0, p*ofs+1],
data[0, p*ofs+2]],
addx=addx, addy=addy, rotate=rotate))
pmsg.points = points
return pmsg
def set_vizmsg_line(self, u, data, color, lsize, llist, addx=0, addy=0, rotate=False):
lmsg = self.rviz_obj(u, 'l'+str(u), 5, [lsize, lsize, lsize], color, 0)
for ls in llist:
for l in range(len(ls)-1):
for add in range(2):
#print person[0, ls[l+add]], ls[l+add], l, add
linepoint=self.set_point([data[0,ls[l+add]*3],
data[0,ls[l+add]*3+1],
data[0,ls[l+add]*3+2]],
addx=addx, addy=addy, rotate=rotate)
lmsg.points.append(linepoint)
return lmsg
def pubViz(self, tl):
#print "pub Viz:", tl
# drawing
msgs = MarkerArray()
amsg = Float64MultiArray()
per_js = []
dim_p = 36
dim = len(self.edited_joints[tl])
for i in range(dim/dim_p):
per_js.append(self.edited_joints[tl,dim_p*i:dim_p*(i+1)].reshape(1, dim_p))
ofs = 3
#ofs_xyr = [[-0.5, -0.5, 1], [0.5, 0, 0], [-0.5, 0.5, 1]]
ofs_xyr = [[-1, 0, 1], [0, 0, 0]]
for u, (person, speak, anno) in enumerate(zip(per_js, self.edited_speaks[tl], self.edited_annos[tl])):
# ---Person points---
offset = 0
psize = speak*0.05
#psize = 0.03
#psize = 0.05 if anno > 0.7 else 0.03
pmsg = self.set_vizmsg_point(u, person, self.carray[0], psize, ofs,
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], rotate=ofs_xyr[u][2])
msgs.markers.append(pmsg)
# ---Person lines---
lsize = 0.03 if anno > 0.5 else 0.01
cid = 3 if anno > 0.5 else 2
lmsg = self.set_vizmsg_line(u, person, self.carray[cid], lsize, self.llist,
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], rotate=ofs_xyr[u][2])
msgs.markers.append(lmsg)
# ------text------
tidx, tsize = 3, 0.1 # tidx=3 is Head
tmsg = self.rviz_obj(u, 't'+str(u), 9, [tsize, tsize, tsize], self.carray[u], 0)
tmsg.pose.position = self.set_point([person[0, tidx*ofs], person[0, tidx*ofs+1], person[0, tidx*ofs+2]],
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], addz=0.3)
tmsg.pose.orientation.w = 1
tmsg.text = "User"+str(u)+":"+str(speak)
msgs.markers.append(tmsg)
amsg.data.append(speak)
# ------text------
if self.edited_times[tl] > 0.00001:
tidx, tsize = 3, 0.1 # tidx=3 is Head
tmsg = self.rviz_obj(u, 'time'+str(u), 9, [tsize, tsize, tsize], self.carray[u], 0)
tmsg.pose.position = self.set_point([-0.5, 0, 0])
tmsg.pose.orientation.w = 1
tmsg.text = "time: "+str(self.edited_times[tl])
msgs.markers.append(tmsg)
self.mpub.publish(msgs)
#print pred[1].data[0, 0]
self.speakpub.publish(amsg)
def main():
app = QtGui.QApplication(sys.argv)
anotation = ANNOTATION()
#graph = GRAPH()
sys.exit(app.exec_())
if __name__=='__main__':
main()
| +str(fps)+")")
plt.title("speaker/listener result")
plt.xl | conditional_block |
ann_interface.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
2017.1.24---
Added time-stamp handling
2017.1.4----
Handle the audio as raw data
Keep it separate from the annotations
2016.12.12----
Use 20161210_proc3p.json; try mirroring the data
2016.12.9----
Annotation interface for three-person dialogue
Annotations are made by hand
Synchronized the slider bar with the table
2016.10.19----
Interface for annotating interaction data
"""
import sys
import os.path
import math
import json
import time
import copy
import numpy as np
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
import rospy
from visualization_msgs.msg import MarkerArray
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
from std_msgs.msg import ColorRGBA
import data_proc2
from std_msgs.msg import Float64MultiArray
class ANNOTATION(QtGui.QWidget):
def __init__(self):
super(ANNOTATION, self).__init__()
        # Initialize the UI
self.initUI()
        # Initialize the ROS node and publishers
rospy.init_node('annotation_interface2', anonymous=True)
self.mpub = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=10)
#self.ppub = rospy.Publisher('joint_diff', PointStamped, queue_size=10)
self.speakpub = rospy.Publisher('/speaks', Float64MultiArray, queue_size=10)
        # rviz color settings (not finalized)
self.carray = []
clist = [[1, 0, 0, 1], [0, 1, 0, 1], [1, 1, 0, 1], [1, 0.5, 0, 1]]
for c in clist:
color = ColorRGBA()
color.r, color.g, color.b, color.a = c[0], c[1], c[2], c[3]
self.carray.append(color)
# set extra data param
self.dim_x = 72
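        # llist holds joint-index chains; set_vizmsg_line walks each chain to draw the skeleton line segments in rviz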
self.llist = [[0, 1, 10, 2, 3, 11], [10, 4, 5, 6], [10, 7, 8, 9]]
self.input_joints = []
self.input_speaks = []
self.anno_dim = 2
self.r, self.c = 0, 0
def initUI(self):
        # Helpers for creating button objects
def boxBtnObj(name, func, maxlen=30):
box = QtGui.QHBoxLayout()
btn = btnObj(name, func, maxlen=maxlen)
box.addWidget(btn)
return box
def btnObj(name, func, maxlen=30):
btn = QtGui.QPushButton(name)
btn.setMaximumWidth(maxlen)
btn.clicked.connect(func)
return btn
grid = QtGui.QGridLayout()
form = QtGui.QFormLayout()
#frame size
self.frmSizeBox = QtGui.QLineEdit()
self.frmSizeBox.setText('-1')
self.frmSizeBox.setFixedWidth(100)
form.addRow('size', self.frmSizeBox)
        # File input box
self.txtSepFile = QtGui.QLineEdit()
btnSepFile = btnObj("...", self.chooseDbFile, maxlen=40)
btnRawInput = btnObj("raw", self.inputData, maxlen=60)
btnProcedInput = btnObj("proced", self.inputProcedData, maxlen=60)
boxSepFile = QtGui.QHBoxLayout()
boxSepFile.addWidget(self.txtSepFile)
boxSepFile.addWidget(btnSepFile)
boxSepFile.addWidget(btnRawInput)
boxSepFile.addWidget(btnProcedInput)
form.addRow('input', boxSepFile)
        # File output box
self.txtOutputFile = QtGui.QLineEdit()
self.txtOutputFile.setText('test_proc.json')
btnOutputFile = btnObj("save", self.outputData, maxlen=100)
boxOutputFile = QtGui.QHBoxLayout()
boxOutputFile.addWidget(self.txtOutputFile)
boxOutputFile.addWidget(btnOutputFile)
form.addRow('output', boxOutputFile)
#data cut
cutRange = QtGui.QHBoxLayout()
self.cutStart = QtGui.QLineEdit()
self.cutStart.setText('0')
self.cutEnd = QtGui.QLineEdit()
self.cutEnd.setText('0')
btnCutRange = btnObj("exec", self.cutRangeData, maxlen=60)
cutRange.addWidget(self.cutStart)
cutRange.addWidget(self.cutEnd)
cutRange.addWidget(btnCutRange)
form.addRow('trimming', cutRange)
# check range
checkRange = QtGui.QHBoxLayout()
self.checkUser = QtGui.QLineEdit()
self.checkUser.setText('0')
self.checkSt = QtGui.QLineEdit()
self.checkSt.setText('0')
self.checkEd = QtGui.QLineEdit()
self.checkEd.setText('0')
btnRangeCheck = btnObj("check", self.checkRangeData, maxlen=60)
checkRange.addWidget(self.checkUser)
checkRange.addWidget(self.checkSt)
checkRange.addWidget(self.checkEd)
checkRange.addWidget(btnRangeCheck)
form.addRow('U/S/E', checkRange)
# direct
boxDirect = boxBtnObj("d", self.directJoints, maxlen=20)
form.addRow('direct', boxDirect)
# Reset
boxResetAnno = boxBtnObj("e", self.resetAnno, maxlen=20)
form.addRow('reset', boxResetAnno)
# Reverse
boxReverse = boxBtnObj("e", self.reverseData, maxlen=20)
form.addRow('reverse', boxReverse)
# Time Line
boxSld = QtGui.QHBoxLayout()
lcd = QtGui.QLCDNumber(self)
self.sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.sld.valueChanged.connect(lcd.display)
self.sld.valueChanged.connect(self.sliderChange)
boxSld.addWidget(lcd)
boxSld.addWidget(self.sld)
        # Initialize the table
        # (the horizontal axis is user2's time)
self.table = QtGui.QTableWidget(self)
self.table.setColumnCount(0)
#self.table.setHorizontalHeaderLabels("use_2 time")
jItem = QtGui.QTableWidgetItem(str(0))
self.table.setHorizontalHeaderItem(0, jItem)
        # Update the graph when an item is clicked
self.table.itemClicked.connect(self.clickUpdateTable)
#self.table.itemActivated.connect(self.activatedUpdateTable)
self.table.setItem(0, 0, QtGui.QTableWidgetItem(1))
#self.itemSelectionChanged.connect(self.selection_changed)
#self.tableSlider = self.table.verticalScrollBar()
boxTable = QtGui.QHBoxLayout()
boxTable.addWidget(self.table)
        # Layout
grid.addLayout(form,1,0)
grid.addLayout(boxSld,2,0)
grid.addLayout(boxTable,3,0)
self.setLayout(grid)
self.resize(400,100)
self.setWindowTitle("cca window")
self.show()
def chooseDbFile(self):
dialog = QtGui.QFileDialog()
dialog.setFileMode(QtGui.QFileDialog.ExistingFile)
if dialog.exec_():
fileNames = dialog.selectedFiles()
for f in fileNames:
self.txtSepFile.setText(f)
return
return self.txtSepFile.setText('')
# reset
def resetAnno(self):
print "reset Data before:",self.edited_joints.shape, self.edited_annos.shape
self.edited_joints = copy.deepcopy(self.input_joints)
self.edited_speaks = copy.deepcopy(self.input_speaks)
#datalen = self.edited_speaks.shape[1]
self.edited_annos = copy.deepcopy(self.input_annos) #np.zeros((datalen, self.anno_dim)) #(1000, 2)
self.edited_flags = copy.deepcopy(self.input_flags)
self.sld.setMaximum(self.edited_speaks.shape[0]-1) #
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
print "reset Data after:",self.edited_joints.shape, self.edited_speaks.shape, self.edited_annos.shape
def speakNorm(self, data, upper=300, lower=80):
        # Clamp to the upper bound
        c_data=data if data <upper else upper
        # Clamp to the lower bound
        c_data=c_data if c_data>lower else lower
        # Rescale into the 0-1 range
return (c_data-lower)/float(upper-lower)
def convDecibel(self, data):
return 20*math.log10(data)
#decibel
def speakDecibelNorm(self, data):
decibel = self.convDecibel(data)
return self.speakNorm(decibel, upper=70, lower=30)
def updateTable(self, data, anno, times):
#plt.plot(data)
#plt.show()
th = 0#float(self.ThesholdBox.text())
        if(len(data)==0):
            print "No Data! Push exec button..."
            return
d_row, d_col = data.shape #(1000, 2)
        add_ann = anno.shape[1] # number of annotation channels
#print add_ann
add_flag = 1
        # t_row: time axis, t_col: number of data dimensions, add_ann: annotation channels to add
t_col = d_col+add_ann+add_flag
t_row = d_row
#print "t, d, a",t_col,d_col,add_ann
self.table.clear()
font = QtGui.QFont()
font.setFamily(u"DejaVu Sans")
font.setPointSize(5)
self.table.horizontalHeader().setFont(font)
self.table.verticalHeader().setFont(font)
        self.table.setColumnCount(t_row) # table columns = time frames (t_row)
        self.table.setRowCount(t_col) # table rows = data dims + annotations + flag (t_col)
# print "t:", t_row, t_col
#self.table.setRowCount(data.shape[0])
#self.table.setColumnCount(ann_num)
        # Set the axis header values
#for i in range(len(times)):
# jItem = QtGui.QTableWidgetItem(str(i))
# self.table.setHorizontalHeaderItem(i, jItem)
hor = True
for i in range(t_col):
iItem = QtGui.QTableWidgetItem(str(i))
self.table.setVerticalHeaderItem(i, iItem)
self.table.verticalHeaderItem(i).setToolTip(str(i))
            # To show the time stamps on the axis, use the line below instead
#self.table.verticalHeaderItem(i).setToolTip(str(times[i]))
for j in range(t_row):
if hor == True:
jItem = QtGui.QTableWidgetItem(str(j))
self.table.setHorizontalHeaderItem(j, jItem)
self.table.horizontalHeaderItem(j).setToolTip(str(times[j]))
#print "%.10f"%times[j]
                if i < d_col: # visualize the (speak) data
                    # case where the audio data is RMS
                    # set_data: range is 0-1
                    set_data = data[j][i] #self.speakDecibelNorm(data[j][i])
                    # crude ON/OFF thresholding
                    #set_data = 1 if set_data > 0.3 else 0
                    #self.edited_speaks[j][i] = set_data
                    # temporary offset
                    #set_data = data_proc2.speakNorm(set_data, upper=0.75, lower=0.25)
                    #self.edited_speaks[j][i] = set_data
                    # rescale color_data to the 0-255 range
#color_data=int(set_data*255)
color_data = set_data
color_data = 1 if color_data > 0.5 else 0
color_data=int(color_data*255)
#print color_data
#print "at",color_data
color = [255-color_data]*3
                elif i >= d_col and i < t_col-add_flag: # visualize the annotations
#print i
"""
set_data = anno[j][i-d_col] #anno(1000, 2)
if set_data == 0:
color = [255, 255, 255]
else:
color = [0, 0, 0]
"""
set_data = anno[j][i-d_col] #anno(1000, 2)
set_data = 0 if set_data < 0 else set_data
set_data = 1 if set_data > 1 else set_data
color_data=int(set_data*255)
color = [255-color_data]*3
else: #flag
set_data = self.edited_flags[j][0]
if set_data == 0:
color = [255, 255, 255]
else:
color = [0, 0, 0]
self.table.setItem(i, j, QtGui.QTableWidgetItem())
self.table.item(i, j).setBackground(QtGui.QColor(color[0],color[1],color[2]))
self.table.item(i, j).setToolTip(str(set_data))
hor = False
self.table.setVisible(False)
self.table.resizeRowsToContents()
self.table.resizeColumnsToContents()
self.table.setVisible(True)
    # Called when the table is clicked
def clickUpdateTable(self, cItem):
self.tip = float(cItem.toolTip())
self.r = cItem.row()
self.c = cItem.column()
print "r:",self.r,", c:",self.c, ". tip:",self.tip
set_data = 0 if self.tip == 1 else 1
speak_dim = self.edited_speaks.shape[1]
anno_dim = self.edited_annos.shape[1]
if self.r < speak_dim:
self.edited_speaks[self.c][self.r] = set_data
elif self.r >= speak_dim and self.r < speak_dim+anno_dim:
self.edited_annos[self.c][self.r-speak_dim] = set_data
else:
self.edited_flags[self.c][0] = set_data
#indexes = self.table.selectedIndexes()
#print indexes
color = [[255, 255, 255], [0, 0, 0]]
self.table.setItem(self.r, self.c, QtGui.QTableWidgetItem())
self.table.item(self.r, self.c).setBackground(QtGui.QColor(color[set_data][0],color[set_data][1],color[set_data][2]))
self.table.item(self.r, self.c).setToolTip(str(set_data))
        # Visualize the joints
# self.vizJoint(self.c)
def checkRangeData(self):
set_row = int(self.checkUser.text()) #0~4 User1=2, User2=3
start = int(self.checkSt.text())
end = int(self.checkEd.text())
color = [[255, 255, 255], [0, 0, 0]]
table_offset = self.edited_speaks.shape[1]
        # Which user to modify
user = set_row - table_offset
#print user, table_offset
if user < 0:
print "Not Change!"
return
print "Change [User:", user, ", Start:",start,", end:",end,"]"
for i in range(start, end):
get_data = self.edited_annos[i][user]
set_data = 0 if get_data == 1 else 1
self.table.setItem(set_row, i, QtGui.QTableWidgetItem())
self.table.item(set_row, i).setBackground(QtGui.QColor(color[set_data][0],color[set_data][1],color[set_data][2]))
self.table.item(set_row, i).setToolTip(str(set_data))
self.edited_annos[i][user] = set_data
def sliderChange(self, timeline):
if len(self.input_joints)==0:
print "now no data:", timeline
return
#self.table.selectColum(timeline)
#self.table.verticalScrollBar().setValue(timeline)
self.table.setCurrentCell(self.r, timeline)
self.pubViz(timeline)
""" | def activatedUpdateTable(self, cItem):
row = cItem.row()
col = cItem.column()
print "row:", row,", col:", col#, ". tip:",self.tip
"""
    # Load raw data
def inputData(self):
filesize = int(self.frmSizeBox.text())
print "Input raw data:", filesize
self.fname = [str(self.txtSepFile.text())]
input_data = data_proc2.load_persons(self.fname, annobool=False, datalen=filesize)
#print input_data["times"][0]
        # If you want to hand-tweak the annotations:
#input_data[2] = data_proc.proc_anno(input_data[0], input_data[2], use_vote=True, use_speak=False)
self.loadInputData(input_data)
    # Load preprocessed data
def inputProcedData(self):
filesize = int(self.frmSizeBox.text())
print "Input proced data", filesize
self.fname = [str(self.txtSepFile.text())]
input_data = data_proc2.load_proced_data_flag(self.fname, datalen=filesize)
#print input_data["times"]
self.loadInputData(input_data)
def loadInputData(self, input_data):
self.input_joints = input_data["joints"]
self.input_speaks = input_data["speaks"]
datalen=self.input_speaks.shape[0]
if input_data.has_key("annos"):
self.input_annos = input_data["annos"]
            # For now, edit only user1 (0124_persons is handled separately)
#self.input_annos[:,:1] = np.zeros((self.input_annos.shape[0],1))
"""
user_joint = self.input_joints[:,:36]
user_anno = self.input_annos[:,:1]
calc_user_anno = data_proc2.proc_anno(user_joint, user_anno, use_vote=True, use_anno=False, threshold=-0.2)
self.input_annos[:,:1] = calc_user_anno
print "now_persons mode"
user_joint = self.input_joints[:,36:]
user_anno = self.input_annos[:,1:]
calc_user_anno = data_proc2.proc_anno(user_joint, user_anno, use_vote=True, use_anno=False, threshold=-0.2)
self.input_annos[:,1:] = calc_user_anno
"""
#print "now model mode"
self.input_annos[:,:] = np.round(self.input_annos[:,:])
else:
self.input_annos = np.zeros((datalen, self.anno_dim))#(1000,2)
if input_data.has_key("flags"):
print "load flags"
self.input_flags = input_data["flags"]
else:
print "create flags"
self.input_flags = np.zeros((datalen, 1))
if input_data.has_key("times"):
print "load times"
self.input_times = input_data["times"]
else:
print "create times"
self.input_times = np.zeros((datalen, 1))
"""
diff_times = []
for t in range(len(self.input_times)):
if t == 0:
diff_times.append(0)
else:
diff_times.append(self.input_times[t]-self.input_times[t-1])
fps = np.round(1/float(np.mean(diff_times)))
plt.plot(self.input_annos[:,0], label="person", color="red")
plt.plot(self.input_annos[:,1], label="robot", color="green")
plt.xlabel("Frame (fps:"+str(fps)+")")
plt.title("speaker/listener result")
plt.xlim(0,len(self.input_annos))
plt.ylim(-0.2, 1.2)
plt.legend()
plt.show()
"""
print "joints shape:", self.input_joints.shape
print "speaks shape:", self.input_speaks.shape
print "annos shape:", self.input_annos.shape
print "flags shape:", self.input_flags.shape
print "times shape:", self.input_times.shape
#for j in range(len(self.input_times)):
# print j,"%.10f"%self.input_times[j]
self.timediff(self.input_times)
        self.edited_joints = copy.deepcopy(self.input_joints) # copied as an np.array
        self.edited_speaks = copy.deepcopy(self.input_speaks) # speaks are not edited (for now)
self.edited_annos = copy.deepcopy(self.input_annos)
self.edited_flags = copy.deepcopy(self.input_flags)
self.edited_times = copy.deepcopy(self.input_times)
#self.updateTable(self.input_joints)
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
        # Set the slider's maximum value
self.sld.setMaximum(datalen - 1)
print "end"
def outputData(self):
name_json = str(self.txtOutputFile.text())
keys = ["joints", "speaks", "annos", "flags", "times"]
data = [self.edited_joints, self.edited_speaks, self.edited_annos, self.edited_flags, self.edited_times]
"""
if len(keys) != len(data):
print "Save false! keys len:"
"""
data_proc2.save_data(name_json, keys, data)
def timediff(self, times):
fpss = []
for i in range(len(times)-1):
fps = 1/(times[i+1]-times[i])
fpss.append(fps)
#print i, fps
fpss = np.array(fpss)
#print "mean:",np.mean(fpss),",std:",np.std(fpss)
def cutRangeData(self):
start = int(self.cutStart.text())
end = int(self.cutEnd.text())
print "cut data:",start,"-",end
self.edited_joints = self.edited_joints[start:end]
self.edited_speaks = self.edited_speaks[start:end]
self.edited_annos = self.edited_annos[start:end]
self.edited_flags = self.edited_flags[start:end]
self.edited_times = self.edited_times[start:end]
print "joints shape:", self.edited_joints.shape
print "speaks shape:", self.edited_speaks.shape
print "annos shape:", self.edited_annos.shape
print "flags shape:", self.edited_flags.shape
print "times shape:", self.edited_times.shape
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
        # Set the slider's maximum value
self.sld.setMaximum(self.edited_joints.shape[0]-1)
print "end"
def directJoints(self):
print "direct joints"
size, dim = self.edited_joints.shape
user1_nose_y_idx = 35+33+2
pair = 1
pair_stack = 0
offset_y = 0.03
for i in range(size):
anno = self.edited_annos[i]
            if anno[1] == 0: # if this user is a listener
if anno[0] == 1 and anno[2] == 0:
#print i,":user0",self.input_joints[i][user1_nose_y_idx]
self.edited_joints[i][user1_nose_y_idx] += offset_y
pair = 0
pair_stack = 0
#print self.input_joints[i][user1_nose_y_idx]
elif anno[0] == 0 and anno[2] == 1:
#print i,":user2",self.input_joints[i][user1_nose_y_idx]
self.edited_joints[i][user1_nose_y_idx] -= offset_y
pair = 2
pair_stack = 0
#print self.input_joints[i][user1_nose_y_idx]
                else: # nobody is speaking
pair_stack += 1
            else: # if speaking: decide which way to face
if pair == 0:
self.edited_joints[i][user1_nose_y_idx] += offset_y
elif pair == 2:
self.edited_joints[i][user1_nose_y_idx] -= offset_y
    # Mirror the data and append it
def reverseData(self):
        # Flip only the y direction
def reverse_y(joints):
#(10000, 36)
rev = np.array([[1,-1,1]*12]*joints.shape[0])
return joints*rev
print "reverseData before:",self.edited_joints.shape, self.edited_annos.shape
datalen = self.edited_annos.shape[0]
if self.anno_dim == 2:
j_r = np.hstack((self.edited_joints[:,36:], self.edited_joints[:,:36]))
self.edited_joints = np.vstack((self.edited_joints, j_r))
s_r = np.hstack((self.edited_speaks[:,1].reshape(datalen,1), self.edited_speaks[:,0].reshape(datalen,1)))
self.edited_speaks = np.vstack((self.edited_speaks, s_r))
a_r = np.hstack((self.edited_annos[:,1].reshape(datalen,1), self.edited_annos[:,0].reshape(datalen,1)))
self.edited_annos = np.vstack((self.edited_annos, a_r))
self.edited_flags = np.vstack((self.edited_flags, self.edited_flags))
self.edited_times = np.append(self.edited_times, self.edited_times)
else:
print "bad shape"
#print self.edited_times.shape
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
#Set the slider's maximum value
self.sld.setMaximum(self.edited_joints.shape[0]-1)
print "reverseData after:",self.edited_joints.shape,self.edited_speaks.shape,self.edited_annos.shape
def rviz_obj(self, obj_id, obj_ns, obj_type, obj_size, obj_color=[0, 0, 0, 0], obj_life=0):
obj = Marker()
obj.header.frame_id, obj.header.stamp = "camera_link", rospy.Time.now()
obj.ns, obj.action, obj.type = str(obj_ns), 0, obj_type
obj.scale.x, obj.scale.y, obj.scale.z = obj_size[0], obj_size[1], obj_size[2]
obj.color = obj_color
obj.lifetime = rospy.Duration.from_sec(obj_life)
obj.pose.orientation.w = 1.0
return obj
def set_point(self, pos, addx=0, addy=0, addz=0, rotate=False):
pt = Point()
if rotate == True:
pt.x, pt.y, pt.z = -1*pos[0]+addx, -1*pos[1]+addy, pos[2]+addz
else:
pt.x, pt.y, pt.z = pos[0]+addx, pos[1]+addy, pos[2]+addz
return pt
def set_vizmsg_point(self, u, data, color, psize, ofs, addx=0, addy=0, rotate=False):
pmsg = self.rviz_obj(u, 'p'+str(u), 7, [psize, psize, psize], color, 0)
points = []
for p in range(data.shape[1]/ofs):
points.append(self.set_point([data[0, p*ofs],
data[0, p*ofs+1],
data[0, p*ofs+2]],
addx=addx, addy=addy, rotate=rotate))
pmsg.points = points
return pmsg
def set_vizmsg_line(self, u, data, color, lsize, llist, addx=0, addy=0, rotate=False):
lmsg = self.rviz_obj(u, 'l'+str(u), 5, [lsize, lsize, lsize], color, 0)
for ls in llist:
for l in range(len(ls)-1):
for add in range(2):
#print person[0, ls[l+add]], ls[l+add], l, add
linepoint=self.set_point([data[0,ls[l+add]*3],
data[0,ls[l+add]*3+1],
data[0,ls[l+add]*3+2]],
addx=addx, addy=addy, rotate=rotate)
lmsg.points.append(linepoint)
return lmsg
def pubViz(self, tl):
#print "pub Viz:", tl
# drawing
msgs = MarkerArray()
amsg = Float64MultiArray()
per_js = []
dim_p = 36
dim = len(self.edited_joints[tl])
for i in range(dim/dim_p):
per_js.append(self.edited_joints[tl,dim_p*i:dim_p*(i+1)].reshape(1, dim_p))
ofs = 3
#ofs_xyr = [[-0.5, -0.5, 1], [0.5, 0, 0], [-0.5, 0.5, 1]]
ofs_xyr = [[-1, 0, 1], [0, 0, 0]]
for u, (person, speak, anno) in enumerate(zip(per_js, self.edited_speaks[tl], self.edited_annos[tl])):
# ---Person points---
offset = 0
psize = speak*0.05
#psize = 0.03
#psize = 0.05 if anno > 0.7 else 0.03
pmsg = self.set_vizmsg_point(u, person, self.carray[0], psize, ofs,
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], rotate=ofs_xyr[u][2])
msgs.markers.append(pmsg)
# ---Person lines---
lsize = 0.03 if anno > 0.5 else 0.01
cid = 3 if anno > 0.5 else 2
lmsg = self.set_vizmsg_line(u, person, self.carray[cid], lsize, self.llist,
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], rotate=ofs_xyr[u][2])
msgs.markers.append(lmsg)
# ------text------
tidx, tsize = 3, 0.1 # tidx=3 is Head
tmsg = self.rviz_obj(u, 't'+str(u), 9, [tsize, tsize, tsize], self.carray[u], 0)
tmsg.pose.position = self.set_point([person[0, tidx*ofs], person[0, tidx*ofs+1], person[0, tidx*ofs+2]],
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], addz=0.3)
tmsg.pose.orientation.w = 1
tmsg.text = "User"+str(u)+":"+str(speak)
msgs.markers.append(tmsg)
amsg.data.append(speak)
# ------text------
if self.edited_times[tl] > 0.00001:
tidx, tsize = 3, 0.1 # tidx=3 is Head
tmsg = self.rviz_obj(u, 'time'+str(u), 9, [tsize, tsize, tsize], self.carray[u], 0)
tmsg.pose.position = self.set_point([-0.5, 0, 0])
tmsg.pose.orientation.w = 1
tmsg.text = "time: "+str(self.edited_times[tl])
msgs.markers.append(tmsg)
self.mpub.publish(msgs)
#print pred[1].data[0, 0]
self.speakpub.publish(amsg)
def main():
app = QtGui.QApplication(sys.argv)
anotation = ANNOTATION()
#graph = GRAPH()
sys.exit(app.exec_())
if __name__=='__main__':
main() | random_line_split |
|
ann_interface.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
2017.1.24---
Added time handling
2017.1.4----
Handle the audio as raw data
Treat it separately from the annotation
2016.12.12----
Use 20161210_proc3p.json and try flipping the data
2016.12.9----
Annotation interface for three-person conversations
Annotate manually
Synchronized the slider bar with the table
2016.10.19----
Interface for annotating interaction data
"""
import sys
import os.path
import math
import json
import time
import copy
import numpy as np
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
import rospy
from visualization_msgs.msg import MarkerArray
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
from std_msgs.msg import ColorRGBA
import data_proc2
from std_msgs.msg import Float64MultiArray
class ANNOTATION(QtGui.QWidget):
def __init__(self):
super(ANNOTATION, self).__init__()
#Initialize the UI
self.initUI()
#Initialize the ROS node, publishers, etc.
rospy.init_node('annotation_interface2', anonymous=True)
self.mpub = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=10)
#self.ppub = rospy.Publisher('joint_diff', PointStamped, queue_size=10)
self.speakpub = rospy.Publisher('/speaks', Float64MultiArray, queue_size=10)
#rviz color settings (not finished)
self.carray = []
clist = [[1, 0, 0, 1], [0, 1, 0, 1], [1, 1, 0, 1], [1, 0.5, 0, 1]]
for c in clist:
color = ColorRGBA()
color.r, color.g, color.b, color.a = c[0], c[1], c[2], c[3]
self.carray.append(color)
# set extra data param
self.dim_x = 72
self.llist = [[0, 1, 10, 2, 3, 11], [10, 4, 5, 6], [10, 7, 8, 9]]
self.input_joints = []
self.input_speaks = []
self.anno_dim = 2
self.r, self.c = 0, 0
def initUI(self):
#Helpers for creating button objects
def boxBtnObj(name, func, maxlen=30):
box = QtGui.QHBoxLayout()
btn = btnObj(name, func, maxlen=maxlen)
box.addWidget(btn)
return box
def btnObj(name, func, maxlen=30):
btn = QtGui.QPushButton(name)
btn.setMaximumWidth(maxlen)
btn.clicked.connect(func)
return btn
grid = QtGui.QGridLayout()
form = QtGui.QFormLayout()
#frame size
self.frmSizeBox = QtGui.QLineEdit()
self.frmSizeBox.setText('-1')
self.frmSizeBox.setFixedWidth(100)
form.addRow('size', self.frmSizeBox)
#File input box
self.txtSepFile = QtGui.QLineEdit()
btnSepFile = btnObj("...", self.chooseDbFile, maxlen=40)
btnRawInput = btnObj("raw", self.inputData, maxlen=60)
btnProcedInput = btnObj("proced", self.inputProcedData, maxlen=60)
boxSepFile = QtGui.QHBoxLayout()
boxSepFile.addWidget(self.txtSepFile)
boxSepFile.addWidget(btnSepFile)
boxSepFile.addWidget(btnRawInput)
boxSepFile.addWidget(btnProcedInput)
form.addRow('input', boxSepFile)
#File output box
self.txtOutputFile = QtGui.QLineEdit()
self.txtOutputFile.setText('test_proc.json')
btnOutputFile = btnObj("save", self.outputData, maxlen=100)
boxOutputFile = QtGui.QHBoxLayout()
boxOutputFile.addWidget(self.txtOutputFile)
boxOutputFile.addWidget(btnOutputFile)
form.addRow('output', boxOutputFile)
#data cut
cutRange = QtGui.QHBoxLayout()
self.cutStart = QtGui.QLineEdit()
self.cutStart.setText('0')
self.cutEnd = QtGui.QLineEdit()
self.cutEnd.setText('0')
btnCutRange = btnObj("exec", self.cutRangeData, maxlen=60)
cutRange.addWidget(self.cutStart)
cutRange.addWidget(self.cutEnd)
cutRange.addWidget(btnCutRange)
form.addRow('trimming', cutRange)
# check range
checkRange = QtGui.QHBoxLayout()
self.checkUser = QtGui.QLineEdit()
self.checkUser.setText('0')
self.checkSt = QtGui.QLineEdit()
self.checkSt.setText('0')
self.checkEd = QtGui.QLineEdit()
self.checkEd.setText('0')
btnRangeCheck = btnObj("check", self.checkRangeData, maxlen=60)
checkRange.addWidget(self.checkUser)
checkRange.addWidget(self.checkSt)
checkRange.addWidget(self.checkEd)
checkRange.addWidget(btnRangeCheck)
form.addRow('U/S/E', checkRange)
# direct
boxDirect = boxBtnObj("d", self.directJoints, maxlen=20)
form.addRow('direct', boxDirect)
# Reset
boxResetAnno = boxBtnObj("e", self.resetAnno, maxlen=20)
form.addRow('reset', boxResetAnno)
# Reverse
boxReverse = boxBtnObj("e", self.reverseData, maxlen=20)
form.addRow('reverse', boxReverse)
# Time Line
boxSld = QtGui.QHBoxLayout()
lcd = QtGui.QLCDNumber(self)
self.sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.sld.valueChanged.connect(lcd.display)
self.sld.valueChanged.connect(self.sliderChange)
boxSld.addWidget(lcd)
boxSld.addWidget(self.sld)
#Initialize the table
#the horizontal axis is user2's time
self.table = QtGui.QTableWidget(self)
self.table.setColumnCount(0)
#self.table.setHorizontalHeaderLabels("use_2 time")
jItem = QtGui.QTableWidgetItem(str(0))
self.table.setHorizontalHeaderItem(0, jItem)
#Update the graph when an item is clicked
self.table.itemClicked.connect(self.clickUpdateTable)
#self.table.itemActivated.connect(self.activatedUpdateTable)
self.table.setItem(0, 0, QtGui.QTableWidgetItem(1))
#self.itemSelectionChanged.connect(self.selection_changed)
#self.tableSlider = self.table.verticalScrollBar()
boxTable = QtGui.QHBoxLayout()
boxTable.addWidget(self.table)
#Layout
grid.addLayout(form,1,0)
grid.addLayout(boxSld,2,0)
grid.addLayout(boxTable,3,0)
self.setLayout(grid)
self.resize(400,100)
self.setWindowTitle("cca window")
self.show()
def chooseDbFile(self):
dialog = QtGui.QFileDialog()
dialog.setFileMode(QtGui.QFileDialog.ExistingFile)
if dialog.exec_():
fileNames = dialog.selectedFiles()
for f in fileNames:
self.txtSepFile.setText(f)
return
return self.txtSepFile.setText('')
# reset
def resetAnno(self):
print "reset Data before:",self.edited_joints.shape, self.edited_annos.shape
self.edited_joints = copy.deepcopy(self.input_joints)
self.edited_speaks = copy.deepcopy(self.input_speaks)
#datalen = self.edited_speaks.shape[1]
self.edited_annos = copy.deepcopy(self.input_annos) #np.zeros((datalen, self.anno_dim)) #(1000, 2)
self.edited_flags = copy.deepcopy(self.input_flags)
self.sld.setMaximum(self.edited_speaks.shape[0]-1) #
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
print "reset Data after:",self.edited_joints.shape, self.edited_speaks.shape, self.edited_annos.shape
def speakNorm(self, data, upper=300, lower=80):
# Upper bound
c_data=data if data <upper else upper
# Lower bound
c_data=c_data if c_data>lower else lower
# Divide by the range to scale into 0-1
return (c_data-lower)/float(upper-lower)
def convDecibel(self, data):
return 20*math.log10(data)
#decibel
def speakDecibelNorm(self, data):
decibel = self.convDecibel(data)
return self.speakNorm(decibel, upper=70, lower=30)
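# e.g. an RMS value of 1000 becomes 20*log10(1000) = 60 dB, which speakNorm(upper=70, lower=30) maps to 0.75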
def updateTable(self, data, anno, times):
#plt.plot(data)
#plt.show()
th = 0#float(self.ThesholdBox.text())
if(len(data)==0):
print "No Data! Push exec button..."
return
d_row, d_col = data.shape #(1000, 2)
add_ann = anno.shape[1] #number of annotation columns
#print add_ann
add_flag = 1
#t_row: time axis, t_col: number of dimensions, add_ann: annotations to add
t_col = d_col+add_ann+add_flag
t_row = d_row
#print "t, d, a",t_col,d_col,add_ann
self.table.clear()
font = QtGui.QFont()
font.setFamily(u"DejaVu Sans")
font.setPointSize(5)
self.table.horizontalHeader().setFont(font)
self.table.verticalHeader().setFont(font)
self.table.setColumnCount(t_row) # number of dimensions (col in the table, but row in the data!) + flag
self.table.setRowCount(t_col) # time axis (row in the table, but col in the data!)
# print "t:", t_row, t_col
#self.table.setRowCount(data.shape[0])
#self.table.setColumnCount(ann_num)
# Set the axis values
#for i in range(len(times)):
# jItem = QtGui.QTableWidgetItem(str(i))
# self.table.setHorizontalHeaderItem(i, jItem)
hor = True
for i in range(t_col):
iItem = QtGui.QTableWidgetItem(str(i))
self.table.setVerticalHeaderItem(i, iItem)
self.table.verticalHeaderItem(i).setToolTip(str(i))
#to show timestamps on the time axis, use the line below
#self.table.verticalHeaderItem(i).setToolTip(str(times[i]))
for j in range(t_row):
if hor == True:
jItem = QtGui.QTableWidgetItem(str(j))
self.table.setHorizontalHeaderItem(j, jItem)
self.table.horizontalHeaderItem(j).setToolTip(str(times[j]))
#print "%.10f"%times[j]
if i < d_col: #visualize the (speak) data
# when the audio data is RMS
# set_data: range is 0-1
set_data = data[j][i] #self.speakDecibelNorm(data[j][i])
#ON/OFF thresholding, far too crude
#set_data = 1 if set_data > 0.3 else 0
#self.edited_speaks[j][i] = set_data
#temporary offset
#set_data = data_proc2.speakNorm(set_data, upper=0.75, lower=0.25)
#self.edited_speaks[j][i] = set_data
#rescale color_data to the 0-255 range
#color_data=int(set_data*255)
color_data = set_data
color_data = 1 if color_data > 0.5 else 0
color_data=int(color_data*255)
#print color_data
#print "at",color_data
color = [255-color_data]*3
elif i >= d_col and i < t_col-add_flag: # visualize the annotations
#print i
"""
set_data = anno[j][i-d_col] #anno(1000, 2)
if set_data == 0:
color = [255, 255, 255]
else:
color = [0, 0, 0]
"""
set_data = anno[j][i-d_col] #anno(1000, 2)
set_data = 0 if set_data < 0 else set_data
set_data = 1 if set_data > 1 else set_data
color_data=int(set_data*255)
color = [255-color_data]*3
else: #flag
set_data = self.edited_flags[j][0]
if set_data == 0:
color = [255, 255, 255]
else:
color = [0, 0, 0]
self.table.setItem(i, j, QtGui.QTableWidgetItem())
self.table.item(i, j).setBackground(QtGui.QColor(color[0],color[1],color[2]))
self.table.item(i, j).setToolTip(str(set_data))
hor = False
self.table.setVisible(False)
self.table.resizeRowsToContents()
self.table.resizeColumnsToContents()
self.table.setVisible(True)
# Called when a table cell is clicked
def clickUpdateTable(self, cItem):
self.tip = float(cItem.toolTip())
self.r = cItem.row()
self.c = cItem.column()
print "r:",self.r,", c:",self.c, ". tip:",self.tip
set_data = 0 if self.tip == 1 else 1
speak_dim = self.edited_speaks.shape[1]
anno_dim = self.edited_annos.shape[1]
if self.r < speak_dim:
self.edited_speaks[self.c][self.r] = set_data
elif self.r >= speak_dim and self.r < speak_dim+anno_dim:
self.edited_annos[self.c][self.r-speak_dim] = set_data
else:
self.edited_flags[self.c][0] = set_data
#indexes = self.table.selectedIndexes()
#print indexes
color = [[255, 255, 255], [0, 0, 0]]
self.table.setItem(self.r, self.c, QtGui.QTableWidgetItem())
self.table.item(self.r, self.c).setBackground(QtGui.QColor(color[set_data][0],color[set_data][1],color[set_data][2]))
self.table.item(self.r, self.c).setToolTip(str(set_data))
#visualize the joints
# self.vizJoint(self.c)
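# Toggle the selected user's annotation over the [start, end) frame range and repaint those table cells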
def checkRangeData(self):
set_row = int(self.checkUser.text()) #0~4 User1=2, User2=3
start = int(self.checkSt.text())
end = int(self.checkEd.text())
color = [[255, 255, 255], [0, 0, 0]]
table_offset = self.edited_speaks.shape[1]
#determine which user to change
user = set_row - table_offset
#print user, table_offset
if user < 0:
print "Not Change!"
return
print "Change [User:", user, ", Start:",start,", end:",end,"]"
for i in range(start, end):
get_data = self.edited_annos[i][user]
set_data = 0 if get_data == 1 else 1
self.table.setItem(set_row, i, QtGui.QTableWidgetItem())
self.table.item(set_row, i).setBackground(QtGui.QColor(color[set_data][0],color[set_data][1],color[set_data][2]))
self.table.item(set_row, i).setToolTip(str(set_data))
self.edited_annos[i][user] = set_data
def sliderChange(self, timeline):
if len(self.input_joints)==0:
print "now no data:", timeline
return
#self.table.selectColum(timeline)
#self.table.verticalScrollBar().setValue(timeline)
self.table.setCurrentCell(self.r, timeline)
self.pubViz(timeline)
"""
def activatedUpdateTable(self, cItem):
row = cItem.row()
col = cItem.column()
print "row:", row,", col:", col#, ". tip:",self.tip
"""
# Load raw data
def inputData(self):
filesize = int(self.frmSizeBox.text())
print "Input raw data:", filesize
self.fname = [str(self.txtSepFile.text())]
input_data = data_proc2.load_persons(self.fname, annobool=False, datalen=filesize)
#print input_data["times"][0]
# if the data needs extra processing, enable the line below
#input_data[2] = data_proc.proc_anno(input_data[0], input_data[2], use_vote=True, use_speak=False)
self.loadInputData(input_data)
# Load already-processed data
def inputProcedData(self):
filesize = int(self.frmSizeBox.text())
print "Input proced data", filesize
self.fname = [str(self.txtSepFile.text())]
input_data = data_proc2.load_proced_data_flag(self.fname, datalen=filesize)
#print input_data["times"]
self.loadInputData(input_data)
def loadInputData(self, input_data):
self.input_joints = input_data["joints"]
self.input_speaks = input_data["speaks"]
datalen=self.input_speaks.shape[0]
if input_data.has_key("annos"):
self.input_annos = input_data["annos"]
#for now, edit only user1 (0124_persons is a separate case)
#self.input_annos[:,:1] = np.zeros((self.input_annos.shape[0],1))
"""
user_joint = self.input_joints[:,:36]
user_anno = self.input_annos[:,:1]
calc_user_anno = data_proc2.proc_anno(user_joint, user_anno, use_vote=True, use_anno=False, threshold=-0.2)
self.input_annos[:,:1] = calc_user_anno
print "now_persons mode"
user_joint = self.input_joints[:,36:]
user_anno = self.input_annos[:,1:]
calc_user_anno = data_proc2.proc_anno(user_joint, user_anno, use_vote=True, use_anno=False, threshold=-0.2)
self.input_annos[:,1:] = calc_user_anno
"""
#print "now model mode"
self.input_annos[:,:] = np.round(self.input_annos[:,:])
else:
self.input_annos = np.zeros((datalen, self.anno_dim))#(1000,2)
if input_data.has_key("flags"):
print "load flags"
self.input_flags = input_data["flags"]
else:
print "create flags"
self.input_flags = np.zeros((datalen, 1))
if input_data.has_key("times"):
print "load times"
self.input_times = input_data["times"]
else:
print "create times"
self.input_times = np.zeros((datalen, 1))
"""
diff_times = []
for t in range(len(self.input_times)):
if t == 0:
diff_times.append(0)
else:
diff_times.append(self.input_times[t]-self.input_times[t-1])
fps = np.round(1/float(np.mean(diff_times)))
plt.plot(self.input_annos[:,0], label="person", color="red")
plt.plot(self.input_annos[:,1], label="robot", color="green")
plt.xlabel("Frame (fps:"+str(fps)+")")
plt.title("speaker/listener result")
plt.xlim(0,len(self.input_annos))
plt.ylim(-0.2, 1.2)
plt.legend()
plt.show()
"""
print "joints shape:", self.input_joints.shape
print "speaks shape:", self.input_speaks.shape
print "annos shape:", self.input_annos.shape
print "flags shape:", self.input_flags.shape
print "times shape:", self.input_times.shape
#for j in range(len(self.input_times)):
# print j,"%.10f"%self.input_times[j]
self.timediff(self.input_times)
self.edited_joints = copy.deepcopy(self.input_joints)#deepcopy yields an np.array copy
self.edited_speaks = copy.deepcopy(self.input_speaks)#speak is not edited (for now)
self.edited_annos = copy.deepcopy(self.input_annos)
self.edited_flags = copy.deepcopy(self.input_flags)
self.edited_times = copy.deepcopy(self.input_times)
#self.updateTable(self.input_joints)
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
#Set the slider's maximum value
self.sld.setMaximum(datalen - 1)
print "end"
def outputData(self):
name_json = str(self.txtOutputFile.text())
keys = ["joints", "speaks", "annos", "flags", "times"]
data = [self.edited_joints, self.edited_speaks, self.edited_annos, self.edited_flags, self.edited_times]
"""
if len(keys) != len(data):
print "Save false! keys len:"
"""
data_proc2.save_data(name_json, keys, data)
def timediff(self, times):
fpss = []
for i in range(len(times)-1):
fps = 1/(times[i+1]-times[i])
fpss.append(fps)
#print i, fps
fpss = np.array(fpss)
#print "mean:",np.mean(fpss),",std:",np.std(fpss)
def cutRangeData(self):
start = int(self.cutStart.text())
end = int(self.cutEnd.text())
print "cut data:",start,"-",end
self.edited_joints = self.edited_joints[start:end]
self.edited_speaks = self.edited_speaks[start:end]
self.edited_annos = self.edited_annos[start:end]
self.edited_flags = self.edited_flags[start:end]
self.edited_times = self.edited_times[start:end]
print "joints shape:", self.edited_joints.shape
print "speaks shape:", self.edited_speaks.shape
print "annos shape:", self.edited_annos.shape
print "flags shape:", self.edited_flags.shape
print "times shape:", self.edited_times.shape
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
#Set the slider's maximum value
self.sld.setMaximum(self.edited_joints.shape[0]-1)
print "end"
def directJoints(self):
print "direct joints"
size, dim = self.edited_joints.shape
user1_nose_y_idx = 35+33+2
pair = 1
pair_stack = 0
offset_y = 0.03
for i in range(size):
anno = self.edited_annos[i]
if anno[1] == 0:#if this user is a listener
if anno[0] == 1 and anno[2] == 0:
#print i,":user0",self.input_joints[i][user1_nose_y_idx]
self.edited_joints[i][user1_nose_y_idx] += offset_y
pair = 0
pair_stack = 0
#print self.input_joints[i][user1_nose_y_idx]
elif anno[0] == 0 and anno[2] == 1:
#print i,":user2",self.input_joints[i][user1_nose_y_idx]
self.edited_joints[i][user1_nose_y_idx] -= offset_y
pair = 2
pair_stack = 0
#print self.input_joints[i][user1_nose_y_idx]
else: #if no one is speaking
pair_stack += 1
else:#if this user is the speaker, decide which listener to face
if pair == 0:
self.edited_joints[i][user1_nose_y_idx] += offset_y
elif pair == 2:
self.edited_joints[i][user1_nose_y_idx] -= offset_y
# Flip the data and append it
def reverseData(self):
#flip only the y direction
def reverse_y(joints):
#(10000, 36)
rev = np.array([[1,-1,1]*12]*joints.shape[0])
return joints*rev
print "reverseData before:",self.edited_joints.shape, self.edited_annos.shape
datalen = self.edited_annos.shape[0]
if self.anno_dim == 2:
j_r = np.hstack((self.edited_joints[:,36:], self.edited_joints[:,:36]))
self.edited_joints = np.vstack((self.edited_joints, j_r))
s_r = np.hstack((self.edited_speaks[:,1].reshape(datalen,1), self.edited_speaks[:,0].reshape(datalen,1)))
self.edited_speaks = np.vstack((self.edited_speaks, s_r))
a_r = np.hstack((self.edited_annos[:,1].reshape(datalen,1), self.edited_annos[:,0].reshape(datalen,1)))
self.edited_annos = np.vstack((self.edited_annos, a_r))
self.edited_flags = np.vstack((self.edited_flags, self.edited_flags))
self.edited_times = np.append(self.edited_times, self.edited_times)
else:
print "bad shape"
#print self.edited_times.shape
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
#Set the slider's maximum value
self.sld.setMaximum(self.edited_joints.shape[0]-1)
print "reverseData after:",self.edited_joints.shape,self.edited_speaks.shape,self.edited_annos.shape
def rviz_obj(self, obj_id, obj_ns, obj_type, obj_size, obj_color=[0, 0, 0, 0], obj_life=0):
obj = Marker()
obj.header.frame_id, obj.header.stamp = "camera_link", rospy.Time.now()
obj.ns, obj.action, obj.type = str(obj_ns), 0, obj_type
obj.scale.x, obj.scale.y, obj.scale.z = obj_size[0], obj_size[1], obj_size[2]
obj.color = obj_color
obj.lifetime = rospy.Duration.from_sec(obj_life)
obj.pose.orientation.w = 1.0
return obj
def set_point(self, pos, addx=0, addy=0, addz=0, rotate=False):
pt = Point()
if rotate == True:
pt.x, pt.y, pt.z = -1*pos[0]+addx, -1*pos[1]+addy, pos[2]+addz
else:
pt.x, pt.y, pt.z = pos[0]+addx, pos[1]+addy, pos[2]+addz
return pt
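# Pack one person's joint coordinates (x, y, z triplets) into a SPHERE_LIST marker (type 7)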
def set_vizmsg_point(self, u, data, color, psize, ofs, addx=0, addy=0, rotate=False):
pmsg = self.rviz_obj(u, 'p'+str(u), 7, [psize, psize, psize], color, 0)
points = []
for p in range(data.shape[1]/ofs):
points.append(self.set_point([data[0, p*ofs],
data[0, p*ofs+1],
data[0, p*ofs+2]],
addx=addx, addy=addy, rotate=rotate))
pmsg.points = points
return pmsg
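# Connect the joints listed in llist into a LINE_LIST marker (type 5); each consecutive index pair becomes one segment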
def set_vizmsg_line(self, u, data, color, lsize, llist, addx=0, addy=0, rotate=False):
lmsg = self.rviz_obj(u, 'l'+str(u), 5, [lsize, lsize, lsize], color, 0)
for ls in llist:
for l in range(len(ls)-1):
for add in r |
def pubViz(self, tl):
#print "pub Viz:", tl
# drawing
msgs = MarkerArray()
amsg = Float64MultiArray()
per_js = []
dim_p = 36
dim = len(self.edited_joints[tl])
for i in range(dim/dim_p):
per_js.append(self.edited_joints[tl,dim_p*i:dim_p*(i+1)].reshape(1, dim_p))
ofs = 3
#ofs_xyr = [[-0.5, -0.5, 1], [0.5, 0, 0], [-0.5, 0.5, 1]]
ofs_xyr = [[-1, 0, 1], [0, 0, 0]]
for u, (person, speak, anno) in enumerate(zip(per_js, self.edited_speaks[tl], self.edited_annos[tl])):
# ---Person points---
offset = 0
psize = speak*0.05
#psize = 0.03
#psize = 0.05 if anno > 0.7 else 0.03
pmsg = self.set_vizmsg_point(u, person, self.carray[0], psize, ofs,
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], rotate=ofs_xyr[u][2])
msgs.markers.append(pmsg)
# ---Person lines---
lsize = 0.03 if anno > 0.5 else 0.01
cid = 3 if anno > 0.5 else 2
lmsg = self.set_vizmsg_line(u, person, self.carray[cid], lsize, self.llist,
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], rotate=ofs_xyr[u][2])
msgs.markers.append(lmsg)
# ------text------
tidx, tsize = 3, 0.1 # tidx=3 is Head
tmsg = self.rviz_obj(u, 't'+str(u), 9, [tsize, tsize, tsize], self.carray[u], 0)
tmsg.pose.position = self.set_point([person[0, tidx*ofs], person[0, tidx*ofs+1], person[0, tidx*ofs+2]],
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], addz=0.3)
tmsg.pose.orientation.w = 1
tmsg.text = "User"+str(u)+":"+str(speak)
msgs.markers.append(tmsg)
amsg.data.append(speak)
# ------text------
if self.edited_times[tl] > 0.00001:
tidx, tsize = 3, 0.1 # tidx=3 is Head
tmsg = self.rviz_obj(u, 'time'+str(u), 9, [tsize, tsize, tsize], self.carray[u], 0)
tmsg.pose.position = self.set_point([-0.5, 0, 0])
tmsg.pose.orientation.w = 1
tmsg.text = "time: "+str(self.edited_times[tl])
msgs.markers.append(tmsg)
self.mpub.publish(msgs)
#print pred[1].data[0, 0]
self.speakpub.publish(amsg)
def main():
app = QtGui.QApplication(sys.argv)
anotation = ANNOTATION()
#graph = GRAPH()
sys.exit(app.exec_())
if __name__=='__main__':
main()
| ange(2):
#print person[0, ls[l+add]], ls[l+add], l, add
linepoint=self.set_point([data[0,ls[l+add]*3],
data[0,ls[l+add]*3+1],
data[0,ls[l+add]*3+2]],
addx=addx, addy=addy, rotate=rotate)
lmsg.points.append(linepoint)
return lmsg
| identifier_body |
ann_interface.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
2017.1.24---
Added time handling
2017.1.4----
Handle the audio as raw data
Treat it separately from the annotation
2016.12.12----
Use 20161210_proc3p.json and try flipping the data
2016.12.9----
Annotation interface for three-person conversations
Annotate manually
Synchronized the slider bar with the table
2016.10.19----
Interface for annotating interaction data
"""
import sys
import os.path
import math
import json
import time
import copy
import numpy as np
from PyQt4 import QtGui, QtCore
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import matplotlib.pyplot as plt
import rospy
from visualization_msgs.msg import MarkerArray
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point
from geometry_msgs.msg import PointStamped
from std_msgs.msg import ColorRGBA
import data_proc2
from std_msgs.msg import Float64MultiArray
class ANNOTATION(QtGui.QWidget):
def __init__(self):
super(ANNOTATION, self).__init__()
#Initialize the UI
self.initUI()
#Initialize the ROS node, publishers, etc.
rospy.init_node('annotation_interface2', anonymous=True)
self.mpub = rospy.Publisher('visualization_marker_array', MarkerArray, queue_size=10)
#self.ppub = rospy.Publisher('joint_diff', PointStamped, queue_size=10)
self.speakpub = rospy.Publisher('/speaks', Float64MultiArray, queue_size=10)
#rviz color settings (not finished)
self.carray = []
clist = [[1, 0, 0, 1], [0, 1, 0, 1], [1, 1, 0, 1], [1, 0.5, 0, 1]]
for c in clist:
color = ColorRGBA()
color.r, color.g, color.b, color.a = c[0], c[1], c[2], c[3]
self.carray.append(color)
# set extra data param
self.dim_x = 72
self.llist = [[0, 1, 10, 2, 3, 11], [10, 4, 5, 6], [10, 7, 8, 9]]
self.input_joints = []
self.input_speaks = []
self.anno_dim = 2
self.r, self.c = 0, 0
def initUI(self):
#Botton Objectの作成
def boxBtnObj(name, func, maxlen=30):
box = QtGui.QHBoxLayout()
btn = btnObj(name, func, maxlen=maxlen)
box.addWidget(btn)
return box
def btnObj(name, func, maxlen=30):
btn = QtGui.QPushButton(name)
btn.setMaximumWidth(maxlen)
btn.clicked.connect(func)
return btn
grid = QtGui.QGridLayout()
form = QtGui.QFormLayout()
#frame size
self.frmSizeBox = QtGui.QLineEdit()
self.frmSizeBox.setText('-1')
self.frmSizeBox.setFixedWidth(100)
form.addRow('size', self.frmSizeBox)
#ファイル入力ボックス
self.txtSepFile = QtGui.QLineEdit()
btnSepFile = btnObj("...", self.chooseDbFile, maxlen=40)
btnRawInput = btnObj("raw", self.inputData, maxlen=60)
btnProcedInput = btnObj("proced", self.inputProcedData, maxlen=60)
boxSepFile = QtGui.QHBoxLayout()
boxSepFile.addWidget(self.txtSepFile)
boxSepFile.addWidget(btnSepFile)
boxSepFile.addWidget(btnRawInput)
boxSepFile.addWidget(btnProcedInput)
form.addRow('input', boxSepFile)
#ファイル出力ボックス
self.txtOutputFile = QtGui.QLineEdit()
self.txtOutputFile.setText('test_proc.json')
btnOutputFile = btnObj("save", self.outputData, maxlen=100)
boxOutputFile = QtGui.QHBoxLayout()
boxOutputFile.addWidget(self.txtOutputFile)
boxOutputFile.addWidget(btnOutputFile)
form.addRow('output', boxOutputFile)
#data cut
cutRange = QtGui.QHBoxLayout()
self.cutStart = QtGui.QLineEdit()
self.cutStart.setText('0')
self.cutEnd = QtGui.QLineEdit()
self.cutEnd.setText('0')
btnCutRange = btnObj("exec", self.cutRangeData, maxlen=60)
cutRange.addWidget(self.cutStart)
cutRange.addWidget(self.cutEnd)
cutRange.addWidget(btnCutRange)
form.addRow('trimming', cutRange)
# check range
checkRange = QtGui.QHBoxLayout()
self.checkUser = QtGui.QLineEdit()
self.checkUser.setText('0')
self.checkSt = QtGui.QLineEdit()
self.checkSt.setText('0')
self.checkEd = QtGui.QLineEdit()
self.checkEd.setText('0')
btnRangeCheck = btnObj("check", self.checkRangeData, maxlen=60)
checkRange.addWidget(self.checkUser)
checkRange.addWidget(self.checkSt)
checkRange.addWidget(self.checkEd)
checkRange.addWidget(btnRangeCheck)
form.addRow('U/S/E', checkRange)
# direct
boxDirect = boxBtnObj("d", self.directJoints, maxlen=20)
form.addRow('direct', boxDirect)
# Reset
boxResetAnno = boxBtnObj("e", self.resetAnno, maxlen=20)
form.addRow('reset', boxResetAnno)
# Reverse
boxReverse = boxBtnObj("e", self.reverseData, maxlen=20)
form.addRow('reverse', boxReverse)
# Time Line
boxSld = QtGui.QHBoxLayout()
lcd = QtGui.QLCDNumber(self)
self.sld = QtGui.QSlider(QtCore.Qt.Horizontal, self)
self.sld.valueChanged.connect(lcd.display)
self.sld.valueChanged.connect(self.sliderChange)
boxSld.addWidget(lcd)
boxSld.addWidget(self.sld)
#テーブルの初期化
#horizonはuser2の時間
self.table = QtGui.QTableWidget(self)
self.table.setColumnCount(0)
#self.table.setHorizontalHeaderLabels("use_2 time")
jItem = QtGui.QTableWidgetItem(str(0))
self.table.setHorizontalHeaderItem(0, jItem)
#アイテムがクリックされたらグラフを更新
self.table.itemClicked.connect(self.clickUpdateTable)
#self.table.itemActivated.connect(self.activatedUpdateTable)
self.table.setItem(0, 0, QtGui.QTableWidgetItem(1))
#self.itemSelectionChanged.connect(self.selection_changed)
#self.tableSlider = self.table.verticalScrollBar()
boxTable = QtGui.QHBoxLayout()
boxTable.addWidget(self.table)
#配置
grid.addLayout(form,1,0)
grid.addLayout(boxSld,2,0)
grid.addLayout(boxTable,3,0)
self.setLayout(grid)
self.resize(400,100)
self.setWindowTitle("cca window")
self.show()
def chooseDbFile(self):
dialog = QtGui.QFileDialog()
dialog.setFileMode(QtGui.QFileDialog.ExistingFile)
if dialog.exec_():
fileNames = dialog.selectedFiles()
for f in fileNames:
self.txtSepFile.setText(f)
return
return self.txtSepFile.setText('')
# reset
def resetAnno(self):
print "reset Data before:",self.edited_joints.shape, self.edited_annos.shape
self.edited_joints = copy.deepcopy(self.input_joints)
self.edited_speaks = copy.deepcopy(self.input_speaks)
#datalen = self.edited_speaks.shape[1]
self.edited_annos = copy.deepcopy(self.input_annos) #np.zeros((datalen, self.anno_dim)) #(1000, 2)
self.edited_flags = copy.deepcopy(self.input_flags)
self.sld.setMaximum(self.edited_speaks.shape[0]-1) #
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
print "reset Data after:",self.edited_joints.shape, self.edited_speaks.shape, self.edited_annos.shape
def speakNorm(self, data, upper=300, lower=80):
# 上限
c_data=data if data <upper else upper
# 下限
c_data=c_data if c_data>lower else lower
# 最大値で割る
return (c_data-lower)/float(upper-lower)
def convDecibel(self, data):
return 20*math.log10(data)
#decibel
def speakDecibelNorm(self, data):
decibel = self.convDecibel(data)
return self.speakNorm(decibel, upper=70, lower=30)
def updateTable(self, data, anno, times):
#plt.plot(data)
#plt.show()
th = 0#float(self.ThesholdBox.text())
if(len(data)==0):
print "No Data! Push exec button..."
d_row, d_col = data.shape #(1000, 2)
add_ann = anno.shape[1] #annotationの個数
#print add_ann
add_flag = 1
#t_row:時間軸, t_col:次元数, add_ann:加えるAnnotation
t_col = d_col+add_ann+add_flag
t_row = d_row
#print "t, d, a",t_col,d_col,add_ann
self.table.clear()
font = QtGui.QFont()
font.setFamily(u"DejaVu Sans")
font.setPointSize(5)
self.table.horizontalHeader().setFont(font)
self.table.verticalHeader().setFont(font)
self.table.setColumnCount(t_row) # 次元数(tableはcolだけど値はrow!)+flag
self.table.setRowCount(t_col) # 時間軸(tableはrowだけど値はcol!)
# print "t:", t_row, t_col
#self.table.setRowCount(data.shape[0])
#self.table.setColumnCount(ann_num)
# 軸の値をSet
#for i in range(len(times)):
# jItem = QtGui.QTableWidgetItem(str(i))
# self.table.setHorizontalHeaderItem(i, jItem)
hor = True
for i in range(t_col):
iItem = QtGui.QTableWidgetItem(str(i))
self.table.setVerticalHeaderItem(i, iItem)
self.table.verticalHeaderItem(i).setToolTip(str(i))
#時間軸にデータを入れるなら↓
#self.table.verticalHeaderItem(i).setToolTip(str(times[i]))
for j in range(t_row):
if hor == True:
jItem = QtGui.QTableWidgetItem(str(j))
self.table.setHorizontalHeaderItem(j, jItem)
self.table.horizontalHeaderItem(j).setToolTip(str(times[j]))
#print "%.10f"%times[j]
if i < d_col: #data(speak)の可視化
# 音声Dataがrmsの場合
# set_data:範囲は 0-1
set_data = data[j][i] #self.speakDecibelNorm(data[j][i])
#ON/OFF むちゃくちゃすぎる
#set_data = 1 if set_data > 0.3 else 0
#self.edited_speaks[j][i] = set_data
#一時的なOffset
#set_data = data_proc2.speakNorm(set_data, upper=0.75, lower=0.25)
#self.edited_speaks[j][i] = set_data
#color_dataの範囲を0-255に変更
#color_data=int(set_data*255)
color_data = set_data
color_data = 1 if color_data > 0.5 else 0
color_data=int(color_data*255)
#print color_data
#print "at",color_data
color = [255-color_data]*3
elif i >= d_col and i < t_col-add_flag: # annotationの可視化
#print i
"""
set_data = anno[j][i-d_col] #anno(1000, 2)
if set_data == 0:
color = [255, 255, 255]
else:
color = [0, 0, 0]
"""
set_data = anno[j][i-d_col] #anno(1000, 2)
set_data = 0 if set_data < 0 else set_data
set_data = 1 if set_data > 1 else set_data
color_data=int(set_data*255)
color = [255-color_data]*3
else: #flag
set_data = self.edited_flags[j][0]
if set_data == 0:
color = [255, 255, 255]
else:
color = [0, 0, 0]
self.table.setItem(i, j, QtGui.QTableWidgetItem())
self.table.item(i, j).setBackground(QtGui.QColor(color[0],color[1],color[2]))
self.table.item(i, j).setToolTip(str(set_data))
hor = False
self.table.setVisible(False)
self.table.resizeRowsToContents()
self.table.resizeColumnsToContents()
self.table.setVisible(True)
# TableがClickされたとき
def clickUpdateTable(self, cItem):
self.tip = float(cItem.toolTip())
self.r = cItem.row()
self.c = cItem.column()
print "r:",self.r,", c:",self.c, ". tip:",self.tip
set_data = 0 if self.tip == 1 else 1
speak_dim = self.edited_speaks.shape[1]
anno_dim = self.edited_annos.shape[1]
if self.r < speak_dim:
self.edited_speaks[self.c][self.r] = set_data
elif self.r >= speak_dim and self.r < speak_dim+anno_dim:
self.edited_annos[self.c][self.r-speak_dim] = set_data
else:
self.edited_flags[self.c][0] = set_data
#indexes = self.table.selectedIndexes()
#print indexes
color = [[255, 255, 255], [0, 0, 0]]
self.table.setItem(self.r, self.c, QtGui.QTableWidgetItem())
self.table.item(self.r, self.c).setBackground(QtGui.QColor(color[set_data][0],color[set_data][1],color[set_data][2]))
self.table.item(self.r, self.c).setToolTip(str(set_data))
#jointsの可視化
# self.vizJoint(self.c)
def checkRangeData(self):
set_row = int(self.checkUser.text()) #0~4 User1=2, User2=3
start = int(self.checkSt.text())
end = int(self.checkEd.text())
color = [[255, 255, 255], [0, 0, 0]]
table_offset = self.edited_speaks.shape[1]
#変更するUserの指定
user = set_row - table_offset
#print user, table_offset
if user < 0:
print "Not Change!"
return
print "Change [User:", user, ", Start:",start,", end:",end,"]"
for i in range(start, end):
get_data = self.edited_annos[i][user]
set_data = 0 if get_data == 1 else 1
self.table.setItem(set_row, i, QtGui.QTableWidgetItem())
self.table.item(set_row, i).setBackground(QtGui.QColor(color[set_data][0],color[set_data][1],color[set_data][2]))
self.table.item(set_row, i).setToolTip(str(set_data))
self.edited_annos[i][user] = set_data
def sliderChange(self, timeline):
if len(self.input_joints)==0:
print "now no data:", timeline
return
#self.table.selectColum(timeline)
#self.table.verticalScrollBar().setValue(timeline)
self.table.setCurrentCell(self.r, timeline)
self.pubViz(timeline)
"""
def activatedUpdateTable(self, cItem):
row = cItem.row()
col = cItem.column()
print "row:", row,", col:", col#, ". tip:",self.tip
"""
# 生Dataの入力
def inputData(self):
filesize = int(self.frmSizeBox.text())
print "Input raw data:", filesize
self.fname = [str(self.txtSepFile.text())]
input_data = data_proc2.load_persons(self.fname, annobool=False, datalen=filesize)
#print input_data["times"][0]
# もし手をくわえるなら
#input_data[2] = data_proc.proc_anno(input_data[0], input_data[2], use_vote=True, use_speak=False)
self.loadInputData(input_data)
# 加工済のDataの入力
def inputProcedData(self):
filesize = int(self.frmSizeBox.text())
print "Input proced data", filesize
self.fname = [str(self.txtSepFile.text())]
input_data = data_proc2.load_proced_data_flag(self.fname, datalen=filesize)
#print input_data["times"]
self.loadInputData(input_data)
def loadInputData(self, input_data):
self.input_joints = input_data["joints"]
self.input_speaks = input_data["speaks"]
datalen=self.input_speaks.shape[0]
if input_data.has_key("annos"):
self.input_annos = input_data["annos"]
#とりあえずuser1だけ編集(0124_personsはまた別)
#self.input_annos[:,:1] = np.zeros((self.input_annos.shape[0],1))
"""
user_joint = self.input_joints[:,:36]
user_anno = self.input_annos[:,:1]
calc_user_anno = data_proc2.proc_anno(user_joint, user_anno, use_vote=True, use_anno=False, threshold=-0.2)
self.input_annos[:,:1] = calc_user_anno
print "now_persons mode"
user_joint = self.input_joints[:,36:]
user_anno = self.input_annos[:,1:]
calc_user_anno = data_proc2.proc_anno(user_joint, user_anno, use_vote=True, use_anno=False, threshold=-0.2)
self.input_annos[:,1:] = calc_user_anno
"""
#print "now model mode"
self.input_annos[:,:] = np.round(self.input_annos[:,:])
else:
self.input_annos = np.zeros((datalen, self.anno_dim))#(1000,2)
if input_data.has_key("flags"):
print "load flags"
self.input_flags = input_data["flags"]
else:
print "create flags"
self.input_flags = np.zeros((datalen, 1))
if input_data.has_key("times"):
print "load times"
self.input_times = input_data["times"]
else:
print "create times"
self.input_times = np.zeros((datalen, 1))
"""
diff_times = []
for t in range(len(self.input_times)):
if t == 0:
diff_times.append(0)
else:
diff_times.append(self.input_times[t]-self.input_times[t-1])
fps = np.round(1/float(np.mean(diff_times)))
plt.plot(self.input_annos[:,0], label="person", color="red")
plt.plot(self.input_annos[:,1], label="robot", color="green")
plt.xlabel("Frame (fps:"+str(fps)+")")
plt.title("speaker/listener result")
plt.xlim(0,len(self.input_annos))
plt.ylim(-0.2, 1.2)
plt.legend()
plt.show()
"""
print "joints shape:", self.input_joints.shape
print "speaks shape:", self.input_speaks.shape
print "annos shape:", self.input_annos.shape
print "flags shape:", self.input_flags.shape
print "times shape:", self.input_times.shape
#for j in range(len(self.input_times)):
# print j,"%.10f"%self.input_times[j]
self.timediff(self.input_times)
self.edited_joints = copy.deepcopy(self.input_joints)#np.arrayでCopyされる
self.edited_speaks = copy.deepcopy(self.input_speaks)#speakは編集しない(今のところ)
self.edited_annos = copy.deepcopy(self.input_annos)
self.edited_flags = copy.deepcopy(self.input_flags)
self.edited_times = copy.deepcopy(self.input_times)
#self.updateTable(self.input_joints)
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
#Sliderの最大値をset
self.sld.setMaximum(datalen - 1)
print "end"
def outputData(self):
name_json = str(self.txtOutputFile.text())
keys = ["joints", "speaks", "annos", "flags", "times"]
data = [self.edited_joints, self.edited_speaks, self.edited_annos, self.edited_flags, self.edited_times]
"""
if len(keys) != len(data):
print "Save false! keys len:"
"""
data_proc2.save_data(name_json, keys, data)
def timediff(self, times):
fpss = []
for i in range(len(times)-1):
fps = 1/(times[i+1]-times[i])
fpss.append(fps)
#print i, fps
fpss = np.array(fpss)
#print "mean:",np.mean(fpss),",std:",np.std(fpss)
def cutRangeData(self):
start = int(self.cutStart.text())
end = int(self.cutEnd.text())
print "cut data:",start,"-",end
self.edited_joints = self.edited_joints[start:end]
self.edited_speaks = self.edited_speaks[start:end]
self.edited_annos = self.edited_annos[start:end]
self.edited_flags = self.edited_flags[start:end]
self.edited_times = self.edited_times[start:end]
print "joints shape:", self.edited_joints.shape
print "speaks shape:", self.edited_speaks.shape
print "annos shape:", self.edited_annos.shape
print "flags shape:", self.edited_flags.shape
print "times shape:", self.edited_times.shape
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
#Sliderの最大値をset
self.sld.setMaximum(self.edited_joints.shape[0]-1)
print "end"
def directJoints(self):
print "direct joints"
size, dim = self.edited_joints.shape
user1_nose_y_idx = 35+33+2
pair = 1
pair_stack = 0
offset_y = 0.03
for i in range(size):
anno = self.edited_annos[i]
if anno[1] == 0:#if this user is a listener
if anno[0] == 1 and anno[2] == 0:
#print i,":user0",self.input_joints[i][user1_nose_y_idx]
self.edited_joints[i][user1_nose_y_idx] += offset_y
pair = 0
pair_stack = 0
#print self.input_joints[i][user1_nose_y_idx]
elif anno[0] == 0 and anno[2] == 1:
#print i,":user2",self.input_joints[i][user1_nose_y_idx]
self.edited_joints[i][user1_nose_y_idx] -= offset_y
pair = 2
pair_stack = 0
#print self.input_joints[i][user1_nose_y_idx]
else: #if no one is speaking
pair_stack += 1
else:#if this user is the speaker, decide which listener to face
if pair == 0:
self.edited_joints[i][user1_nose_y_idx] += offset_y
elif pair == 2:
self.edited_joints[i][user1_nose_y_idx] -= offset_y
# Flip the data and append it
def reverseData(self):
#flip only the y direction
def reverse_y(joints):
#(10000, 36)
rev = np.array([[1,-1,1]*12]*joints.shape[0])
return joints*rev
print "reverseData before:",self.edited_joints.shape, self.edited_annos.shape
datalen = self.edited_annos.shape[0]
if self.anno_dim == 2:
j_r = np.hstack((self.edited_joints[:,36:], self.edited_joints[:,:36]))
self.edited_joints = np.vstack((self.edited_joints, j_r))
s_r = np.hstack((self.edited_speaks[:,1].reshape(datalen,1), self.edited_speaks[:,0].reshape(datalen,1)))
self.edited_speaks = np.vstack((self.edited_speaks, s_r))
a_r = np.hstack((self.edited_annos[:,1].reshape(datalen,1), self.edited_annos[:,0].reshape(datalen,1)))
self.edited_annos = np.vstack((self.edited_annos, a_r))
self.edited_flags = np.vstack((self.edited_flags, self.edited_flags))
self.edited_times = np.append(self.edited_times, self.edited_times)
else:
print "bad shape"
#print self.edited_times.shape
self.updateTable(self.edited_speaks, self.edited_annos, self.edited_times)
#Sliderの最大値をset
self.sld.setMaximum(self.edited_joints.shape[0]-1)
print "reverseData after:",self.edited_joints.shape,self.edited_speaks.shape,self.edited_annos.shape
def rviz_obj(self, obj_id, obj_ns, obj_type, obj_size, obj_color=[0, 0, 0, 0], obj_life=0):
obj = Marker()
obj.header.frame_id, obj.header.stamp = "camera_link", rospy.Time.now()
obj.ns, obj.action, obj.type = str(obj_ns), 0, obj_type
obj.scale.x, obj.scale.y, obj.scale.z = obj_size[0], obj_size[1], obj_size[2]
obj.color = obj_color
obj.lifetime = rospy.Duration.from_sec(obj_life)
obj.pose.orientation.w = 1.0
return obj
def set_point(self, pos, addx=0, addy=0, addz=0, rotate=False):
pt = Point()
if rotate == True:
pt.x, pt.y, pt.z = -1*pos[0]+addx, -1*pos[1]+addy, pos[2]+addz
else:
pt.x, pt.y, pt.z = pos[0]+addx, pos[1]+addy, pos[2]+addz
return pt
def set_vizmsg_point(self, u, data, color, psize, ofs, addx=0, addy=0, rotate=False):
pmsg = self.rviz_obj(u, 'p'+str(u), 7, [psize, psize, psize], color, 0)
points = []
for p in range(data.shape[1]/ofs):
points.append(self.set_point([data[0, p*ofs],
data[0, p*ofs+1],
data[0, p*ofs+2]], | addx=addx, addy=addy, rotate=rotate))
pmsg.points = points
return pmsg
def set_vizmsg_line(self, u, data, color, lsize, llist, addx=0, addy=0, rotate=False):
lmsg = self.rviz_obj(u, 'l'+str(u), 5, [lsize, lsize, lsize], color, 0)
for ls in llist:
for l in range(len(ls)-1):
for add in range(2):
#print person[0, ls[l+add]], ls[l+add], l, add
linepoint=self.set_point([data[0,ls[l+add]*3],
data[0,ls[l+add]*3+1],
data[0,ls[l+add]*3+2]],
addx=addx, addy=addy, rotate=rotate)
lmsg.points.append(linepoint)
return lmsg
def pubViz(self, tl):
#print "pub Viz:", tl
# drawing
msgs = MarkerArray()
amsg = Float64MultiArray()
per_js = []
dim_p = 36
dim = len(self.edited_joints[tl])
for i in range(dim/dim_p):
per_js.append(self.edited_joints[tl,dim_p*i:dim_p*(i+1)].reshape(1, dim_p))
ofs = 3
#ofs_xyr = [[-0.5, -0.5, 1], [0.5, 0, 0], [-0.5, 0.5, 1]]
ofs_xyr = [[-1, 0, 1], [0, 0, 0]]
for u, (person, speak, anno) in enumerate(zip(per_js, self.edited_speaks[tl], self.edited_annos[tl])):
# ---Person points---
offset = 0
psize = speak*0.05
#psize = 0.03
#psize = 0.05 if anno > 0.7 else 0.03
pmsg = self.set_vizmsg_point(u, person, self.carray[0], psize, ofs,
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], rotate=ofs_xyr[u][2])
msgs.markers.append(pmsg)
# ---Person lines---
lsize = 0.03 if anno > 0.5 else 0.01
cid = 3 if anno > 0.5 else 2
lmsg = self.set_vizmsg_line(u, person, self.carray[cid], lsize, self.llist,
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], rotate=ofs_xyr[u][2])
msgs.markers.append(lmsg)
# ------text------
tidx, tsize = 3, 0.1 # tidx=3 is Head
tmsg = self.rviz_obj(u, 't'+str(u), 9, [tsize, tsize, tsize], self.carray[u], 0)
tmsg.pose.position = self.set_point([person[0, tidx*ofs], person[0, tidx*ofs+1], person[0, tidx*ofs+2]],
addx=ofs_xyr[u][0], addy=ofs_xyr[u][1], addz=0.3)
tmsg.pose.orientation.w = 1
tmsg.text = "User"+str(u)+":"+str(speak)
msgs.markers.append(tmsg)
amsg.data.append(speak)
# ------text------
if self.edited_times[tl] > 0.00001:
tidx, tsize = 3, 0.1 # tidx=3 is Head
tmsg = self.rviz_obj(u, 'time'+str(u), 9, [tsize, tsize, tsize], self.carray[u], 0)
tmsg.pose.position = self.set_point([-0.5, 0, 0])
tmsg.pose.orientation.w = 1
tmsg.text = "time: "+str(self.edited_times[tl])
msgs.markers.append(tmsg)
self.mpub.publish(msgs)
#print pred[1].data[0, 0]
self.speakpub.publish(amsg)
def main():
app = QtGui.QApplication(sys.argv)
anotation = ANNOTATION()
#graph = GRAPH()
sys.exit(app.exec_())
if __name__=='__main__':
main()
| identifier_name |
|
index.js | const path = require('path');
const yaml = require('js-yaml');
const toml = require('@iarna/toml');
const fse = require('fs-extra');
const _ = require('lodash');
const TaskQueue = require('./task-queue');
module.exports = {
forEachPromise,
mapPromise,
findPromise,
readDirRecursively,
getFirst,
append,
concat,
copy,
copyIfNotSet,
rename,
failFunctionWithTag,
assertFunctionWithFail,
mapDeep,
fieldPathToString,
getFirstExistingFile,
parseFirstExistingFile,
parseFile,
parseDataByFilePath,
parseMarkdownWithFrontMatter,
outputData,
stringifyDataByFilePath,
TaskQueue
};
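// Runs an async callback over each array element strictly in order, resolving once all have completed.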
function forEachPromise(array, callback, thisArg) {
return new Promise((resolve, reject) => {
function next(index) {
if (index < array.length) {
callback.call(thisArg, array[index], index, array).then(() => {
next(index + 1);
}).catch(error => {
reject(error);
});
} else {
resolve();
}
}
next(0);
});
}
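// Like forEachPromise, but collects each resolved value into an array (a sequential Promise.all).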
function mapPromise(array, callback, thisArg) {
return new Promise((resolve, reject) => {
let results = [];
function next(index) {
if (index < array.length) | else {
resolve(results);
}
}
next(0);
});
}
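// Resolves with the first element whose async predicate resolves truthy, or undefined if none match.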
function findPromise(array, callback, thisArg) {
return new Promise((resolve, reject) => {
function next(index) {
if (index < array.length) {
callback.call(thisArg, array[index], index, array).then(result => {
if (result) {
resolve(array[index]);
} else {
next(index + 1);
}
}).catch(error => {
reject(error);
});
} else {
resolve();
}
}
next(0);
});
}
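// Recursively lists files under dir, returning paths relative to options.rootDir (defaults to dir); options.filter can prune entries.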
async function readDirRecursively(dir, options) {
const rootDir = _.get(options, 'rootDir', dir);
const files = await fse.readdir(dir);
const result = await mapPromise(files, async (file) => {
const filePath = path.join(dir, file);
const relFilePath = path.relative(rootDir, filePath);
const stats = await fse.stat(filePath);
if (_.has(options, 'filter') && !options.filter(relFilePath, stats)) {
return Promise.resolve();
}
if (stats.isDirectory()) {
return readDirRecursively(filePath, {...options, rootDir});
} else if (stats.isFile()) {
return relFilePath;
} else {
return null;
}
});
return _.chain(result).compact().flatten().value();
}
/**
* Gets the value at the first path of object having non undefined value.
* If all paths resolve to undefined values, the defaultValue is returned.
*
* @param {Object} object The object to query.
* @param {Array<String | Array<String>>} paths The property paths to search for.
* @param {*} [defaultValue] The value returned if all paths resolve to undefined values
* @returns {*}
*/
function getFirst(object, paths, defaultValue) {
let result = _(object).at(paths).reject(_.isUndefined).first();
return _.isUndefined(result) ? defaultValue : result;
}
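// e.g. getFirst({ a: 1 }, ['b', 'a'], 0) === 1, while getFirst({}, ['b', 'a'], 0) === 0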
function append(object, path, value) {
if (!_.has(object, path)) {
_.set(object, path, []);
}
_.get(object, path).push(value);
}
function concat(object, path, value) {
if (!_.has(object, path)) {
_.set(object, path, []);
}
_.set(object, path, _.get(object, path).concat(value));
}
function copy(sourceObject, sourcePath, targetObject, targetPath, transform) {
if (_.has(sourceObject, sourcePath)) {
let value = _.get(sourceObject, sourcePath);
if (transform) {
value = transform(value);
}
_.set(targetObject, targetPath, value);
}
}
function copyIfNotSet(sourceObject, sourcePath, targetObject, targetPath, transform) {
if (!_.has(targetObject, targetPath)) {
copy(sourceObject, sourcePath, targetObject, targetPath, transform);
}
}
function rename(object, oldPath, newPath) {
if (_.has(object, oldPath)) {
_.set(object, newPath, _.get(object, oldPath));
oldPath = _.toPath(oldPath);
if (oldPath.length > 1) {
object = _.get(object, _.initial(oldPath));
}
delete object[_.last(oldPath)];
}
}
function failFunctionWithTag(tag) {
return function fail(message) {
throw new Error(`[${tag}] ${message}`);
};
}
function assertFunctionWithFail(fail) {
return function assert(value, message) {
if (!value) {
fail(message);
}
}
}
/**
* Deeply maps the passed `value` by recursively calling the `iteratee` on
* value's children. The value returned from the iteratee is used to map the
* children.
*
* The iteratee is invoked with three arguments - the `value` being iterated,
* the `fieldPath` of the current `value` relative to the original passed value,
* and the `stack` of ancestors of the current `value`.
*
 * The first time, the `iteratee` will be called with the original `value`
* and empty arrays for `fieldPath` and `stack`.
*
* In other words, any `value` passed to the iteratee (except the first call,
* and assuming the ancestors keys were not mapped)
* will be equal to: `_.get(originalValue, fieldPath)`
*
* The recursion is called in pre-order depth-first-search. Meaning, the
* iteratee is called first on parent nodes and then on its children. Therefore
* if iteratee maps/replaces the parent node, then the children of the replaced
* node will be traversed.
*
* @example
* mapDeep({ prop: 'foo', arr: [ 'bar' , 1, 2 ] }, (value) => {
* if (_.isString(value)) return '__' + value;
* if (_.isNumber(value)) return value * 10;
* return value;
* })
* => { prop: '__foo', arr: [ '__bar', 10, 20 ] }
*
* mapDeep({ prop: 'foo', arr: [ 'bar' ] }, (value, fieldPath) => {
 * if (_.isString(value)) return value + '__' + fieldPath.join('.');
* return value;
* })
* => { prop: 'foo__prop', arr: [ 'bar__arr.0' ] }
*
* @param {*} value A value to map
* @param {Function} iteratee Function (value: any, fieldPath: Array, stack: Array)
* @param {object} [options]
* @param {boolean} [options.iterateCollections] Default: true
* @param {boolean} [options.iterateScalars] Default: true
* @param {boolean} [options.postOrder] Change the invocation of iteratee to post-order depth-first-search. Default: false
* @param {Array} [_keyPath] For internal recursive use
* @param {Array} [_objectStack] For internal recursive use
* @returns {*}
*/
function mapDeep(value, iteratee, options = {}, _keyPath = [], _objectStack = []) {
const postOrder = _.get(options, 'postOrder', false);
let iterate;
if (_.isPlainObject(value) || _.isArray(value)) {
iterate = _.get(options, 'iterateCollections', true);
} else {
iterate = _.get(options, 'iterateScalars', true);
}
if (iterate && !postOrder) {
value = iteratee(value, _keyPath, _objectStack);
}
const childrenIterator = (val, key) => {
return mapDeep(val, iteratee, options, _.concat(_keyPath, key), _.concat(_objectStack, [value]));
};
if (_.isPlainObject(value)) {
value = _.mapValues(value, childrenIterator);
} else if (_.isArray(value)) {
value = _.map(value, childrenIterator);
}
if (iterate && postOrder) {
value = iteratee(value, _keyPath, _objectStack);
}
return value;
}
function fieldPathToString(fieldPath) {
return _.reduce(fieldPath, (accumulator, fieldName, index) => {
if (_.isString(fieldName) && /\W/.test(fieldName)) {
// field name is a string with non alphanumeric character
accumulator += `['${fieldName}']`;
} else if (_.isNumber(fieldName)) {
accumulator += `[${fieldName}]`;
} else {
if (index > 0) {
accumulator += '.';
}
accumulator += fieldName;
}
return accumulator;
}, '');
}
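// e.g. fieldPathToString(['posts', 0, 'front-matter', 'title']) === "posts[0]['front-matter'].title"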
function getFirstExistingFile(fileNames, inputDir) {
const filePaths = _.map(fileNames, fileName => path.resolve(inputDir, fileName));
return findPromise(filePaths, (filePath) => fse.pathExists(filePath));
}
function parseFirstExistingFile(fileNames, inputDir) {
return getFirstExistingFile(fileNames, inputDir).then(filePath => {
if (filePath) {
return parseFile(filePath);
} else {
return null;
}
});
}
async function parseFile(filePath) {
const data = await fse.readFile(filePath, 'utf8');
return parseDataByFilePath(data, filePath);
}
function parseDataByFilePath(string, filePath) {
const extension = path.extname(filePath).substring(1);
let data;
switch (extension) {
case 'yml':
case 'yaml':
data = yaml.safeLoad(string, {schema: yaml.JSON_SCHEMA});
break;
case 'json':
data = JSON.parse(string);
break;
case 'toml':
data = toml.parse(string);
break;
case 'md':
data = parseMarkdownWithFrontMatter(string);
break;
default:
throw new Error(`parseDataByFilePath error, extension '${extension}' of file ${filePath} is not supported`);
}
return data;
}
function parseMarkdownWithFrontMatter(string) {
    string = string.replace(/\r\n/g, '\n');
let frontmatter = null;
let markdown = string;
let frontMatterTypes = [
{
type: 'yaml',
startDelimiter: '---\n',
endDelimiter: '\n---',
parse: (string) => yaml.safeLoad(string, {schema: yaml.JSON_SCHEMA})
},
{
type: 'toml',
startDelimiter: '+++\n',
endDelimiter: '\n+++',
parse: (string) => toml.parse(string)
},
{
type: 'json',
startDelimiter: '{\n',
endDelimiter: '\n}',
parse: (string) => JSON.parse(string)
}
];
_.forEach(frontMatterTypes, fmType => {
if (string.startsWith(fmType.startDelimiter)) {
let index = string.indexOf(fmType.endDelimiter);
if (index !== -1) {
// The end delimiter must be followed by EOF or by a new line (possibly preceded with spaces)
// For example ("." used for spaces):
// |---
// |title: Title
// |---...
// |
// |Markdown Content
// |
// "index" points to the beginning of the second "---"
// "endDelimEndIndex" points to the end of the second "---"
// "afterEndDelim" is everything after the second "---"
// "afterEndDelimMatch" is the matched "...\n" after the second "---"
// frontmatter will be: {title: "Title"}
// markdown will be "\nMarkdown Content\n" (the first \n after end delimiter is discarded)
const endDelimEndIndex = index + fmType.endDelimiter.length;
const afterEndDelim = string.substring(endDelimEndIndex);
const afterEndDelimMatch = afterEndDelim.match(/^\s*?(\n|$)/);
if (afterEndDelimMatch) {
const data = string.substring(fmType.startDelimiter.length, index);
frontmatter = fmType.parse(data);
markdown = afterEndDelim.substring(afterEndDelimMatch[0].length);
}
}
}
});
return {
frontmatter: frontmatter,
markdown: markdown
};
}
function outputData(filePath, data) {
let res = stringifyDataByFilePath(data, filePath);
return fse.outputFile(filePath, res);
}
function stringifyDataByFilePath(data, filePath) {
const extension = path.extname(filePath).substring(1);
let result;
switch (extension) {
case 'yml':
case 'yaml':
result = yaml.safeDump(data, {noRefs: true});
break;
case 'json':
result = JSON.stringify(data, null, 4);
break;
case 'toml':
result = toml.stringify(data);
break;
case 'md':
result = '---\n' + yaml.safeDump(data.frontmatter, {noRefs: true}) + '---\n' + data.markdown;
break;
default:
throw new Error(`stringifyDataByFilePath error, extension '${extension}' of file ${filePath} is not supported`);
}
return result;
}
| {
callback.call(thisArg, array[index], index, array).then(result => {
results[index] = result;
next(index + 1);
}).catch(error => {
reject(error);
});
} | conditional_block |
index.js | const path = require('path');
const yaml = require('js-yaml');
const toml = require('@iarna/toml');
const fse = require('fs-extra');
const _ = require('lodash');
const TaskQueue = require('./task-queue');
module.exports = {
forEachPromise,
mapPromise,
findPromise,
readDirRecursively,
getFirst,
append,
concat,
copy,
copyIfNotSet,
rename,
failFunctionWithTag,
assertFunctionWithFail,
mapDeep,
fieldPathToString,
getFirstExistingFile,
parseFirstExistingFile,
parseFile,
parseDataByFilePath,
parseMarkdownWithFrontMatter,
outputData,
stringifyDataByFilePath,
TaskQueue
};
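// Runs `callback` (which must return a Promise) on each element of `array` strictly in
// sequence; resolves once every element has been processed and rejects on the first error.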
function forEachPromise(array, callback, thisArg) {
return new Promise((resolve, reject) => {
function next(index) {
if (index < array.length) {
callback.call(thisArg, array[index], index, array).then(() => {
next(index + 1);
}).catch(error => {
reject(error);
});
} else {
resolve();
}
}
next(0);
});
}
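// Sequential Promise-based map: like forEachPromise, but resolves with the array of
// results produced by `callback` for each element, preserving input order.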
function mapPromise(array, callback, thisArg) {
return new Promise((resolve, reject) => {
let results = [];
function next(index) {
if (index < array.length) {
callback.call(thisArg, array[index], index, array).then(result => {
results[index] = result;
next(index + 1);
}).catch(error => {
reject(error);
});
} else {
resolve(results);
}
}
next(0);
});
}
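// Sequential Promise-based find: resolves with the first element for which `callback`
// resolves to a truthy value, or with undefined if no element matches.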
function findPromise(array, callback, thisArg) {
return new Promise((resolve, reject) => {
function next(index) {
if (index < array.length) {
callback.call(thisArg, array[index], index, array).then(result => {
if (result) {
resolve(array[index]);
} else {
next(index + 1);
}
}).catch(error => {
reject(error);
});
} else {
resolve();
}
}
next(0);
});
}
async function readDirRecursively(dir, options) {
const rootDir = _.get(options, 'rootDir', dir);
const files = await fse.readdir(dir);
const result = await mapPromise(files, async (file) => {
const filePath = path.join(dir, file);
const relFilePath = path.relative(rootDir, filePath);
const stats = await fse.stat(filePath);
if (_.has(options, 'filter') && !options.filter(relFilePath, stats)) {
return Promise.resolve();
}
if (stats.isDirectory()) {
return readDirRecursively(filePath, {...options, rootDir});
} else if (stats.isFile()) {
return relFilePath;
} else {
return null;
}
});
return _.chain(result).compact().flatten().value();
}
/**
* Gets the value at the first path of object having a non-undefined value.
* If all paths resolve to undefined values, the defaultValue is returned.
*
* @param {Object} object The object to query.
* @param {Array<String | Array<String>>} paths The property paths to search for.
* @param {*} [defaultValue] The value returned if all paths resolve to undefined values
* @returns {*}
*/
function getFirst(object, paths, defaultValue) {
let result = _(object).at(paths).reject(_.isUndefined).first();
return _.isUndefined(result) ? defaultValue : result;
}
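// getFirst example (illustrative, not part of the original source):
//   getFirst({a: {b: 1}}, ['x.y', 'a.b'], 'fallback')  // => 1
//   getFirst({a: {b: 1}}, ['x.y', 'x.z'], 'fallback')  // => 'fallback'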
function append(object, path, value) {
if (!_.has(object, path)) {
_.set(object, path, []);
}
_.get(object, path).push(value);
}
function concat(object, path, value) {
if (!_.has(object, path)) {
_.set(object, path, []);
}
_.set(object, path, _.get(object, path).concat(value));
}
function copy(sourceObject, sourcePath, targetObject, targetPath, transform) {
if (_.has(sourceObject, sourcePath)) {
let value = _.get(sourceObject, sourcePath);
if (transform) {
value = transform(value);
}
_.set(targetObject, targetPath, value);
}
}
function copyIfNotSet(sourceObject, sourcePath, targetObject, targetPath, transform) {
if (!_.has(targetObject, targetPath)) {
copy(sourceObject, sourcePath, targetObject, targetPath, transform);
}
}
function rename(object, oldPath, newPath) {
if (_.has(object, oldPath)) {
_.set(object, newPath, _.get(object, oldPath));
oldPath = _.toPath(oldPath);
if (oldPath.length > 1) {
object = _.get(object, _.initial(oldPath));
}
delete object[_.last(oldPath)];
}
}
function failFunctionWithTag(tag) {
return function fail(message) {
throw new Error(`[${tag}] ${message}`);
};
}
function assertFunctionWithFail(fail) {
return function assert(value, message) {
if (!value) {
fail(message);
}
}
}
/**
* Deeply maps the passed `value` by recursively calling the `iteratee` on
* value's children. The value returned from the iteratee is used to map the
* children.
*
* The iteratee is invoked with three arguments - the `value` being iterated,
* the `fieldPath` of the current `value` relative to the original passed value,
* and the `stack` of ancestors of the current `value`.
*
* For the first time the `iteratee` will be called with the original `value`
* and empty arrays for `fieldPath` and `stack`.
*
* In other words, any `value` passed to the iteratee (except the first call,
* and assuming the ancestors keys were not mapped)
* will be equal to: `_.get(originalValue, fieldPath)`
*
* The recursion is called in pre-order depth-first-search. Meaning, the
* iteratee is called first on parent nodes and then on its children. Therefore
* if iteratee maps/replaces the parent node, then the children of the replaced
* node will be traversed.
*
* @example
* mapDeep({ prop: 'foo', arr: [ 'bar' , 1, 2 ] }, (value) => {
* if (_.isString(value)) return '__' + value;
* if (_.isNumber(value)) return value * 10;
* return value;
* })
* => { prop: '__foo', arr: [ '__bar', 10, 20 ] }
*
* mapDeep({ prop: 'foo', arr: [ 'bar' ] }, (value, fieldPath) => {
* if (_.isString(value)) return value + '__' + fieldPath.join('.');
* return value;
* })
* => { prop: 'foo__prop', arr: [ 'bar__arr.0' ] }
*
* @param {*} value A value to map
* @param {Function} iteratee Function (value: any, fieldPath: Array, stack: Array)
* @param {object} [options]
* @param {boolean} [options.iterateCollections] Default: true
* @param {boolean} [options.iterateScalars] Default: true
* @param {boolean} [options.postOrder] Change the invocation of iteratee to post-order depth-first-search. Default: false
* @param {Array} [_keyPath] For internal recursive use
* @param {Array} [_objectStack] For internal recursive use
* @returns {*}
*/
function mapDeep(value, iteratee, options = {}, _keyPath = [], _objectStack = []) {
const postOrder = _.get(options, 'postOrder', false);
let iterate;
if (_.isPlainObject(value) || _.isArray(value)) {
iterate = _.get(options, 'iterateCollections', true);
} else {
iterate = _.get(options, 'iterateScalars', true);
}
if (iterate && !postOrder) {
value = iteratee(value, _keyPath, _objectStack);
}
const childrenIterator = (val, key) => {
return mapDeep(val, iteratee, options, _.concat(_keyPath, key), _.concat(_objectStack, [value]));
};
if (_.isPlainObject(value)) {
value = _.mapValues(value, childrenIterator);
} else if (_.isArray(value)) {
value = _.map(value, childrenIterator);
}
if (iterate && postOrder) {
value = iteratee(value, _keyPath, _objectStack);
}
return value;
}
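// fieldPathToString converts a field path array into a lodash-style path string,
// e.g. (illustrative) ['props', 0, 'some-key'] => "props[0]['some-key']".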
function fieldPathToString(fieldPath) {
return _.reduce(fieldPath, (accumulator, fieldName, index) => {
if (_.isString(fieldName) && /\W/.test(fieldName)) {
// field name is a string with a non-alphanumeric character
accumulator += `['${fieldName}']`;
} else if (_.isNumber(fieldName)) {
accumulator += `[${fieldName}]`;
} else {
if (index > 0) {
accumulator += '.';
}
accumulator += fieldName;
}
return accumulator;
}, '');
}
function getFirstExistingFile(fileNames, inputDir) {
const filePaths = _.map(fileNames, fileName => path.resolve(inputDir, fileName));
return findPromise(filePaths, (filePath) => fse.pathExists(filePath));
}
function parseFirstExistingFile(fileNames, inputDir) {
return getFirstExistingFile(fileNames, inputDir).then(filePath => {
if (filePath) {
return parseFile(filePath);
} else {
return null;
}
});
}
async function parseFile(filePath) {
const data = await fse.readFile(filePath, 'utf8');
return parseDataByFilePath(data, filePath);
}
function parseDataByFilePath(string, filePath) {
const extension = path.extname(filePath).substring(1);
let data;
switch (extension) {
case 'yml':
case 'yaml':
data = yaml.safeLoad(string, {schema: yaml.JSON_SCHEMA});
break;
case 'json':
data = JSON.parse(string);
break;
case 'toml':
data = toml.parse(string);
break;
case 'md':
data = parseMarkdownWithFrontMatter(string);
break;
default:
throw new Error(`parseDataByFilePath error, extension '${extension}' of file ${filePath} is not supported`);
}
return data;
}
function parseMarkdownWithFrontMatter(string) {
string = string.replace(/\r\n/g, '\n'); // normalize every CRLF, not just the first occurrence
let frontmatter = null;
let markdown = string;
let frontMatterTypes = [
{
type: 'yaml',
startDelimiter: '---\n',
endDelimiter: '\n---',
parse: (string) => yaml.safeLoad(string, {schema: yaml.JSON_SCHEMA})
},
{
type: 'toml',
startDelimiter: '+++\n',
endDelimiter: '\n+++',
parse: (string) => toml.parse(string)
},
{
type: 'json',
startDelimiter: '{\n',
endDelimiter: '\n}',
parse: (string) => JSON.parse('{' + string + '}') // the outer braces are consumed by the delimiters above
}
];
_.forEach(frontMatterTypes, fmType => {
if (string.startsWith(fmType.startDelimiter)) {
let index = string.indexOf(fmType.endDelimiter);
if (index !== -1) {
// The end delimiter must be followed by EOF or by a new line (possibly preceded with spaces)
// For example ("." used for spaces):
// |---
// |title: Title
// |---...
// |
// |Markdown Content
// |
// "index" points to the beginning of the second "---"
// "endDelimEndIndex" points to the end of the second "---"
// "afterEndDelim" is everything after the second "---"
// "afterEndDelimMatch" is the matched "...\n" after the second "---"
// frontmatter will be: {title: "Title"}
// markdown will be "\nMarkdown Content\n" (the first \n after end delimiter is discarded)
const endDelimEndIndex = index + fmType.endDelimiter.length;
const afterEndDelim = string.substring(endDelimEndIndex);
const afterEndDelimMatch = afterEndDelim.match(/^\s*?(\n|$)/);
if (afterEndDelimMatch) {
const data = string.substring(fmType.startDelimiter.length, index);
frontmatter = fmType.parse(data); | markdown = afterEndDelim.substring(afterEndDelimMatch[0].length);
}
}
}
});
return {
frontmatter: frontmatter,
markdown: markdown
};
}
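// parseMarkdownWithFrontMatter example (illustrative, not part of the original source):
// for the input "---\ntitle: Title\n---\n\nMarkdown Content\n" the function returns
// {frontmatter: {title: 'Title'}, markdown: '\nMarkdown Content\n'}.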
function outputData(filePath, data) {
let res = stringifyDataByFilePath(data, filePath);
return fse.outputFile(filePath, res);
}
function stringifyDataByFilePath(data, filePath) {
const extension = path.extname(filePath).substring(1);
let result;
switch (extension) {
case 'yml':
case 'yaml':
result = yaml.safeDump(data, {noRefs: true});
break;
case 'json':
result = JSON.stringify(data, null, 4);
break;
case 'toml':
result = toml.stringify(data);
break;
case 'md':
result = '---\n' + yaml.safeDump(data.frontmatter, {noRefs: true}) + '---\n' + data.markdown;
break;
default:
throw new Error(`stringifyDataByFilePath error, extension '${extension}' of file ${filePath} is not supported`);
}
return result;
} | random_line_split |
|
index.js | const path = require('path');
const yaml = require('js-yaml');
const toml = require('@iarna/toml');
const fse = require('fs-extra');
const _ = require('lodash');
const TaskQueue = require('./task-queue');
module.exports = {
forEachPromise,
mapPromise,
findPromise,
readDirRecursively,
getFirst,
append,
concat,
copy,
copyIfNotSet,
rename,
failFunctionWithTag,
assertFunctionWithFail,
mapDeep,
fieldPathToString,
getFirstExistingFile,
parseFirstExistingFile,
parseFile,
parseDataByFilePath,
parseMarkdownWithFrontMatter,
outputData,
stringifyDataByFilePath,
TaskQueue
};
function forEachPromise(array, callback, thisArg) {
return new Promise((resolve, reject) => {
function next(index) {
if (index < array.length) {
callback.call(thisArg, array[index], index, array).then(() => {
next(index + 1);
}).catch(error => {
reject(error);
});
} else {
resolve();
}
}
next(0);
});
}
function mapPromise(array, callback, thisArg) {
return new Promise((resolve, reject) => {
let results = [];
function next(index) {
if (index < array.length) {
callback.call(thisArg, array[index], index, array).then(result => {
results[index] = result;
next(index + 1);
}).catch(error => {
reject(error);
});
} else {
resolve(results);
}
}
next(0);
});
}
function findPromise(array, callback, thisArg) {
return new Promise((resolve, reject) => {
function next(index) {
if (index < array.length) {
callback.call(thisArg, array[index], index, array).then(result => {
if (result) {
resolve(array[index]);
} else {
next(index + 1);
}
}).catch(error => {
reject(error);
});
} else {
resolve();
}
}
next(0);
});
}
async function readDirRecursively(dir, options) {
const rootDir = _.get(options, 'rootDir', dir);
const files = await fse.readdir(dir);
const result = await mapPromise(files, async (file) => {
const filePath = path.join(dir, file);
const relFilePath = path.relative(rootDir, filePath);
const stats = await fse.stat(filePath);
if (_.has(options, 'filter') && !options.filter(relFilePath, stats)) {
return Promise.resolve();
}
if (stats.isDirectory()) {
return readDirRecursively(filePath, {...options, rootDir});
} else if (stats.isFile()) {
return relFilePath;
} else {
return null;
}
});
return _.chain(result).compact().flatten().value();
}
/**
* Gets the value at the first path of object having a non-undefined value.
* If all paths resolve to undefined values, the defaultValue is returned.
*
* @param {Object} object The object to query.
* @param {Array<String | Array<String>>} paths The property paths to search for.
* @param {*} [defaultValue] The value returned if all paths resolve to undefined values
* @returns {*}
*/
function getFirst(object, paths, defaultValue) {
let result = _(object).at(paths).reject(_.isUndefined).first();
return _.isUndefined(result) ? defaultValue : result;
}
function append(object, path, value) {
if (!_.has(object, path)) {
_.set(object, path, []);
}
_.get(object, path).push(value);
}
function concat(object, path, value) {
if (!_.has(object, path)) {
_.set(object, path, []);
}
_.set(object, path, _.get(object, path).concat(value));
}
function copy(sourceObject, sourcePath, targetObject, targetPath, transform) {
if (_.has(sourceObject, sourcePath)) {
let value = _.get(sourceObject, sourcePath);
if (transform) {
value = transform(value);
}
_.set(targetObject, targetPath, value);
}
}
function copyIfNotSet(sourceObject, sourcePath, targetObject, targetPath, transform) {
if (!_.has(targetObject, targetPath)) {
copy(sourceObject, sourcePath, targetObject, targetPath, transform);
}
}
function rename(object, oldPath, newPath) {
if (_.has(object, oldPath)) {
_.set(object, newPath, _.get(object, oldPath));
oldPath = _.toPath(oldPath);
if (oldPath.length > 1) {
object = _.get(object, _.initial(oldPath));
}
delete object[_.last(oldPath)];
}
}
function failFunctionWithTag(tag) {
return function fail(message) {
throw new Error(`[${tag}] ${message}`);
};
}
function assertFunctionWithFail(fail) {
return function assert(value, message) {
if (!value) {
fail(message);
}
}
}
/**
* Deeply maps the passed `value` by recursively calling the `iteratee` on
* value's children. The value returned from the iteratee is used to map the
* children.
*
* The iteratee is invoked with three arguments - the `value` being iterated,
* the `fieldPath` of the current `value` relative to the original passed value,
* and the `stack` of ancestors of the current `value`.
*
* For the first time the `iteratee` will be called with the original `value`
* and empty arrays for `fieldPath` and `stack`.
*
* In other words, any `value` passed to the iteratee (except the first call,
* and assuming the ancestors keys were not mapped)
* will be equal to: `_.get(originalValue, fieldPath)`
*
* The recursion is called in pre-order depth-first-search. Meaning, the
* iteratee is called first on parent nodes and then on its children. Therefore
* if iteratee maps/replaces the parent node, then the children of the replaced
* node will be traversed.
*
* @example
* mapDeep({ prop: 'foo', arr: [ 'bar' , 1, 2 ] }, (value) => {
* if (_.isString(value)) return '__' + value;
* if (_.isNumber(value)) return value * 10;
* return value;
* })
* => { prop: '__foo', arr: [ '__bar', 10, 20 ] }
*
* mapDeep({ prop: 'foo', arr: [ 'bar' ] }, (value, fieldPath) => {
* if (_.isString(value)) return value + '__' + fieldPath.join('.');
* return value;
* })
* => { prop: 'foo__prop', arr: [ 'bar__arr.0' ] }
*
* @param {*} value A value to map
* @param {Function} iteratee Function (value: any, fieldPath: Array, stack: Array)
* @param {object} [options]
* @param {boolean} [options.iterateCollections] Default: true
* @param {boolean} [options.iterateScalars] Default: true
* @param {boolean} [options.postOrder] Change the invocation of iteratee to post-order depth-first-search. Default: false
* @param {Array} [_keyPath] For internal recursive use
* @param {Array} [_objectStack] For internal recursive use
* @returns {*}
*/
function mapDeep(value, iteratee, options = {}, _keyPath = [], _objectStack = []) {
const postOrder = _.get(options, 'postOrder', false);
let iterate;
if (_.isPlainObject(value) || _.isArray(value)) {
iterate = _.get(options, 'iterateCollections', true);
} else {
iterate = _.get(options, 'iterateScalars', true);
}
if (iterate && !postOrder) {
value = iteratee(value, _keyPath, _objectStack);
}
const childrenIterator = (val, key) => {
return mapDeep(val, iteratee, options, _.concat(_keyPath, key), _.concat(_objectStack, [value]));
};
if (_.isPlainObject(value)) {
value = _.mapValues(value, childrenIterator);
} else if (_.isArray(value)) {
value = _.map(value, childrenIterator);
}
if (iterate && postOrder) {
value = iteratee(value, _keyPath, _objectStack);
}
return value;
}
function fieldPathToString(fieldPath) {
return _.reduce(fieldPath, (accumulator, fieldName, index) => {
if (_.isString(fieldName) && /\W/.test(fieldName)) {
// field name is a string with a non-alphanumeric character
accumulator += `['${fieldName}']`;
} else if (_.isNumber(fieldName)) {
accumulator += `[${fieldName}]`;
} else {
if (index > 0) {
accumulator += '.';
}
accumulator += fieldName;
}
return accumulator;
}, '');
}
function getFirstExistingFile(fileNames, inputDir) {
const filePaths = _.map(fileNames, fileName => path.resolve(inputDir, fileName));
return findPromise(filePaths, (filePath) => fse.pathExists(filePath));
}
function parseFirstExistingFile(fileNames, inputDir) {
return getFirstExistingFile(fileNames, inputDir).then(filePath => {
if (filePath) {
return parseFile(filePath);
} else {
return null;
}
});
}
async function parseFile(filePath) {
const data = await fse.readFile(filePath, 'utf8');
return parseDataByFilePath(data, filePath);
}
function parseDataByFilePath(string, filePath) {
const extension = path.extname(filePath).substring(1);
let data;
switch (extension) {
case 'yml':
case 'yaml':
data = yaml.safeLoad(string, {schema: yaml.JSON_SCHEMA});
break;
case 'json':
data = JSON.parse(string);
break;
case 'toml':
data = toml.parse(string);
break;
case 'md':
data = parseMarkdownWithFrontMatter(string);
break;
default:
throw new Error(`parseDataByFilePath error, extension '${extension}' of file ${filePath} is not supported`);
}
return data;
}
function parseMarkdownWithFrontMatter(string) |
function outputData(filePath, data) {
let res = stringifyDataByFilePath(data, filePath);
return fse.outputFile(filePath, res);
}
function stringifyDataByFilePath(data, filePath) {
const extension = path.extname(filePath).substring(1);
let result;
switch (extension) {
case 'yml':
case 'yaml':
result = yaml.safeDump(data, {noRefs: true});
break;
case 'json':
result = JSON.stringify(data, null, 4);
break;
case 'toml':
result = toml.stringify(data);
break;
case 'md':
result = '---\n' + yaml.safeDump(data.frontmatter, {noRefs: true}) + '---\n' + data.markdown;
break;
default:
throw new Error(`stringifyDataByFilePath error, extension '${extension}' of file ${filePath} is not supported`);
}
return result;
}
| {
string = string.replace(/\r\n/g, '\n'); // normalize every CRLF, not just the first occurrence
let frontmatter = null;
let markdown = string;
let frontMatterTypes = [
{
type: 'yaml',
startDelimiter: '---\n',
endDelimiter: '\n---',
parse: (string) => yaml.safeLoad(string, {schema: yaml.JSON_SCHEMA})
},
{
type: 'toml',
startDelimiter: '+++\n',
endDelimiter: '\n+++',
parse: (string) => toml.parse(string)
},
{
type: 'json',
startDelimiter: '{\n',
endDelimiter: '\n}',
parse: (string) => JSON.parse('{' + string + '}') // the outer braces are consumed by the delimiters above
}
];
_.forEach(frontMatterTypes, fmType => {
if (string.startsWith(fmType.startDelimiter)) {
let index = string.indexOf(fmType.endDelimiter);
if (index !== -1) {
// The end delimiter must be followed by EOF or by a new line (possibly preceded with spaces)
// For example ("." used for spaces):
// |---
// |title: Title
// |---...
// |
// |Markdown Content
// |
// "index" points to the beginning of the second "---"
// "endDelimEndIndex" points to the end of the second "---"
// "afterEndDelim" is everything after the second "---"
// "afterEndDelimMatch" is the matched "...\n" after the second "---"
// frontmatter will be: {title: "Title"}
// markdown will be "\nMarkdown Content\n" (the first \n after end delimiter is discarded)
const endDelimEndIndex = index + fmType.endDelimiter.length;
const afterEndDelim = string.substring(endDelimEndIndex);
const afterEndDelimMatch = afterEndDelim.match(/^\s*?(\n|$)/);
if (afterEndDelimMatch) {
const data = string.substring(fmType.startDelimiter.length, index);
frontmatter = fmType.parse(data);
markdown = afterEndDelim.substring(afterEndDelimMatch[0].length);
}
}
}
});
return {
frontmatter: frontmatter,
markdown: markdown
};
} | identifier_body |
index.js | const path = require('path');
const yaml = require('js-yaml');
const toml = require('@iarna/toml');
const fse = require('fs-extra');
const _ = require('lodash');
const TaskQueue = require('./task-queue');
module.exports = {
forEachPromise,
mapPromise,
findPromise,
readDirRecursively,
getFirst,
append,
concat,
copy,
copyIfNotSet,
rename,
failFunctionWithTag,
assertFunctionWithFail,
mapDeep,
fieldPathToString,
getFirstExistingFile,
parseFirstExistingFile,
parseFile,
parseDataByFilePath,
parseMarkdownWithFrontMatter,
outputData,
stringifyDataByFilePath,
TaskQueue
};
function | (array, callback, thisArg) {
return new Promise((resolve, reject) => {
function next(index) {
if (index < array.length) {
callback.call(thisArg, array[index], index, array).then(() => {
next(index + 1);
}).catch(error => {
reject(error);
});
} else {
resolve();
}
}
next(0);
});
}
function mapPromise(array, callback, thisArg) {
return new Promise((resolve, reject) => {
let results = [];
function next(index) {
if (index < array.length) {
callback.call(thisArg, array[index], index, array).then(result => {
results[index] = result;
next(index + 1);
}).catch(error => {
reject(error);
});
} else {
resolve(results);
}
}
next(0);
});
}
function findPromise(array, callback, thisArg) {
return new Promise((resolve, reject) => {
function next(index) {
if (index < array.length) {
callback.call(thisArg, array[index], index, array).then(result => {
if (result) {
resolve(array[index]);
} else {
next(index + 1);
}
}).catch(error => {
reject(error);
});
} else {
resolve();
}
}
next(0);
});
}
async function readDirRecursively(dir, options) {
const rootDir = _.get(options, 'rootDir', dir);
const files = await fse.readdir(dir);
const result = await mapPromise(files, async (file) => {
const filePath = path.join(dir, file);
const relFilePath = path.relative(rootDir, filePath);
const stats = await fse.stat(filePath);
if (_.has(options, 'filter') && !options.filter(relFilePath, stats)) {
return Promise.resolve();
}
if (stats.isDirectory()) {
return readDirRecursively(filePath, {...options, rootDir});
} else if (stats.isFile()) {
return relFilePath;
} else {
return null;
}
});
return _.chain(result).compact().flatten().value();
}
/**
* Gets the value at the first path of object having a non-undefined value.
* If all paths resolve to undefined values, the defaultValue is returned.
*
* @param {Object} object The object to query.
* @param {Array<String | Array<String>>} paths The property paths to search for.
* @param {*} [defaultValue] The value returned if all paths resolve to undefined values
* @returns {*}
*/
function getFirst(object, paths, defaultValue) {
let result = _(object).at(paths).reject(_.isUndefined).first();
return _.isUndefined(result) ? defaultValue : result;
}
function append(object, path, value) {
if (!_.has(object, path)) {
_.set(object, path, []);
}
_.get(object, path).push(value);
}
function concat(object, path, value) {
if (!_.has(object, path)) {
_.set(object, path, []);
}
_.set(object, path, _.get(object, path).concat(value));
}
function copy(sourceObject, sourcePath, targetObject, targetPath, transform) {
if (_.has(sourceObject, sourcePath)) {
let value = _.get(sourceObject, sourcePath);
if (transform) {
value = transform(value);
}
_.set(targetObject, targetPath, value);
}
}
function copyIfNotSet(sourceObject, sourcePath, targetObject, targetPath, transform) {
if (!_.has(targetObject, targetPath)) {
copy(sourceObject, sourcePath, targetObject, targetPath, transform);
}
}
function rename(object, oldPath, newPath) {
if (_.has(object, oldPath)) {
_.set(object, newPath, _.get(object, oldPath));
oldPath = _.toPath(oldPath);
if (oldPath.length > 1) {
object = _.get(object, _.initial(oldPath));
}
delete object[_.last(oldPath)];
}
}
function failFunctionWithTag(tag) {
return function fail(message) {
throw new Error(`[${tag}] ${message}`);
};
}
function assertFunctionWithFail(fail) {
return function assert(value, message) {
if (!value) {
fail(message);
}
}
}
/**
* Deeply maps the passed `value` by recursively calling the `iteratee` on
* value's children. The value returned from the iteratee is used to map the
* children.
*
* The iteratee is invoked with three arguments - the `value` being iterated,
* the `fieldPath` of the current `value` relative to the original passed value,
* and the `stack` of ancestors of the current `value`.
*
* For the first time the `iteratee` will be called with the original `value`
* and empty arrays for `fieldPath` and `stack`.
*
* In other words, any `value` passed to the iteratee (except the first call,
* and assuming the ancestors keys were not mapped)
* will be equal to: `_.get(originalValue, fieldPath)`
*
* The recursion is called in pre-order depth-first-search. Meaning, the
* iteratee is called first on parent nodes and then on its children. Therefore
* if iteratee maps/replaces the parent node, then the children of the replaced
* node will be traversed.
*
* @example
* mapDeep({ prop: 'foo', arr: [ 'bar' , 1, 2 ] }, (value) => {
* if (_.isString(value)) return '__' + value;
* if (_.isNumber(value)) return value * 10;
* return value;
* })
* => { prop: '__foo', arr: [ '__bar', 10, 20 ] }
*
* mapDeep({ prop: 'foo', arr: [ 'bar' ] }, (value, fieldPath) => {
* if (_.isString(value)) return value + '__' + fieldPath.join('.');
* return value;
* })
* => { prop: 'foo__prop', arr: [ 'bar__arr.0' ] }
*
* @param {*} value A value to map
* @param {Function} iteratee Function (value: any, fieldPath: Array, stack: Array)
* @param {object} [options]
* @param {boolean} [options.iterateCollections] Default: true
* @param {boolean} [options.iterateScalars] Default: true
* @param {boolean} [options.postOrder] Change the invocation of iteratee to post-order depth-first-search. Default: false
* @param {Array} [_keyPath] For internal recursive use
* @param {Array} [_objectStack] For internal recursive use
* @returns {*}
*/
function mapDeep(value, iteratee, options = {}, _keyPath = [], _objectStack = []) {
const postOrder = _.get(options, 'postOrder', false);
let iterate;
if (_.isPlainObject(value) || _.isArray(value)) {
iterate = _.get(options, 'iterateCollections', true);
} else {
iterate = _.get(options, 'iterateScalars', true);
}
if (iterate && !postOrder) {
value = iteratee(value, _keyPath, _objectStack);
}
const childrenIterator = (val, key) => {
return mapDeep(val, iteratee, options, _.concat(_keyPath, key), _.concat(_objectStack, [value]));
};
if (_.isPlainObject(value)) {
value = _.mapValues(value, childrenIterator);
} else if (_.isArray(value)) {
value = _.map(value, childrenIterator);
}
if (iterate && postOrder) {
value = iteratee(value, _keyPath, _objectStack);
}
return value;
}
function fieldPathToString(fieldPath) {
return _.reduce(fieldPath, (accumulator, fieldName, index) => {
if (_.isString(fieldName) && /\W/.test(fieldName)) {
// field name is a string with a non-alphanumeric character
accumulator += `['${fieldName}']`;
} else if (_.isNumber(fieldName)) {
accumulator += `[${fieldName}]`;
} else {
if (index > 0) {
accumulator += '.';
}
accumulator += fieldName;
}
return accumulator;
}, '');
}
function getFirstExistingFile(fileNames, inputDir) {
const filePaths = _.map(fileNames, fileName => path.resolve(inputDir, fileName));
return findPromise(filePaths, (filePath) => fse.pathExists(filePath));
}
function parseFirstExistingFile(fileNames, inputDir) {
return getFirstExistingFile(fileNames, inputDir).then(filePath => {
if (filePath) {
return parseFile(filePath);
} else {
return null;
}
});
}
async function parseFile(filePath) {
const data = await fse.readFile(filePath, 'utf8');
return parseDataByFilePath(data, filePath);
}
function parseDataByFilePath(string, filePath) {
const extension = path.extname(filePath).substring(1);
let data;
switch (extension) {
case 'yml':
case 'yaml':
data = yaml.safeLoad(string, {schema: yaml.JSON_SCHEMA});
break;
case 'json':
data = JSON.parse(string);
break;
case 'toml':
data = toml.parse(string);
break;
case 'md':
data = parseMarkdownWithFrontMatter(string);
break;
default:
throw new Error(`parseDataByFilePath error, extension '${extension}' of file ${filePath} is not supported`);
}
return data;
}
function parseMarkdownWithFrontMatter(string) {
string = string.replace(/\r\n/g, '\n'); // normalize every CRLF, not just the first occurrence
let frontmatter = null;
let markdown = string;
let frontMatterTypes = [
{
type: 'yaml',
startDelimiter: '---\n',
endDelimiter: '\n---',
parse: (string) => yaml.safeLoad(string, {schema: yaml.JSON_SCHEMA})
},
{
type: 'toml',
startDelimiter: '+++\n',
endDelimiter: '\n+++',
parse: (string) => toml.parse(string)
},
{
type: 'json',
startDelimiter: '{\n',
endDelimiter: '\n}',
parse: (string) => JSON.parse('{' + string + '}') // the outer braces are consumed by the delimiters above
}
];
_.forEach(frontMatterTypes, fmType => {
if (string.startsWith(fmType.startDelimiter)) {
let index = string.indexOf(fmType.endDelimiter);
if (index !== -1) {
// The end delimiter must be followed by EOF or by a new line (possibly preceded with spaces)
// For example ("." used for spaces):
// |---
// |title: Title
// |---...
// |
// |Markdown Content
// |
// "index" points to the beginning of the second "---"
// "endDelimEndIndex" points to the end of the second "---"
// "afterEndDelim" is everything after the second "---"
// "afterEndDelimMatch" is the matched "...\n" after the second "---"
// frontmatter will be: {title: "Title"}
// markdown will be "\nMarkdown Content\n" (the first \n after end delimiter is discarded)
const endDelimEndIndex = index + fmType.endDelimiter.length;
const afterEndDelim = string.substring(endDelimEndIndex);
const afterEndDelimMatch = afterEndDelim.match(/^\s*?(\n|$)/);
if (afterEndDelimMatch) {
const data = string.substring(fmType.startDelimiter.length, index);
frontmatter = fmType.parse(data);
markdown = afterEndDelim.substring(afterEndDelimMatch[0].length);
}
}
}
});
return {
frontmatter: frontmatter,
markdown: markdown
};
}
function outputData(filePath, data) {
let res = stringifyDataByFilePath(data, filePath);
return fse.outputFile(filePath, res);
}
function stringifyDataByFilePath(data, filePath) {
const extension = path.extname(filePath).substring(1);
let result;
switch (extension) {
case 'yml':
case 'yaml':
result = yaml.safeDump(data, {noRefs: true});
break;
case 'json':
result = JSON.stringify(data, null, 4);
break;
case 'toml':
result = toml.stringify(data);
break;
case 'md':
result = '---\n' + yaml.safeDump(data.frontmatter, {noRefs: true}) + '---\n' + data.markdown;
break;
default:
throw new Error(`stringifyDataByFilePath error, extension '${extension}' of file ${filePath} is not supported`);
}
return result;
}
| forEachPromise | identifier_name |
server.go | package server
import (
"Polaris/src/common"
pb "Polaris/src/rpc"
"Polaris/src/util"
"net"
"Polaris/src/byzantineGroup"
"Polaris/src/configuration"
"Polaris/src/raftnode"
"github.com/op/go-logging"
grpcpool "github.com/processout/grpc-go-pool"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"encoding/binary"
"encoding/json"
"io/ioutil"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"fmt"
"os"
"strconv"
"strings"
"sync"
"time"
)
var logger = logging.MustGetLogger("server")
var RaftInputChannelSize int = 10240
var RaftOutputChannelSize int = 10240
const CHANNELLEN = 10240
type Server struct {
// server info
ID string
Address string
RPCPort string
rpcPeers []string
grpcServer *grpc.Server
addr string // Address:RPCPort
RaftID int
RaftPort string
raftPeers []string
join bool
raftNode *raftnode.RaftNode
// client request manager
requestManager *RequestManager
//raft layer
raftLayer *Raft
// dissemination layer
disseminationLayer *Dissemination
// current pbft leader Id
currentBGLeaderSuperLeadId int
BGsInfo []*byzantineGroup.ByzantineGroup
SuperLeafID int
BGID int
getLeaderID func() uint64
config *configuration.Configuration
// cycleId -> consensusState
log []*ConsensusState
logLock sync.RWMutex
skipCycleHash [][]byte
SBFTClientAddressForMembership string
SBFTClientAddressForBFTLayer string
SBFTReplicaAddressForBFTResult string
SBFTResultVerifierAddress string
SBFTClientConnection *byzantineGroup.Connection
resultReceiver *BftResultReceiver
resultVerifier *BftResultVerifier
txnStore *TxnBodyStore
tcpManager *TCPManager
serverTCPAddrMap map[string]string
round2LastFinishedCycle int
round3LastFinishedCycle int
}
func NewServer(configFilePath string, configHome string) *Server {
logger.Debug(configFilePath)
raw, err := ioutil.ReadFile(configFilePath)
common.AssertNil(err, "Cannot read config file", logger)
c := make(map[string]interface{})
err = json.Unmarshal(raw, &c)
common.AssertNil(err, "JSON parsing error", logger)
server := &Server{
ID: c["id"].(string),
Address: c["address"].(string),
RPCPort: c["rpc_port"].(string),
RaftID: int(c["raft_id"].(float64)),
RaftPort: c["raft_port"].(string),
join: c["join"].(bool),
BGID: int(c["bg_id"].(float64)),
SBFTClientAddressForMembership: c["SBFT_client_membership"].(string),
SBFTClientAddressForBFTLayer: c["SBFT_client_bft_layer"].(string),
SBFTReplicaAddressForBFTResult: c["SBFT_replica_bft_result"].(string),
SBFTResultVerifierAddress: c["SBFT_result_verifier"].(string),
disseminationLayer: NewDissemination(),
currentBGLeaderSuperLeadId: 0,
txnStore: NewTxnBodyStore(),
serverTCPAddrMap: make(map[string]string),
skipCycleHash: make([][]byte, 0, 512),
round2LastFinishedCycle: 0,
round3LastFinishedCycle: 0,
log: make([]*ConsensusState, common.MaxCycleNumberInStorage), | server.log[i] = NewConsensusState(len(server.BGsInfo), -1, server)
}
i1, err := strconv.Atoi(server.RPCPort)
common.AssertNil(err, "RPC port cannot be recognized.", logger)
tcpPort := common.CalcTCPPortFromRPCPort(i1)
server.tcpManager = NewTCPManager(strconv.Itoa(tcpPort))
server.tcpManager.SetServer(server)
logger.Infof("Server %s start ", server.Address+":"+server.RPCPort)
for _, s := range c["raft_peers"].([]interface{}) {
server.raftPeers = append(server.raftPeers, s.(string))
}
for _, s := range c["rpc_peers"].([]interface{}) {
server.rpcPeers = append(server.rpcPeers, s.(string))
}
server.resultReceiver = NewBftResultReceiver(server.SBFTReplicaAddressForBFTResult)
server.resultVerifier = NewBftResultVerifier(server.SBFTResultVerifierAddress)
server.disseminationLayer.SetServer(server)
server.config = configuration.NewConfiguration(server.ID, configHome)
server.addr = server.Address + ":" + server.RPCPort
// initialize byzantine groups (list of superleafs)
for _, bg := range c["byzantine_groups"].([]interface{}) {
bgInfo := byzantineGroup.NewByzantineGroup(bg)
server.BGsInfo = append(server.BGsInfo, bgInfo)
}
var succ bool
server.SuperLeafID, succ = server.BGsInfo[server.BGID].GetSuperLeafIdByServer(
server.Address + ":" + server.RPCPort)
common.AssertTrue(succ, "Cannot fetch SuperleafID", logger)
logger.Infof("Server %s SuperLeafId %d", server.Address+":"+server.RPCPort, server.SuperLeafID)
//create grpc server
sopts := []grpc.ServerOption{grpc.InitialWindowSize(2000000)}
sopts = append(sopts, grpc.InitialConnWindowSize(2000000))
server.grpcServer = grpc.NewServer(sopts...)
pb.RegisterPolarisAllServer(server.grpcServer, server)
reflection.Register(server.grpcServer)
logger.Info("GRPC server created")
return server
}
func (s *Server) Start() {
logger.Debugf("Server %v start...", s.ID)
// initialize Raft layer
proposeC := make(chan string)
defer close(proposeC)
confChangeC := make(chan raftpb.ConfChange)
defer close(confChangeC)
var raft *Raft
getSnapshot := func() ([]byte, error) { return raft.GetSnapshot() }
commitC, errorC, snapshotterReady, getLeaderID, raftNode, leaderC := raftnode.NewRaftNode(
s.RaftID, s.RaftPort, s.raftPeers, s.join, getSnapshot, proposeC, confChangeC, RaftOutputChannelSize)
s.getLeaderID = getLeaderID
s.raftNode = raftNode
s.raftLayer = NewRaft(s, <-snapshotterReady, proposeC, commitC, errorC)
s.requestManager = NewRequestManager(s,
int(s.config.GetClientRequestCycleTimeout()),
s.config.GetMaxBatchSize(),
s.raftLayer.slRequestChan,
s.raftLayer.txnBodyChan)
// wait for raft leader election
<-leaderC
go s.requestManager.Start()
go s.raftLayer.Start()
go s.disseminationLayer.Start()
if s.IsLeader() {
// create a thread receiving the result from SBFT replica
go s.resultReceiver.Start()
logger.Debugf("leader %v start process sbft result thread", s.ID)
// create a thread handling the result from SBFT replica
go s.processSBFTResult()
}
s.saveLeaderAddress()
s.tcpManager.Start()
tcpAddrList := make([]string, 0)
for _, bg := range s.BGsInfo {
for _, sAddr := range bg.GetServerList() {
items := strings.Split(sAddr, ":")
portNum, _ := strconv.Atoi(items[1])
tPort := common.CalcTCPPortFromRPCPort(portNum)
tAddr := items[0] + ":" + strconv.Itoa(tPort)
s.serverTCPAddrMap[sAddr] = tAddr
tcpAddrList = append(tcpAddrList, tAddr)
}
}
logger.Debugf("tpc server addrs %v", tcpAddrList)
s.tcpManager.DialAll(tcpAddrList)
// Start a RPC Listener
RPCListener, err := net.Listen("tcp", ":"+s.RPCPort)
if err != nil {
logger.Errorf("Failed to listen port %s, %v", s.RPCPort, err)
}
err = s.grpcServer.Serve(RPCListener)
if err != nil {
logger.Errorf("Cannot start to serve RPC calls %v", err)
}
}
// get the monitor (the leader of this server's superleaf, i.e. its Raft group)
func (s *Server) GetLeader() string {
id := s.getLeaderID()
if id == 0 {
return ""
}
leader := s.rpcPeers[id-1]
go s.BGsInfo[s.BGID].UpdateLeader(leader)
logger.Debugf("server %v get leader %v, raft id %v", s.addr, leader, id)
return leader
}
func (s *Server) IsLeader() bool {
// when the raft leader calls getLeaderID, it will return 0;
// this only happens the first time
return int(s.getLeaderID()) == s.RaftID || int(s.getLeaderID()) == 0
}
func (s *Server) IsBGLeader() bool {
if s.currentBGLeaderSuperLeadId != s.SuperLeafID {
return false
}
return s.IsLeader()
}
func (s *Server) saveLeaderAddress() {
fName := fmt.Sprintf("./leader_%v.log", s.ID)
f, err := os.Create(fName)
if err != nil {
logger.Fatalf("create leader log file error %v", err)
}
defer f.Close()
leader := s.GetLeader()
if leader == "" {
leader = s.addr
}
logger.Debugf("save leader id %v", leader)
f.WriteString(leader)
f.Sync()
}
func (s *Server) GetRaftTerm() uint64 {
if s.raftNode == nil {
logger.Error("Raft Node is nil!")
return raft.InvalidRaftTerm
}
return s.raftNode.GetRaftTerm()
}
func (s *Server) GetSBFTClientConnection() *byzantineGroup.Connection {
if s.SBFTClientConnection == nil {
s.SBFTClientConnection = byzantineGroup.NewConnection(s.SBFTClientAddressForBFTLayer, queueLen, 1)
}
return s.SBFTClientConnection
}
func (s *Server) handleRPCError(err error) (*grpcpool.Pool, bool) {
errCode := grpc.Code(err)
if errCode == common.NotLeaderErrorCode {
leaderId := grpc.ErrorDesc(err)
s.BGsInfo[s.BGID].UpdateLeader(leaderId)
conn := s.BGsInfo[s.BGID].GetConnection(leaderId).GetConnectionPool()
return conn, true
}
logger.Warningf("Unhandled ERROR: %v", err)
return nil, false
}
// list of monitors (leaders of superleafs)
func (s *Server) GetBGMembers() []string {
leaders := make([]string, 0)
for i := 0; i < s.BGsInfo[s.BGID].GetSuperLeafNum(); i++ {
l := s.BGsInfo[s.BGID].GetLeader(i)
if l == s.GetServerAddr() {
continue
}
leaders = append(leaders, l)
}
return leaders
}
func (s *Server) GetServerAddr() string {
return s.addr
}
// return consensusState for cycleId
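// The log slice is used as a fixed-size ring buffer: cycleId is mapped to
// cycleId % MaxCycleNumberInStorage, and a stale entry in that slot is replaced
// with a fresh ConsensusState for the requested cycle.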
func (s *Server) GetConsensusStateByCycleId(cycleId int) *ConsensusState {
s.logLock.Lock()
defer s.logLock.Unlock()
common.AssertTrue(cycleId >= 0, "cycleID cannot be negative!", logger)
common.AssertTrue(cycleId > s.round2LastFinishedCycle-common.MaxCycleNumberInStorage, "Cannot access stale cycles!", logger)
// cycle may not complete in order
index := cycleId % common.MaxCycleNumberInStorage
if s.log[index].cycleId != cycleId {
// name-removed: Here we overwrite stale records
s.log[index] = NewConsensusState(len(s.BGsInfo), cycleId, s)
}
return s.log[index]
}
// check if the server has received the state response of bgId for cycleId
// return true: already received
// false: not received
func (s *Server) CheckBGState(cycleId int, bgId int) bool {
logger.Debugf("check bg state response for cycle %v bgId %v", cycleId, bgId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewCheckBGStateOp(bgId, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("Error: should return true")
}
logger.Debugf("state response for cycle %v bgId %v is %v",
cycleId, bgId, op.GetResult())
return op.GetResult()
}
func (s *Server) CheckStateRequest(sp *pb.StateRequestMessage) bool {
return s.checkRequestQC(sp)
}
func (s *Server) CheckStateResponse(sp *pb.StateResponseMessage) bool {
r := s.checkQC(sp)
if r {
// if QC check ok, then replicate to the Raft group
s.disseminationLayer.AddStateResponse(sp)
}
return r
}
// if the state response is ready, then get the state response
// otherwise return nil and put request to pending list
func (s *Server) GetOrWaitBGStateResponse(stateRequest *pb.StateRequestMessage) *pb.StateResponseMessage {
logger.Debugf("Get state response for cycle %v from server %v",
stateRequest.CycleId, stateRequest.SenderId)
cs := s.GetConsensusStateByCycleId(int(stateRequest.CycleId))
op := NewGetOrWaitBGStateResponseOp(stateRequest, cs, s.BGID)
if !cs.AddOPandWait(op) {
logger.Fatal("Error: should return true")
}
logger.Debugf("get or wait state response for cycle %v bgId %v is %v for server %v",
stateRequest.CycleId, s.BGID, op.GetResult(), stateRequest.SenderId)
return op.GetResult()
}
// get the own bg info
func (s *Server) GetSelfBGInfo() *byzantineGroup.ByzantineGroup {
return s.BGsInfo[s.BGID]
}
// add the state response into log
func (s *Server) AddBGState(sp *pb.StateResponseMessage) {
logger.Debugf("Add state response for bg %v cycleId %v", sp.BgId, sp.CycleId)
cs := s.GetConsensusStateByCycleId(int(sp.CycleId))
op := NewAddStateResponseOp(sp, cs)
cs.AddOPandWait(op)
for cycleID := s.round3LastFinishedCycle + 1; ; cycleID++ {
if s.CheckCycleComplete(cycleID) {
s.round3LastFinishedCycle = cycleID
// name-removed: discard Raft logs
if cycleID%100 == 0 {
stat := s.raftNode.Node.Status()
index := stat.Applied
s.raftNode.RaftStorage.Compact(index)
}
} else {
break
}
}
logger.Debugf("Successfully add state response for bg %v cycle %v", sp.BgId, sp.CycleId)
}
// check if the cycle is complete: the server has received the state responses of all BGs for cycleId
// return true: received all state responses
// false: at least one state response is missing
func (s *Server) CheckCycleComplete(cycleId int) bool {
logger.Debugf("Check cycle complete cycleId %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewCheckCycleCompleteOp(cs)
if !cs.AddOPandWait(op) {
logger.Fatal("Should return true check cycle complete")
}
return op.GetResult()
}
func (s *Server) GetAndWaitOwnBGStateResponse(cycleId int) *pb.StateResponseMessage {
logger.Debugf("get and wait own bg state response for cycle %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewGetAndWaitOwnBGStateResponseOp(s.BGID, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get and wait own bg state response")
}
return op.GetResult()
}
// return the state response of its own BG for cycleId
func (s *Server) GetBGStateResponseForCycle(cycleId int) *pb.StateResponseMessage {
logger.Debugf("get bg state response for cycle %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewGetBGStateResponseOp(s.BGID, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get bg state response")
}
return op.GetResult()
}
// wait until the cycle completes, then return the cycle result
func (s *Server) GetCycleResult(cycleId int) []*pb.StateResponseMessage {
logger.Debugf("Get cycle result for cycle %v", cycleId)
if cycleId <= s.round2LastFinishedCycle-common.MaxCycleNumberInStorage {
panic("name-removed: this cycle has been overwritten!")
}
cs := s.GetConsensusStateByCycleId(cycleId)
common.AssertTrue(cs.cycleId == cycleId, "Cycle IDs should match!", logger)
csWait := s.GetConsensusStateByCycleId(cycleId + int(s.config.GetMaxPipelineDepth()))
common.AssertTrue(csWait.cycleId == cycleId+int(s.config.GetMaxPipelineDepth()), "JustWait Cycle IDs should match!", logger)
op := NewWaitCycleComplete(cs)
opJustWait := NewJustWaitCycleComplete(csWait)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get cycle result")
}
// name-removed: wait until (cycleId + depth) completes
if !cs.AddOPandWait(opJustWait) {
logger.Fatal("should return true get cycle result")
}
return op.GetResult()
}
// send start cycle request to BG leader
// TODO: start cycle request should have a proof
// only requests that carry a valid proof can trigger the start of a cycle
func (s *Server) StartCycle(stateRequest *pb.StateRequestMessage) {
if s.IsLeader() {
// if the node is monitor then process the start cycle request
s.requestManager.AddSkipCycle(SkipCycleApplicationMaterial{
skipTo: uint32(stateRequest.CycleId),
BGApplicant: uint32(stateRequest.BgId),
view: stateRequest.View,
sequence: stateRequest.Sequence,
proof: stateRequest.Proof,
signedOn: stateRequest.Hash,
})
return
}
// otherwise, send the state request to monitor
s.sendStateRequestToMonitor(stateRequest)
}
func (s *Server) sendStateRequestToMonitor(sr *pb.StateRequestMessage) {
logger.Debugf("Send state request from server %v bg %v of cycle %v to monitor since state response for bg %v is not ready", sr.SenderId, sr.BgId, sr.CycleId, s.BGID)
dstAddr := s.GetLeader()
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewStateRequestToMonitorEvent(sr, 2*time.Second, s))
}
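// processSBFTResult consumes ordered results from the SBFT replica: it reassembles
// full transaction bodies when headers and bodies are separated, unpacks any skipped
// (empty) cycles, and hands each cycle's state response to the Raft layer.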
func (s *Server) processSBFTResult() {
for {
// blocking until next cycle result is received from SBFT replica
bftResult, senderIds := s.resultReceiver.GetNextCycle()
// name-removed: this is in order
s.round2LastFinishedCycle = int(bftResult.CurrentCycle)
logger.Debugf("Current cycle %d, txn sender Ids %v", bftResult.CurrentCycle, senderIds)
s.reassembleTxn(bftResult)
bftResult.Hash = make([]byte, 8, 40)
binary.LittleEndian.PutUint32(bftResult.Hash[0:4], bftResult.LastCycle)
binary.LittleEndian.PutUint32(bftResult.Hash[4:8], bftResult.CurrentCycle)
bftResult.Hash = append(bftResult.Hash, util.ComputeBftResultHash(bftResult)...)
if s.config.IsHeaderBodySeparate() && bftResult.FullTxnList == nil {
logger.Fatal("Txn bodies are not attached")
}
// Unpack SkipCycle results
// Last cycle is not empty
if bftResult.CurrentCycle-bftResult.LastCycle > 1 {
logger.Warningf("Unpacking cycle %d.", bftResult.CurrentCycle)
}
stateResponse := &pb.StateResponseMessage{
CycleId: int32(bftResult.CurrentCycle),
SenderId: s.ID,
BgId: int32(s.BGID),
Result: bftResult,
}
stateResponse.Result.ReplaceMsgListWithHash = false
s.raftLayer.HandleOwnStateResponse(stateResponse)
emptyResult := pb.BftResult{
View: bftResult.View,
Sequence: bftResult.Sequence,
Nonce: bftResult.Nonce,
LastCycle: bftResult.LastCycle,
CurrentCycle: bftResult.CurrentCycle,
MessageList: nil,
Hash: bftResult.Hash,
CommitProof: bftResult.CommitProof,
BodyAttached: false,
FullTxnList: nil,
ReplaceMsgListWithHash: true,
}
// Empty cycles
if bftResult.CurrentCycle-bftResult.LastCycle > 1 {
logger.Warningf("This BFT result has cycle %d to %d", bftResult.LastCycle+1, bftResult.CurrentCycle)
}
for cycleID := bftResult.LastCycle + 1; cycleID < bftResult.CurrentCycle; cycleID++ {
logger.Warningf("Unpacking cycle %d.", cycleID)
stateResponse := &pb.StateResponseMessage{
CycleId: int32(cycleID),
SenderId: s.ID,
BgId: int32(s.BGID),
Result: &emptyResult,
}
s.raftLayer.HandleOwnStateResponse(stateResponse)
}
// check if the list of txn is sent by this super leaf
for _, txnSenderId := range senderIds {
if txnSenderId[:4] == s.ID[:4] {
logger.Debugf("cycle %v is partially sent by server %v", stateResponse.CycleId, s.ID)
s.requestManager.previousCycleComplete <- true
}
}
}
}
func (s *Server) forwardTxnToLeader(txn *pb.TransactionMessage) {
dstAddr := s.GetLeader()
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewTxnMsgToMonitorEvent(txn, 2*time.Second, s))
}
func (s *Server) forwardTxnBodyToLeader(txn *pb.TxnBodyMessage) {
dstAddr := s.GetLeader()
logger.Debugf("forward txn body to leader: txn num %v from server %v", len(txn.TxnMessageBlock.TxnMessageList), txn.SenderId)
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewTxnBodyToMonitorEvent(txn, 2*time.Second, s))
}
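// reassembleTxn records whether bodies are attached and, when header/body separation
// is enabled, looks up each ordered header's full transaction body in the local store
// and attaches it to the BFT result.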
func (s *Server) reassembleTxn(bftResult *pb.BftResult) {
bftResult.BodyAttached = s.config.IsHeaderBodySeparate()
if s.config.IsHeaderBodySeparate() {
bftResult.FullTxnList = make([]*pb.TxnMessageBlock, 0)
for _, msg := range bftResult.MessageList {
txnList := make([]*pb.TransactionMessage, 0)
for _, header := range msg.TxnHeaderBlock.TxnHeaderList {
txn := s.txnStore.GetBodyRequest(header.HashContent)
txnList = append(txnList, txn)
}
orderedMessage := &pb.TxnMessageBlock{
TxnMessageList: txnList,
}
bftResult.FullTxnList = append(bftResult.FullTxnList, orderedMessage)
}
}
} | }
for i := 0; i < common.MaxCycleNumberInStorage; i++ {
// Fill it with -1 | random_line_split |
server.go | package server
import (
"Polaris/src/common"
pb "Polaris/src/rpc"
"Polaris/src/util"
"net"
"Polaris/src/byzantineGroup"
"Polaris/src/configuration"
"Polaris/src/raftnode"
"github.com/op/go-logging"
grpcpool "github.com/processout/grpc-go-pool"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"encoding/binary"
"encoding/json"
"io/ioutil"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"fmt"
"os"
"strconv"
"strings"
"sync"
"time"
)
var logger = logging.MustGetLogger("server")
var RaftInputChannelSize int = 10240
var RaftOutputChannelSize int = 10240
const CHANNELLEN = 10240
type Server struct {
// server info
ID string
Address string
RPCPort string
rpcPeers []string
grpcServer *grpc.Server
addr string // Address:RPCPort
RaftID int
RaftPort string
raftPeers []string
join bool
raftNode *raftnode.RaftNode
// client request manager
requestManager *RequestManager
//raft layer
raftLayer *Raft
// dissemination layer
disseminationLayer *Dissemination
// current pbft leader Id
currentBGLeaderSuperLeadId int
BGsInfo []*byzantineGroup.ByzantineGroup
SuperLeafID int
BGID int
getLeaderID func() uint64
config *configuration.Configuration
// cycleId -> consensusState
log []*ConsensusState
logLock sync.RWMutex
skipCycleHash [][]byte
SBFTClientAddressForMembership string
SBFTClientAddressForBFTLayer string
SBFTReplicaAddressForBFTResult string
SBFTResultVerifierAddress string
SBFTClientConnection *byzantineGroup.Connection
resultReceiver *BftResultReceiver
resultVerifier *BftResultVerifier
txnStore *TxnBodyStore
tcpManager *TCPManager
serverTCPAddrMap map[string]string
round2LastFinishedCycle int
round3LastFinishedCycle int
}
func NewServer(configFilePath string, configHome string) *Server {
logger.Debug(configFilePath)
raw, err := ioutil.ReadFile(configFilePath)
common.AssertNil(err, "Cannot read config file", logger)
c := make(map[string]interface{})
err = json.Unmarshal(raw, &c)
common.AssertNil(err, "JSON parsing error", logger)
server := &Server{
ID: c["id"].(string),
Address: c["address"].(string),
RPCPort: c["rpc_port"].(string),
RaftID: int(c["raft_id"].(float64)),
RaftPort: c["raft_port"].(string),
join: c["join"].(bool),
BGID: int(c["bg_id"].(float64)),
SBFTClientAddressForMembership: c["SBFT_client_membership"].(string),
SBFTClientAddressForBFTLayer: c["SBFT_client_bft_layer"].(string),
SBFTReplicaAddressForBFTResult: c["SBFT_replica_bft_result"].(string),
SBFTResultVerifierAddress: c["SBFT_result_verifier"].(string),
disseminationLayer: NewDissemination(),
currentBGLeaderSuperLeadId: 0,
txnStore: NewTxnBodyStore(),
serverTCPAddrMap: make(map[string]string),
skipCycleHash: make([][]byte, 0, 512),
round2LastFinishedCycle: 0,
round3LastFinishedCycle: 0,
log: make([]*ConsensusState, common.MaxCycleNumberInStorage),
}
for i := 0; i < common.MaxCycleNumberInStorage; i++ {
// Fill it with -1
server.log[i] = NewConsensusState(len(server.BGsInfo), -1, server)
}
i1, err := strconv.Atoi(server.RPCPort)
common.AssertNil(err, "RPC port cannot be recognized.", logger)
tcpPort := common.CalcTCPPortFromRPCPort(i1)
server.tcpManager = NewTCPManager(strconv.Itoa(tcpPort))
server.tcpManager.SetServer(server)
logger.Infof("Server %s start ", server.Address+":"+server.RPCPort)
for _, s := range c["raft_peers"].([]interface{}) {
server.raftPeers = append(server.raftPeers, s.(string))
}
for _, s := range c["rpc_peers"].([]interface{}) {
server.rpcPeers = append(server.rpcPeers, s.(string))
}
server.resultReceiver = NewBftResultReceiver(server.SBFTReplicaAddressForBFTResult)
server.resultVerifier = NewBftResultVerifier(server.SBFTResultVerifierAddress)
server.disseminationLayer.SetServer(server)
server.config = configuration.NewConfiguration(server.ID, configHome)
server.addr = server.Address + ":" + server.RPCPort
// initialize byzantine groups (list of superleafs)
for _, bg := range c["byzantine_groups"].([]interface{}) {
bgInfo := byzantineGroup.NewByzantineGroup(bg)
server.BGsInfo = append(server.BGsInfo, bgInfo)
}
var succ bool
server.SuperLeafID, succ = server.BGsInfo[server.BGID].GetSuperLeafIdByServer(
server.Address + ":" + server.RPCPort)
common.AssertTrue(succ, "Cannot fetch SuperleafID", logger)
logger.Infof("Server %s SuperLeafId %d", server.Address+":"+server.RPCPort, server.SuperLeafID)
//create grpc server
sopts := []grpc.ServerOption{grpc.InitialWindowSize(2000000)}
sopts = append(sopts, grpc.InitialConnWindowSize(2000000))
server.grpcServer = grpc.NewServer(sopts...)
pb.RegisterPolarisAllServer(server.grpcServer, server)
reflection.Register(server.grpcServer)
logger.Info("GRPC server created")
return server
}
func (s *Server) Start() {
logger.Debugf("Server %v start...", s.ID)
// initialize Raft layer
proposeC := make(chan string)
defer close(proposeC)
confChangeC := make(chan raftpb.ConfChange)
defer close(confChangeC)
var raft *Raft
getSnapshot := func() ([]byte, error) { return raft.GetSnapshot() }
commitC, errorC, snapshotterReady, getLeaderID, raftNode, leaderC := raftnode.NewRaftNode(
s.RaftID, s.RaftPort, s.raftPeers, s.join, getSnapshot, proposeC, confChangeC, RaftOutputChannelSize)
s.getLeaderID = getLeaderID
s.raftNode = raftNode
s.raftLayer = NewRaft(s, <-snapshotterReady, proposeC, commitC, errorC)
s.requestManager = NewRequestManager(s,
int(s.config.GetClientRequestCycleTimeout()),
s.config.GetMaxBatchSize(),
s.raftLayer.slRequestChan,
s.raftLayer.txnBodyChan)
// wait for raft leader election
<-leaderC
go s.requestManager.Start()
go s.raftLayer.Start()
go s.disseminationLayer.Start()
if s.IsLeader() {
// create a thread receiving the result from SBFT replica
go s.resultReceiver.Start()
logger.Debugf("leader %v start process sbft result thread", s.ID)
// create a thread handling the result from SBFT replica
go s.processSBFTResult()
}
s.saveLeaderAddress()
s.tcpManager.Start()
tcpAddrList := make([]string, 0)
for _, bg := range s.BGsInfo {
for _, sAddr := range bg.GetServerList() {
items := strings.Split(sAddr, ":")
portNum, _ := strconv.Atoi(items[1])
tPort := common.CalcTCPPortFromRPCPort(portNum)
tAddr := items[0] + ":" + strconv.Itoa(tPort)
s.serverTCPAddrMap[sAddr] = tAddr
tcpAddrList = append(tcpAddrList, tAddr)
}
}
logger.Debugf("tpc server addrs %v", tcpAddrList)
s.tcpManager.DialAll(tcpAddrList)
// Start a RPC Listener
RPCListener, err := net.Listen("tcp", ":"+s.RPCPort)
if err != nil {
logger.Errorf("Failed to listen port %s, %v", s.RPCPort, err)
}
err = s.grpcServer.Serve(RPCListener)
if err != nil {
logger.Errorf("Cannot start to serve RPC calls %v", err)
}
}
// get the monitor (the leader of this server's superleaf, i.e. its Raft group)
func (s *Server) GetLeader() string {
id := s.getLeaderID()
if id == 0 {
return ""
}
leader := s.rpcPeers[id-1]
go s.BGsInfo[s.BGID].UpdateLeader(leader)
logger.Debugf("server %v get leader %v, raft id %v", s.addr, leader, id)
return leader
}
func (s *Server) IsLeader() bool {
// when the raft leader calls getLeaderID, it returns 0
// this only happens the first time
return int(s.getLeaderID()) == s.RaftID || int(s.getLeaderID()) == 0
}
func (s *Server) IsBGLeader() bool {
if s.currentBGLeaderSuperLeadId != s.SuperLeafID {
return false
}
return s.IsLeader()
}
func (s *Server) saveLeaderAddress() {
fName := fmt.Sprintf("./leader_%v.log", s.ID)
f, err := os.Create(fName)
if err != nil {
logger.Fatalf("create leader log file error %v", err)
}
defer f.Close()
leader := s.GetLeader()
if leader == "" {
leader = s.addr
}
logger.Debugf("save leader id %v", leader)
f.WriteString(leader)
f.Sync()
}
func (s *Server) GetRaftTerm() uint64 {
if s.raftNode == nil {
logger.Error("Raft Node is nil!")
return raft.InvalidRaftTerm
}
return s.raftNode.GetRaftTerm()
}
func (s *Server) GetSBFTClientConnection() *byzantineGroup.Connection {
if s.SBFTClientConnection == nil {
s.SBFTClientConnection = byzantineGroup.NewConnection(s.SBFTClientAddressForBFTLayer, queueLen, 1)
}
return s.SBFTClientConnection
}
func (s *Server) | (err error) (*grpcpool.Pool, bool) {
errCode := grpc.Code(err)
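// a not-leader error carries the current leader's address in its description; switch to that leader's connection pool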
if errCode == common.NotLeaderErrorCode {
leaderId := grpc.ErrorDesc(err)
s.BGsInfo[s.BGID].UpdateLeader(leaderId)
conn := s.BGsInfo[s.BGID].GetConnection(leaderId).GetConnectionPool()
return conn, true
}
logger.Warningf("Unhandled ERROR: %v", err)
return nil, false
}
// list of monitors (leaders of superleafs)
func (s *Server) GetBGMembers() []string {
leaders := make([]string, 0)
for i := 0; i < s.BGsInfo[s.BGID].GetSuperLeafNum(); i++ {
l := s.BGsInfo[s.BGID].GetLeader(i)
if l == s.GetServerAddr() {
continue
}
leaders = append(leaders, l)
}
return leaders
}
func (s *Server) GetServerAddr() string {
return s.addr
}
// return consensusState for cycleId
func (s *Server) GetConsensusStateByCycleId(cycleId int) *ConsensusState {
s.logLock.Lock()
defer s.logLock.Unlock()
common.AssertTrue(cycleId >= 0, "cycleID cannot be negative!", logger)
common.AssertTrue(cycleId > s.round2LastFinishedCycle-common.MaxCycleNumberInStorage, "Cannot access stale cycles!", logger)
// cycle may not complete in order
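// the log is used as a ring buffer of MaxCycleNumberInStorage entries, indexed by cycleId modulo its capacity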
index := cycleId % common.MaxCycleNumberInStorage
if s.log[index].cycleId != cycleId {
// name-removed: Here we overwrite stale records
s.log[index] = NewConsensusState(len(s.BGsInfo), cycleId, s)
}
return s.log[index]
}
// check if the server has received the state response of bgId for cycleId
// return true: already received
// false: not received
func (s *Server) CheckBGState(cycleId int, bgId int) bool {
logger.Debugf("check bg state response for cycle %v bgId %v", cycleId, bgId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewCheckBGStateOp(bgId, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("Error: should return true")
}
logger.Debugf("state response for cycle %v bgId %v is %v",
cycleId, bgId, op.GetResult())
return op.GetResult()
}
func (s *Server) CheckStateRequest(sp *pb.StateRequestMessage) bool {
return s.checkRequestQC(sp)
}
func (s *Server) CheckStateResponse(sp *pb.StateResponseMessage) bool {
r := s.checkQC(sp)
if r {
// if QC check ok, then replicate to the Raft group
s.disseminationLayer.AddStateResponse(sp)
}
return r
}
// if the state response is ready, then get the state response
// otherwise return nil and put request to pending list
func (s *Server) GetOrWaitBGStateResponse(stateRequest *pb.StateRequestMessage) *pb.StateResponseMessage {
logger.Debugf("Get state response for cycle %v from server %v",
stateRequest.CycleId, stateRequest.SenderId)
cs := s.GetConsensusStateByCycleId(int(stateRequest.CycleId))
op := NewGetOrWaitBGStateResponseOp(stateRequest, cs, s.BGID)
if !cs.AddOPandWait(op) {
logger.Fatal("Error: should return true")
}
logger.Debugf("get or wait state response for cycle %v bgId %v is %v for server %v",
stateRequest.CycleId, s.BGID, op.GetResult(), stateRequest.SenderId)
return op.GetResult()
}
// return this server's own BG info
func (s *Server) GetSelfBGInfo() *byzantineGroup.ByzantineGroup {
return s.BGsInfo[s.BGID]
}
// add the state response into log
func (s *Server) AddBGState(sp *pb.StateResponseMessage) {
logger.Debugf("Add state response for bg %v cycleId %v", sp.BgId, sp.CycleId)
cs := s.GetConsensusStateByCycleId(int(sp.CycleId))
op := NewAddStateResponseOp(sp, cs)
cs.AddOPandWait(op)
for cycleID := s.round3LastFinishedCycle + 1; ; cycleID++ {
if s.CheckCycleComplete(cycleID) {
s.round3LastFinishedCycle = cycleID
// name-removed: discard Raft logs
if cycleID%100 == 0 {
stat := s.raftNode.Node.Status()
index := stat.Applied
s.raftNode.RaftStorage.Compact(index)
}
} else {
break
}
}
logger.Debugf("Successfully add state response for bg %v cycle %v", sp.BgId, sp.CycleId)
}
// check if the cycle is complete: the server has received the state responses of all BGs for cycleId
// return true: received all state response
// false: at least one state response is missing
func (s *Server) CheckCycleComplete(cycleId int) bool {
logger.Debugf("Check cycle complete cycleId %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewCheckCycleCompleteOp(cs)
if !cs.AddOPandWait(op) {
logger.Fatal("Should return true check cycle complete")
}
return op.GetResult()
}
func (s *Server) GetAndWaitOwnBGStateResponse(cycleId int) *pb.StateResponseMessage {
logger.Debugf("get and wait own bg state response for cycle %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewGetAndWaitOwnBGStateResponseOp(s.BGID, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get and wait own bg state response")
}
return op.GetResult()
}
// return the state response of its own BG for cycleId
func (s *Server) GetBGStateResponseForCycle(cycleId int) *pb.StateResponseMessage {
logger.Debugf("get bg state response for cycle %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewGetBGStateResponseOp(s.BGID, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get bg state response")
}
return op.GetResult()
}
// return the cycle result until the cycle completes
func (s *Server) GetCycleResult(cycleId int) []*pb.StateResponseMessage {
logger.Debugf("Get cycle result for cycle %v", cycleId)
if cycleId <= s.round2LastFinishedCycle-common.MaxCycleNumberInStorage {
panic("name-removed: this cycle has been overwritten!")
}
cs := s.GetConsensusStateByCycleId(cycleId)
common.AssertTrue(cs.cycleId == cycleId, "Cycle IDs should match!", logger)
csWait := s.GetConsensusStateByCycleId(cycleId + int(s.config.GetMaxPipelineDepth()))
common.AssertTrue(csWait.cycleId == cycleId+int(s.config.GetMaxPipelineDepth()), "JustWait Cycle IDs should match!", logger)
op := NewWaitCycleComplete(cs)
opJustWait := NewJustWaitCycleComplete(csWait)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get cycle result")
}
// name-removed: wait until (cycleId + depth) completes
if !cs.AddOPandWait(opJustWait) {
logger.Fatal("should return true get cycle result")
}
return op.GetResult()
}
// send start cycle request to BG leader
// TODO: start cycle request should have a proof
// only a request with a valid proof can trigger the start of a cycle
func (s *Server) StartCycle(stateRequest *pb.StateRequestMessage) {
if s.IsLeader() {
// if the node is monitor then process the start cycle request
s.requestManager.AddSkipCycle(SkipCycleApplicationMaterial{
skipTo: uint32(stateRequest.CycleId),
BGApplicant: uint32(stateRequest.BgId),
view: stateRequest.View,
sequence: stateRequest.Sequence,
proof: stateRequest.Proof,
signedOn: stateRequest.Hash,
})
return
}
// otherwise, send the state request to monitor
s.sendStateRequestToMonitor(stateRequest)
}
func (s *Server) sendStateRequestToMonitor(sr *pb.StateRequestMessage) {
logger.Debugf("Send state request from server %v bg %v of cycle %v to monitor since state response for bg %v is not ready", sr.SenderId, sr.BgId, sr.CycleId, s.BGID)
dstAddr := s.GetLeader()
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewStateRequestToMonitorEvent(sr, 2*time.Second, s))
}
func (s *Server) processSBFTResult() {
for {
// blocking until next cycle result is received from SBFT replica
bftResult, senderIds := s.resultReceiver.GetNextCycle()
// name-removed: this is in order
s.round2LastFinishedCycle = int(bftResult.CurrentCycle)
logger.Debugf("Current cycle %d, txn sender Ids %v", bftResult.CurrentCycle, senderIds)
s.reassembleTxn(bftResult)
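// hash layout: last cycle id (4 bytes), then current cycle id (4 bytes), then the hash of the BFT result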
bftResult.Hash = make([]byte, 8, 40)
binary.LittleEndian.PutUint32(bftResult.Hash[0:4], bftResult.LastCycle)
binary.LittleEndian.PutUint32(bftResult.Hash[4:8], bftResult.CurrentCycle)
bftResult.Hash = append(bftResult.Hash, util.ComputeBftResultHash(bftResult)...)
if s.config.IsHeaderBodySeparate() && bftResult.FullTxnList == nil {
logger.Fatal("Txn bodies are not attached")
}
// Unpack SkipCycle results
// Last cycle is not empty
if bftResult.CurrentCycle-bftResult.LastCycle > 1 {
logger.Warningf("Unpacking cycle %d.", bftResult.CurrentCycle)
}
stateResponse := &pb.StateResponseMessage{
CycleId: int32(bftResult.CurrentCycle),
SenderId: s.ID,
BgId: int32(s.BGID),
Result: bftResult,
}
stateResponse.Result.ReplaceMsgListWithHash = false
s.raftLayer.HandleOwnStateResponse(stateResponse)
emptyResult := pb.BftResult{
View: bftResult.View,
Sequence: bftResult.Sequence,
Nonce: bftResult.Nonce,
LastCycle: bftResult.LastCycle,
CurrentCycle: bftResult.CurrentCycle,
MessageList: nil,
Hash: bftResult.Hash,
CommitProof: bftResult.CommitProof,
BodyAttached: false,
FullTxnList: nil,
ReplaceMsgListWithHash: true,
}
// Empty cycles
if bftResult.CurrentCycle-bftResult.LastCycle > 1 {
logger.Warningf("This BFT result has cycle %d to %d", bftResult.LastCycle+1, bftResult.CurrentCycle)
}
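// replicate an empty placeholder state response for each skipped cycle between LastCycle and CurrentCycle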
for cycleID := bftResult.LastCycle + 1; cycleID < bftResult.CurrentCycle; cycleID++ {
logger.Warningf("Unpacking cycle %d.", cycleID)
stateResponse := &pb.StateResponseMessage{
CycleId: int32(cycleID),
SenderId: s.ID,
BgId: int32(s.BGID),
Result: &emptyResult,
}
s.raftLayer.HandleOwnStateResponse(stateResponse)
}
// check if the list of txn is sent by this super leaf
for _, txnSenderId := range senderIds {
if txnSenderId[:4] == s.ID[:4] {
logger.Debugf("cycle %v is partially sent by server %v", stateResponse.CycleId, s.ID)
s.requestManager.previousCycleComplete <- true
}
}
}
}
func (s *Server) forwardTxnToLeader(txn *pb.TransactionMessage) {
dstAddr := s.GetLeader()
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewTxnMsgToMonitorEvent(txn, 2*time.Second, s))
}
func (s *Server) forwardTxnBodyToLeader(txn *pb.TxnBodyMessage) {
dstAddr := s.GetLeader()
logger.Debugf("forward txn body to leader: txn num %v from server %v", len(txn.TxnMessageBlock.TxnMessageList), txn.SenderId)
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewTxnBodyToMonitorEvent(txn, 2*time.Second, s))
}
func (s *Server) reassembleTxn(bftResult *pb.BftResult) {
bftResult.BodyAttached = s.config.IsHeaderBodySeparate()
if s.config.IsHeaderBodySeparate() {
bftResult.FullTxnList = make([]*pb.TxnMessageBlock, 0)
for _, msg := range bftResult.MessageList {
txnList := make([]*pb.TransactionMessage, 0)
for _, header := range msg.TxnHeaderBlock.TxnHeaderList {
txn := s.txnStore.GetBodyRequest(header.HashContent)
txnList = append(txnList, txn)
}
orderedMessage := &pb.TxnMessageBlock{
TxnMessageList: txnList,
}
bftResult.FullTxnList = append(bftResult.FullTxnList, orderedMessage)
}
}
}
| handleRPCError | identifier_name |
server.go | package server
import (
"Polaris/src/common"
pb "Polaris/src/rpc"
"Polaris/src/util"
"net"
"Polaris/src/byzantineGroup"
"Polaris/src/configuration"
"Polaris/src/raftnode"
"github.com/op/go-logging"
grpcpool "github.com/processout/grpc-go-pool"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"encoding/binary"
"encoding/json"
"io/ioutil"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"fmt"
"os"
"strconv"
"strings"
"sync"
"time"
)
var logger = logging.MustGetLogger("server")
var RaftInputChannelSize int = 10240
var RaftOutputChannelSize int = 10240
const CHANNELLEN = 10240
type Server struct {
// server info
ID string
Address string
RPCPort string
rpcPeers []string
grpcServer *grpc.Server
addr string // Address:RPCPort
RaftID int
RaftPort string
raftPeers []string
join bool
raftNode *raftnode.RaftNode
// client request manager
requestManager *RequestManager
//raft layer
raftLayer *Raft
// dissemination layer
disseminationLayer *Dissemination
// current pbft leader Id
currentBGLeaderSuperLeadId int
BGsInfo []*byzantineGroup.ByzantineGroup
SuperLeafID int
BGID int
getLeaderID func() uint64
config *configuration.Configuration
// cycleId -> consensusState
log []*ConsensusState
logLock sync.RWMutex
skipCycleHash [][]byte
SBFTClientAddressForMembership string
SBFTClientAddressForBFTLayer string
SBFTReplicaAddressForBFTResult string
SBFTResultVerifierAddress string
SBFTClientConnection *byzantineGroup.Connection
resultReceiver *BftResultReceiver
resultVerifier *BftResultVerifier
txnStore *TxnBodyStore
tcpManager *TCPManager
serverTCPAddrMap map[string]string
round2LastFinishedCycle int
round3LastFinishedCycle int
}
func NewServer(configFilePath string, configHome string) *Server {
logger.Debug(configFilePath)
raw, err := ioutil.ReadFile(configFilePath)
common.AssertNil(err, "Cannot read config file", logger)
c := make(map[string]interface{})
err = json.Unmarshal(raw, &c)
common.AssertNil(err, "JSON parsing error", logger)
server := &Server{
ID: c["id"].(string),
Address: c["address"].(string),
RPCPort: c["rpc_port"].(string),
RaftID: int(c["raft_id"].(float64)),
RaftPort: c["raft_port"].(string),
join: c["join"].(bool),
BGID: int(c["bg_id"].(float64)),
SBFTClientAddressForMembership: c["SBFT_client_membership"].(string),
SBFTClientAddressForBFTLayer: c["SBFT_client_bft_layer"].(string),
SBFTReplicaAddressForBFTResult: c["SBFT_replica_bft_result"].(string),
SBFTResultVerifierAddress: c["SBFT_result_verifier"].(string),
disseminationLayer: NewDissemination(),
currentBGLeaderSuperLeadId: 0,
txnStore: NewTxnBodyStore(),
serverTCPAddrMap: make(map[string]string),
skipCycleHash: make([][]byte, 0, 512),
round2LastFinishedCycle: 0,
round3LastFinishedCycle: 0,
log: make([]*ConsensusState, common.MaxCycleNumberInStorage),
}
for i := 0; i < common.MaxCycleNumberInStorage; i++ {
// Fill it with -1
server.log[i] = NewConsensusState(len(server.BGsInfo), -1, server)
}
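// the TCP manager listens on a port derived from the configured RPC port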
i1, err := strconv.Atoi(server.RPCPort)
common.AssertNil(err, "RPC port cannot be recognized.", logger)
tcpPort := common.CalcTCPPortFromRPCPort(i1)
server.tcpManager = NewTCPManager(strconv.Itoa(tcpPort))
server.tcpManager.SetServer(server)
logger.Infof("Server %s start ", server.Address+":"+server.RPCPort)
for _, s := range c["raft_peers"].([]interface{}) {
server.raftPeers = append(server.raftPeers, s.(string))
}
for _, s := range c["rpc_peers"].([]interface{}) {
server.rpcPeers = append(server.rpcPeers, s.(string))
}
server.resultReceiver = NewBftResultReceiver(server.SBFTReplicaAddressForBFTResult)
server.resultVerifier = NewBftResultVerifier(server.SBFTResultVerifierAddress)
server.disseminationLayer.SetServer(server)
server.config = configuration.NewConfiguration(server.ID, configHome)
server.addr = server.Address + ":" + server.RPCPort
// initialize byzantine groups (list of superleafs)
for _, bg := range c["byzantine_groups"].([]interface{}) {
bgInfo := byzantineGroup.NewByzantineGroup(bg)
server.BGsInfo = append(server.BGsInfo, bgInfo)
}
var succ bool
server.SuperLeafID, succ = server.BGsInfo[server.BGID].GetSuperLeafIdByServer(
server.Address + ":" + server.RPCPort)
common.AssertTrue(succ, "Cannot fetch SuperleafID", logger)
logger.Infof("Server %s SuperLeafId %d", server.Address+":"+server.RPCPort, server.SuperLeafID)
//create grpc server
sopts := []grpc.ServerOption{grpc.InitialWindowSize(2000000)}
sopts = append(sopts, grpc.InitialConnWindowSize(2000000))
server.grpcServer = grpc.NewServer(sopts...)
pb.RegisterPolarisAllServer(server.grpcServer, server)
reflection.Register(server.grpcServer)
logger.Info("GRPC server created")
return server
}
func (s *Server) Start() {
logger.Debugf("Server %v start...", s.ID)
// initialize Raft layer
proposeC := make(chan string)
defer close(proposeC)
confChangeC := make(chan raftpb.ConfChange)
defer close(confChangeC)
var raft *Raft
getSnapshot := func() ([]byte, error) { return raft.GetSnapshot() }
commitC, errorC, snapshotterReady, getLeaderID, raftNode, leaderC := raftnode.NewRaftNode(
s.RaftID, s.RaftPort, s.raftPeers, s.join, getSnapshot, proposeC, confChangeC, RaftOutputChannelSize)
s.getLeaderID = getLeaderID
s.raftNode = raftNode
s.raftLayer = NewRaft(s, <-snapshotterReady, proposeC, commitC, errorC)
s.requestManager = NewRequestManager(s,
int(s.config.GetClientRequestCycleTimeout()),
s.config.GetMaxBatchSize(),
s.raftLayer.slRequestChan,
s.raftLayer.txnBodyChan)
// wait for raft leader election
<-leaderC
go s.requestManager.Start()
go s.raftLayer.Start()
go s.disseminationLayer.Start()
if s.IsLeader() {
// create a thread receiving the result from SBFT replica
go s.resultReceiver.Start()
logger.Debugf("leader %v start process sbft result thread", s.ID)
// create a thread handling the result from SBFT replica
go s.processSBFTResult()
}
s.saveLeaderAddress()
s.tcpManager.Start()
tcpAddrList := make([]string, 0)
for _, bg := range s.BGsInfo {
for _, sAddr := range bg.GetServerList() {
items := strings.Split(sAddr, ":")
portNum, _ := strconv.Atoi(items[1])
tPort := common.CalcTCPPortFromRPCPort(portNum)
tAddr := items[0] + ":" + strconv.Itoa(tPort)
s.serverTCPAddrMap[sAddr] = tAddr
tcpAddrList = append(tcpAddrList, tAddr)
}
}
logger.Debugf("tpc server addrs %v", tcpAddrList)
s.tcpManager.DialAll(tcpAddrList)
// Start an RPC listener
RPCListener, err := net.Listen("tcp", ":"+s.RPCPort)
if err != nil {
logger.Errorf("Failed to listen port %s, %v", s.RPCPort, err)
}
err = s.grpcServer.Serve(RPCListener)
if err != nil {
logger.Errorf("Cannot start to serve RPC calls %v", err)
}
}
// get monitor id (the leader of superleaf(Raft group))
func (s *Server) GetLeader() string {
id := s.getLeaderID()
if id == 0 {
return ""
}
leader := s.rpcPeers[id-1]
go s.BGsInfo[s.BGID].UpdateLeader(leader)
logger.Debugf("server %v get leader %v, raft id %v", s.addr, leader, id)
return leader
}
func (s *Server) IsLeader() bool |
func (s *Server) IsBGLeader() bool {
if s.currentBGLeaderSuperLeadId != s.SuperLeafID {
return false
}
return s.IsLeader()
}
func (s *Server) saveLeaderAddress() {
fName := fmt.Sprintf("./leader_%v.log", s.ID)
f, err := os.Create(fName)
if err != nil {
logger.Fatalf("create leader log file error %v", err)
}
defer f.Close()
leader := s.GetLeader()
if leader == "" {
leader = s.addr
}
logger.Debugf("save leader id %v", leader)
f.WriteString(leader)
f.Sync()
}
func (s *Server) GetRaftTerm() uint64 {
if s.raftNode == nil {
logger.Error("Raft Node is nil!")
return raft.InvalidRaftTerm
}
return s.raftNode.GetRaftTerm()
}
func (s *Server) GetSBFTClientConnection() *byzantineGroup.Connection {
if s.SBFTClientConnection == nil {
s.SBFTClientConnection = byzantineGroup.NewConnection(s.SBFTClientAddressForBFTLayer, queueLen, 1)
}
return s.SBFTClientConnection
}
func (s *Server) handleRPCError(err error) (*grpcpool.Pool, bool) {
errCode := grpc.Code(err)
if errCode == common.NotLeaderErrorCode {
leaderId := grpc.ErrorDesc(err)
s.BGsInfo[s.BGID].UpdateLeader(leaderId)
conn := s.BGsInfo[s.BGID].GetConnection(leaderId).GetConnectionPool()
return conn, true
}
logger.Warningf("Unhandled ERROR: %v", err)
return nil, false
}
// list of monitors (leaders of superleafs)
func (s *Server) GetBGMembers() []string {
leaders := make([]string, 0)
for i := 0; i < s.BGsInfo[s.BGID].GetSuperLeafNum(); i++ {
l := s.BGsInfo[s.BGID].GetLeader(i)
if l == s.GetServerAddr() {
continue
}
leaders = append(leaders, l)
}
return leaders
}
func (s *Server) GetServerAddr() string {
return s.addr
}
// return consensusState for cycleId
func (s *Server) GetConsensusStateByCycleId(cycleId int) *ConsensusState {
s.logLock.Lock()
defer s.logLock.Unlock()
common.AssertTrue(cycleId >= 0, "cycleID cannot be negative!", logger)
common.AssertTrue(cycleId > s.round2LastFinishedCycle-common.MaxCycleNumberInStorage, "Cannot access stale cycles!", logger)
// cycle may not complete in order
index := cycleId % common.MaxCycleNumberInStorage
if s.log[index].cycleId != cycleId {
// name-removed: Here we overwrite stale records
s.log[index] = NewConsensusState(len(s.BGsInfo), cycleId, s)
}
return s.log[index]
}
// check if the server has received the state response of bgId for cycleId
// return true: already received
// false: not received
func (s *Server) CheckBGState(cycleId int, bgId int) bool {
logger.Debugf("check bg state response for cycle %v bgId %v", cycleId, bgId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewCheckBGStateOp(bgId, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("Error: should return true")
}
logger.Debugf("state response for cycle %v bgId %v is %v",
cycleId, bgId, op.GetResult())
return op.GetResult()
}
func (s *Server) CheckStateRequest(sp *pb.StateRequestMessage) bool {
return s.checkRequestQC(sp)
}
func (s *Server) CheckStateResponse(sp *pb.StateResponseMessage) bool {
r := s.checkQC(sp)
if r {
// if QC check ok, then replicate to the Raft group
s.disseminationLayer.AddStateResponse(sp)
}
return r
}
// if the state response is ready, then get the state response
// otherwise return nil and put request to pending list
func (s *Server) GetOrWaitBGStateResponse(stateRequest *pb.StateRequestMessage) *pb.StateResponseMessage {
logger.Debugf("Get state response for cycle %v from server %v",
stateRequest.CycleId, stateRequest.SenderId)
cs := s.GetConsensusStateByCycleId(int(stateRequest.CycleId))
op := NewGetOrWaitBGStateResponseOp(stateRequest, cs, s.BGID)
if !cs.AddOPandWait(op) {
logger.Fatal("Error: should return true")
}
logger.Debugf("get or wait state response for cycle %v bgId %v is %v for server %v",
stateRequest.CycleId, s.BGID, op.GetResult(), stateRequest.SenderId)
return op.GetResult()
}
// return this server's own BG info
func (s *Server) GetSelfBGInfo() *byzantineGroup.ByzantineGroup {
return s.BGsInfo[s.BGID]
}
// add the state response into log
func (s *Server) AddBGState(sp *pb.StateResponseMessage) {
logger.Debugf("Add state response for bg %v cycleId %v", sp.BgId, sp.CycleId)
cs := s.GetConsensusStateByCycleId(int(sp.CycleId))
op := NewAddStateResponseOp(sp, cs)
cs.AddOPandWait(op)
for cycleID := s.round3LastFinishedCycle + 1; ; cycleID++ {
if s.CheckCycleComplete(cycleID) {
s.round3LastFinishedCycle = cycleID
// name-removed: discard Raft logs
if cycleID%100 == 0 {
stat := s.raftNode.Node.Status()
index := stat.Applied
s.raftNode.RaftStorage.Compact(index)
}
} else {
break
}
}
logger.Debugf("Successfully add state response for bg %v cycle %v", sp.BgId, sp.CycleId)
}
// check if the cycle is complete: the server has received the state responses of all BGs for cycleId
// return true: received all state response
// false: at least one state response is missing
func (s *Server) CheckCycleComplete(cycleId int) bool {
logger.Debugf("Check cycle complete cycleId %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewCheckCycleCompleteOp(cs)
if !cs.AddOPandWait(op) {
logger.Fatal("Should return true check cycle complete")
}
return op.GetResult()
}
func (s *Server) GetAndWaitOwnBGStateResponse(cycleId int) *pb.StateResponseMessage {
logger.Debugf("get and wait own bg state response for cycle %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewGetAndWaitOwnBGStateResponseOp(s.BGID, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get and wait own bg state response")
}
return op.GetResult()
}
// return the state response of its own BG for cycleId
func (s *Server) GetBGStateResponseForCycle(cycleId int) *pb.StateResponseMessage {
logger.Debugf("get bg state response for cycle %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewGetBGStateResponseOp(s.BGID, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get bg state response")
}
return op.GetResult()
}
// return the cycle result until the cycle completes
func (s *Server) GetCycleResult(cycleId int) []*pb.StateResponseMessage {
logger.Debugf("Get cycle result for cycle %v", cycleId)
if cycleId <= s.round2LastFinishedCycle-common.MaxCycleNumberInStorage {
panic("name-removed: this cycle has been overwritten!")
}
cs := s.GetConsensusStateByCycleId(cycleId)
common.AssertTrue(cs.cycleId == cycleId, "Cycle IDs should match!", logger)
csWait := s.GetConsensusStateByCycleId(cycleId + int(s.config.GetMaxPipelineDepth()))
common.AssertTrue(csWait.cycleId == cycleId+int(s.config.GetMaxPipelineDepth()), "JustWait Cycle IDs should match!", logger)
op := NewWaitCycleComplete(cs)
opJustWait := NewJustWaitCycleComplete(csWait)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get cycle result")
}
// name-removed: wait until (cycleId + depth) completes
if !cs.AddOPandWait(opJustWait) {
logger.Fatal("should return true get cycle result")
}
return op.GetResult()
}
// send start cycle request to BG leader
// TODO: start cycle request should have a proof
// only a request with a valid proof can trigger the start of a cycle
func (s *Server) StartCycle(stateRequest *pb.StateRequestMessage) {
if s.IsLeader() {
// if the node is monitor then process the start cycle request
s.requestManager.AddSkipCycle(SkipCycleApplicationMaterial{
skipTo: uint32(stateRequest.CycleId),
BGApplicant: uint32(stateRequest.BgId),
view: stateRequest.View,
sequence: stateRequest.Sequence,
proof: stateRequest.Proof,
signedOn: stateRequest.Hash,
})
return
}
// otherwise, send the state request to monitor
s.sendStateRequestToMonitor(stateRequest)
}
func (s *Server) sendStateRequestToMonitor(sr *pb.StateRequestMessage) {
logger.Debugf("Send state request from server %v bg %v of cycle %v to monitor since state response for bg %v is not ready", sr.SenderId, sr.BgId, sr.CycleId, s.BGID)
dstAddr := s.GetLeader()
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewStateRequestToMonitorEvent(sr, 2*time.Second, s))
}
func (s *Server) processSBFTResult() {
for {
// blocking until next cycle result is received from SBFT replica
bftResult, senderIds := s.resultReceiver.GetNextCycle()
// name-removed: this is in order
s.round2LastFinishedCycle = int(bftResult.CurrentCycle)
logger.Debugf("Current cycle %d, txn sender Ids %v", bftResult.CurrentCycle, senderIds)
s.reassembleTxn(bftResult)
bftResult.Hash = make([]byte, 8, 40)
binary.LittleEndian.PutUint32(bftResult.Hash[0:4], bftResult.LastCycle)
binary.LittleEndian.PutUint32(bftResult.Hash[4:8], bftResult.CurrentCycle)
bftResult.Hash = append(bftResult.Hash, util.ComputeBftResultHash(bftResult)...)
if s.config.IsHeaderBodySeparate() && bftResult.FullTxnList == nil {
logger.Fatal("Txn bodies are not attached")
}
// Unpack SkipCycle results
// Last cycle is not empty
if bftResult.CurrentCycle-bftResult.LastCycle > 1 {
logger.Warningf("Unpacking cycle %d.", bftResult.CurrentCycle)
}
stateResponse := &pb.StateResponseMessage{
CycleId: int32(bftResult.CurrentCycle),
SenderId: s.ID,
BgId: int32(s.BGID),
Result: bftResult,
}
stateResponse.Result.ReplaceMsgListWithHash = false
s.raftLayer.HandleOwnStateResponse(stateResponse)
emptyResult := pb.BftResult{
View: bftResult.View,
Sequence: bftResult.Sequence,
Nonce: bftResult.Nonce,
LastCycle: bftResult.LastCycle,
CurrentCycle: bftResult.CurrentCycle,
MessageList: nil,
Hash: bftResult.Hash,
CommitProof: bftResult.CommitProof,
BodyAttached: false,
FullTxnList: nil,
ReplaceMsgListWithHash: true,
}
// Empty cycles
if bftResult.CurrentCycle-bftResult.LastCycle > 1 {
logger.Warningf("This BFT result has cycle %d to %d", bftResult.LastCycle+1, bftResult.CurrentCycle)
}
for cycleID := bftResult.LastCycle + 1; cycleID < bftResult.CurrentCycle; cycleID++ {
logger.Warningf("Unpacking cycle %d.", cycleID)
stateResponse := &pb.StateResponseMessage{
CycleId: int32(cycleID),
SenderId: s.ID,
BgId: int32(s.BGID),
Result: &emptyResult,
}
s.raftLayer.HandleOwnStateResponse(stateResponse)
}
// check if the list of txn is sent by this super leaf
for _, txnSenderId := range senderIds {
if txnSenderId[:4] == s.ID[:4] {
logger.Debugf("cycle %v is partially sent by server %v", stateResponse.CycleId, s.ID)
s.requestManager.previousCycleComplete <- true
}
}
}
}
func (s *Server) forwardTxnToLeader(txn *pb.TransactionMessage) {
dstAddr := s.GetLeader()
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewTxnMsgToMonitorEvent(txn, 2*time.Second, s))
}
func (s *Server) forwardTxnBodyToLeader(txn *pb.TxnBodyMessage) {
dstAddr := s.GetLeader()
logger.Debugf("forward txn body to leader: txn num %v from server %v", len(txn.TxnMessageBlock.TxnMessageList), txn.SenderId)
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewTxnBodyToMonitorEvent(txn, 2*time.Second, s))
}
func (s *Server) reassembleTxn(bftResult *pb.BftResult) {
bftResult.BodyAttached = s.config.IsHeaderBodySeparate()
if s.config.IsHeaderBodySeparate() {
bftResult.FullTxnList = make([]*pb.TxnMessageBlock, 0)
for _, msg := range bftResult.MessageList {
txnList := make([]*pb.TransactionMessage, 0)
for _, header := range msg.TxnHeaderBlock.TxnHeaderList {
txn := s.txnStore.GetBodyRequest(header.HashContent)
txnList = append(txnList, txn)
}
orderedMessage := &pb.TxnMessageBlock{
TxnMessageList: txnList,
}
bftResult.FullTxnList = append(bftResult.FullTxnList, orderedMessage)
}
}
}
| {
// when the raft leader calls getLeaderID, it returns 0
// this only happens the first time
return int(s.getLeaderID()) == s.RaftID || int(s.getLeaderID()) == 0
} | identifier_body |
server.go | package server
import (
"Polaris/src/common"
pb "Polaris/src/rpc"
"Polaris/src/util"
"net"
"Polaris/src/byzantineGroup"
"Polaris/src/configuration"
"Polaris/src/raftnode"
"github.com/op/go-logging"
grpcpool "github.com/processout/grpc-go-pool"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
"encoding/binary"
"encoding/json"
"io/ioutil"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"fmt"
"os"
"strconv"
"strings"
"sync"
"time"
)
var logger = logging.MustGetLogger("server")
var RaftInputChannelSize int = 10240
var RaftOutputChannelSize int = 10240
const CHANNELLEN = 10240
type Server struct {
// server info
ID string
Address string
RPCPort string
rpcPeers []string
grpcServer *grpc.Server
addr string // Address:RPCPort
RaftID int
RaftPort string
raftPeers []string
join bool
raftNode *raftnode.RaftNode
// client request manager
requestManager *RequestManager
//raft layer
raftLayer *Raft
// dissemination layer
disseminationLayer *Dissemination
// current pbft leader Id
currentBGLeaderSuperLeadId int
BGsInfo []*byzantineGroup.ByzantineGroup
SuperLeafID int
BGID int
getLeaderID func() uint64
config *configuration.Configuration
// cycleId -> consensusState
log []*ConsensusState
logLock sync.RWMutex
skipCycleHash [][]byte
SBFTClientAddressForMembership string
SBFTClientAddressForBFTLayer string
SBFTReplicaAddressForBFTResult string
SBFTResultVerifierAddress string
SBFTClientConnection *byzantineGroup.Connection
resultReceiver *BftResultReceiver
resultVerifier *BftResultVerifier
txnStore *TxnBodyStore
tcpManager *TCPManager
serverTCPAddrMap map[string]string
round2LastFinishedCycle int
round3LastFinishedCycle int
}
func NewServer(configFilePath string, configHome string) *Server {
logger.Debug(configFilePath)
raw, err := ioutil.ReadFile(configFilePath)
common.AssertNil(err, "Cannot read config file", logger)
c := make(map[string]interface{})
err = json.Unmarshal(raw, &c)
common.AssertNil(err, "JSON parsing error", logger)
server := &Server{
ID: c["id"].(string),
Address: c["address"].(string),
RPCPort: c["rpc_port"].(string),
RaftID: int(c["raft_id"].(float64)),
RaftPort: c["raft_port"].(string),
join: c["join"].(bool),
BGID: int(c["bg_id"].(float64)),
SBFTClientAddressForMembership: c["SBFT_client_membership"].(string),
SBFTClientAddressForBFTLayer: c["SBFT_client_bft_layer"].(string),
SBFTReplicaAddressForBFTResult: c["SBFT_replica_bft_result"].(string),
SBFTResultVerifierAddress: c["SBFT_result_verifier"].(string),
disseminationLayer: NewDissemination(),
currentBGLeaderSuperLeadId: 0,
txnStore: NewTxnBodyStore(),
serverTCPAddrMap: make(map[string]string),
skipCycleHash: make([][]byte, 0, 512),
round2LastFinishedCycle: 0,
round3LastFinishedCycle: 0,
log: make([]*ConsensusState, common.MaxCycleNumberInStorage),
}
for i := 0; i < common.MaxCycleNumberInStorage; i++ {
// Fill it with -1
server.log[i] = NewConsensusState(len(server.BGsInfo), -1, server)
}
i1, err := strconv.Atoi(server.RPCPort)
common.AssertNil(err, "RPC port cannot be recognized.", logger)
tcpPort := common.CalcTCPPortFromRPCPort(i1)
server.tcpManager = NewTCPManager(strconv.Itoa(tcpPort))
server.tcpManager.SetServer(server)
logger.Infof("Server %s start ", server.Address+":"+server.RPCPort)
for _, s := range c["raft_peers"].([]interface{}) {
server.raftPeers = append(server.raftPeers, s.(string))
}
for _, s := range c["rpc_peers"].([]interface{}) {
server.rpcPeers = append(server.rpcPeers, s.(string))
}
server.resultReceiver = NewBftResultReceiver(server.SBFTReplicaAddressForBFTResult)
server.resultVerifier = NewBftResultVerifier(server.SBFTResultVerifierAddress)
server.disseminationLayer.SetServer(server)
server.config = configuration.NewConfiguration(server.ID, configHome)
server.addr = server.Address + ":" + server.RPCPort
// initialize byzantine groups (list of superleafs)
for _, bg := range c["byzantine_groups"].([]interface{}) {
bgInfo := byzantineGroup.NewByzantineGroup(bg)
server.BGsInfo = append(server.BGsInfo, bgInfo)
}
var succ bool
server.SuperLeafID, succ = server.BGsInfo[server.BGID].GetSuperLeafIdByServer(
server.Address + ":" + server.RPCPort)
common.AssertTrue(succ, "Cannot fetch SuperleafID", logger)
logger.Infof("Server %s SuperLeafId %d", server.Address+":"+server.RPCPort, server.SuperLeafID)
//create grpc server
sopts := []grpc.ServerOption{grpc.InitialWindowSize(2000000)}
sopts = append(sopts, grpc.InitialConnWindowSize(2000000))
server.grpcServer = grpc.NewServer(sopts...)
pb.RegisterPolarisAllServer(server.grpcServer, server)
reflection.Register(server.grpcServer)
logger.Info("GRPC server created")
return server
}
func (s *Server) Start() {
logger.Debugf("Server %v start...", s.ID)
// initialize Raft layer
proposeC := make(chan string)
defer close(proposeC)
confChangeC := make(chan raftpb.ConfChange)
defer close(confChangeC)
var raft *Raft
getSnapshot := func() ([]byte, error) { return raft.GetSnapshot() }
commitC, errorC, snapshotterReady, getLeaderID, raftNode, leaderC := raftnode.NewRaftNode(
s.RaftID, s.RaftPort, s.raftPeers, s.join, getSnapshot, proposeC, confChangeC, RaftOutputChannelSize)
s.getLeaderID = getLeaderID
s.raftNode = raftNode
s.raftLayer = NewRaft(s, <-snapshotterReady, proposeC, commitC, errorC)
s.requestManager = NewRequestManager(s,
int(s.config.GetClientRequestCycleTimeout()),
s.config.GetMaxBatchSize(),
s.raftLayer.slRequestChan,
s.raftLayer.txnBodyChan)
// wait for raft leader election
<-leaderC
go s.requestManager.Start()
go s.raftLayer.Start()
go s.disseminationLayer.Start()
if s.IsLeader() {
// create a thread receiving the result from SBFT replica
go s.resultReceiver.Start()
logger.Debugf("leader %v start process sbft result thread", s.ID)
// create a thread handling the result from SBFT replica
go s.processSBFTResult()
}
s.saveLeaderAddress()
s.tcpManager.Start()
tcpAddrList := make([]string, 0)
for _, bg := range s.BGsInfo {
for _, sAddr := range bg.GetServerList() {
items := strings.Split(sAddr, ":")
portNum, _ := strconv.Atoi(items[1])
tPort := common.CalcTCPPortFromRPCPort(portNum)
tAddr := items[0] + ":" + strconv.Itoa(tPort)
s.serverTCPAddrMap[sAddr] = tAddr
tcpAddrList = append(tcpAddrList, tAddr)
}
}
logger.Debugf("tpc server addrs %v", tcpAddrList)
s.tcpManager.DialAll(tcpAddrList)
// Start an RPC listener
RPCListener, err := net.Listen("tcp", ":"+s.RPCPort)
if err != nil {
logger.Errorf("Failed to listen port %s, %v", s.RPCPort, err)
}
err = s.grpcServer.Serve(RPCListener)
if err != nil {
logger.Errorf("Cannot start to serve RPC calls %v", err)
}
}
// get monitor id (the leader of superleaf(Raft group))
func (s *Server) GetLeader() string {
id := s.getLeaderID()
if id == 0 |
leader := s.rpcPeers[id-1]
go s.BGsInfo[s.BGID].UpdateLeader(leader)
logger.Debugf("server %v get leader %v, raft id %v", s.addr, leader, id)
return leader
}
func (s *Server) IsLeader() bool {
// when the raft leader calls getLeaderID, it returns 0
// this only happens the first time
return int(s.getLeaderID()) == s.RaftID || int(s.getLeaderID()) == 0
}
func (s *Server) IsBGLeader() bool {
if s.currentBGLeaderSuperLeadId != s.SuperLeafID {
return false
}
return s.IsLeader()
}
func (s *Server) saveLeaderAddress() {
fName := fmt.Sprintf("./leader_%v.log", s.ID)
f, err := os.Create(fName)
if err != nil {
logger.Fatalf("create leader log file error %v", err)
}
defer f.Close()
leader := s.GetLeader()
if leader == "" {
leader = s.addr
}
logger.Debugf("save leader id %v", leader)
f.WriteString(leader)
f.Sync()
}
func (s *Server) GetRaftTerm() uint64 {
if s.raftNode == nil {
logger.Error("Raft Node is nil!")
return raft.InvalidRaftTerm
}
return s.raftNode.GetRaftTerm()
}
func (s *Server) GetSBFTClientConnection() *byzantineGroup.Connection {
if s.SBFTClientConnection == nil {
s.SBFTClientConnection = byzantineGroup.NewConnection(s.SBFTClientAddressForBFTLayer, queueLen, 1)
}
return s.SBFTClientConnection
}
func (s *Server) handleRPCError(err error) (*grpcpool.Pool, bool) {
errCode := grpc.Code(err)
if errCode == common.NotLeaderErrorCode {
leaderId := grpc.ErrorDesc(err)
s.BGsInfo[s.BGID].UpdateLeader(leaderId)
conn := s.BGsInfo[s.BGID].GetConnection(leaderId).GetConnectionPool()
return conn, true
}
logger.Warningf("Unhandled ERROR: %v", err)
return nil, false
}
// list of monitors (leaders of superleafs)
func (s *Server) GetBGMembers() []string {
leaders := make([]string, 0)
for i := 0; i < s.BGsInfo[s.BGID].GetSuperLeafNum(); i++ {
l := s.BGsInfo[s.BGID].GetLeader(i)
if l == s.GetServerAddr() {
continue
}
leaders = append(leaders, l)
}
return leaders
}
func (s *Server) GetServerAddr() string {
return s.addr
}
// return consensusState for cycleId
func (s *Server) GetConsensusStateByCycleId(cycleId int) *ConsensusState {
s.logLock.Lock()
defer s.logLock.Unlock()
common.AssertTrue(cycleId >= 0, "cycleID cannot be negative!", logger)
common.AssertTrue(cycleId > s.round2LastFinishedCycle-common.MaxCycleNumberInStorage, "Cannot access stale cycles!", logger)
// cycle may not complete in order
index := cycleId % common.MaxCycleNumberInStorage
if s.log[index].cycleId != cycleId {
// name-removed: Here we overwrite stale records
s.log[index] = NewConsensusState(len(s.BGsInfo), cycleId, s)
}
return s.log[index]
}
// check if the server has received the state response of bgId for cycleId
// return true: already received
// false: not received
func (s *Server) CheckBGState(cycleId int, bgId int) bool {
logger.Debugf("check bg state response for cycle %v bgId %v", cycleId, bgId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewCheckBGStateOp(bgId, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("Error: should return true")
}
logger.Debugf("state response for cycle %v bgId %v is %v",
cycleId, bgId, op.GetResult())
return op.GetResult()
}
func (s *Server) CheckStateRequest(sp *pb.StateRequestMessage) bool {
return s.checkRequestQC(sp)
}
func (s *Server) CheckStateResponse(sp *pb.StateResponseMessage) bool {
r := s.checkQC(sp)
if r {
// if QC check ok, then replicate to the Raft group
s.disseminationLayer.AddStateResponse(sp)
}
return r
}
// if the state response is ready, then get the state response
// otherwise return nil and put request to pending list
func (s *Server) GetOrWaitBGStateResponse(stateRequest *pb.StateRequestMessage) *pb.StateResponseMessage {
logger.Debugf("Get state response for cycle %v from server %v",
stateRequest.CycleId, stateRequest.SenderId)
cs := s.GetConsensusStateByCycleId(int(stateRequest.CycleId))
op := NewGetOrWaitBGStateResponseOp(stateRequest, cs, s.BGID)
if !cs.AddOPandWait(op) {
logger.Fatal("Error: should return true")
}
logger.Debugf("get or wait state response for cycle %v bgId %v is %v for server %v",
stateRequest.CycleId, s.BGID, op.GetResult(), stateRequest.SenderId)
return op.GetResult()
}
// return this server's own BG info
func (s *Server) GetSelfBGInfo() *byzantineGroup.ByzantineGroup {
return s.BGsInfo[s.BGID]
}
// add the state response into log
func (s *Server) AddBGState(sp *pb.StateResponseMessage) {
logger.Debugf("Add state response for bg %v cycleId %v", sp.BgId, sp.CycleId)
cs := s.GetConsensusStateByCycleId(int(sp.CycleId))
op := NewAddStateResponseOp(sp, cs)
cs.AddOPandWait(op)
for cycleID := s.round3LastFinishedCycle + 1; ; cycleID++ {
if s.CheckCycleComplete(cycleID) {
s.round3LastFinishedCycle = cycleID
// name-removed: discard Raft logs
if cycleID%100 == 0 {
stat := s.raftNode.Node.Status()
index := stat.Applied
s.raftNode.RaftStorage.Compact(index)
}
} else {
break
}
}
logger.Debugf("Successfully add state response for bg %v cycle %v", sp.BgId, sp.CycleId)
}
// check if the cycle is complete: the server has received the state responses of all BGs for cycleId
// return true: received all state response
// false: at least one state response is missing
func (s *Server) CheckCycleComplete(cycleId int) bool {
logger.Debugf("Check cycle complete cycleId %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewCheckCycleCompleteOp(cs)
if !cs.AddOPandWait(op) {
logger.Fatal("Should return true check cycle complete")
}
return op.GetResult()
}
func (s *Server) GetAndWaitOwnBGStateResponse(cycleId int) *pb.StateResponseMessage {
logger.Debugf("get and wait own bg state response for cycle %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewGetAndWaitOwnBGStateResponseOp(s.BGID, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get and wait own bg state response")
}
return op.GetResult()
}
// return the state response of its own BG for cycleId
func (s *Server) GetBGStateResponseForCycle(cycleId int) *pb.StateResponseMessage {
logger.Debugf("get bg state response for cycle %v", cycleId)
cs := s.GetConsensusStateByCycleId(cycleId)
op := NewGetBGStateResponseOp(s.BGID, cs)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get bg state response")
}
return op.GetResult()
}
// return the cycle result until the cycle completes
func (s *Server) GetCycleResult(cycleId int) []*pb.StateResponseMessage {
logger.Debugf("Get cycle result for cycle %v", cycleId)
if cycleId <= s.round2LastFinishedCycle-common.MaxCycleNumberInStorage {
panic("name-removed: this cycle has been overwritten!")
}
cs := s.GetConsensusStateByCycleId(cycleId)
common.AssertTrue(cs.cycleId == cycleId, "Cycle IDs should match!", logger)
csWait := s.GetConsensusStateByCycleId(cycleId + int(s.config.GetMaxPipelineDepth()))
common.AssertTrue(csWait.cycleId == cycleId+int(s.config.GetMaxPipelineDepth()), "JustWait Cycle IDs should match!", logger)
op := NewWaitCycleComplete(cs)
opJustWait := NewJustWaitCycleComplete(csWait)
if !cs.AddOPandWait(op) {
logger.Fatal("should return true get cycle result")
}
// name-removed: wait until (cycleId + depth) completes
if !cs.AddOPandWait(opJustWait) {
logger.Fatal("should return true get cycle result")
}
return op.GetResult()
}
// send start cycle request to BG leader
// TODO: start cycle request should have a proof
// only a request with a valid proof can trigger the start of a cycle
func (s *Server) StartCycle(stateRequest *pb.StateRequestMessage) {
if s.IsLeader() {
// if the node is monitor then process the start cycle request
s.requestManager.AddSkipCycle(SkipCycleApplicationMaterial{
skipTo: uint32(stateRequest.CycleId),
BGApplicant: uint32(stateRequest.BgId),
view: stateRequest.View,
sequence: stateRequest.Sequence,
proof: stateRequest.Proof,
signedOn: stateRequest.Hash,
})
return
}
// otherwise, send the state request to monitor
s.sendStateRequestToMonitor(stateRequest)
}
func (s *Server) sendStateRequestToMonitor(sr *pb.StateRequestMessage) {
logger.Debugf("Send state request from server %v bg %v of cycle %v to monitor since state response for bg %v is not ready", sr.SenderId, sr.BgId, sr.CycleId, s.BGID)
dstAddr := s.GetLeader()
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewStateRequestToMonitorEvent(sr, 2*time.Second, s))
}
func (s *Server) processSBFTResult() {
for {
// blocking until next cycle result is received from SBFT replica
bftResult, senderIds := s.resultReceiver.GetNextCycle()
// name-removed: this is in order
s.round2LastFinishedCycle = int(bftResult.CurrentCycle)
logger.Debugf("Current cycle %d, txn sender Ids %v", bftResult.CurrentCycle, senderIds)
s.reassembleTxn(bftResult)
bftResult.Hash = make([]byte, 8, 40)
binary.LittleEndian.PutUint32(bftResult.Hash[0:4], bftResult.LastCycle)
binary.LittleEndian.PutUint32(bftResult.Hash[4:8], bftResult.CurrentCycle)
bftResult.Hash = append(bftResult.Hash, util.ComputeBftResultHash(bftResult)...)
if s.config.IsHeaderBodySeparate() && bftResult.FullTxnList == nil {
logger.Fatal("Txn bodies are not attached")
}
// Unpack SkipCycle results
// Last cycle is not empty
if bftResult.CurrentCycle-bftResult.LastCycle > 1 {
logger.Warningf("Unpacking cycle %d.", bftResult.CurrentCycle)
}
stateResponse := &pb.StateResponseMessage{
CycleId: int32(bftResult.CurrentCycle),
SenderId: s.ID,
BgId: int32(s.BGID),
Result: bftResult,
}
stateResponse.Result.ReplaceMsgListWithHash = false
s.raftLayer.HandleOwnStateResponse(stateResponse)
emptyResult := pb.BftResult{
View: bftResult.View,
Sequence: bftResult.Sequence,
Nonce: bftResult.Nonce,
LastCycle: bftResult.LastCycle,
CurrentCycle: bftResult.CurrentCycle,
MessageList: nil,
Hash: bftResult.Hash,
CommitProof: bftResult.CommitProof,
BodyAttached: false,
FullTxnList: nil,
ReplaceMsgListWithHash: true,
}
// Empty cycles
if bftResult.CurrentCycle-bftResult.LastCycle > 1 {
logger.Warningf("This BFT result has cycle %d to %d", bftResult.LastCycle+1, bftResult.CurrentCycle)
}
for cycleID := bftResult.LastCycle + 1; cycleID < bftResult.CurrentCycle; cycleID++ {
logger.Warningf("Unpacking cycle %d.", cycleID)
stateResponse := &pb.StateResponseMessage{
CycleId: int32(cycleID),
SenderId: s.ID,
BgId: int32(s.BGID),
Result: &emptyResult,
}
s.raftLayer.HandleOwnStateResponse(stateResponse)
}
// check if the list of txn is sent by this super leaf
for _, txnSenderId := range senderIds {
if txnSenderId[:4] == s.ID[:4] {
logger.Debugf("cycle %v is partially sent by server %v", stateResponse.CycleId, s.ID)
s.requestManager.previousCycleComplete <- true
}
}
}
}
func (s *Server) forwardTxnToLeader(txn *pb.TransactionMessage) {
dstAddr := s.GetLeader()
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewTxnMsgToMonitorEvent(txn, 2*time.Second, s))
}
func (s *Server) forwardTxnBodyToLeader(txn *pb.TxnBodyMessage) {
dstAddr := s.GetLeader()
logger.Debugf("forward txn body to leader: txn num %v from server %v", len(txn.TxnMessageBlock.TxnMessageList), txn.SenderId)
conn := s.GetSelfBGInfo().GetConnection(dstAddr)
conn.AddEvent(NewTxnBodyToMonitorEvent(txn, 2*time.Second, s))
}
func (s *Server) reassembleTxn(bftResult *pb.BftResult) {
bftResult.BodyAttached = s.config.IsHeaderBodySeparate()
if s.config.IsHeaderBodySeparate() {
bftResult.FullTxnList = make([]*pb.TxnMessageBlock, 0)
for _, msg := range bftResult.MessageList {
txnList := make([]*pb.TransactionMessage, 0)
for _, header := range msg.TxnHeaderBlock.TxnHeaderList {
txn := s.txnStore.GetBodyRequest(header.HashContent)
txnList = append(txnList, txn)
}
orderedMessage := &pb.TxnMessageBlock{
TxnMessageList: txnList,
}
bftResult.FullTxnList = append(bftResult.FullTxnList, orderedMessage)
}
}
}
| {
return ""
} | conditional_block |
doom.py | from vizdoom import *
import tensorflow as tf
import numpy as np
import random
import time
from skimage import transform
import dqn2
import mem
from collections import deque
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore') # ignore the skimage warnings
import argparse
# Model hyperparams
state_size = None
action_size = None
learning_rate = 0.0002
total_episodes = 500
max_steps = 100
batch_size = 64
explore_start = 1.0
explore_stop = .01
decay_rate = 0.0001
gamma = 0.95
pretrain_length = batch_size
memory_size = 1000000
render = True
DQNetwork = None
stack_size = 4
saver = tf.train.Saver()
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=False, type=bool, default=False,
help="Use the latest saved model")
args = vars(ap.parse_args())
def train(memory):
global stacked_frames
global saver
sess = tf.Session()
sess.run(tf.global_variables_initializer())
decay_step = 0
game.init()
for episode in range(total_episodes):
print("Training on episode: {}".format(episode))
step = 0
episode_rewards = []
game.new_episode()
state = game.get_state().screen_buffer
state, stacked_frames = stack_frames(stacked_frames, state, True)
while step < max_steps:
step += 1
decay_step += 1
action, explore_probability = predict_action(decay_step, possible_actions, state, sess)
reward = game.make_action(action)
done = game.is_episode_finished()
episode_rewards.append(reward)
if done:
# the episode ends so no next state
next_state = np.zeros((84, 84), dtype=np.int)
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
# Set step = max_steps to end the episode
step = max_steps
# Get the total reward of the episode
total_reward = np.sum(episode_rewards)
print('Episode: {}'.format(episode),
'Total reward: {}'.format(total_reward),
'Training loss: {:.4f}'.format(loss),
'Explore P: {:.4f}'.format(explore_probability))
memory.add((state, action, reward, next_state, done))
else:
# Get the next state
next_state = game.get_state().screen_buffer
# Stack the frame of the next_state
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
# Add experience to memory
memory.add((state, action, reward, next_state, done))
# st+1 is now our current state
state = next_state
### LEARNING PART
# Obtain random mini-batch from memory
batch = memory.sample(batch_size)
states_mb = np.array([each[0] for each in batch])
actions_mb = np.array([each[1] for each in batch])
rewards_mb = np.array([each[2] for each in batch])
next_states_mb = np.array([each[3] for each in batch])
dones_mb = np.array([each[4] for each in batch])
target_Qs_batch = []
# Get Q values for next_state
Qs_next_state = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: next_states_mb})
# Set Q_target = r if the episode ends at s+1, otherwise set Q_target = r + gamma*maxQ(s', a')
for i in range(0, len(batch)):
terminal = dones_mb[i]
# If we are in a terminal state, only equals reward
if terminal:
target_Qs_batch.append(rewards_mb[i])
else:
target = rewards_mb[i] + gamma * np.max(Qs_next_state[i])
target_Qs_batch.append(target)
targets_mb = np.array([each for each in target_Qs_batch])
loss, _ = sess.run([DQNetwork.loss, DQNetwork.optimizer],
feed_dict={DQNetwork.inputs_: states_mb,
DQNetwork.target_Q: targets_mb,
DQNetwork.actions_: actions_mb})
# Write TF Summaries
summary = sess.run(write_op, feed_dict={DQNetwork.inputs_: states_mb,
DQNetwork.target_Q: targets_mb,
DQNetwork.actions_: actions_mb})
writer.add_summary(summary, episode)
writer.flush()
# Save model every 5 episodes
if episode % 5 == 0:
save_path = saver.save(sess, "./models/model.ckpt")
print("Model Saved")
def infer():
global env
global saver
sess = tf.Session()
total_test_rewards = []
game, possible_actions = create_environment()
totalScore = 0
saver.restore(sess, "./models/model.ckpt")
game.init()
for episode in range(1):
total_rewards = 0
state = env.reset()
state, stacked_frames = stack_frames(stacked_frames, state, True)
print("**************************************")
print("EPISODSE ", episode)
while True:
# reshape the state
state = state.reshape((1, *state_size))
Qs = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: state})
# Take the biggest Q value
choice = np.argmax(Qs)
action = possible_actions[choice]
next_state, reward, done, _ = env.step(action)
env.render()
total_rewards += reward
if done:
print("Score", total_rewards)
total_test_rewards.append(total_rewards)
break
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
state = next_state
env.close()
def predict_action(decay_step, possible_actions, state, sess):
global explore_start
global explore_stop
global decay_rate
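# epsilon-greedy: exploration probability decays exponentially from explore_start toward explore_stop as decay_step grows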
exp_exp_tradeoff = np.random.rand()
explore_probability = explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * decay_step)
if explore_probability > exp_exp_tradeoff:
action = random.choice(possible_actions)
else:
Qs = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: state.reshape((1, *state.shape))})
choice = np.argmax(Qs)
action = possible_actions[int(choice)]
return action, explore_probability
def createnetwork():
global DQNetwork
global state_size
global action_size
global learning_rate
tf.reset_default_graph()
DQNetwork = dqn2.DQNetwork(state_size, action_size, learning_rate)
def preprocess_frame(frame):
cropped_frame = frame[30:-10, 30: -30]
normalized_frame = cropped_frame/255.0
preprocessed_frame = transform.resize(normalized_frame, [84, 84])
return preprocessed_frame
def stack_frames(frames2stack, state, is_new_episode):
frame = preprocess_frame(state)
if is_new_episode:
# Clear the stacked frames
frames2stack = deque([np.zeros((84, 84), dtype=np.int) for i in range(stack_size)], maxlen=4)
# this is a new episode, so copy the first and only frame 4 times
frames2stack.append(frame)
frames2stack.append(frame)
frames2stack.append(frame)
frames2stack.append(frame)
# stack the frames
stacked_state = np.stack(frames2stack, axis=2)
else:
# Append frame to deque
frames2stack.append(frame)
stacked_state = np.stack(frames2stack, axis=2)
return stacked_state, frames2stack
def create_environment():
global game
global state_size
global action_size
game = DoomGame()
game.load_config("basic.cfg")
game.set_doom_scenario_path("basic.wad")
game.init()
state_size = [84, 84, 4] # 4 Frames at 84x84 resolution
# Possible actions
left = [1, 0, 0]
right = [0, 1, 0]
shoot = [0, 0, 1]
possible_actions = [left, right, shoot]
action_size = 3
return game, possible_actions
def test_environment():
|
if not args['model']:
stacked_frames = deque([np.zeros((84, 84), dtype=np.int) for i in range(stack_size)], maxlen=4)
game, possible_actions = create_environment()
print("Game Environment created")
createnetwork()
print("network created")
memory = mem.Memory(max_size=memory_size)
game.new_episode()
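# pre-populate replay memory with transitions collected by taking random actions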
for i in range(pretrain_length):
if i == 0:
state = game.get_state().screen_buffer
state, stacked_frames = stack_frames(stacked_frames, state, True)
# Take a random action
action = random.choice(possible_actions)
# rewards
reward = game.make_action(action)
done = game.is_episode_finished()
if done:
next_state = np.zeros(state.shape)
memory.add((state, action, reward, next_state, done))
# Start a new episode
game.new_episode()
# Get fresh state
state = game.get_state().screen_buffer
# Stack frames
state, stacked_frames = stack_frames(stacked_frames, state, True)
else:
next_state = game.get_state().screen_buffer
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
memory.add((state, action, reward, next_state, done))
state = next_state
print("memory created")
# Setup tensorboard
writer = tf.summary.FileWriter("/tensorboard/dqn/1")
tf.summary.scalar("Loss", DQNetwork.loss)
write_op = tf.summary.merge_all()
print("tensorboard setup")
print("here we go...")
train(memory)
command = input("Training complete, press a key to test our model\n")
else:
create_environment()
infer()
| global game
episodes = 10
for i in range(episodes):
game.new_episode()
while not game.is_episode_finished():
state = game.get_state()
img = state.screen_buffer
misc = state.game_variables
action = random.choice(possible_actions)
print("Taking action: {}".format(action))
reward = game.make_action(action)
print("Reward for above action: {}".format(reward))
time.sleep(.2)
print("Episode final reward score: {}".format(game.get_total_reward()))
time.sleep(2)
game.close() | identifier_body |
doom.py | from vizdoom import *
import tensorflow as tf
import numpy as np
import random
import time
from skimage import transform
import dqn2
import mem
from collections import deque
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore') # ignore the skimage warnings
import argparse
# Model hyperparams
state_size = None
action_size = None
learning_rate = 0.0002
total_episodes = 500
max_steps = 100
batch_size = 64
explore_start = 1.0
explore_stop = .01
decay_rate = 0.0001
gamma = 0.95
pretrain_length = batch_size
memory_size = 1000000
render = True
DQNetwork = None
stack_size = 4
saver = tf.train.Saver()
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=False, type=bool, default=False,
help="Use the latest saved model")
args = vars(ap.parse_args())
def train(memory):
global stacked_frames
global saver
sess = tf.Session()
sess.run(tf.global_variables_initializer())
decay_step = 0
game.init()
for episode in range(total_episodes):
print("Training on episode: {}".format(episode))
step = 0
episode_rewards = []
game.new_episode()
state = game.get_state().screen_buffer
state, stacked_frames = stack_frames(stacked_frames, state, True)
while step < max_steps:
step += 1
decay_step += 1
action, explore_probability = predict_action(decay_step, possible_actions, state, sess)
reward = game.make_action(action)
done = game.is_episode_finished()
episode_rewards.append(reward)
if done:
# the episode ends so no next state
next_state = np.zeros((84, 84), dtype=np.int)
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
# Set step = max_steps to end the episode
step = max_steps
# Get the total reward of the episode
total_reward = np.sum(episode_rewards)
print('Episode: {}'.format(episode),
'Total reward: {}'.format(total_reward),
'Training loss: {:.4f}'.format(loss),
'Explore P: {:.4f}'.format(explore_probability))
memory.add((state, action, reward, next_state, done))
else:
# Get the next state
next_state = game.get_state().screen_buffer
# Stack the frame of the next_state
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
# Add experience to memory
memory.add((state, action, reward, next_state, done))
# st+1 is now our current state
state = next_state
### LEARNING PART
# Obtain random mini-batch from memory
batch = memory.sample(batch_size)
states_mb = np.array([each[0] for each in batch])
actions_mb = np.array([each[1] for each in batch])
rewards_mb = np.array([each[2] for each in batch])
next_states_mb = np.array([each[3] for each in batch])
dones_mb = np.array([each[4] for each in batch])
target_Qs_batch = []
# Get Q values for next_state
Qs_next_state = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: next_states_mb})
# Set Q_target = r if the episode ends at s+1, otherwise set Q_target = r + gamma*maxQ(s', a')
for i in range(0, len(batch)):
terminal = dones_mb[i]
# If we are in a terminal state, only equals reward
if terminal:
target_Qs_batch.append(rewards_mb[i])
else:
target = rewards_mb[i] + gamma * np.max(Qs_next_state[i])
target_Qs_batch.append(target)
targets_mb = np.array([each for each in target_Qs_batch])
loss, _ = sess.run([DQNetwork.loss, DQNetwork.optimizer],
feed_dict={DQNetwork.inputs_: states_mb,
DQNetwork.target_Q: targets_mb,
DQNetwork.actions_: actions_mb})
# Write TF Summaries
summary = sess.run(write_op, feed_dict={DQNetwork.inputs_: states_mb,
DQNetwork.target_Q: targets_mb,
DQNetwork.actions_: actions_mb})
writer.add_summary(summary, episode)
writer.flush()
# Save model every 5 episodes
if episode % 5 == 0:
save_path = saver.save(sess, "./models/model.ckpt")
print("Model Saved")
def infer():
global env
global saver
sess = tf.Session()
total_test_rewards = []
game, possible_actions = create_environment()
totalScore = 0
saver.restore(sess, "./models/model.ckpt")
game.init()
for episode in range(1):
total_rewards = 0
state = env.reset()
state, stacked_frames = stack_frames(stacked_frames, state, True)
print("**************************************")
print("EPISODSE ", episode)
while True:
# reshape the state
state = state.reshape((1, *state_size))
Qs = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: state})
# Take the biggest Q value
choice = np.argmax(Qs)
action = possible_actions[choice]
next_state, reward, done, _ = env.step(action)
env.render()
total_rewards += reward
if done:
|
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
state = next_state
env.close()
def predict_action(decay_step, possible_actions, state, sess):
global explore_start
global explore_stop
global decay_rate
exp_exp_tradeoff = np.random.rand()
explore_probability = explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * decay_step)
if explore_probability > exp_exp_tradeoff:
action = random.choice(possible_actions)
else:
Qs = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: state.reshape((1, *state.shape))})
choice = np.argmax(Qs)
action = possible_actions[int(choice)]
return action, explore_probability
def createnetwork():
global DQNetwork
global state_size
global action_size
global learning_rate
tf.reset_default_graph()
DQNetwork = dqn2.DQNetwork(state_size, action_size, learning_rate)
def preprocess_frame(frame):
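    # Crop away the frame borders, scale pixel values to [0, 1],
    # and resize to 84x84 so every frame matches the network's input shape.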
cropped_frame = frame[30:-10, 30: -30]
normalized_frame = cropped_frame/255.0
preprocessed_frame = transform.resize(normalized_frame, [84, 84])
return preprocessed_frame
def stack_frames(frames2stack, state, is_new_episode):
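    # Keep the last 4 preprocessed frames in a deque and stack them along the channel
    # axis; presumably this lets the network perceive motion from a single input tensor.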
frame = preprocess_frame(state)
if is_new_episode:
# Clear the stacked frames
frames2stack = deque([np.zeros((84, 84), dtype=np.int) for i in range(stack_size)], maxlen=4)
# this is new episode so copy the first and only frame 4 times
frames2stack.append(frame)
frames2stack.append(frame)
frames2stack.append(frame)
frames2stack.append(frame)
# stack the frames
stacked_state = np.stack(frames2stack, axis=2)
else:
# Append frame to deque
frames2stack.append(frame)
stacked_state = np.stack(frames2stack, axis=2)
return stacked_state, frames2stack
def create_environment():
global game
global state_size
global action_size
game = DoomGame()
game.load_config("basic.cfg")
game.set_doom_scenario_path("basic.wad")
game.init()
state_size = [84, 84, 4] # 4 Frames at 84x84 resolution
# Possible actions
left = [1, 0, 0]
right = [0, 1, 0]
shoot = [0, 0, 1]
possible_actions = [left, right, shoot]
action_size = 3
return game, possible_actions
def test_environment():
global game
episodes = 10
for i in range(episodes):
game.new_episode()
while not game.is_episode_finished():
state = game.get_state()
img = state.screen_buffer
misc = state.game_variables
action = random.choice(possible_actions)
print("Taking action: {}".format(action))
reward = game.make_action(action)
print("Reward for above action: {}".format(reward))
time.sleep(.2)
print("Episode final reward score: {}".format(game.get_total_reward()))
time.sleep(2)
game.close()
if not args['model']:
stacked_frames = deque([np.zeros((84, 84), dtype=np.int) for i in range(stack_size)], maxlen=4)
game, possible_actions = create_environment()
print("Game Environment created")
createnetwork()
print("network created")
memory = mem.Memory(max_size=memory_size)
game.new_episode()
for i in range(pretrain_length):
        if i == 0:
state = game.get_state().screen_buffer
state, stacked_frames = stack_frames(stacked_frames, state, True)
# Take a random action
action = random.choice(possible_actions)
# rewards
reward = game.make_action(action)
done = game.is_episode_finished()
if done:
next_state = np.zeros(state.shape)
memory.add((state, action, reward, next_state, done))
# Start a new episode
game.new_episode()
# Get fresh state
state = game.get_state().screen_buffer
# Stack frames
state, stacked_frames = stack_frames(stacked_frames, state, True)
else:
next_state = game.get_state().screen_buffer
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
memory.add((state, action, reward, next_state, done))
state = next_state
print("memory created")
# Setup tensorboard
writer = tf.summary.FileWriter("/tensorboard/dqn/1")
tf.summary.scalar("Loss", DQNetwork.loss)
write_op = tf.summary.merge_all()
print("tensorboard setup")
print("here we go...")
train(memory)
command = input("Training complete, press a key to test our model\n")
else:
create_environment()
infer()
| print("Score", total_rewards)
total_test_rewards.append(total_rewards)
break | conditional_block |
doom.py | from vizdoom import *
import tensorflow as tf
import numpy as np
import random
import time
from skimage import transform
import dqn2
import mem
from collections import deque
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')  # ignore the skimage warnings
import argparse
# Model hyperparams
state_size = None
action_size = None
learning_rate = 0.0002
total_episodes = 500
max_steps = 100
batch_size = 64
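# Epsilon-greedy exploration schedule: the probability of taking a random action
# decays exponentially from explore_start to explore_stop at decay_rate per step.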
explore_start = 1.0
explore_stop = .01
decay_rate = 0.0001
gamma = 0.95
pretrain_length = batch_size
memory_size = 1000000
render = True
DQNetwork = None
stack_size = 4
saver = tf.train.Saver()
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=False, type=bool, default=False,
help="Use the latest saved model")
args = vars(ap.parse_args())
def | (memory):
global stacked_frames
global saver
sess = tf.Session()
sess.run(tf.global_variables_initializer())
decay_step = 0
game.init()
for episode in range(total_episodes):
print("Training on episode: {}".format(episode))
step = 0
episode_rewards = []
game.new_episode()
state = game.get_state().screen_buffer
state, stacked_frames = stack_frames(stacked_frames, state, True)
while step < max_steps:
step += 1
decay_step += 1
action, explore_probability = predict_action(decay_step, possible_actions, state, sess)
reward = game.make_action(action)
done = game.is_episode_finished()
episode_rewards.append(reward)
if done:
# the episode ends so no next state
next_state = np.zeros((84, 84), dtype=np.int)
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
# Set step = max_steps to end the episode
step = max_steps
# Get the total reward of the episode
total_reward = np.sum(episode_rewards)
print('Episode: {}'.format(episode),
'Total reward: {}'.format(total_reward),
'Training loss: {:.4f}'.format(loss),
'Explore P: {:.4f}'.format(explore_probability))
memory.add((state, action, reward, next_state, done))
else:
# Get the next state
next_state = game.get_state().screen_buffer
# Stack the frame of the next_state
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
# Add experience to memory
memory.add((state, action, reward, next_state, done))
# st+1 is now our current state
state = next_state
### LEARNING PART
# Obtain random mini-batch from memory
batch = memory.sample(batch_size)
states_mb = np.array([each[0] for each in batch])
actions_mb = np.array([each[1] for each in batch])
rewards_mb = np.array([each[2] for each in batch])
next_states_mb = np.array([each[3] for each in batch])
dones_mb = np.array([each[4] for each in batch])
target_Qs_batch = []
# Get Q values for next_state
Qs_next_state = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: next_states_mb})
# Set Q_target = r if the episode ends at s+1, otherwise set Q_target = r + gamma*maxQ(s', a')
for i in range(0, len(batch)):
terminal = dones_mb[i]
# If we are in a terminal state, only equals reward
if terminal:
target_Qs_batch.append(rewards_mb[i])
else:
target = rewards_mb[i] + gamma * np.max(Qs_next_state[i])
target_Qs_batch.append(target)
targets_mb = np.array([each for each in target_Qs_batch])
loss, _ = sess.run([DQNetwork.loss, DQNetwork.optimizer],
feed_dict={DQNetwork.inputs_: states_mb,
DQNetwork.target_Q: targets_mb,
DQNetwork.actions_: actions_mb})
# Write TF Summaries
summary = sess.run(write_op, feed_dict={DQNetwork.inputs_: states_mb,
DQNetwork.target_Q: targets_mb,
DQNetwork.actions_: actions_mb})
writer.add_summary(summary, episode)
writer.flush()
# Save model every 5 episodes
if episode % 5 == 0:
save_path = saver.save(sess, "./models/model.ckpt")
print("Model Saved")
def infer():
global env
global saver
sess = tf.Session()
total_test_rewards = []
game, possible_actions = create_environment()
totalScore = 0
saver.restore(sess, "./models/model.ckpt")
game.init()
for episode in range(1):
total_rewards = 0
state = env.reset()
state, stacked_frames = stack_frames(stacked_frames, state, True)
print("**************************************")
print("EPISODSE ", episode)
while True:
# reshape the state
state = state.reshape((1, *state_size))
Qs = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: state})
# Take the biggest Q value
choice = np.argmax(Qs)
action = possible_actions[choice]
next_state, reward, done, _ = env.step(action)
env.render()
total_rewards += reward
if done:
print("Score", total_rewards)
total_test_rewards.append(total_rewards)
break
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
state = next_state
env.close()
def predict_action(decay_step, possible_actions, state, sess):
global explore_start
global explore_stop
global decay_rate
exp_exp_tradeoff = np.random.rand()
explore_probability = explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * decay_step)
if explore_probability > exp_exp_tradeoff:
action = random.choice(possible_actions)
else:
Qs = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: state.reshape((1, *state.shape))})
choice = np.argmax(Qs)
action = possible_actions[int(choice)]
return action, explore_probability
def createnetwork():
global DQNetwork
global state_size
global action_size
global learning_rate
tf.reset_default_graph()
DQNetwork = dqn2.DQNetwork(state_size, action_size, learning_rate)
def preprocess_frame(frame):
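    # Crop away the frame borders, scale pixel values to [0, 1],
    # and resize to 84x84 so every frame matches the network's input shape.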
cropped_frame = frame[30:-10, 30: -30]
normalized_frame = cropped_frame/255.0
preprocessed_frame = transform.resize(normalized_frame, [84, 84])
return preprocessed_frame
def stack_frames(frames2stack, state, is_new_episode):
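    # Keep the last 4 preprocessed frames in a deque and stack them along the channel
    # axis; presumably this lets the network perceive motion from a single input tensor.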
frame = preprocess_frame(state)
if is_new_episode:
# Clear the stacked frames
frames2stack = deque([np.zeros((84, 84), dtype=np.int) for i in range(stack_size)], maxlen=4)
# this is new episode so copy the first and only frame 4 times
frames2stack.append(frame)
frames2stack.append(frame)
frames2stack.append(frame)
frames2stack.append(frame)
# stack the frames
stacked_state = np.stack(frames2stack, axis=2)
else:
# Append frame to deque
frames2stack.append(frame)
stacked_state = np.stack(frames2stack, axis=2)
return stacked_state, frames2stack
def create_environment():
global game
global state_size
global action_size
game = DoomGame()
game.load_config("basic.cfg")
game.set_doom_scenario_path("basic.wad")
game.init()
state_size = [84, 84, 4] # 4 Frames at 84x84 resolution
# Possible actions
left = [1, 0, 0]
right = [0, 1, 0]
shoot = [0, 0, 1]
possible_actions = [left, right, shoot]
action_size = 3
return game, possible_actions
def test_environment():
global game
episodes = 10
for i in range(episodes):
game.new_episode()
while not game.is_episode_finished():
state = game.get_state()
img = state.screen_buffer
misc = state.game_variables
action = random.choice(possible_actions)
print("Taking action: {}".format(action))
reward = game.make_action(action)
print("Reward for above action: {}".format(reward))
time.sleep(.2)
print("Episode final reward score: {}".format(game.get_total_reward()))
time.sleep(2)
game.close()
if not args['model']:
stacked_frames = deque([np.zeros((84, 84), dtype=np.int) for i in range(stack_size)], maxlen=4)
game, possible_actions = create_environment()
print("Game Environment created")
createnetwork()
print("network created")
memory = mem.Memory(max_size=memory_size)
game.new_episode()
for i in range(pretrain_length):
        if i == 0:
state = game.get_state().screen_buffer
state, stacked_frames = stack_frames(stacked_frames, state, True)
# Take a random action
action = random.choice(possible_actions)
# rewards
reward = game.make_action(action)
done = game.is_episode_finished()
if done:
next_state = np.zeros(state.shape)
memory.add((state, action, reward, next_state, done))
# Start a new episode
game.new_episode()
# Get fresh state
state = game.get_state().screen_buffer
# Stack frames
state, stacked_frames = stack_frames(stacked_frames, state, True)
else:
next_state = game.get_state().screen_buffer
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
memory.add((state, action, reward, next_state, done))
state = next_state
print("memory created")
# Setup tensorboard
writer = tf.summary.FileWriter("/tensorboard/dqn/1")
tf.summary.scalar("Loss", DQNetwork.loss)
write_op = tf.summary.merge_all()
print("tensorboard setup")
print("here we go...")
train(memory)
command = input("Training complete, press a key to test our model\n")
else:
create_environment()
infer()
| train | identifier_name |
doom.py | from vizdoom import *
import tensorflow as tf
import numpy as np
import random
import time
from skimage import transform
import dqn2
import mem
from collections import deque
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')  # ignore the skimage warnings
import argparse
# Model hyperparams
state_size = None
action_size = None
learning_rate = 0.0002
total_episodes = 500
max_steps = 100
batch_size = 64
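# Epsilon-greedy exploration schedule: the probability of taking a random action
# decays exponentially from explore_start to explore_stop at decay_rate per step.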
explore_start = 1.0
explore_stop = .01
decay_rate = 0.0001
gamma = 0.95
pretrain_length = batch_size
memory_size = 1000000
render = True
DQNetwork = None
stack_size = 4
saver = tf.train.Saver()
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=False, type=bool, default=False,
help="Use the latest saved model")
args = vars(ap.parse_args())
def train(memory):
global stacked_frames
global saver
sess = tf.Session()
sess.run(tf.global_variables_initializer())
decay_step = 0
game.init()
for episode in range(total_episodes):
print("Training on episode: {}".format(episode))
step = 0
episode_rewards = []
game.new_episode()
state = game.get_state().screen_buffer
state, stacked_frames = stack_frames(stacked_frames, state, True)
while step < max_steps:
step += 1
decay_step += 1
action, explore_probability = predict_action(decay_step, possible_actions, state, sess)
reward = game.make_action(action)
done = game.is_episode_finished()
episode_rewards.append(reward)
if done:
# the episode ends so no next state
next_state = np.zeros((84, 84), dtype=np.int)
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
# Set step = max_steps to end the episode
step = max_steps
# Get the total reward of the episode
total_reward = np.sum(episode_rewards)
print('Episode: {}'.format(episode),
'Total reward: {}'.format(total_reward),
'Training loss: {:.4f}'.format(loss),
'Explore P: {:.4f}'.format(explore_probability))
memory.add((state, action, reward, next_state, done))
else:
# Get the next state
next_state = game.get_state().screen_buffer
# Stack the frame of the next_state
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
# Add experience to memory
memory.add((state, action, reward, next_state, done))
# st+1 is now our current state
state = next_state
### LEARNING PART
# Obtain random mini-batch from memory
batch = memory.sample(batch_size)
states_mb = np.array([each[0] for each in batch])
actions_mb = np.array([each[1] for each in batch])
rewards_mb = np.array([each[2] for each in batch])
next_states_mb = np.array([each[3] for each in batch])
dones_mb = np.array([each[4] for each in batch])
target_Qs_batch = []
# Get Q values for next_state
Qs_next_state = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: next_states_mb})
# Set Q_target = r if the episode ends at s+1, otherwise set Q_target = r + gamma*maxQ(s', a')
for i in range(0, len(batch)):
terminal = dones_mb[i]
# If we are in a terminal state, only equals reward
if terminal:
target_Qs_batch.append(rewards_mb[i])
else:
target = rewards_mb[i] + gamma * np.max(Qs_next_state[i])
target_Qs_batch.append(target)
targets_mb = np.array([each for each in target_Qs_batch]) | loss, _ = sess.run([DQNetwork.loss, DQNetwork.optimizer],
feed_dict={DQNetwork.inputs_: states_mb,
DQNetwork.target_Q: targets_mb,
DQNetwork.actions_: actions_mb})
# Write TF Summaries
summary = sess.run(write_op, feed_dict={DQNetwork.inputs_: states_mb,
DQNetwork.target_Q: targets_mb,
DQNetwork.actions_: actions_mb})
writer.add_summary(summary, episode)
writer.flush()
# Save model every 5 episodes
if episode % 5 == 0:
save_path = saver.save(sess, "./models/model.ckpt")
print("Model Saved")
def infer():
global env
global saver
sess = tf.Session()
total_test_rewards = []
game, possible_actions = create_environment()
totalScore = 0
saver.restore(sess, "./models/model.ckpt")
game.init()
for episode in range(1):
total_rewards = 0
state = env.reset()
state, stacked_frames = stack_frames(stacked_frames, state, True)
print("**************************************")
print("EPISODSE ", episode)
while True:
# reshape the state
state = state.reshape((1, *state_size))
Qs = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: state})
# Take the biggest Q value
choice = np.argmax(Qs)
action = possible_actions[choice]
next_state, reward, done, _ = env.step(action)
env.render()
total_rewards += reward
if done:
print("Score", total_rewards)
total_test_rewards.append(total_rewards)
break
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
state = next_state
env.close()
def predict_action(decay_step, possible_actions, state, sess):
global explore_start
global explore_stop
global decay_rate
exp_exp_tradeoff = np.random.rand()
explore_probability = explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * decay_step)
if explore_probability > exp_exp_tradeoff:
action = random.choice(possible_actions)
else:
Qs = sess.run(DQNetwork.output, feed_dict={DQNetwork.inputs_: state.reshape((1, *state.shape))})
choice = np.argmax(Qs)
action = possible_actions[int(choice)]
return action, explore_probability
def createnetwork():
global DQNetwork
global state_size
global action_size
global learning_rate
tf.reset_default_graph()
DQNetwork = dqn2.DQNetwork(state_size, action_size, learning_rate)
def preprocess_frame(frame):
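    # Crop away the frame borders, scale pixel values to [0, 1],
    # and resize to 84x84 so every frame matches the network's input shape.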
cropped_frame = frame[30:-10, 30: -30]
normalized_frame = cropped_frame/255.0
preprocessed_frame = transform.resize(normalized_frame, [84, 84])
return preprocessed_frame
def stack_frames(frames2stack, state, is_new_episode):
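    # Keep the last 4 preprocessed frames in a deque and stack them along the channel
    # axis; presumably this lets the network perceive motion from a single input tensor.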
frame = preprocess_frame(state)
if is_new_episode:
# Clear the stacked frames
frames2stack = deque([np.zeros((84, 84), dtype=np.int) for i in range(stack_size)], maxlen=4)
# this is new episode so copy the first and only frame 4 times
frames2stack.append(frame)
frames2stack.append(frame)
frames2stack.append(frame)
frames2stack.append(frame)
# stack the frames
stacked_state = np.stack(frames2stack, axis=2)
else:
# Append frame to deque
frames2stack.append(frame)
stacked_state = np.stack(frames2stack, axis=2)
return stacked_state, frames2stack
def create_environment():
global game
global state_size
global action_size
game = DoomGame()
game.load_config("basic.cfg")
game.set_doom_scenario_path("basic.wad")
game.init()
state_size = [84, 84, 4] # 4 Frames at 84x84 resolution
# Possible actions
left = [1, 0, 0]
right = [0, 1, 0]
shoot = [0, 0, 1]
possible_actions = [left, right, shoot]
action_size = 3
return game, possible_actions
def test_environment():
global game
episodes = 10
for i in range(episodes):
game.new_episode()
while not game.is_episode_finished():
state = game.get_state()
img = state.screen_buffer
misc = state.game_variables
action = random.choice(possible_actions)
print("Taking action: {}".format(action))
reward = game.make_action(action)
print("Reward for above action: {}".format(reward))
time.sleep(.2)
print("Episode final reward score: {}".format(game.get_total_reward()))
time.sleep(2)
game.close()
if not args['model']:
stacked_frames = deque([np.zeros((84, 84), dtype=np.int) for i in range(stack_size)], maxlen=4)
game, possible_actions = create_environment()
print("Game Environment created")
createnetwork()
print("network created")
memory = mem.Memory(max_size=memory_size)
game.new_episode()
for i in range(pretrain_length):
        if i == 0:
state = game.get_state().screen_buffer
state, stacked_frames = stack_frames(stacked_frames, state, True)
# Take a random action
action = random.choice(possible_actions)
# rewards
reward = game.make_action(action)
done = game.is_episode_finished()
if done:
next_state = np.zeros(state.shape)
memory.add((state, action, reward, next_state, done))
# Start a new episode
game.new_episode()
# Get fresh state
state = game.get_state().screen_buffer
# Stack frames
state, stacked_frames = stack_frames(stacked_frames, state, True)
else:
next_state = game.get_state().screen_buffer
next_state, stacked_frames = stack_frames(stacked_frames, next_state, False)
memory.add((state, action, reward, next_state, done))
state = next_state
print("memory created")
# Setup tensorboard
writer = tf.summary.FileWriter("/tensorboard/dqn/1")
tf.summary.scalar("Loss", DQNetwork.loss)
write_op = tf.summary.merge_all()
print("tensorboard setup")
print("here we go...")
train(memory)
command = input("Training complete, press a key to test our model\n")
else:
create_environment()
infer() | random_line_split |
|
dataset3.js | module.exports = [
{ x: '11.3034455', y: '536.486347', label: 'C2' },
{ x: '10.1644948', y: '3.39302974', label: 'C1' },
{ x: '9.36384167', y: '3.4662791', label: 'C1' },
{ x: '1.71758171', y: '3.1736868', label: 'C2' },
{ x: '6.26468703', y: '3.38746862', label: 'C1' },
{ x: '9.96948149', y: '63.9384092', label: 'C2' },
{ x: '11.7433699', y: '14.757106', label: 'C2' },
{ x: '11.874752', y: '13.7192019', label: 'C2' },
{ x: '9.58037049', y: '3.42547834', label: 'C1' },
{ x: '11.2590411', y: '3.49072422', label: 'C1' },
{ x: '1.32618183', y: '4.39702267', label: 'C2' },
{ x: '12.350383', y: '58.9211679', label: 'C2' },
{ x: '0.02746666', y: '1.03092077', label: 'C2' },
{ x: '3.74553667', y: '3.22849596', label: 'C1' },
{ x: '0.13779107', y: '3.18784598', label: 'C1' },
{ x: '8.27936644', y: '3.49935401', label: 'C1' },
{ x: '1.49693289', y: '4.48572215', label: 'C2' },
{ x: '10.2377392', y: '69.0190034', label: 'C2' },
{ x: '9.06903287', y: '3.47857051', label: 'C1' },
{ x: '12.4401074', y: '22.6431613', label: 'C2' },
{ x: '0.7924131', y: '2.53028974', label: 'C1' },
{ x: '5.73320719', y: '3.44315577', label: 'C1' },
{ x: '0.5323954', y: '10.0173283', label: 'C2' },
{ x: '2.53242592', y: '4.72197815', label: 'C2' },
{ x: '9.2915128', y: '11.3674831', label: 'C2' },
{ x: '3.14172796', y: '20.5978543', label: 'C2' },
{ x: '9.52452162', y: '3.43231276', label: 'C1' },
{ x: '14.7912534', y: '3.47320534', label: 'C1' },
{ x: '14.6305734', y: '3.41808872', label: 'C1' },
{ x: '1.90527055', y: '3.13719694', label: 'C1' },
{ x: '12.5142674', y: '3.36968132', label: 'C1' },
{ x: '0.85833308', y: '2.81712839', label: 'C1' },
{ x: '11.8440809', y: '3.35418886', label: 'C1' },
{ x: '13.5525071', y: '3.40217389', label: 'C1' },
{ x: '5.25666066', y: '24.9828126', label: 'C2' },
{ x: '0.03570666', y: '1.03658257', label: 'C2' },
{ x: '2.08334605', y: '13.0712069', label: 'C2' },
{ x: '13.7809381', y: '261.38567', label: 'C2' },
{ x: '10.4849391', y: '31.5307592', label: 'C2' },
{ x: '11.7136143', y: '3.46104997', label: 'C1' },
{ x: '5.93050935', y: '3.22922252', label: 'C1' },
{ x: '12.7266762', y: '28.82497', label: 'C2' },
{ x: '11.2764367', y: '3.39610713', label: 'C1' },
{ x: '4.58418531', y: '3.17806176', label: 'C1' },
{ x: '4.28296762', y: '9.39110461', label: 'C2' },
{ x: '0.54567095', y: '1.74704601', label: 'C2' },
{ x: '6.47297586', y: '80.0737218', label: 'C2' },
{ x: '3.56196783', y: '3.46501229', label: 'C1' },
{ x: '4.92110965', y: '3.21782503', label: 'C1' },
{ x: '7.08960234', y: '3.31001019', label: 'C1' },
{ x: '6.6400647', y: '11.4021873', label: 'C2' },
{ x: '3.04376354', y: '3.11156957', label: 'C1' },
{ x: '3.29279458', y: '3.20280598', label: 'C1' },
{ x: '11.1281167', y: '3.35227009', label: 'C1' },
{ x: '0.62532426', y: '2.6768092', label: 'C1' },
{ x: '14.1219825', y: '3.47557778', label: 'C1' },
{ x: '5.97399823', y: '7.92169457', label: 'C2' },
{ x: '11.4297922', y: '21.5417738', label: 'C2' },
{ x: '14.5248268', y: '27.7087372', label: 'C2' },
{ x: '3.09732353', y: '3.26640523', label: 'C1' },
{ x: '9.90401929', y: '62.2373692', label: 'C2' },
{ x: '8.49818415', y: '69.0314842', label: 'C2' },
{ x: '4.60844752', y: '3.15543562', label: 'C1' },
{ x: '14.7514267', y: '3.45141488', label: 'C1' },
{ x: '10.5700858', y: '3.42392187', label: 'C1' },
{ x: '11.8527787', y: '3.38192599', label: 'C1' }, | { x: '4.06277657', y: '3.30054593', label: 'C1' },
{ x: '10.1004059', y: '73.8328052', label: 'C2' },
{ x: '11.9150365', y: '3.37543243', label: 'C1' },
{ x: '2.67113254', y: '5.12725717', label: 'C2' },
{ x: '9.21185949', y: '14.5993617', label: 'C2' },
{ x: '2.59559923', y: '5.1530681', label: 'C2' },
{ x: '11.2512589', y: '34.1527354', label: 'C2' },
{ x: '3.46217231', y: '4.64449256', label: 'C2' },
{ x: '10.1713614', y: '287.740405', label: 'C2' },
{ x: '12.7060762', y: '18.9983532', label: 'C2' },
{ x: '0.24033326', y: '2.65936106', label: 'C1' },
{ x: '7.21594897', y: '3.2822869', label: 'C1' },
{ x: '11.1487167', y: '16.5251589', label: 'C2' },
{ x: '11.31077', y: '36.740755', label: 'C2' },
{ x: '8.7980285', y: '150.739092', label: 'C2' },
{ x: '12.3128452', y: '3.46424609', label: 'C1' },
{ x: '14.3563646', y: '27.2356295', label: 'C2' },
{ x: '7.55607776', y: '3.27713683', label: 'C1' },
{ x: '1.16779077', y: '2.59038073', label: 'C1' },
{ x: '1.15726188', y: '2.18638168', label: 'C2' },
{ x: '4.74303415', y: '3.28008028', label: 'C1' },
{ x: '6.85750908', y: '11.8555794', label: 'C2' },
{ x: '4.57319864', y: '3.14574577', label: 'C1' },
{ x: '10.458388', y: '3.45509926', label: 'C1' },
{ x: '7.18939787', y: '3.35470766', label: 'C1' },
{ x: '7.59407331', y: '14.5317836', label: 'C2' },
{ x: '1.32847072', y: '2.86762537', label: 'C2' },
{ x: '4.82451857', y: '3.23219081', label: 'C1' },
{ x: '11.2828455', y: '58.5434514', label: 'C2' },
{ x: '4.27289651', y: '16.265655', label: 'C2' },
{ x: '5.08133183', y: '18.6596478', label: 'C2' },
{ x: '2.08105716', y: '4.57779004', label: 'C2' },
{ x: '9.44441053', y: '25.1695738', label: 'C2' },
{ x: '11.514481', y: '14.966176', label: 'C2' },
{ x: '12.018952', y: '19.5919853', label: 'C2' },
{ x: '2.84920804', y: '4.18428956', label: 'C2' },
{ x: '8.39609973', y: '3.33221623', label: 'C1' },
{ x: '7.51808222', y: '3.33735785', label: 'C1' },
{ x: '3.68465224', y: '16.7482232', label: 'C2' },
{ x: '7.47917112', y: '45.0340366', label: 'C2' },
{ x: '13.8189337', y: '35.2171628', label: 'C2' },
{ x: '2.53929258', y: '3.38845726', label: 'C1' },
{ x: '0.11169774', y: '31.031496', label: 'C2' },
{ x: '6.15024262', y: '3.31973075', label: 'C1' },
{ x: '5.80690939', y: '3.25246304', label: 'C1' },
{ x: '10.2803125', y: '22.5973451', label: 'C2' },
{ x: '7.10516678', y: '3.49068142', label: 'C1' },
{ x: '13.013245', y: '3.42062661', label: 'C1' },
{ x: '6.68263802', y: '9.91122751', label: 'C2' },
{ x: '10.4396191', y: '42.8141856', label: 'C2' },
{ x: '4.4656209', y: '3.30581371', label: 'C1' },
{ x: '5.23285623', y: '3.39042884', label: 'C1' },
{ x: '11.9425031', y: '30.2314854', label: 'C2' },
{ x: '7.60689108', y: '3.40846815', label: 'C1' },
{ x: '10.2839747', y: '22.0444711', label: 'C2' },
{ x: '5.16968291', y: '14.4130865', label: 'C2' },
{ x: '4.19965209', y: '3.26619915', label: 'C1' },
{ x: '3.11243019', y: '3.41869999', label: 'C1' },
{ x: '12.3389386', y: '3.3977073', label: 'C1' },
{ x: '6.61442915', y: '3.30811226', label: 'C1' },
{ x: '4.35117649', y: '3.13679062', label: 'C1' },
{ x: '8.42036195', y: '10.5070401', label: 'C2' },
{ x: '10.213477', y: '3.4505612', label: 'C1' },
{ x: '9.89211707', y: '35.1929803', label: 'C2' },
{ x: '4.96368297', y: '3.21713231', label: 'C1' },
{ x: '3.88332774', y: '3.37341697', label: 'C1' },
{ x: '7.18161565', y: '14.3851868', label: 'C2' },
{ x: '14.4460891', y: '56.4319093', label: 'C2' },
{ x: '2.34107486', y: '4.2548288', label: 'C2' },
{ x: '14.0414136', y: '17.2030581', label: 'C2' },
{ x: '3.38984344', y: '3.25319652', label: 'C1' },
{ x: '9.98504593', y: '3.36899443', label: 'C1' },
{ x: '3.15775018', y: '3.30369274', label: 'C1' },
{ x: '13.1272317', y: '3.4572059', label: 'C1' },
{ x: '12.1407208', y: '3.37019501', label: 'C1' },
{ x: '8.31049532', y: '10.7852591', label: 'C2' },
{ x: '9.07361064', y: '3.32835144', label: 'C1' },
{ x: '0.72374645', y: '3.41119542', label: 'C1' },
{ x: '8.74034852', y: '15.2563629', label: 'C2' },
{ x: '2.71691031', y: '3.43019098', label: 'C1' },
{ x: '11.4210944', y: '3.39509568', label: 'C1' },
{ x: '6.32648701', y: '3.28064934', label: 'C1' },
{ x: '11.6930143', y: '3.40299681', label: 'C1' },
{ x: '4.48530534', y: '5.69809764', label: 'C2' },
{ x: '5.9295938', y: '8.70785554', label: 'C2' },
{ x: '1.44611957', y: '2.64899685', label: 'C2' },
{ x: '11.0599078', y: '3.49218424', label: 'C1' },
{ x: '2.88995025', y: '3.11614016', label: 'C1' },
{ x: '14.9359111', y: '160.260414', label: 'C2' },
{ x: '9.90035707', y: '13.9886034', label: 'C2' },
{ x: '13.8212226', y: '3.41271757', label: 'C1' },
{ x: '7.82982879', y: '95.5846707', label: 'C2' },
{ x: '1.0176397', y: '164.335747', label: 'C2' },
{ x: '10.4977569', y: '103.407967', label: 'C2' },
{ x: '10.5096591', y: '194.830758', label: 'C2' },
{ x: '4.25641652', y: '6.28909015', label: 'C2' },
{ x: '14.9862667', y: '3.40289683', label: 'C1' },
{ x: '0.44724876', y: '2.86555045', label: 'C1' },
{ x: '7.4466689', y: '8.92406165', label: 'C2' },
{ x: '0.00640889', y: '1.01827034', label: 'C2' },
{ x: '14.6113468', y: '66.176191', label: 'C2' },
{ x: '14.7688223', y: '3.42930231', label: 'C1' },
{ x: '1.44703513', y: '3.54272995', label: 'C2' },
{ x: '10.7747124', y: '3.35817666', label: 'C1' },
{ x: '7.76757103', y: '176.959802', label: 'C2' },
{ x: '1.34769738', y: '5.40356196', label: 'C2' },
{ x: '0.9132664', y: '2.01037839', label: 'C2' },
{ x: '3.24427015', y: '7.4912', label: 'C2' },
{ x: '3.58439894', y: '4.62635267', label: 'C2' },
{ x: '2.05313272', y: '3.40216623', label: 'C1' },
{ x: '14.1819514', y: '21.0392215', label: 'C2' },
{ x: '9.44898831', y: '3.34517377', label: 'C1' },
{ x: '8.20612201', y: '3.39491052', label: 'C1' },
{ x: '0.36896878', y: '1.43401999', label: 'C2' },
{ x: '14.2524491', y: '314.040705', label: 'C2' },
{ x: '7.13675344', y: '3.34060907', label: 'C1' },
{ x: '8.51741081', y: '3.47412766', label: 'C1' },
{ x: '1.81783502', y: '3.41876187', label: 'C1' },
{ x: '11.7392499', y: '3.40721499', label: 'C1' },
{ x: '10.5815302', y: '279.440933', label: 'C2' },
{ x: '9.00723289', y: '3.32631486', label: 'C1' },
{ x: '10.275277', y: '3.43365637', label: 'C1' },
{ x: '7.0291757', y: '3.31145209', label: 'C1' },
{ x: '7.57942442', y: '15.9896792', label: 'C2' },
{ x: '10.9482101', y: '3.34028963', label: 'C1' },
{ x: '12.1901608', y: '3.45772488', label: 'C1' },
{ x: '10.1109348', y: '3.45553116', label: 'C1' },
{ x: '9.88616596', y: '11.5213228', label: 'C2' },
{ x: '10.744499', y: '3.49052344', label: 'C1' },
{ x: '6.33381146', y: '18.0873626', label: 'C2' },
]; | { x: '0.51454207', y: '2.55030179', label: 'C1' },
{ x: '0.1780755', y: '1.52949356', label: 'C2' },
{ x: '0.76631977', y: '2.5589982', label: 'C1' },
{ x: '14.2249825', y: '19.6862421', label: 'C2' }, | random_line_split |
covid19co_pipe.py | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
# Dependencies
import numpy as np
import pandas as pd
import requests
import unidecode
import datetime
import dateutil
import subprocess
import sys
import json
import tempfile
import os
# Install missing dependencies
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
# PDFMiner pdfminer.six
try:
from pdfminer.high_level import extract_text
except Exception:
install('pdfminer.six')
from pdfminer.high_level import extract_text
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
#for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# %% [markdown]
# ---
# %% [markdown]
# # Colombia Covid19 Pipeline
# Dataset obtained from the [Instituto Nacional de Salud](https://www.ins.gov.co/Noticias/Paginas/Coronavirus.aspx) daily Covid19 report for Colombia.
#
# You can get the official dataset here:
# [INS - Official Report](https://www.datos.gov.co/Salud-y-Protecci-n-Social/Casos-positivos-de-COVID-19-en-Colombia/gt2j-8ykr)
#
# The number of new cases is increasing day by day around the world.
# This dataset has information about reported cases from 32 Colombian departments.
#
# You can also get the Google COVID-19 Community Mobility Reports - Colombia dataset.
#
# You can view and collaborate on the analysis here:
# [colombia_covid_19_analysis](https://www.kaggle.com/sebaxtian/colombia-covid-19-analysis) Kaggle Notebook Kernel.
# %% [markdown]
# ---
# %% [markdown]
# ## Data Sources
# %%
# Input data files are available in the "../input/" directory.
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
INPUT_DIR = '../input'
# Output data files are available in the "../output/" directory.
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
OUTPUT_DIR = '../output'
# Official Daily Report Until Now
URL_OFFICIAL_DATASET = 'https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD'
# Official Daily Samples Processed
URL_SAMPLES_PROCESSED = 'https://www.datos.gov.co/api/views/8835-5baf/rows.csv?accessType=DOWNLOAD'
# %% [markdown]
# ---
# %% [markdown]
# ## Official Covid19 Colombia Daily Report
# %%
# Official Daily Report Until Now
with requests.get(URL_OFFICIAL_DATASET) as official_dataset:
with open(os.path.join(INPUT_DIR, 'covid19co_official.csv'), 'wb') as dataset_file:
dataset_file.write(official_dataset.content)
# %%
# Open Official Daily Report
covid19co = pd.read_csv(os.path.join(INPUT_DIR, 'covid19co_official.csv'))
# Total Daily Report
covid19co.shape
# %%
# Show dataframe
covid19co.tail()
# %%
# Show attributes
list(covid19co.columns.values)
# %%
# Update Name Columns
# Remove Accents and Uppercase
covid19co.columns = [unidecode.unidecode(value).upper() for value in covid19co.columns]
# Show dataframe
covid19co.head()
| covid19co[attr] = covid19co[attr].transform(lambda value: str(value).title())
# Show dataframe
covid19co.head()
# %%
# Fill NaN Values
if covid19co.isna().sum().sum() > 0:
covid19co.fillna(value='-', inplace=True)
# Show dataframe
covid19co.head()
# %%
# Setup Date Format
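# e.g. setup_date('2020-05-13T00:00:00.000') -> '13/05/2020'; unexpected formats fall back to '-'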
def setup_date(value):
try:
value = value.split('T')[0].split('-')
if len(value) == 3:
value = value[2] + '/' + value[1] + '/' + value[0]
else:
value = '-'
except IndexError:
value = '-'
if len(value) != 10 and len(value) != 1:
value = '-'
return value
# Date Columns
date_columns = list(filter(lambda value: value.find('FECHA') != -1 or value.find('FIS') != -1, covid19co.columns))
# For each date column
for date_column in date_columns:
covid19co[date_column] = covid19co[date_column].transform(lambda value: setup_date(value))
# Show dataframe
covid19co.head()
# %%
# Add Day, Month, Year, Month Name and Day Name for each Date
# Spanish
nombre_mes = ['Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio', 'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']
nombre_dia = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes', 'Sábado', 'Domingo']
# Get day
def get_day(value):
if value not in '-':
return value.split('/')[0]
return value
# Get month
def get_month(value):
if value not in '-':
return value.split('/')[1]
return value
# Get year
def get_year(value):
if value not in '-':
return value.split('/')[2]
return value
# Get month name
def get_month_name(value):
if value not in '-':
return nombre_mes[int(value.split('/')[1]) - 1]
return value
# Get weekday
def get_weekday(value):
if value not in '-':
return nombre_dia[datetime.date(int(value.split('/')[2]), int(value.split('/')[1]), int(value.split('/')[0])).weekday()]
return value
# For each date column
for date_column in date_columns:
covid19co[date_column + ' DIA'] = covid19co[date_column].transform(lambda value: get_day(value))
covid19co[date_column + ' MES'] = covid19co[date_column].transform(lambda value: get_month(value))
covid19co[date_column + ' ANIO'] = covid19co[date_column].transform(lambda value: get_year(value))
covid19co[date_column + ' NOMBRE MES'] = covid19co[date_column].transform(lambda value: get_month_name(value))
covid19co[date_column + ' DIA SEMANA'] = covid19co[date_column].transform(lambda value: get_weekday(value))
# Show dataframe
covid19co.head()
# %% [markdown]
# ## Covid19 Colombia Dataset
# > ***Output file***: covid19co.csv
# %%
# Save dataframe
covid19co.to_csv(os.path.join(OUTPUT_DIR, 'covid19co.csv'), index=False)
# %% [markdown]
# ---
# %% [markdown]
# ## Official Covid19 Colombia Samples Processed
# %%
# Official Samples Processed Until Now
with requests.get(URL_SAMPLES_PROCESSED) as official_dataset:
with open(os.path.join(INPUT_DIR, 'covid19co_samples_processed_official.csv'), 'wb') as dataset_file:
dataset_file.write(official_dataset.content)
# %%
# Open Official Samples Processed
covid19co_samples_processed = pd.read_csv(os.path.join(INPUT_DIR, 'covid19co_samples_processed_official.csv'))
# Total Daily Report
covid19co_samples_processed.shape
# %%
# Show dataframe
covid19co_samples_processed.head()
# %%
# Update Name Columns
# Remove Accents and Uppercase
covid19co_samples_processed.columns = [unidecode.unidecode(value).upper() for value in covid19co_samples_processed.columns]
# Show dataframe
covid19co_samples_processed.head()
# %%
# Setup Date Format
covid19co_samples_processed['FECHA'] = covid19co_samples_processed['FECHA'].transform(lambda value: setup_date(value))
# Show dataframe
covid19co_samples_processed.head()
# %%
# Select Columns
covid19co_samples_processed = covid19co_samples_processed[['FECHA', 'ACUMULADAS']]
# Show dataframe
covid19co_samples_processed.tail()
# %% [markdown]
# ## Covid19 Colombia Samples Processed Dataset
# > ***Output file***: covid19co_samples_processed.csv
# %%
# Save dataframe
covid19co_samples_processed.to_csv(os.path.join(OUTPUT_DIR, 'covid19co_samples_processed.csv'), index=False)
# %% [markdown]
# ---
# %% [markdown]
# ## Google Community Mobility Reports - Colombia
# %%
# Open Official Google Community Mobility - Colombia
google_community_mobility_reports = pd.read_csv(os.path.join(OUTPUT_DIR, 'google_community_mobility_reports.csv'))
# Total Google Community Mobility - Colombia
google_community_mobility_reports.shape
# %%
# Show dataframe
google_community_mobility_reports.tail()
# %%
# Update Google Community Mobility Reports - Colombia
date_last_report = google_community_mobility_reports['date'].values[-1]
#print(date_last_report)
# 13/05/2020
date_last_report = date_last_report.split('/')
date_last_report = date_last_report[2] + '-' + date_last_report[1] + '-' + date_last_report[0]
#print(date_last_report)
# 2020-05-13
new_reports = pd.DataFrame(columns=['date', 'country', 'file', 'url'])
new_reports['date'] = [dti.strftime('%Y-%m-%d') for dti in pd.date_range(start=date_last_report, end=datetime.date.today().isoformat(), freq='D')]
new_reports['country'] = 'Colombia'
new_reports['file'] = [date + '_CO_Mobility_Report_en.pdf' for date in new_reports['date'].values]
# Get URL report
def get_report_url(file):
with requests.get('https://www.gstatic.com/covid19/mobility/' + file) as community_mobility_report:
if community_mobility_report.status_code == 200:
return community_mobility_report.url
else:
return np.nan
# Get URL report
new_reports['url'] = new_reports['file'].transform(lambda value: get_report_url(value))
# Drop any report without URL
new_reports.dropna(inplace=True)
# Reset index
new_reports.reset_index(inplace=True, drop=True)
# Only new reports
new_reports = new_reports.iloc[1:]
# Show dataframe
new_reports.head()
# %%
# Get/Add Mobility Changes
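# The first two pages of the report PDF each yield three category/percentage pairs;
# after merging, the odd indices of the returned list hold the percentage strings.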
def get_mobility_changes(URL):
# Target changes
targets = ['Retail & recreation', 'Grocery & pharmacy', 'Parks', 'Transit stations', 'Workplaces', 'Residential']
# Mobility Changes
mobility_changes = []
# Get Mobility Report
with requests.get(URL) as mobility_report:
if mobility_report.status_code == 200:
temp = tempfile.NamedTemporaryFile()
temp.write(mobility_report.content)
with open(temp.name, 'rb') as file:
# By pages
pdf_text = []
page = 0
while page != -1:
text = extract_text(file, maxpages=1, page_numbers=[page])
if text:
pdf_text.append(text.split('\n'))
page += 1
else:
page = -1
# Page 1
page1 = pdf_text[0]
page1 = filter(lambda value: value != '', page1)
page1 = filter(lambda value: value in targets or value[-1] == '%', list(page1))
page1 = list(page1)[:6]
# Page 2
page2 = pdf_text[1]
page2 = filter(lambda value: value != '', page2)
page2 = filter(lambda value: value in targets or value[-1] == '%', list(page2))
page2 = list(page2)[:6]
# Merge
mobility_changes = page1 + page2
return mobility_changes
# Check new report
if len(new_reports['date'].values) and new_reports['date'].values[-1] != date_last_report:
# New report
print('New Google Community Mobility Reports - Colombia')
# Add Mobility Changes
new_reports['mobility_changes'] = new_reports['url'].transform(lambda value: get_mobility_changes(value))
# By case
new_reports['Retail & recreation'] = new_reports['mobility_changes'].transform(lambda value: value[1])
new_reports['Grocery & pharmacy'] = new_reports['mobility_changes'].transform(lambda value: value[3])
new_reports['Parks'] = new_reports['mobility_changes'].transform(lambda value: value[5])
new_reports['Transit stations'] = new_reports['mobility_changes'].transform(lambda value: value[7])
new_reports['Workplaces'] = new_reports['mobility_changes'].transform(lambda value: value[9])
new_reports['Residential'] = new_reports['mobility_changes'].transform(lambda value: value[11])
# Drop column
new_reports.drop(columns=['mobility_changes'], inplace=True)
# Sort columns
new_reports = new_reports[['date', 'country', 'Retail & recreation', 'Grocery & pharmacy', 'Parks', 'Transit stations', 'Workplaces', 'Residential', 'file', 'url']]
# Setup date format
new_reports['date'] = [value.strftime('%d/%m/%Y') for value in pd.to_datetime(new_reports['date'], format='%Y-%m-%d')]
# Merge and Update
google_community_mobility_reports = pd.concat([google_community_mobility_reports, new_reports])
else:
# Do nothing
print('Google Community Mobility Reports - Colombia All Updated')
# Show dataframe
google_community_mobility_reports.tail()
# %% [markdown]
# ## Google Community Mobility Reports - Colombia
# > ***Output file***: google_community_mobility_reports.csv
# %%
# Save dataframe
google_community_mobility_reports.to_csv(os.path.join(OUTPUT_DIR, 'google_community_mobility_reports.csv'), index=False)
# %% [markdown]
# ---
# %% |
# %%
# Update text columns to title case format
for attr in covid19co.columns:
if covid19co[attr].dtypes == 'object': | random_line_split |
covid19co_pipe.py | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
# Dependencies
import numpy as np
import pandas as pd
import requests
import unidecode
import datetime
import dateutil
import subprocess
import sys
import json
import tempfile
import os
# Install missing dependencies
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
# PDFMiner pdfminer.six
try:
from pdfminer.high_level import extract_text
except Exception:
install('pdfminer.six')
from pdfminer.high_level import extract_text
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
#for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# %% [markdown]
# ---
# %% [markdown]
# # Colombia Covid19 Pipeline
# Dataset obtained from the [Instituto Nacional de Salud](https://www.ins.gov.co/Noticias/Paginas/Coronavirus.aspx) daily Covid19 report for Colombia.
#
# You can get the official dataset here:
# [INS - Official Report](https://www.datos.gov.co/Salud-y-Protecci-n-Social/Casos-positivos-de-COVID-19-en-Colombia/gt2j-8ykr)
#
# The number of new cases is increasing day by day around the world.
# This dataset has information about reported cases from 32 Colombian departments.
#
# You can also get the Google COVID-19 Community Mobility Reports - Colombia dataset.
#
# You can view and collaborate on the analysis here:
# [colombia_covid_19_analysis](https://www.kaggle.com/sebaxtian/colombia-covid-19-analysis) Kaggle Notebook Kernel.
# %% [markdown]
# ---
# %% [markdown]
# ## Data Sources
# %%
# Input data files are available in the "../input/" directory.
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
INPUT_DIR = '../input'
# Output data files are available in the "../output/" directory.
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
OUTPUT_DIR = '../output'
# Official Daily Report Until Now
URL_OFFICIAL_DATASET = 'https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD'
# Official Daily Samples Processed
URL_SAMPLES_PROCESSED = 'https://www.datos.gov.co/api/views/8835-5baf/rows.csv?accessType=DOWNLOAD'
# %% [markdown]
# ---
# %% [markdown]
# ## Official Covid19 Colombia Daily Report
# %%
# Official Daily Report Until Now
with requests.get(URL_OFFICIAL_DATASET) as official_dataset:
with open(os.path.join(INPUT_DIR, 'covid19co_official.csv'), 'wb') as dataset_file:
dataset_file.write(official_dataset.content)
# %%
# Open Official Daily Report
covid19co = pd.read_csv(os.path.join(INPUT_DIR, 'covid19co_official.csv'))
# Total Daily Report
covid19co.shape
# %%
# Show dataframe
covid19co.tail()
# %%
# Show attributes
list(covid19co.columns.values)
# %%
# Update Name Columns
# Remove Accents and Uppercase
covid19co.columns = [unidecode.unidecode(value).upper() for value in covid19co.columns]
# Show dataframe
covid19co.head()
# %%
# Update text columns to title case format
for attr in covid19co.columns:
if covid19co[attr].dtypes == 'object':
covid19co[attr] = covid19co[attr].transform(lambda value: str(value).title())
# Show dataframe
covid19co.head()
# %%
# Fill NaN Values
if covid19co.isna().sum().sum() > 0:
covid19co.fillna(value='-', inplace=True)
# Show dataframe
covid19co.head()
# %%
# Setup Date Format
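# e.g. setup_date('2020-05-13T00:00:00.000') -> '13/05/2020'; unexpected formats fall back to '-'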
def setup_date(value):
try:
value = value.split('T')[0].split('-')
if len(value) == 3:
value = value[2] + '/' + value[1] + '/' + value[0]
else:
value = '-'
except IndexError:
value = '-'
if len(value) != 10 and len(value) != 1:
value = '-'
return value
# Date Columns
date_columns = list(filter(lambda value: value.find('FECHA') != -1 or value.find('FIS') != -1, covid19co.columns))
# For each date column
for date_column in date_columns:
covid19co[date_column] = covid19co[date_column].transform(lambda value: setup_date(value))
# Show dataframe
covid19co.head()
# %%
# Add Day, Month, Year, Month Name and Day Name for each Date
# Spanish
nombre_mes = ['Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio', 'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']
nombre_dia = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes', 'Sábado', 'Domingo']
# Get day
def get_day(value):
if value not in '-':
return value.split('/')[0]
return value
# Get month
def get_month(value):
if value not in '-':
return value.split('/')[1]
return value
# Get year
def get_year(value):
if value not in '-':
return value.split('/')[2]
return value
# Get month name
def get_month_name(value):
if value not in '-':
return nombre_mes[int(value.split('/')[1]) - 1]
return value
# Get weekday
def get_weekday(value):
if value not in '-':
return nombre_dia[datetime.date(int(value.split('/')[2]), int(value.split('/')[1]), int(value.split('/')[0])).weekday()]
return value
# For each date column
for date_column in date_columns:
covid19co[date_column + ' DIA'] = covid19co[date_column].transform(lambda value: get_day(value))
covid19co[date_column + ' MES'] = covid19co[date_column].transform(lambda value: get_month(value))
covid19co[date_column + ' ANIO'] = covid19co[date_column].transform(lambda value: get_year(value))
covid19co[date_column + ' NOMBRE MES'] = covid19co[date_column].transform(lambda value: get_month_name(value))
covid19co[date_column + ' DIA SEMANA'] = covid19co[date_column].transform(lambda value: get_weekday(value))
# Show dataframe
covid19co.head()
# %% [markdown]
# ## Covid19 Colombia Dataset
# > ***Output file***: covid19co.csv
# %%
# Save dataframe
covid19co.to_csv(os.path.join(OUTPUT_DIR, 'covid19co.csv'), index=False)
# %% [markdown]
# ---
# %% [markdown]
# ## Official Covid19 Colombia Samples Processed
# %%
# Official Samples Processed Until Now
with requests.get(URL_SAMPLES_PROCESSED) as official_dataset:
with open(os.path.join(INPUT_DIR, 'covid19co_samples_processed_official.csv'), 'wb') as dataset_file:
dataset_file.write(official_dataset.content)
# %%
# Open Official Samples Processed
covid19co_samples_processed = pd.read_csv(os.path.join(INPUT_DIR, 'covid19co_samples_processed_official.csv'))
# Total Daily Report
covid19co_samples_processed.shape
# %%
# Show dataframe
covid19co_samples_processed.head()
# %%
# Update Name Columns
# Remove Accents and Uppercase
covid19co_samples_processed.columns = [unidecode.unidecode(value).upper() for value in covid19co_samples_processed.columns]
# Show dataframe
covid19co_samples_processed.head()
# %%
# Setup Date Format
covid19co_samples_processed['FECHA'] = covid19co_samples_processed['FECHA'].transform(lambda value: setup_date(value))
# Show dataframe
covid19co_samples_processed.head()
# %%
# Select Columns
covid19co_samples_processed = covid19co_samples_processed[['FECHA', 'ACUMULADAS']]
# Show dataframe
covid19co_samples_processed.tail()
# %% [markdown]
# ## Covid19 Colombia Samples Processed Dataset
# > ***Output file***: covid19co_samples_processed.csv
# %%
# Save dataframe
covid19co_samples_processed.to_csv(os.path.join(OUTPUT_DIR, 'covid19co_samples_processed.csv'), index=False)
# %% [markdown]
# ---
# %% [markdown]
# ## Google Community Mobility Reports - Colombia
# %%
# Open Official Google Community Mobility - Colombia
google_community_mobility_reports = pd.read_csv(os.path.join(OUTPUT_DIR, 'google_community_mobility_reports.csv'))
# Total Google Community Mobility - Colombia
google_community_mobility_reports.shape
# %%
# Show dataframe
google_community_mobility_reports.tail()
# %%
# Update Google Community Mobility Reports - Colombia
date_last_report = google_community_mobility_reports['date'].values[-1]
#print(date_last_report)
# 13/05/2020
date_last_report = date_last_report.split('/')
date_last_report = date_last_report[2] + '-' + date_last_report[1] + '-' + date_last_report[0]
#print(date_last_report)
# 2020-05-13
new_reports = pd.DataFrame(columns=['date', 'country', 'file', 'url'])
new_reports['date'] = [dti.strftime('%Y-%m-%d') for dti in pd.date_range(start=date_last_report, end=datetime.date.today().isoformat(), freq='D')]
new_reports['country'] = 'Colombia'
new_reports['file'] = [date + '_CO_Mobility_Report_en.pdf' for date in new_reports['date'].values]
# Get URL report
def get_report_url(file):
with requests.get('https://www.gstatic.com/covid19/mobility/' + file) as community_mobility_report:
if community_mobility_report.status_code == 200:
return community_mobility_report.url
else:
return np.nan
# Get URL report
new_reports['url'] = new_reports['file'].transform(lambda value: get_report_url(value))
# Drop any report without URL
new_reports.dropna(inplace=True)
# Reset index
new_reports.reset_index(inplace=True, drop=True)
# Only new reports
new_reports = new_reports.iloc[1:]
# Show dataframe
new_reports.head()
# %%
# Get/Add Mobility Changes
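# The first two pages of the report PDF each yield three category/percentage pairs;
# after merging, the odd indices of the returned list hold the percentage strings.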
def ge | RL):
# Target changes
targets = ['Retail & recreation', 'Grocery & pharmacy', 'Parks', 'Transit stations', 'Workplaces', 'Residential']
# Mobility Changes
mobility_changes = []
# Get Mobility Report
with requests.get(URL) as mobility_report:
if mobility_report.status_code == 200:
temp = tempfile.NamedTemporaryFile()
temp.write(mobility_report.content)
with open(temp.name, 'rb') as file:
# By pages
pdf_text = []
page = 0
while page != -1:
text = extract_text(file, maxpages=1, page_numbers=[page])
if text:
pdf_text.append(text.split('\n'))
page += 1
else:
page = -1
# Page 1
page1 = pdf_text[0]
page1 = filter(lambda value: value != '', page1)
page1 = filter(lambda value: value in targets or value[-1] == '%', list(page1))
page1 = list(page1)[:6]
# Page 2
page2 = pdf_text[1]
page2 = filter(lambda value: value != '', page2)
page2 = filter(lambda value: value in targets or value[-1] == '%', list(page2))
page2 = list(page2)[:6]
# Merge
mobility_changes = page1 + page2
return mobility_changes
# Check new report
if len(new_reports['date'].values) and new_reports['date'].values[-1] != date_last_report:
# New report
print('New Google Community Mobility Reports - Colombia')
# Add Mobility Changes
new_reports['mobility_changes'] = new_reports['url'].transform(lambda value: get_mobility_changes(value))
# By case
new_reports['Retail & recreation'] = new_reports['mobility_changes'].transform(lambda value: value[1])
new_reports['Grocery & pharmacy'] = new_reports['mobility_changes'].transform(lambda value: value[3])
new_reports['Parks'] = new_reports['mobility_changes'].transform(lambda value: value[5])
new_reports['Transit stations'] = new_reports['mobility_changes'].transform(lambda value: value[7])
new_reports['Workplaces'] = new_reports['mobility_changes'].transform(lambda value: value[9])
new_reports['Residential'] = new_reports['mobility_changes'].transform(lambda value: value[11])
# Drop column
new_reports.drop(columns=['mobility_changes'], inplace=True)
# Sort columns
new_reports = new_reports[['date', 'country', 'Retail & recreation', 'Grocery & pharmacy', 'Parks', 'Transit stations', 'Workplaces', 'Residential', 'file', 'url']]
# Setup date format
new_reports['date'] = [value.strftime('%d/%m/%Y') for value in pd.to_datetime(new_reports['date'], format='%Y-%m-%d')]
# Merge and Update
google_community_mobility_reports = pd.concat([google_community_mobility_reports, new_reports])
else:
# Do nothing
print('Google Community Mobility Reports - Colombia All Updated')
# Show dataframe
google_community_mobility_reports.tail()
# %% [markdown]
# ## Google Community Mobility Reports - Colombia
# > ***Output file***: google_community_mobility_reports.csv
# %%
# Save dataframe
google_community_mobility_reports.to_csv(os.path.join(OUTPUT_DIR, 'google_community_mobility_reports.csv'), index=False)
# %% [markdown]
# ---
# %%
covid19co_pipe.py | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
# Dependencies
import numpy as np
import pandas as pd
import requests
import unidecode
import datetime
import dateutil
import subprocess
import sys
import json
import tempfile
import os
# Install missing dependencies
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
# PDFMiner pdfminer.six
try:
from pdfminer.high_level import extract_text
except Exception:
install('pdfminer.six')
from pdfminer.high_level import extract_text
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
#for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# %% [markdown]
# ---
# %% [markdown]
# # Colombia Covid19 Pipeline
# Dataset obtained from the [Instituto Nacional de Salud](https://www.ins.gov.co/Noticias/Paginas/Coronavirus.aspx) daily Covid19 report for Colombia.
#
# You can get the official dataset here:
# [INS - Official Report](https://www.datos.gov.co/Salud-y-Protecci-n-Social/Casos-positivos-de-COVID-19-en-Colombia/gt2j-8ykr)
#
# The number of new cases is increasing day by day around the world.
# This dataset has information about the reported cases from 32 Colombian departments.
#
# You can also get the Google COVID-19 Community Mobility Reports dataset for Colombia.
#
# You can view and collaborate to the analysis here:
# [colombia_covid_19_analysis](https://www.kaggle.com/sebaxtian/colombia-covid-19-analysis) Kaggle Notebook Kernel.
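# %%
# Illustrative usage sketch (added note, not part of the original pipeline): once this script has
# finished, its CSV outputs can be loaded back with pandas for analysis. The file names are the
# pipeline outputs produced further below; the lines are left commented out because OUTPUT_DIR is
# only defined in the Data Sources section.
#
# cases = pd.read_csv(os.path.join(OUTPUT_DIR, 'covid19co.csv'))
# samples = pd.read_csv(os.path.join(OUTPUT_DIR, 'covid19co_samples_processed.csv'))
# mobility = pd.read_csv(os.path.join(OUTPUT_DIR, 'google_community_mobility_reports.csv'))
# print(cases.shape, samples.shape, mobility.shape)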
# %% [markdown]
# ---
# %% [markdown]
# ## Data Sources
# %%
# Input data files are available in the "../input/" directory.
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
INPUT_DIR = '../input'
# Output data files are available in the "../output/" directory.
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
OUTPUT_DIR = '../output'
# Official Daily Report Until Now
URL_OFFICIAL_DATASET = 'https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD'
# Official Daily Samples Processed
URL_SAMPLES_PROCESSED = 'https://www.datos.gov.co/api/views/8835-5baf/rows.csv?accessType=DOWNLOAD'
# %% [markdown]
# ---
# %% [markdown]
# ## Official Covid19 Colombia Daily Report
# %%
# Official Daily Report Until Now
with requests.get(URL_OFFICIAL_DATASET) as official_dataset:
with open(os.path.join(INPUT_DIR, 'covid19co_official.csv'), 'wb') as dataset_file:
dataset_file.write(official_dataset.content)
# %%
# Open Official Daily Report
covid19co = pd.read_csv(os.path.join(INPUT_DIR, 'covid19co_official.csv'))
# Total Daily Report
covid19co.shape
# %%
# Show dataframe
covid19co.tail()
# %%
# Show attributes
list(covid19co.columns.values)
# %%
# Update Name Columns
# Remove Accents and Uppercase
covid19co.columns = [unidecode.unidecode(value).upper() for value in covid19co.columns]
# Show dataframe
covid19co.head()
# %%
# Update text values to title case format
for attr in covid19co.columns:
if covid19co[attr].dtypes == 'object':
covid19co[attr] = covid19co[attr].transform(lambda value: str(value).title())
# Show dataframe
covid19co.head()
# %%
# Fill NaN Values
if covid19co.isna().sum().sum() > 0:
covid19co.fillna(value='-', inplace=True)
# Show dataframe
covid19co.head()
# %%
# Setup Date Format
def setup_date(value):
try:
value = value.split('T')[0].split('-')
if len(value) == 3:
value = value[2] + '/' + value[1] + '/' + value[0]
else:
value = '-'
except IndexError:
value = '-'
if len(value) != 10 and len(value) != 1:
value = '-'
return value
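# Illustrative check (added, not in the original notebook): setup_date converts the raw
# 'YYYY-MM-DDTHH:MM:SS' strings of the source CSV into DD/MM/YYYY and falls back to '-'
# for anything it cannot parse.
assert setup_date('2020-05-13T00:00:00.000') == '13/05/2020'
assert setup_date('nan') == '-'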
# Date Columns
date_columns = list(filter(lambda value: value.find('FECHA') != -1 or value.find('FIS') != -1, covid19co.columns))
# For each date column
for date_column in date_columns:
covid19co[date_column] = covid19co[date_column].transform(lambda value: setup_date(value))
# Show dataframe
covid19co.head()
# %%
# Add Day, Month, Year, Month Name and Day Name for each Date
# Spanish
nombre_mes = ['Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio', 'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']
nombre_dia = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes', 'Sábado', 'Domingo']
# Get day
def get_day(value):
if value not in '-':
return value.split('/')[0]
return value
# Get month
def get_month(value):
if value not in '-':
return value.split('/')[1]
return value
# Get year
def get_year(value):
if value not in '-':
return value.split('/')[2]
return value
# Get month name
def get_month_name(value):
if value not in '-':
return nombre_mes[int(value.split('/')[1]) - 1]
return value
# Get weekday
def get_weekday(value):
if value not in '-':
return nombre_dia[datetime.date(int(value.split('/')[2]), int(value.split('/')[1]), int(value.split('/')[0])).weekday()]
return value
# For each date column
for date_column in date_columns:
covid19co[date_column + ' DIA'] = covid19co[date_column].transform(lambda value: get_day(value))
covid19co[date_column + ' MES'] = covid19co[date_column].transform(lambda value: get_month(value))
covid19co[date_column + ' ANIO'] = covid19co[date_column].transform(lambda value: get_year(value))
covid19co[date_column + ' NOMBRE MES'] = covid19co[date_column].transform(lambda value: get_month_name(value))
covid19co[date_column + ' DIA SEMANA'] = covid19co[date_column].transform(lambda value: get_weekday(value))
# Show dataframe
covid19co.head()
# %% [markdown]
# ## Covid19 Colombia Dataset
# > ***Output file***: covid19co.csv
# %%
# Save dataframe
covid19co.to_csv(os.path.join(OUTPUT_DIR, 'covid19co.csv'), index=False)
# %% [markdown]
# ---
# %% [markdown]
# ## Official Covid19 Colombia Samples Processed
# %%
# Official Samples Processed Until Now
with requests.get(URL_SAMPLES_PROCESSED) as official_dataset:
with open(os.path.join(INPUT_DIR, 'covid19co_samples_processed_official.csv'), 'wb') as dataset_file:
dataset_file.write(official_dataset.content)
# %%
# Open Official Samples Processed
covid19co_samples_processed = pd.read_csv(os.path.join(INPUT_DIR, 'covid19co_samples_processed_official.csv'))
# Total Daily Report
covid19co_samples_processed.shape
# %%
# Show dataframe
covid19co_samples_processed.head()
# %%
# Update Name Columns
# Remove Accents and Uppercase
covid19co_samples_processed.columns = [unidecode.unidecode(value).upper() for value in covid19co_samples_processed.columns]
# Show dataframe
covid19co_samples_processed.head()
# %%
# Setup Date Format
covid19co_samples_processed['FECHA'] = covid19co_samples_processed['FECHA'].transform(lambda value: setup_date(value))
# Show dataframe
covid19co_samples_processed.head()
# %%
# Select Columns
covid19co_samples_processed = covid19co_samples_processed[['FECHA', 'ACUMULADAS']]
# Show dataframe
covid19co_samples_processed.tail()
# %% [markdown]
# ## Covid19 Colombia Samples Processed Dataset
# > ***Output file***: covid19co_samples_processed.csv
# %%
# Save dataframe
covid19co_samples_processed.to_csv(os.path.join(OUTPUT_DIR, 'covid19co_samples_processed.csv'), index=False)
# %% [markdown]
# ---
# %% [markdown]
# ## Google Community Mobility Reports - Colombia
# %%
# Open Official Google Community Mobility - Colombia
google_community_mobility_reports = pd.read_csv(os.path.join(OUTPUT_DIR, 'google_community_mobility_reports.csv'))
# Total Google Community Mobility - Colombia
google_community_mobility_reports.shape
# %%
# Show dataframe
google_community_mobility_reports.tail()
# %%
# Update Google Community Mobility Reports - Colombia
date_last_report = google_community_mobility_reports['date'].values[-1]
#print(date_last_report)
# 13/05/2020
date_last_report = date_last_report.split('/')
date_last_report = date_last_report[2] + '-' + date_last_report[1] + '-' + date_last_report[0]
#print(date_last_report)
# 2020-05-13
new_reports = pd.DataFrame(columns=['date', 'country', 'file', 'url'])
new_reports['date'] = [dti.strftime('%Y-%m-%d') for dti in pd.date_range(start=date_last_report, end=datetime.date.today().isoformat(), freq='D')]
new_reports['country'] = 'Colombia'
new_reports['file'] = [date + '_CO_Mobility_Report_en.pdf' for date in new_reports['date'].values]
# Get URL report
def get_report_url(file):
with requests.get('https://www.gstatic.com/covid19/mobility/' + file) as community_mobility_report:
if community_mobility_report.status_code == 200:
return community_mobility_report.url
else:
return np.nan
# Get URL report
new_reports['url'] = new_reports['file'].transform(lambda value: get_report_url(value))
# Drop any report without URL
new_reports.dropna(inplace=True)
# Reset index
new_reports.reset_index(inplace=True, drop=True)
# Only new reports
new_reports = new_reports.iloc[1:]
# Show dataframe
new_reports.head()
# %%
# Get/Add Mobility Changes
def get_mobility_changes(URL):
# Target changes
targets = ['Retail & recreation', 'Grocery & pharmacy', 'Parks', 'Transit stations', 'Workplaces', 'Residential']
# Mobility Changes
mobility_changes = []
# Get Mobility Report
with requests.get(URL) as mobility_report:
if mobility_report.status_code == 200:
temp = tempfile.NamedTemporaryFile()
temp.write(mobility_report.content)
with open(temp.name, 'rb') as file:
# By pages
pdf_text = []
page = 0
while page != -1:
text = extract_text(file, maxpages=1, page_numbers=[page])
if text:
pdf_text.append(text.split('\n'))
page += 1
else:
page = -1
# Page 1
page1 = pdf_text[0]
page1 = filter(lambda value: value != '', page1)
page1 = filter(lambda value: value in targets or value[-1] == '%', list(page1))
page1 = list(page1)[:6]
# Page 2
page2 = pdf_text[1]
page2 = filter(lambda value: value != '', page2)
page2 = filter(lambda value: value in targets or value[-1] == '%', list(page2))
page2 = list(page2)[:6]
# Merge
mobility_changes = page1 + page2
return mobility_changes
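# Note (added for clarity, not in the original notebook): the code below assumes that, after the
# filters above, each page yields an alternating sequence like
# ['Retail & recreation', '-58%', 'Grocery & pharmacy', '-37%', ...] (the percentages are examples),
# so the merged 12-element list has category names at even indices and percentage strings at odd
# indices. That is why the columns are filled from value[1], value[3], ..., value[11] below.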
# Check new report
if len(new_reports['date'].values) and new_reports['date'].values[-1] != date_last_report:
# New report
print('New Google Community Mobility Reports - Colombia')
# Add Mobility Changes
new_reports['mobility_changes'] = new_reports['url'].transform(lambda value: get_mobility_changes(value))
# By case
new_reports['Retail & recreation'] = new_reports['mobility_changes'].transform(lambda value: value[1])
new_reports['Grocery & pharmacy'] = new_reports['mobility_changes'].transform(lambda value: value[3])
new_reports['Parks'] = new_reports['mobility_changes'].transform(lambda value: value[5])
new_reports['Transit stations'] = new_reports['mobility_changes'].transform(lambda value: value[7])
new_reports['Workplaces'] = new_reports['mobility_changes'].transform(lambda value: value[9])
new_reports['Residential'] = new_reports['mobility_changes'].transform(lambda value: value[11])
# Drop column
new_reports.drop(columns=['mobility_changes'], inplace=True)
# Sort columns
new_reports = new_reports[['date', 'country', 'Retail & recreation', 'Grocery & pharmacy', 'Parks', 'Transit stations', 'Workplaces', 'Residential', 'file', 'url']]
# Setup date format
new_reports['date'] = [value.strftime('%d/%m/%Y') for value in pd.to_datetime(new_reports['date'], format='%Y-%m-%d')]
# Merge and Update
google_community_mobility_reports = pd.concat([google_community_mobility_reports, new_reports])
else:
# Do nothing
print('Google Community Mobility Reports - Colombia All Updated')
# Show dataframe
google_community_mobility_reports.tail()
# %% [markdown]
# ## Google Community Mobility Reports - Colombia
# > ***Output file***: google_community_mobility_reports.csv
# %%
# Save dataframe
google_community_mobility_reports.to_csv(os.path.join(OUTPUT_DIR, 'google_community_mobility_reports.csv'), index=False)
# %% [markdown]
# ---
# %%
covid19co_pipe.py | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
# Dependencies
import numpy as np
import pandas as pd
import requests
import unidecode
import datetime
import dateutil
import subprocess
import sys
import json
import tempfile
import os
# Install missing dependencies
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
# PDFMiner pdfminer.six
try:
from pdfminer.high_level import extract_text
except Exception:
install('pdfminer.six')
from pdfminer.high_level import extract_text
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
#for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# %% [markdown]
# ---
# %% [markdown]
# # Colombia Covid19 Pipeline
# Dataset obtained from the [Instituto Nacional de Salud](https://www.ins.gov.co/Noticias/Paginas/Coronavirus.aspx) daily Covid19 report for Colombia.
#
# You can get the official dataset here:
# [INS - Official Report](https://www.datos.gov.co/Salud-y-Protecci-n-Social/Casos-positivos-de-COVID-19-en-Colombia/gt2j-8ykr)
#
# The number of new cases is increasing day by day around the world.
# This dataset has information about the reported cases from 32 Colombian departments.
#
# You can also get the Google COVID-19 Community Mobility Reports dataset for Colombia.
#
# You can view and collaborate to the analysis here:
# [colombia_covid_19_analysis](https://www.kaggle.com/sebaxtian/colombia-covid-19-analysis) Kaggle Notebook Kernel.
# %% [markdown]
# ---
# %% [markdown]
# ## Data Sources
# %%
# Input data files are available in the "../input/" directory.
INPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
INPUT_DIR = '../input'
# Output data files are available in the "../output/" directory.
OUTPUT_DIR = './'
if os.path.split(os.path.abspath('.'))[-1] == 'src':
OUTPUT_DIR = '../output'
# Official Daily Report Until Now
URL_OFFICIAL_DATASET = 'https://www.datos.gov.co/api/views/gt2j-8ykr/rows.csv?accessType=DOWNLOAD'
# Official Daily Samples Processed
URL_SAMPLES_PROCESSED = 'https://www.datos.gov.co/api/views/8835-5baf/rows.csv?accessType=DOWNLOAD'
# %% [markdown]
# ---
# %% [markdown]
# ## Official Covid19 Colombia Daily Report
# %%
# Official Daily Report Until Now
with requests.get(URL_OFFICIAL_DATASET) as official_dataset:
with open(os.path.join(INPUT_DIR, 'covid19co_official.csv'), 'wb') as dataset_file:
dataset_file.write(official_dataset.content)
# %%
# Open Official Daily Report
covid19co = pd.read_csv(os.path.join(INPUT_DIR, 'covid19co_official.csv'))
# Total Daily Report
covid19co.shape
# %%
# Show dataframe
covid19co.tail()
# %%
# Show attributes
list(covid19co.columns.values)
# %%
# Update Name Columns
# Remove Accents and Uppercase
covid19co.columns = [unidecode.unidecode(value).upper() for value in covid19co.columns]
# Show dataframe
covid19co.head()
# %%
# Update text values to title case format
for attr in covid19co.columns:
if covid19co[attr].dtypes == 'object':
covid19co[attr] = covid19co[attr].transform(lambda value: str(value).title())
# Show dataframe
covid19co.head()
# %%
# Fill NaN Values
if covid19co.isna().sum().sum() > 0:
covid19co.fillna(value='-', inplace=True)
# Show dataframe
covid19co.head()
# %%
# Setup Date Format
def setup_date(value):
try:
value = value.split('T')[0].split('-')
if len(value) == 3:
value = value[2] + '/' + value[1] + '/' + value[0]
else:
value = '-'
except IndexError:
value = '-'
if len(value) != 10 and len(value) != 1:
value = '-'
return value
# Date Columns
date_columns = list(filter(lambda value: value.find('FECHA') != -1 or value.find('FIS') != -1, covid19co.columns))
# For each date column
for date_column in date_columns:
covid19co[date_column] = covid19co[date_column].transform(lambda value: setup_date(value))
# Show dataframe
covid19co.head()
# %%
# Add Day, Month, Year, Month Name and Day Name for each Date
# Spanish
nombre_mes = ['Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio', 'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']
nombre_dia = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes', 'Sábado', 'Domingo']
# Get day
def get_day(value):
if value not in '-':
return value.split('/')[0]
return value
# Get month
def get_month(value):
if value not in '-':
return value.split('/')[1]
return value
# Get year
def get_year(value):
if value not in '-':
return value.split('/')[2]
return value
# Get month name
def get_month_name(value):
if value not in '-':
return nombre_mes[int(value.split('/')[1]) - 1]
return value
# Get weekday
def get_weekday(value):
if value not in '-':
return nombre_dia[datetime.date(int(value.split('/')[2]), int(value.split('/')[1]), int(value.split('/')[0])).weekday()]
return value
# For each date column
for date_column in date_columns:
covid19co[date_column + ' DIA'] = covid19co[date_column].transform(lambda value: get_day(value))
covid19co[date_column + ' MES'] = covid19co[date_column].transform(lambda value: get_month(value))
covid19co[date_column + ' ANIO'] = covid19co[date_column].transform(lambda value: get_year(value))
covid19co[date_column + ' NOMBRE MES'] = covid19co[date_column].transform(lambda value: get_month_name(value))
covid19co[date_column + ' DIA SEMANA'] = covid19co[date_column].transform(lambda value: get_weekday(value))
# Show dataframe
covid19co.head()
# %% [markdown]
# ## Covid19 Colombia Dataset
# > ***Output file***: covid19co.csv
# %%
# Save dataframe
covid19co.to_csv(os.path.join(OUTPUT_DIR, 'covid19co.csv'), index=False)
# %% [markdown]
# ---
# %% [markdown]
# ## Official Covid19 Colombia Samples Processed
# %%
# Official Samples Processed Until Now
with requests.get(URL_SAMPLES_PROCESSED) as official_dataset:
with open(os.path.join(INPUT_DIR, 'covid19co_samples_processed_official.csv'), 'wb') as dataset_file:
dataset_file.write(official_dataset.content)
# %%
# Open Official Samples Processed
covid19co_samples_processed = pd.read_csv(os.path.join(INPUT_DIR, 'covid19co_samples_processed_official.csv'))
# Total Daily Report
covid19co_samples_processed.shape
# %%
# Show dataframe
covid19co_samples_processed.head()
# %%
# Update Name Columns
# Remove Accents and Uppercase
covid19co_samples_processed.columns = [unidecode.unidecode(value).upper() for value in covid19co_samples_processed.columns]
# Show dataframe
covid19co_samples_processed.head()
# %%
# Setup Date Format
covid19co_samples_processed['FECHA'] = covid19co_samples_processed['FECHA'].transform(lambda value: setup_date(value))
# Show dataframe
covid19co_samples_processed.head()
# %%
# Select Columns
covid19co_samples_processed = covid19co_samples_processed[['FECHA', 'ACUMULADAS']]
# Show dataframe
covid19co_samples_processed.tail()
# %% [markdown]
# ## Covid19 Colombia Samples Processed Dataset
# > ***Output file***: covid19co_samples_processed.csv
# %%
# Save dataframe
covid19co_samples_processed.to_csv(os.path.join(OUTPUT_DIR, 'covid19co_samples_processed.csv'), index=False)
# %% [markdown]
# ---
# %% [markdown]
# ## Google Community Mobility Reports - Colombia
# %%
# Open Official Google Community Mobility - Colombia
google_community_mobility_reports = pd.read_csv(os.path.join(OUTPUT_DIR, 'google_community_mobility_reports.csv'))
# Total Google Community Mobility - Colombia
google_community_mobility_reports.shape
# %%
# Show dataframe
google_community_mobility_reports.tail()
# %%
# Update Google Community Mobility Reports - Colombia
date_last_report = google_community_mobility_reports['date'].values[-1]
#print(date_last_report)
# 13/05/2020
date_last_report = date_last_report.split('/')
date_last_report = date_last_report[2] + '-' + date_last_report[1] + '-' + date_last_report[0]
#print(date_last_report)
# 2020-05-13
new_reports = pd.DataFrame(columns=['date', 'country', 'file', 'url'])
new_reports['date'] = [dti.strftime('%Y-%m-%d') for dti in pd.date_range(start=date_last_report, end=datetime.date.today().isoformat(), freq='D')]
new_reports['country'] = 'Colombia'
new_reports['file'] = [date + '_CO_Mobility_Report_en.pdf' for date in new_reports['date'].values]
# Get URL report
def get_report_url(file):
with requests.get('https://www.gstatic.com/covid19/mobility/' + file) as community_mobility_report:
if community_mobility_report.status_code == 200:
return community_mobility_report.url
else:
return np.nan
# Get URL report
new_reports['url'] = new_reports['file'].transform(lambda value: get_report_url(value))
# Drop any report without URL
new_reports.dropna(inplace=True)
# Reset index
new_reports.reset_index(inplace=True, drop=True)
# Only new reports
new_reports = new_reports.iloc[1:]
# Show dataframe
new_reports.head()
# %%
# Get/Add Mobility Changes
def get_mobility_changes(URL):
# Target changes
targets = ['Retail & recreation', 'Grocery & pharmacy', 'Parks', 'Transit stations', 'Workplaces', 'Residential']
# Mobility Changes
mobility_changes = []
# Get Mobility Report
with requests.get(URL) as mobility_report:
if mobility_report.status_code == 200:
temp = tempfile.NamedTemporaryFile()
temp.write(mobility_report.content)
with open(temp.name, 'rb') as file:
# By pages
pdf_text = []
page = 0
while page != -1:
text = extract_text(file, maxpages=1, page_numbers=[page])
if text:
pdf_text.append(text.split('\n'))
page += 1
else:
page = -1
# Page 1
page1 = pdf_text[0]
page1 = filter(lambda value: value != '', page1)
page1 = filter(lambda value: value in targets or value[-1] == '%', list(page1))
page1 = list(page1)[:6]
# Page 2
page2 = pdf_text[1]
page2 = filter(lambda value: value != '', page2)
page2 = filter(lambda value: value in targets or value[-1] == '%', list(page2))
page2 = list(page2)[:6]
# Merge
mobility_changes = page1 + page2
return mobility_changes
# Check new report
if len(new_reports['date'].values) and new_reports['date'].values[-1] != date_last_report:
# New report
print('New Google Community Mobility Reports - Colombia')
# Add Mobility Changes
new_reports['mobility_changes'] = new_reports['url'].transform(lambda value: get_mobility_changes(value))
# By case
new_reports['Retail & recreation'] = new_reports['mobility_changes'].transform(lambda value: value[1])
new_reports['Grocery & pharmacy'] = new_reports['mobility_changes'].transform(lambda value: value[3])
new_reports['Parks'] = new_reports['mobility_changes'].transform(lambda value: value[5])
new_reports['Transit stations'] = new_reports['mobility_changes'].transform(lambda value: value[7])
new_reports['Workplaces'] = new_reports['mobility_changes'].transform(lambda value: value[9])
new_reports['Residential'] = new_reports['mobility_changes'].transform(lambda value: value[11])
# Drop column
new_reports.drop(columns=['mobility_changes'], inplace=True)
# Sort columns
new_reports = new_reports[['date', 'country', 'Retail & recreation', 'Grocery & pharmacy', 'Parks', 'Transit stations', 'Workplaces', 'Residential', 'file', 'url']]
# Setup date format
new_reports['date'] = [value.strftime('%d/%m/%Y') for value in pd.to_datetime(new_reports['date'], format='%Y-%m-%d')]
# Merge and Update
google_community_mobility_reports = pd.concat([google_community_mobility_reports, new_reports])
else:
# Do nothing
print('Google Community Mobility Reports - Colombia All Updated')
# Show dataframe
google_community_mobility_reports.tail()
# %% [markdown]
# ## Google Community Mobility Reports - Colombia
# > ***Output file***: google_community_mobility_reports.csv
# %%
# Save dataframe
google_community_mobility_reports.to_csv(os.path.join(OUTPUT_DIR, 'google_community_mobility_reports.csv'), index=False)
# %% [markdown]
# ---
# %%
peermanager.go | /*
* @file
* @copyright defined in aergo/LICENSE.txt
*/
package p2p
import (
"context"
"fmt"
"github.com/aergoio/aergo/p2p/metric"
"github.com/libp2p/go-libp2p-peerstore/pstoremem"
"net"
"strconv"
"strings"
"sync"
"time"
"github.com/libp2p/go-libp2p-host"
inet "github.com/libp2p/go-libp2p-net"
"github.com/aergoio/aergo-lib/log"
"github.com/aergoio/aergo/message"
"github.com/aergoio/aergo/types"
cfg "github.com/aergoio/aergo/config"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-crypto"
"github.com/libp2p/go-libp2p-peer"
pstore "github.com/libp2p/go-libp2p-peerstore"
ma "github.com/multiformats/go-multiaddr"
)
// TODO this value better related to max peer and block produce interval, not constant
const (
DefaultGlobalBlockCacheSize = 300
DefaultPeerBlockCacheSize = 100
DefaultGlobalTxCacheSize = 50000
DefaultPeerTxCacheSize = 2000
// DefaultPeerTxQueueSize is maximum size of hashes in a single tx notice message
DefaultPeerTxQueueSize = 40000
defaultTTL = time.Second * 4
defaultHandshakeTTL = time.Second * 20
cachePlaceHolder = true
)
// PeerManager is an internal service that provides peer management
type PeerManager interface {
host.Host
Start() error
Stop() error
PrivateKey() crypto.PrivKey
PublicKey() crypto.PubKey
SelfMeta() PeerMeta
SelfNodeID() peer.ID
AddNewPeer(peer PeerMeta)
RemovePeer(peerID peer.ID)
// NotifyPeerHandshake is called after the remote peer has completed the handshake and is ready to receive or send
NotifyPeerHandshake(peerID peer.ID)
NotifyPeerAddressReceived([]PeerMeta)
// GetPeer returns the registered (handshaked) remote peer object
GetPeer(ID peer.ID) (RemotePeer, bool)
GetPeers() []RemotePeer
GetPeerAddresses() ([]*types.PeerAddress, []*types.NewBlockNotice, []types.PeerState)
}
/**
* peerManager connects to and listens to other nodes.
* It implements the Component interface
*/
type peerManager struct {
host.Host
privateKey crypto.PrivKey
publicKey crypto.PubKey
bindAddress net.IP
bindPort int
selfMeta PeerMeta
handlerFactory HandlerFactory
actorServ ActorService
signer msgSigner
mf moFactory
rm ReconnectManager
mm metric.MetricsManager
designatedPeers map[peer.ID]PeerMeta
remotePeers map[peer.ID]*remotePeerImpl
peerPool map[peer.ID]PeerMeta
conf *cfg.P2PConfig
logger *log.Logger
mutex *sync.Mutex
peerCache []RemotePeer
addPeerChannel chan PeerMeta
removePeerChannel chan peer.ID
fillPoolChannel chan []PeerMeta
finishChannel chan struct{}
eventListeners []PeerEventListener
}
var _ PeerManager = (*peerManager)(nil)
// PeerEventListener listen peer manage event
type PeerEventListener interface {
// OnAddPeer is called just after the peer is added.
OnAddPeer(peerID peer.ID)
// OnRemovePeer is called just before the peer is removed
OnRemovePeer(peerID peer.ID)
}
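// Illustrative sketch (added comment, not part of the original source): a minimal PeerEventListener
// that only logs membership changes; a component holding the concrete *peerManager could register
// it with RegisterEventListener. The type name below is hypothetical.
//
//	type peerChangeLogger struct{ logger *log.Logger }
//
//	func (l *peerChangeLogger) OnAddPeer(peerID peer.ID) {
//		l.logger.Info().Str(LogPeerID, peerID.Pretty()).Msg("peer added")
//	}
//
//	func (l *peerChangeLogger) OnRemovePeer(peerID peer.ID) {
//		l.logger.Info().Str(LogPeerID, peerID.Pretty()).Msg("peer removed")
//	}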
func init() {
}
// NewPeerManager creates a peer manager object.
func NewPeerManager(handlerFactory HandlerFactory, iServ ActorService, cfg *cfg.Config, signer msgSigner, rm ReconnectManager, mm metric.MetricsManager, logger *log.Logger, mf moFactory) PeerManager {
p2pConf := cfg.P2P
//logger.SetLevel("debug")
pm := &peerManager{
handlerFactory: handlerFactory,
actorServ: iServ,
conf: p2pConf,
signer: signer,
mf: mf,
rm: rm,
mm: mm,
logger: logger,
mutex: &sync.Mutex{},
designatedPeers: make(map[peer.ID]PeerMeta, len(cfg.P2P.NPAddPeers)),
remotePeers: make(map[peer.ID]*remotePeerImpl, p2pConf.NPMaxPeers),
peerPool: make(map[peer.ID]PeerMeta, p2pConf.NPPeerPool),
peerCache: make([]RemotePeer, 0, p2pConf.NPMaxPeers),
addPeerChannel: make(chan PeerMeta, 2),
removePeerChannel: make(chan peer.ID),
fillPoolChannel: make(chan []PeerMeta),
eventListeners: make([]PeerEventListener, 0, 4),
finishChannel: make(chan struct{}),
}
// additional initializations
pm.init()
return pm
}
func (pm *peerManager) PrivateKey() crypto.PrivKey {
return pm.privateKey
}
func (pm *peerManager) PublicKey() crypto.PubKey {
return pm.publicKey
}
func (pm *peerManager) SelfMeta() PeerMeta {
return pm.selfMeta
}
func (pm *peerManager) SelfNodeID() peer.ID {
return pm.selfMeta.ID
}
func (pm *peerManager) RegisterEventListener(listener PeerEventListener) {
pm.mutex.Lock()
defer pm.mutex.Unlock()
pm.eventListeners = append(pm.eventListeners, listener)
}
func (pm *peerManager) init() {
// check Key and address
priv := NodePrivKey()
pub := NodePubKey()
pid := NodeID()
pm.privateKey = priv
pm.publicKey = pub
// init address and port
// if not set, it looks up the machine's IP addresses and chooses a suitable one (not very smart), with default port 7845
peerAddr, peerPort := pm.getProtocolAddrs()
pm.selfMeta.IPAddress = peerAddr.String()
pm.selfMeta.Port = uint32(peerPort)
pm.selfMeta.ID = pid
// if bindAddress or bindPort is not set, it will be same as NetProtocolAddr or NetProtocolPort
if len(pm.conf.NPBindAddr) > 0 {
bindAddr := net.ParseIP(pm.conf.NPBindAddr)
if bindAddr == nil {
panic("invalid NPBindAddr " + pm.conf.NPBindAddr)
}
pm.bindAddress = bindAddr
} else {
pm.bindAddress = peerAddr
}
if pm.conf.NPBindPort > 0 {
pm.bindPort = pm.conf.NPBindPort
} else {
pm.bindPort = peerPort
}
// set meta info
// TODO more survey libp2p NAT configuration
// set designated peers
pm.addDesignatedPeers()
}
func (pm *peerManager) getProtocolAddrs() (protocolAddr net.IP, protocolPort int) {
if len(pm.conf.NetProtocolAddr) != 0 {
protocolAddr = net.ParseIP(pm.conf.NetProtocolAddr)
if protocolAddr == nil {
panic("invalid NetProtocolAddr " + pm.conf.NetProtocolAddr)
}
if protocolAddr.IsUnspecified() {
panic("NetProtocolAddr should be a specified IP address, not 0.0.0.0")
}
} else {
extIP, err := externalIP()
if err != nil {
panic("error while finding IP address: " + err.Error())
}
protocolAddr = extIP
}
protocolPort = pm.conf.NetProtocolPort
if protocolPort <= 0 {
panic("invalid NetProtocolPort " + strconv.Itoa(pm.conf.NetProtocolPort))
}
return
}
func (pm *peerManager) run() {
go pm.runManagePeers()
// need to start listening after chainservice is ready to init
// FIXME: adhoc code
go func() {
time.Sleep(time.Second * 3)
pm.startListener()
// addition should start after all modules are started
go func() {
time.Sleep(time.Second * 2)
for _, meta := range pm.designatedPeers {
pm.addPeerChannel <- meta
}
}()
}()
}
func (pm *peerManager) addDesignatedPeers() {
// add remote node from config
for _, target := range pm.conf.NPAddPeers {
// go-multiaddr implementation does not support recent p2p protocol yet, but deprecated name ipfs.
// This adhoc will be removed when go-multiaddr is patched.
target = strings.Replace(target, "/p2p/", "/ipfs/", 1)
targetAddr, err := ma.NewMultiaddr(target)
if err != nil {
pm.logger.Warn().Err(err).Str("target", target).Msg("invalid NPAddPeer address")
continue
}
splitted := strings.Split(targetAddr.String(), "/")
if len(splitted) != 7 {
pm.logger.Warn().Str("target", target).Msg("invalid NPAddPeer address")
continue
}
peerAddrString := splitted[2]
peerPortString := splitted[4]
peerPort, err := strconv.Atoi(peerPortString)
if err != nil {
pm.logger.Warn().Str("port", peerPortString).Msg("invalid Peer port")
continue
}
peerIDString := splitted[6]
peerID, err := peer.IDB58Decode(peerIDString)
if err != nil {
pm.logger.Warn().Str(LogPeerID, peerIDString).Msg("invalid PeerID")
continue
}
peerMeta := PeerMeta{
ID: peerID,
Port: uint32(peerPort),
IPAddress: peerAddrString,
Designated: true,
Outbound: true,
}
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Str("addr", peerAddrString).Int("port", peerPort).Msg("Adding Designated peer")
pm.designatedPeers[peerID] = peerMeta
}
}
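// Note (added for clarity): NPAddPeers entries are expected to be full multiaddrs such as
// "/ip4/192.168.1.10/tcp/7845/p2p/<base58 peer id>" (address and id here are placeholders).
// After the /p2p/ -> /ipfs/ rewrite above, splitting on "/" yields exactly 7 fields:
// ["", "ip4", "192.168.1.10", "tcp", "7845", "ipfs", "<base58 peer id>"], which is what the
// length check and the indices 2, 4 and 6 in addDesignatedPeers rely on.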
func (pm *peerManager) runManagePeers() {
addrDuration := time.Minute * 3
addrTicker := time.NewTicker(addrDuration)
// reconnectRunners := make(map[peer.ID]*reconnectRunner)
MANLOOP:
for {
select {
case meta := <-pm.addPeerChannel:
if pm.addOutboundPeer(meta) {
if _, found := pm.designatedPeers[meta.ID]; found {
pm.rm.CancelJob(meta.ID)
}
}
case id := <-pm.removePeerChannel:
if pm.removePeer(id) {
if meta, found := pm.designatedPeers[id]; found {
pm.rm.AddJob(meta)
}
}
case <-addrTicker.C:
pm.checkAndCollectPeerListFromAll()
pm.logPeerMetrics()
case peerMetas := <-pm.fillPoolChannel:
pm.tryFillPool(&peerMetas)
case <-pm.finishChannel:
addrTicker.Stop()
pm.rm.Stop()
// TODO need to keep loop till all remote peer objects are removed, otherwise panic or channel deadlock can come.
break MANLOOP
}
}
// cleanup peers
for peerID := range pm.remotePeers {
pm.removePeer(peerID)
}
}
func (pm *peerManager) logPeerMetrics() {
if pm.logger.IsDebugEnabled() {
pm.logger.Debug().Msg(pm.mm.Summary())
}
}
// addOutboundPeer tries to connect and handshake to a remote peer. It can be called after the peermanager is initialized.
// It returns true if the peer is added or already exists, or false if adding the peer failed.
func (pm *peerManager) addOutboundPeer(meta PeerMeta) bool {
addrString := fmt.Sprintf("/ip4/%s/tcp/%d", meta.IPAddress, meta.Port)
var peerAddr, err = ma.NewMultiaddr(addrString)
if err != nil {
pm.logger.Warn().Err(err).Str("addr", addrString).Msg("invalid NPAddPeer address")
return false
}
var peerID = meta.ID
pm.mutex.Lock()
inboundPeer, ok := pm.remotePeers[peerID]
if ok {
// peer is already exist (and maybe inbound peer)
pm.logger.Info().Str(LogPeerID, inboundPeer.meta.ID.Pretty()).Msg("Peer is already managed by peermanager")
if meta.Designated {
// If remote peer was connected first. designated flag is not set yet.
inboundPeer.meta.Designated = true
}
pm.mutex.Unlock()
return true
}
pm.mutex.Unlock()
pm.Peerstore().AddAddr(peerID, peerAddr, meta.TTL())
ctx := context.Background()
s, err := pm.NewStream(ctx, meta.ID, aergoP2PSub)
if err != nil {
pm.logger.Info().Err(err).Str("addr", addrString).Str(LogPeerID, meta.ID.Pretty()).Str(LogProtoID, string(aergoP2PSub)).Msg("Error while get stream")
return false
}
rd := metric.NewReader(s)
wt := metric.NewWriter(s)
h := newHandshaker(pm, pm.actorServ, pm.logger, peerID)
rw, remoteStatus, err := h.handshakeOutboundPeerTimeout(rd, wt, defaultHandshakeTTL)
if err != nil {
pm.logger.Debug().Err(err).Str(LogPeerID, meta.ID.Pretty()).Msg("Failed to handshake")
//pm.sendGoAway(rw, "Failed to handshake")
s.Close()
return false
}
pm.mutex.Lock()
inboundPeer, ok = pm.remotePeers[peerID]
if ok {
if ComparePeerID(pm.selfMeta.ID, meta.ID) <= 0 {
pm.logger.Info().Str(LogPeerID, inboundPeer.meta.ID.Pretty()).Msg("Inbound connection was already handshaked while handshaking outbound connection, and remote peer is higher priority so closing this outbound connection.")
pm.mutex.Unlock()
pm.sendGoAway(rw, "Already handshaked")
s.Close()
return true
} else {
pm.logger.Info().Str(LogPeerID, inboundPeer.meta.ID.Pretty()).Msg("Inbound connection was already handshaked while handshaking outbound connection, but local peer is higher priority so closing that inbound connection")
// disconnect lower valued connection
pm.deletePeer(meta.ID)
inboundPeer.stop()
}
}
// update peer info with the info sent by the remote peer
meta = FromPeerAddress(remoteStatus.Sender)
outboundPeer := newRemotePeer(meta, pm, pm.actorServ, pm.logger, pm.mf, pm.signer, rw)
// insert Handlers
pm.handlerFactory.insertHandlers(outboundPeer)
go outboundPeer.runPeer()
pm.insertPeer(peerID, outboundPeer)
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Str("addr", net.ParseIP(meta.IPAddress).String()+":"+strconv.Itoa(int(meta.Port))).Msg("Outbound peer is added to peerService")
outboundPeer.metric = pm.mm.Add(peerID, rd, wt)
pm.mutex.Unlock()
addrs := pm.Peerstore().Addrs(peerID)
addrStrs := make([]string, len(addrs))
for i, addr := range addrs {
addrStrs[i] = addr.String()
}
pm.logger.Debug().Strs("addrs", addrStrs).Str(LogPeerID, outboundPeer.meta.ID.Pretty()).Msg("addresses of peer")
// peer is ready
h.doInitialSync()
// notice to p2pmanager that handshaking is finished
pm.NotifyPeerHandshake(peerID)
return true
}
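// Note (added for clarity): when two peers dial each other simultaneously, this function and
// tryAddInboundPeer below resolve the collision with the same rule: the connection dialed by the
// peer whose ID compares higher under ComparePeerID is kept and the redundant one is closed, so
// both nodes end up keeping the same single connection.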
func (pm *peerManager) sendGoAway(rw MsgReadWriter, msg string) {
goMsg := &types.GoAwayNotice{Message: msg}
// TODO code smell. non safe casting.
mo := pm.mf.newMsgRequestOrder(false, GoAway, goMsg).(*pbRequestOrder)
container := mo.message
rw.WriteMsg(container)
}
func (pm *peerManager) checkInPeerstore(peerID peer.ID) bool {
found := false
for _, existingPeerID := range pm.Peerstore().Peers() {
if existingPeerID == peerID {
found = true
break
}
}
return found
}
func (pm *peerManager) AddNewPeer(peer PeerMeta) {
pm.addPeerChannel <- peer
}
func (pm *peerManager) RemovePeer(peerID peer.ID) {
pm.removePeerChannel <- peerID
}
func (pm *peerManager) NotifyPeerHandshake(peerID peer.ID) {
pm.checkAndCollectPeerList(peerID)
}
func (pm *peerManager) NotifyPeerAddressReceived(metas []PeerMeta) {
pm.fillPoolChannel <- metas
}
// removePeer removes and disconnects a managed remote peer connection.
// It returns true if the peer exists and is managed by the peermanager.
func (pm *peerManager) removePeer(peerID peer.ID) bool {
pm.mutex.Lock()
target, ok := pm.remotePeers[peerID]
if !ok {
pm.mutex.Unlock()
return false
}
pm.deletePeer(peerID)
// No internal module access this peer anymore, but remote message can be received.
target.stop()
pm.mutex.Unlock()
// also disconnect connection
for _, existingPeerID := range pm.Peerstore().Peers() {
if existingPeerID == peerID {
for _, listener := range pm.eventListeners {
listener.OnRemovePeer(peerID)
}
pm.Network().ClosePeer(peerID)
return true
}
}
return true
}
func (pm *peerManager) Peerstore() pstore.Peerstore {
return pm.Host.Peerstore()
}
func (pm *peerManager) startListener() {
var err error
listens := make([]ma.Multiaddr, 0, 2)
// FIXME: should also support ip6 later
listen, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", pm.bindAddress, pm.bindPort))
if err != nil {
panic("Can't estabilish listening address: " + err.Error())
}
listens = append(listens, listen)
peerStore := pstore.NewPeerstore(pstoremem.NewKeyBook(), pstoremem.NewAddrBook(), pstoremem.NewPeerMetadata())
newHost, err := libp2p.New(context.Background(), libp2p.Identity(pm.privateKey), libp2p.Peerstore(peerStore), libp2p.ListenAddrs(listens...))
if err != nil {
pm.logger.Fatal().Err(err).Str("addr", listen.String()).Msg("Couldn't listen from")
panic(err.Error())
}
pm.logger.Info().Str("pid", pm.SelfNodeID().Pretty()).Str("addr[0]", listens[0].String()).
Msg("Set self node's pid, and listening for connections")
pm.Host = newHost
pm.SetStreamHandler(aergoP2PSub, pm.onHandshake)
}
func (pm *peerManager) onHandshake(s inet.Stream) {
peerID := s.Conn().RemotePeer()
h := newHandshaker(pm, pm.actorServ, pm.logger, peerID)
rd := metric.NewReader(s)
wt := metric.NewWriter(s)
rw, statusMsg, err := h.handshakeInboundPeer(rd, wt)
if err != nil {
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Err(err).Msg("fail to handshake")
s.Close()
return
}
// TODO: check status
meta := FromPeerAddress(statusMsg.Sender)
// try Add peer
if inboundPeer, success := pm.tryAddInboundPeer(meta, rw); !success {
// failed to add
pm.sendGoAway(rw, "Concurrent handshake")
s.Close()
return
} else {
inboundPeer.metric = pm.mm.Add(peerID, rd, wt)
}
h.doInitialSync()
// notice to p2pmanager that handshaking is finished
pm.NotifyPeerHandshake(peerID)
}
func (pm *peerManager) tryAddInboundPeer(meta PeerMeta, rw MsgReadWriter) (*remotePeerImpl, bool) {
pm.mutex.Lock()
defer pm.mutex.Unlock()
peerID := meta.ID
outboundPeer, found := pm.remotePeers[peerID]
if found {
if ComparePeerID(pm.selfMeta.ID, meta.ID) <= 0 {
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Msg("Outbound connection was already handshaked while handshaking inbound connection, and remote peer is higher priority so closing that outbound connection.")
pm.sendGoAway(rw, "Already handshaked")
pm.deletePeer(meta.ID)
outboundPeer.stop()
} else {
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Msg("Outbound connection was already handshaked while handshaking inbound connection, but local peer is higher priority and closing this inbound connection.")
// disconnect lower valued connection
return nil, false
}
}
inboundPeer := newRemotePeer(meta, pm, pm.actorServ, pm.logger, pm.mf, pm.signer, rw)
pm.handlerFactory.insertHandlers(inboundPeer)
go inboundPeer.runPeer()
pm.insertPeer(peerID, inboundPeer)
peerAddr := meta.ToPeerAddress()
addrs := pm.Peerstore().Addrs(peerID)
addrStrs := make([]string, len(addrs))
for i, addr := range addrs {
addrStrs[i] = addr.String()
}
pm.logger.Debug().Strs("addrs", addrStrs).Str(LogPeerID, inboundPeer.meta.ID.Pretty()).Msg("addresses of peer")
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Str("addr", getIP(&peerAddr).String()+":"+strconv.Itoa(int(peerAddr.Port))).Msg("Inbound peer is added to peerService")
return inboundPeer, true
}
func (pm *peerManager) Start() error {
pm.run()
//pm.conf.NPAddPeers
return nil
}
func (pm *peerManager) Stop() error {
// TODO stop service
// close(pm.addPeerChannel)
// close(pm.removePeerChannel)
pm.finishChannel <- struct{}{}
return nil
}
func (pm *peerManager) GetName() string {
return "p2p service"
}
func (pm *peerManager) checkAndCollectPeerListFromAll() {
if pm.hasEnoughPeers() {
return
}
for _, remotePeer := range pm.remotePeers {
pm.actorServ.SendRequest(message.P2PSvc, &message.GetAddressesMsg{ToWhom: remotePeer.meta.ID, Size: 20, Offset: 0})
}
}
func (pm *peerManager) checkAndCollectPeerList(ID peer.ID) {
if pm.hasEnoughPeers() {
return
}
peer, ok := pm.GetPeer(ID)
if !ok {
//pm.logger.Warnf("invalid peer id %s", ID.Pretty())
pm.logger.Warn().Str(LogPeerID, ID.Pretty()).Msg("invalid peer id")
return
}
pm.actorServ.SendRequest(message.P2PSvc, &message.GetAddressesMsg{ToWhom: peer.ID(), Size: 20, Offset: 0})
}
func (pm *peerManager) hasEnoughPeers() bool {
return len(pm.peerPool) >= pm.conf.NPPeerPool
}
// tryConnectPeers should be called in runManagePeers() only
func (pm *peerManager) tryFillPool(metas *[]PeerMeta) {
added := make([]PeerMeta, 0, len(*metas))
invalid := make([]string, 0)
for _, meta := range *metas {
if string(meta.ID) == "" {
invalid = append(invalid, meta.String())
continue
}
_, found := pm.peerPool[meta.ID]
if !found {
// change some properties
meta.Outbound = true
meta.Designated = false
pm.peerPool[meta.ID] = meta
added = append(added, meta)
}
}
if len(invalid) > 0 {
pm.logger.Warn().Strs("metas", invalid).Msg("invalid meta list was come")
}
pm.logger.Debug().Int("added_cnt", len(added)).Msg("Filled unknown peer addresses to peerpool")
pm.tryConnectPeers()
}
// tryConnectPeers should be called in runManagePeers() only
func (pm *peerManager) tryConnectPeers() {
remained := pm.conf.NPMaxPeers - len(pm.remotePeers)
for ID, meta := range pm.peerPool {
if _, found := pm.GetPeer(ID); found {
delete(pm.peerPool, ID)
continue
}
if meta.IPAddress == "" || meta.Port == 0 {
pm.logger.Warn().Str(LogPeerID, meta.ID.Pretty()).Str("addr", meta.IPAddress).
Uint32("port", meta.Port).Msg("Invalid peer meta informations")
continue
}
// in same go rountine.
pm.addOutboundPeer(meta)
remained--
if remained <= 0 {
break
}
}
}
func (pm *peerManager) GetPeer(ID peer.ID) (RemotePeer, bool) {
pm.mutex.Lock()
defer pm.mutex.Unlock()
// vs code's lint does not allow direct return of map operation
ptr, ok := pm.remotePeers[ID]
if !ok {
return nil, false
}
return ptr, ok
}
func (pm *peerManager) GetPeers() []RemotePeer {
pm.mutex.Lock()
defer pm.mutex.Unlock()
return pm.peerCache
}
func (pm *peerManager) GetPeerAddresses() ([]*types.PeerAddress, []*types.NewBlockNotice, []types.PeerState) {
peers := make([]*types.PeerAddress, 0, len(pm.remotePeers))
blks := make([]*types.NewBlockNotice, 0, len(pm.remotePeers))
states := make([]types.PeerState, 0, len(pm.remotePeers))
for _, aPeer := range pm.remotePeers {
addr := aPeer.meta.ToPeerAddress()
peers = append(peers, &addr)
blks = append(blks, aPeer.lastNotice)
states = append(states, aPeer.state)
}
return peers, blks, states
}
// this method should be called inside pm.mutex
func (pm *peerManager) insertPeer(ID peer.ID, peer *remotePeerImpl) {
pm.remotePeers[ID] = peer
pm.updatePeerCache()
}
// this method should be called inside pm.mutex
func (pm *peerManager) deletePeer(ID peer.ID) {
pm.mm.Remove(ID)
delete(pm.remotePeers, ID)
pm.updatePeerCache()
}
func (pm *peerManager) updatePeerCache() {
newSlice := make([]RemotePeer, 0, len(pm.remotePeers))
for _, peer := range pm.remotePeers {
newSlice = append(newSlice, peer)
}
pm.peerCache = newSlice
}
peermanager.go | /*
* @file
* @copyright defined in aergo/LICENSE.txt
*/
package p2p
import (
"context"
"fmt"
"github.com/aergoio/aergo/p2p/metric"
"github.com/libp2p/go-libp2p-peerstore/pstoremem"
"net"
"strconv"
"strings"
"sync"
"time"
"github.com/libp2p/go-libp2p-host"
inet "github.com/libp2p/go-libp2p-net"
"github.com/aergoio/aergo-lib/log"
"github.com/aergoio/aergo/message"
"github.com/aergoio/aergo/types"
cfg "github.com/aergoio/aergo/config"
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-crypto"
"github.com/libp2p/go-libp2p-peer"
pstore "github.com/libp2p/go-libp2p-peerstore"
ma "github.com/multiformats/go-multiaddr"
)
// TODO this value better related to max peer and block produce interval, not constant
const (
DefaultGlobalBlockCacheSize = 300
DefaultPeerBlockCacheSize = 100
DefaultGlobalTxCacheSize = 50000
DefaultPeerTxCacheSize = 2000
// DefaultPeerTxQueueSize is maximum size of hashes in a single tx notice message
DefaultPeerTxQueueSize = 40000
defaultTTL = time.Second * 4
defaultHandshakeTTL = time.Second * 20
cachePlaceHolder = true
)
// PeerManager is an internal service that provides peer management
type PeerManager interface {
host.Host
Start() error
Stop() error
PrivateKey() crypto.PrivKey
PublicKey() crypto.PubKey
SelfMeta() PeerMeta
SelfNodeID() peer.ID
AddNewPeer(peer PeerMeta)
RemovePeer(peerID peer.ID)
// NotifyPeerHandshake is called after the remote peer has completed the handshake and is ready to receive or send
NotifyPeerHandshake(peerID peer.ID)
NotifyPeerAddressReceived([]PeerMeta)
// GetPeer returns the registered (handshaked) remote peer object
GetPeer(ID peer.ID) (RemotePeer, bool)
GetPeers() []RemotePeer
GetPeerAddresses() ([]*types.PeerAddress, []*types.NewBlockNotice, []types.PeerState)
}
/**
* peerManager connects to and listens to other nodes.
* It implements the Component interface
*/
type peerManager struct {
host.Host
privateKey crypto.PrivKey
publicKey crypto.PubKey
bindAddress net.IP
bindPort int
selfMeta PeerMeta
handlerFactory HandlerFactory
actorServ ActorService
signer msgSigner
mf moFactory
rm ReconnectManager
mm metric.MetricsManager
designatedPeers map[peer.ID]PeerMeta
remotePeers map[peer.ID]*remotePeerImpl
peerPool map[peer.ID]PeerMeta
conf *cfg.P2PConfig
logger *log.Logger
mutex *sync.Mutex
peerCache []RemotePeer
addPeerChannel chan PeerMeta
removePeerChannel chan peer.ID
fillPoolChannel chan []PeerMeta
finishChannel chan struct{}
eventListeners []PeerEventListener
}
var _ PeerManager = (*peerManager)(nil)
// PeerEventListener listen peer manage event
type PeerEventListener interface {
// OnAddPeer is called just after the peer is added.
OnAddPeer(peerID peer.ID)
// OnRemovePeer is called just before the peer is removed
OnRemovePeer(peerID peer.ID)
}
func init() {
}
// NewPeerManager creates a peer manager object.
func NewPeerManager(handlerFactory HandlerFactory, iServ ActorService, cfg *cfg.Config, signer msgSigner, rm ReconnectManager, mm metric.MetricsManager, logger *log.Logger, mf moFactory) PeerManager {
p2pConf := cfg.P2P
//logger.SetLevel("debug")
pm := &peerManager{
handlerFactory: handlerFactory,
actorServ: iServ,
conf: p2pConf,
signer: signer,
mf: mf,
rm: rm,
mm: mm,
logger: logger,
mutex: &sync.Mutex{},
designatedPeers: make(map[peer.ID]PeerMeta, len(cfg.P2P.NPAddPeers)),
remotePeers: make(map[peer.ID]*remotePeerImpl, p2pConf.NPMaxPeers),
peerPool: make(map[peer.ID]PeerMeta, p2pConf.NPPeerPool),
peerCache: make([]RemotePeer, 0, p2pConf.NPMaxPeers),
addPeerChannel: make(chan PeerMeta, 2),
removePeerChannel: make(chan peer.ID),
fillPoolChannel: make(chan []PeerMeta),
eventListeners: make([]PeerEventListener, 0, 4),
finishChannel: make(chan struct{}),
}
// additional initializations
pm.init()
return pm
}
func (pm *peerManager) PrivateKey() crypto.PrivKey {
return pm.privateKey
}
func (pm *peerManager) PublicKey() crypto.PubKey {
return pm.publicKey
}
func (pm *peerManager) SelfMeta() PeerMeta {
return pm.selfMeta
}
func (pm *peerManager) SelfNodeID() peer.ID {
return pm.selfMeta.ID
}
func (pm *peerManager) RegisterEventListener(listener PeerEventListener) {
pm.mutex.Lock()
defer pm.mutex.Unlock()
pm.eventListeners = append(pm.eventListeners, listener)
}
func (pm *peerManager) init() {
// check Key and address
priv := NodePrivKey()
pub := NodePubKey()
pid := NodeID()
pm.privateKey = priv
pm.publicKey = pub
// init address and port
// if not set, it looks up the machine's IP addresses and chooses a suitable one (not very smart), with default port 7845
peerAddr, peerPort := pm.getProtocolAddrs()
pm.selfMeta.IPAddress = peerAddr.String()
pm.selfMeta.Port = uint32(peerPort)
pm.selfMeta.ID = pid
// if bindAddress or bindPort is not set, it will be same as NetProtocolAddr or NetProtocolPort
if len(pm.conf.NPBindAddr) > 0 {
bindAddr := net.ParseIP(pm.conf.NPBindAddr)
if bindAddr == nil {
panic("invalid NPBindAddr " + pm.conf.NPBindAddr)
}
pm.bindAddress = bindAddr
} else {
pm.bindAddress = peerAddr
}
if pm.conf.NPBindPort > 0 {
pm.bindPort = pm.conf.NPBindPort
} else {
pm.bindPort = peerPort
}
// set meta info
// TODO more survey libp2p NAT configuration
// set designated peers
pm.addDesignatedPeers()
}
func (pm *peerManager) getProtocolAddrs() (protocolAddr net.IP, protocolPort int) {
if len(pm.conf.NetProtocolAddr) != 0 {
protocolAddr = net.ParseIP(pm.conf.NetProtocolAddr)
if protocolAddr == nil {
panic("invalid NetProtocolAddr " + pm.conf.NetProtocolAddr)
}
if protocolAddr.IsUnspecified() {
panic("NetProtocolAddr should be a specified IP address, not 0.0.0.0")
}
} else {
extIP, err := externalIP()
if err != nil {
panic("error while finding IP address: " + err.Error())
}
protocolAddr = extIP
}
protocolPort = pm.conf.NetProtocolPort
if protocolPort <= 0 {
panic("invalid NetProtocolPort " + strconv.Itoa(pm.conf.NetProtocolPort))
}
return
}
func (pm *peerManager) run() {
go pm.runManagePeers()
// need to start listening after chainservice is ready to init
// FIXME: adhoc code
go func() {
time.Sleep(time.Second * 3)
pm.startListener()
// addition should start after all modules are started
go func() {
time.Sleep(time.Second * 2)
for _, meta := range pm.designatedPeers {
pm.addPeerChannel <- meta
}
}()
}()
}
func (pm *peerManager) addDesignatedPeers() {
// add remote node from config
for _, target := range pm.conf.NPAddPeers {
// go-multiaddr implementation does not support recent p2p protocol yet, but deprecated name ipfs.
// This adhoc will be removed when go-multiaddr is patched.
target = strings.Replace(target, "/p2p/", "/ipfs/", 1)
targetAddr, err := ma.NewMultiaddr(target)
if err != nil {
pm.logger.Warn().Err(err).Str("target", target).Msg("invalid NPAddPeer address")
continue
}
splitted := strings.Split(targetAddr.String(), "/")
if len(splitted) != 7 {
pm.logger.Warn().Str("target", target).Msg("invalid NPAddPeer address")
continue
}
peerAddrString := splitted[2]
peerPortString := splitted[4]
peerPort, err := strconv.Atoi(peerPortString)
if err != nil {
pm.logger.Warn().Str("port", peerPortString).Msg("invalid Peer port")
continue
}
peerIDString := splitted[6]
peerID, err := peer.IDB58Decode(peerIDString)
if err != nil {
pm.logger.Warn().Str(LogPeerID, peerIDString).Msg("invalid PeerID")
continue
}
peerMeta := PeerMeta{
ID: peerID,
Port: uint32(peerPort),
IPAddress: peerAddrString,
Designated: true,
Outbound: true,
}
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Str("addr", peerAddrString).Int("port", peerPort).Msg("Adding Designated peer")
pm.designatedPeers[peerID] = peerMeta
}
}
func (pm *peerManager) runManagePeers() {
addrDuration := time.Minute * 3
addrTicker := time.NewTicker(addrDuration)
// reconnectRunners := make(map[peer.ID]*reconnectRunner)
MANLOOP:
for {
select {
case meta := <-pm.addPeerChannel:
if pm.addOutboundPeer(meta) {
if _, found := pm.designatedPeers[meta.ID]; found {
pm.rm.CancelJob(meta.ID)
}
}
case id := <-pm.removePeerChannel:
if pm.removePeer(id) {
if meta, found := pm.designatedPeers[id]; found {
pm.rm.AddJob(meta)
}
}
case <-addrTicker.C:
pm.checkAndCollectPeerListFromAll()
pm.logPeerMetrics()
case peerMetas := <-pm.fillPoolChannel:
pm.tryFillPool(&peerMetas)
case <-pm.finishChannel:
addrTicker.Stop()
pm.rm.Stop()
// TODO need to keep loop till all remote peer objects are removed, otherwise panic or channel deadlock can come.
break MANLOOP
}
}
// cleanup peers
for peerID := range pm.remotePeers {
pm.removePeer(peerID)
}
}
func (pm *peerManager) logPeerMetrics() {
if pm.logger.IsDebugEnabled() {
pm.logger.Debug().Msg(pm.mm.Summary())
}
}
// addOutboundPeer tries to connect and handshake to a remote peer. It can be called after the peermanager is initialized.
// It returns true if the peer is added or already exists, or false if adding the peer failed.
func (pm *peerManager) addOutboundPeer(meta PeerMeta) bool {
addrString := fmt.Sprintf("/ip4/%s/tcp/%d", meta.IPAddress, meta.Port)
var peerAddr, err = ma.NewMultiaddr(addrString)
if err != nil {
pm.logger.Warn().Err(err).Str("addr", addrString).Msg("invalid NPAddPeer address")
return false
}
var peerID = meta.ID
pm.mutex.Lock()
inboundPeer, ok := pm.remotePeers[peerID]
if ok {
// peer is already exist (and maybe inbound peer)
pm.logger.Info().Str(LogPeerID, inboundPeer.meta.ID.Pretty()).Msg("Peer is already managed by peermanager")
if meta.Designated {
// If remote peer was connected first. designated flag is not set yet.
inboundPeer.meta.Designated = true
}
pm.mutex.Unlock()
return true
}
pm.mutex.Unlock()
pm.Peerstore().AddAddr(peerID, peerAddr, meta.TTL())
ctx := context.Background()
s, err := pm.NewStream(ctx, meta.ID, aergoP2PSub)
if err != nil {
pm.logger.Info().Err(err).Str("addr", addrString).Str(LogPeerID, meta.ID.Pretty()).Str(LogProtoID, string(aergoP2PSub)).Msg("Error while get stream")
return false
}
rd := metric.NewReader(s)
wt := metric.NewWriter(s)
h := newHandshaker(pm, pm.actorServ, pm.logger, peerID)
rw, remoteStatus, err := h.handshakeOutboundPeerTimeout(rd, wt, defaultHandshakeTTL)
if err != nil {
pm.logger.Debug().Err(err).Str(LogPeerID, meta.ID.Pretty()).Msg("Failed to handshake")
//pm.sendGoAway(rw, "Failed to handshake")
s.Close()
return false
}
pm.mutex.Lock()
inboundPeer, ok = pm.remotePeers[peerID]
if ok {
if ComparePeerID(pm.selfMeta.ID, meta.ID) <= 0 {
pm.logger.Info().Str(LogPeerID, inboundPeer.meta.ID.Pretty()).Msg("Inbound connection was already handshaked while handshaking outbound connection, and remote peer is higher priority so closing this outbound connection.")
pm.mutex.Unlock()
pm.sendGoAway(rw, "Already handshaked")
s.Close()
return true
} else {
pm.logger.Info().Str(LogPeerID, inboundPeer.meta.ID.Pretty()).Msg("Inbound connection was already handshaked while handshaking outbound connection, but local peer is higher priority so closing that inbound connection")
// disconnect lower valued connection
pm.deletePeer(meta.ID)
inboundPeer.stop()
}
}
// update peer info with the information sent by the remote peer
meta = FromPeerAddress(remoteStatus.Sender)
outboundPeer := newRemotePeer(meta, pm, pm.actorServ, pm.logger, pm.mf, pm.signer, rw)
// insert Handlers
pm.handlerFactory.insertHandlers(outboundPeer)
go outboundPeer.runPeer()
pm.insertPeer(peerID, outboundPeer)
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Str("addr", net.ParseIP(meta.IPAddress).String()+":"+strconv.Itoa(int(meta.Port))).Msg("Outbound peer is added to peerService")
outboundPeer.metric = pm.mm.Add(peerID, rd, wt)
pm.mutex.Unlock()
addrs := pm.Peerstore().Addrs(peerID)
addrStrs := make([]string, len(addrs))
for i, addr := range addrs {
addrStrs[i] = addr.String()
}
pm.logger.Debug().Strs("addrs", addrStrs).Str(LogPeerID, outboundPeer.meta.ID.Pretty()).Msg("addresses of peer")
// peer is ready
h.doInitialSync()
// notice to p2pmanager that handshaking is finished
pm.NotifyPeerHandshake(peerID)
return true
}
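// sendGoAway tells the remote peer why its connection is being dropped before the stream
// is closed; a failed write is simply ignored since the peer is going away anyway.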
func (pm *peerManager) sendGoAway(rw MsgReadWriter, msg string) {
goMsg := &types.GoAwayNotice{Message: msg}
// TODO code smell: unsafe cast.
mo := pm.mf.newMsgRequestOrder(false, GoAway, goMsg).(*pbRequestOrder)
container := mo.message
rw.WriteMsg(container)
}
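// checkInPeerstore reports whether the given peer ID is already known to the libp2p peerstore.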
func (pm *peerManager) checkInPeerstore(peerID peer.ID) bool {
found := false
for _, existingPeerID := range pm.Peerstore().Peers() {
if existingPeerID == peerID {
found = true
break
}
}
return found
}
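// AddNewPeer and RemovePeer only hand the request over to runManagePeers via a channel,
// so callers never touch peer bookkeeping concurrently. Illustrative use (names are examples):
//   pm.AddNewPeer(PeerMeta{ID: id, IPAddress: "10.0.0.7", Port: 7846, Outbound: true})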
func (pm *peerManager) AddNewPeer(peer PeerMeta) {
pm.addPeerChannel <- peer
}
func (pm *peerManager) RemovePeer(peerID peer.ID) {
pm.removePeerChannel <- peerID
}
func (pm *peerManager) NotifyPeerHandshake(peerID peer.ID) {
pm.checkAndCollectPeerList(peerID)
}
func (pm *peerManager) NotifyPeerAddressReceived(metas []PeerMeta) {
pm.fillPoolChannel <- metas
}
// removePeer removes and disconnects a managed remote peer connection.
// It returns true if the peer exists and is managed by the peer manager.
func (pm *peerManager) removePeer(peerID peer.ID) bool {
pm.mutex.Lock()
target, ok := pm.remotePeers[peerID]
if !ok {
pm.mutex.Unlock()
return false
}
pm.deletePeer(peerID)
// No internal module accesses this peer anymore, but remote messages can still be received.
target.stop()
pm.mutex.Unlock()
// also disconnect connection
for _, existingPeerID := range pm.Peerstore().Peers() {
if existingPeerID == peerID {
for _, listener := range pm.eventListeners {
listener.OnRemovePeer(peerID)
}
pm.Network().ClosePeer(peerID)
return true
}
}
return true
}
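// Peerstore exposes the peerstore of the embedded libp2p host.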
func (pm *peerManager) Peerstore() pstore.Peerstore {
return pm.Host.Peerstore()
}
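// startListener creates the libp2p host bound to the configured address and port and
// registers onHandshake as the stream handler for the aergo p2p protocol.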
func (pm *peerManager) startListener() {
var err error
listens := make([]ma.Multiaddr, 0, 2)
// FIXME: should also support ip6 later
listen, err := ma.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", pm.bindAddress, pm.bindPort))
if err != nil {
panic("Can't estabilish listening address: " + err.Error())
}
listens = append(listens, listen)
peerStore := pstore.NewPeerstore(pstoremem.NewKeyBook(), pstoremem.NewAddrBook(), pstoremem.NewPeerMetadata())
newHost, err := libp2p.New(context.Background(), libp2p.Identity(pm.privateKey), libp2p.Peerstore(peerStore), libp2p.ListenAddrs(listens...))
if err != nil {
pm.logger.Fatal().Err(err).Str("addr", listen.String()).Msg("Couldn't listen from")
panic(err.Error())
}
pm.logger.Info().Str("pid", pm.SelfNodeID().Pretty()).Str("addr[0]", listens[0].String()).
Msg("Set self node's pid, and listening for connections")
pm.Host = newHost
pm.SetStreamHandler(aergoP2PSub, pm.onHandshake)
}
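// onHandshake runs the inbound side of the handshake for a newly accepted stream and,
// on success, registers the peer and starts the initial sync.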
func (pm *peerManager) onHandshake(s inet.Stream) {
peerID := s.Conn().RemotePeer()
h := newHandshaker(pm, pm.actorServ, pm.logger, peerID)
rd := metric.NewReader(s)
wt := metric.NewWriter(s)
rw, statusMsg, err := h.handshakeInboundPeer(rd, wt)
if err != nil {
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Err(err).Msg("fail to handshake")
s.Close()
return
}
// TODO: check status
meta := FromPeerAddress(statusMsg.Sender)
// try Add peer
if inboundPeer, success := pm.tryAddInboundPeer(meta, rw); !success {
// failed to add
pm.sendGoAway(rw, "Concurrent handshake")
s.Close()
return
} else {
inboundPeer.metric = pm.mm.Add(peerID, rd, wt)
}
h.doInitialSync()
// notice to p2pmanager that handshaking is finished
pm.NotifyPeerHandshake(peerID)
}
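// tryAddInboundPeer registers a peer for an inbound connection. If an outbound connection
// to the same peer has already finished its handshake, the tie is broken with ComparePeerID:
// the connection initiated by the higher-priority peer is kept and the other one is dropped.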
func (pm *peerManager) tryAddInboundPeer(meta PeerMeta, rw MsgReadWriter) (*remotePeerImpl, bool) {
pm.mutex.Lock()
defer pm.mutex.Unlock()
peerID := meta.ID
outboundPeer, found := pm.remotePeers[peerID]
if found {
if ComparePeerID(pm.selfMeta.ID, meta.ID) <= 0 {
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Msg("Outbound connection was already handshaked while handshaking inbound connection, and remote peer is higher priority so closing that outbound connection.")
pm.sendGoAway(rw, "Already handshaked")
pm.deletePeer(meta.ID)
outboundPeer.stop()
} else {
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Msg("Outbound connection was already handshaked while handshaking inbound connection, but local peer is higher priority and closing this inbound connection.")
// disconnect lower valued connection
return nil, false
}
}
inboundPeer := newRemotePeer(meta, pm, pm.actorServ, pm.logger, pm.mf, pm.signer, rw)
pm.handlerFactory.insertHandlers(inboundPeer)
go inboundPeer.runPeer()
pm.insertPeer(peerID, inboundPeer)
peerAddr := meta.ToPeerAddress()
addrs := pm.Peerstore().Addrs(peerID)
addrStrs := make([]string, len(addrs))
for i, addr := range addrs {
addrStrs[i] = addr.String()
}
pm.logger.Debug().Strs("addrs", addrStrs).Str(LogPeerID, inboundPeer.meta.ID.Pretty()).Msg("addresses of peer")
pm.logger.Info().Str(LogPeerID, peerID.Pretty()).Str("addr", getIP(&peerAddr).String()+":"+strconv.Itoa(int(peerAddr.Port))).Msg("Inbound peer is added to peerService")
return inboundPeer, true
}
func (pm *peerManager) Start() error {
pm.run()
//pm.conf.NPAddPeers
return nil
}
func (pm *peerManager) Stop() error {
// TODO stop service
// close(pm.addPeerChannel)
// close(pm.removePeerChannel)
pm.finishChannel <- struct{}{}
return nil
}
func (pm *peerManager) GetName() string {
return "p2p service"
}
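// checkAndCollectPeerListFromAll asks every connected peer for more addresses via the actor
// system, but only while the candidate pool is below the configured NPPeerPool size.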
func (pm *peerManager) checkAndCollectPeerListFromAll() {
if pm.hasEnoughPeers() {
return
}
for _, remotePeer := range pm.remotePeers {
pm.actorServ.SendRequest(message.P2PSvc, &message.GetAddressesMsg{ToWhom: remotePeer.meta.ID, Size: 20, Offset: 0})
}
}
func (pm *peerManager) checkAndCollectPeerList(ID peer.ID) {
if pm.hasEnoughPeers() {
return
}
peer, ok := pm.GetPeer(ID)
if !ok {
//pm.logger.Warnf("invalid peer id %s", ID.Pretty())
pm.logger.Warn().Str(LogPeerID, ID.Pretty()).Msg("invalid peer id")
return
}
pm.actorServ.SendRequest(message.P2PSvc, &message.GetAddressesMsg{ToWhom: peer.ID(), Size: 20, Offset: 0})
}
func (pm *peerManager) hasEnoughPeers() bool {
return len(pm.peerPool) >= pm.conf.NPPeerPool
}
// tryFillPool should be called in runManagePeers() only
func (pm *peerManager) tryFillPool(metas *[]PeerMeta) {
added := make([]PeerMeta, 0, len(*metas))
invalid := make([]string, 0)
for _, meta := range *metas {
if string(meta.ID) == "" {
invalid = append(invalid, meta.String())
continue
}
_, found := pm.peerPool[meta.ID]
if !found {
// change some properties
meta.Outbound = true
meta.Designated = false
pm.peerPool[meta.ID] = meta
added = append(added, meta)
}
}
if len(invalid) > 0 {
pm.logger.Warn().Strs("metas", invalid).Msg("received invalid peer metas")
}
pm.logger.Debug().Int("added_cnt", len(added)).Msg("Filled unknown peer addresses to peerpool")
pm.tryConnectPeers()
}
// tryConnectPeers should be called in runManagePeers() only
func (pm *peerManager) tryConnectPeers() {
remained := pm.conf.NPMaxPeers - len(pm.remotePeers)
for ID, meta := range pm.peerPool {
if _, found := pm.GetPeer(ID); found {
delete(pm.peerPool, ID)
continue
}
if meta.IPAddress == "" || meta.Port == 0 {
pm.logger.Warn().Str(LogPeerID, meta.ID.Pretty()).Str("addr", meta.IPAddress).
Uint32("port", meta.Port).Msg("Invalid peer meta informations")
continue
}
// called synchronously, in the same goroutine.
pm.addOutboundPeer(meta)
remained--
if remained <= 0 {
break
}
}
}
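// GetPeer returns the managed (handshaked) remote peer for the given ID, if one exists.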
func (pm *peerManager) GetPeer(ID peer.ID) (RemotePeer, bool) {
pm.mutex.Lock()
defer pm.mutex.Unlock()
// vs code's lint does not allow direct return of map operation
ptr, ok := pm.remotePeers[ID]
if !ok {
return nil, false
}
return ptr, ok
}
func (pm *peerManager) GetPeers() []RemotePeer {
pm.mutex.Lock()
defer pm.mutex.Unlock()
return pm.peerCache
}
func (pm *peerManager) GetPeerAddresses() ([]*types.PeerAddress, []*types.NewBlockNotice, []types.PeerState) {
peers := make([]*types.PeerAddress, 0, len(pm.remotePeers))
blks := make([]*types.NewBlockNotice, 0, len(pm.remotePeers))
states := make([]types.PeerState, 0, len(pm.remotePeers))
for _, aPeer := range pm.remotePeers {
addr := aPeer.meta.ToPeerAddress()
peers = append(peers, &addr)
blks = append(blks, aPeer.lastNotice)
states = append(states, aPeer.state)
}
return peers, blks, states
}
// this method should be called inside pm.mutex
func (pm *peerManager) insertPeer(ID peer.ID, peer *remotePeerImpl) {
pm.remotePeers[ID] = peer
pm.updatePeerCache()
}
// this method should be called inside pm.mutex
func (pm *peerManager) deletePeer(ID peer.ID) {
pm.mm.Remove(ID)
delete(pm.remotePeers, ID)
pm.updatePeerCache()
}
func (pm *peerManager) updatePeerCache() {
newSlice := make([]RemotePeer, 0, len(pm.remotePeers))
for _, peer := range pm.remotePeers {
newSlice = append(newSlice, peer)
}
pm.peerCache = newSlice
}
// Config.ts
import {chain_cmps, mkcmp, cmp_order, Comparator} from '../Utils'
const image_ws_url = 'https://ws.spraakbanken.gu.se/ws/swell'
const pseuws_url = 'https://ws.spraakbanken.gu.se/ws/larka/pseuws'
export interface Example {
source: string
target: string
}
const ex = (source: string, target: string): Example => ({source, target})
const examples: Example[] = `
Alice and Bob went to Paris . Alice's wallet was stolen . // Alice:1:'firstname_female' and Bob:2:'firstname_male' went to Paris:3:city . Alice's:1:'firstname_female':gen wallet was stolen .

Their was a problem yesteray . // There was a problem yesterday .

I don't know his lives . // I don't know where he~his lives .

He get to cleaned his son . // He got his~his son~son to:O clean:O the~ room~ .

We wrote down the number . // We wrote the number down~down .
`
.trim()
.split(/\n\n+/gm)
.map(line => ex.apply({}, line.split('//').map(side => side.trim()) as [string, string]))
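// Correction labels that involve a change of token order (word-order and inversion corrections).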
const order_changing_labels: Record<string, true> = {
'S-adv': true,
'S-finV': true,
'S-WO': true,
WO: true,
INV: true,
OINV: true,
}
export const label_args: Record<string, number> = {
/*age_string: 1,*/
}
export type TaxonomyGroup = {
group: string
entries: {
label: string
desc: string
}[]
}
export type Taxonomy = TaxonomyGroup[]
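// Illustrative example of the shape (not part of the real taxonomies below):
// const tiny: Taxonomy = [
//   {group: 'Names', entries: [{label: 'surname', desc: ''}]},
// ]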
const extra = 'gen def pl foreign'.split(' ')
const temporary = 'OBS! Cit-FL Com!'.split(' ')
const digits = /^\d+$/
/** An ordered set of label categories. */
export enum LabelOrder {
BASE,
NUM,
EXTRA,
TEMP,
}
/** Maps a label to a category in LabelOrder. */
export function label_order(label: string): LabelOrder {
if (temporary.includes(label)) {
return LabelOrder.TEMP
} else if (extra.includes(label)) {
return LabelOrder.EXTRA
} else if (digits.test(label)) {
return LabelOrder.NUM
} else {
return LabelOrder.BASE
}
}
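// For example: label_order('OBS!') => LabelOrder.TEMP, label_order('gen') => LabelOrder.EXTRA,
// label_order('12') => LabelOrder.NUM and label_order('L-W') => LabelOrder.BASE.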
/** Sorting function for labels. */
// Sort first by taxonomy, then label type, and finally alphabetically.
export const label_sort: Comparator<string> = chain_cmps(
mkcmp(label_taxonomy),
mkcmp(label_order),
cmp_order
)
const anonymization: Taxonomy = [
{
group: 'Morphology',
entries: [
{label: 'gen', desc: 'genitive'},
{label: 'def', desc: 'definite'},
{label: 'pl', desc: 'plural'},
],
},
{
group: 'Names',
entries: [
{label: 'firstname_male', desc: ''},
{label: 'firstname_female', desc: ''},
{label: 'firstname_unknown', desc: ''},
{label: 'initials', desc: ''},
{label: 'middlename', desc: ''},
{label: 'surname', desc: ''},
],
},
{
group: 'Geographic data',
entries: [
{label: 'foreign', desc: ''},
{label: 'area', desc: ''},
{label: 'city', desc: 'city including villages'},
{label: 'country', desc: 'except Sweden'},
{label: 'geo', desc: 'forest, lake, mountain, etc'},
{label: 'place', desc: ''},
{label: 'region', desc: ''},
{label: 'street_nr', desc: 'street number'},
{label: 'zip_code', desc: ''},
],
},
{
group: 'Institutions',
entries: [
{label: 'school', desc: ''},
{label: 'work', desc: ''},
{label: 'other_institution', desc: ''},
],
},
{
group: 'Transportation',
entries: [
{label: 'transport_name', desc: 'bus, metro, tram, train, express'},
{label: 'transport_nr', desc: 'number, color'},
],
},
{
group: 'Age',
entries: [{label: 'age_digits', desc: ''}, {label: 'age_string', desc: ''}],
},
{
group: 'Dates',
entries: [
{label: 'date_digits', desc: 'numerical date represenation, delimiters are retained'},
{label: 'day', desc: ''},
{label: 'month_digit', desc: ''},
{label: 'month_word', desc: ''},
{label: 'year', desc: ''},
],
},
{
group: 'Misc',
entries: [
{label: 'account_nr', desc: ''},
{label: 'email', desc: ''},
{label: 'extra', desc: ''},
{label: 'license_nr', desc: ''},
{label: 'other_nr_seq', desc: 'a sequence of numbers'},
{label: 'phone_nr', desc: ''},
{label: 'personid_nr', desc: ''},
{label: 'url', desc: ''},
],
},
{
group: 'Mark',
entries: [
{label: 'edu', desc: 'education, courses'},
{label: 'fam', desc: 'family members'},
{label: 'prof', desc: 'profession'},
{label: 'sensitive', desc: ''},
],
},
{
group: 'Other',
entries: [
{label: 'Cit-FL', desc: 'Citation for a language'},
{label: 'Com!', desc: 'Comment'},
{label: 'OBS!', desc: 'Attention'},
],
},
]
export const normalization: Taxonomy = [
{
group: 'Other',
entries: [
{
label: 'Cit-FL',
desc: 'Citation for a language'
},
{
label: 'Com!',
desc: 'Comment'
},
{
label: 'OBS!',
desc: 'Attention'
},
{
label: 'X',
desc: 'Impossible to interpret the writer’s intention',
},
],
},
]
// Julia's updated taxonomy 19 April 2018
export const correctannot: Taxonomy = [
{
group: 'Orthographic',
entries: [
{
label: 'O',
desc: 'Spelling',
},
{
label: 'O-Cap',
desc: 'Upper/lower case',
},
{
label: 'O-Comp',
desc: 'Spaces and hyphens between words',
},
],
},
{
group: 'Lexical',
entries: [
{
label: 'L-Der',
desc: 'Word formation (derivation and compounding)',
},
{
label: 'L-FL',
desc: 'Non-Swedish word corrected to Swedish word',
},
{
label: 'L-Ref',
desc: 'Choice of anaphoric expression',
},
{
label: 'L-W',
desc:
'Wrong word or phrase, other',
},
],
},
{
group: 'Morphological',
entries: [
{
label: 'M-Adj/adv',
desc: 'Adjective form of word corrected to adverb form',
},
{
label: 'M-Case',
desc: 'Nominative vs genitive/accusative',
},
{label: 'M-Def', desc: 'Definiteness: articles; forms of nouns and adjectives'},
{label: 'M-F', desc: 'Grammatical category kept, form changed'},
{label: 'M-Gend', desc: 'Gender'},
{label: 'M-Num', desc: 'Number'},
{
label: 'M-Other',
desc:
'Other morphological corrections, including change between different comparational forms of adjectives',
},
{label: 'M-Verb', desc: 'Verb forms; use of ha, komma and skola auxiliaries'},
],
},
{
group: 'Punctuation',
entries: [
{
label: 'P-M',
desc: 'Punctuation missing (added)',
},
{
label: 'P-R',
desc: 'Punctuation redundant (removed)',
},
{
label: 'P-Sent',
desc: 'Sentence segmentation',
},
{
label: 'P-W',
desc: 'Wrong punctuation',
},
],
},
{
group: 'Syntactical',
entries: [
{
label: 'S-Adv',
desc: 'Adverbial placement',
},
{
label: 'S-Comp',
desc: 'Compound vs multi-word expression, and other restructuring of the same lexical morphemes within a phrase',
},
{
label: 'S-Clause',
desc: 'Change of basic clause structure: syntactic function of components, hierarchical clause structure',
},
{
label: 'S-Ext',
desc: 'Extensive and complex correction',
},
{
label: 'S-FinV',
desc: 'Finite verb placement',
},
{
label: 'S-M',
desc:
'Word missing (added)',
},
{
label: 'S-Msubj',
desc: 'Subject missing (added)',
},
{
label: 'S-Other',
desc:
'Other syntactical correction',
},
{
label: 'S-R',
desc: 'Word redundant (removed)',
},
{
label: 'S-Type',
desc: 'Change of phrase type/part of speech',
},
{
label: 'S-WO',
desc: 'Word order, other',
},
],
},
{
group: 'Other',
entries: [
{
label: 'C',
desc: 'Consistency correction, necessitated by other correction',
},
{
label: 'Cit-FL',
desc: 'Non-Swedish word kept, i.e. not corrected',
},
{
label: 'Com!',
desc: 'Comments for the corpus user'
},
{
label: 'OBS!',
desc: 'Internal and temporary comments for the annotators'
},
{
label: 'Unid',
desc: 'Unidentified correction',
},
{
label: 'X',
desc: 'Unintelligible string',
},
],
},
]
function doc_url(title: string): string {
return 'https://spraakbanken.github.io/swell-project/' + title
}
const docs: Record<string, Record<string, string>> = {
anonymization: {
'pseudonymization guidelines': doc_url('Pseudonymization_guidelines'),
},
normalization: {
'normalization guidelines': doc_url('Normalization_guidelines'),
},
correctannot: {
'annotation guidelines': doc_url('Correction-annotation_guidelines'),
},
}
export const config = {
order_changing_labels,
examples,
image_ws_url,
pseuws_url,
taxonomy: {anonymization, normalization, correctannot},
docs,
}
/** What group does this label belong to?
(label_group('country') as TaxonomyGroup).group // => 'Geographic data'
label_group('quux') // => undefined
*/
export function label_group(label: string): TaxonomyGroup | undefined {
return config.taxonomy.anonymization.find(
group => !!group.entries.find(entry => entry.label == label)
)
}
export interface TaxonomyFind {
taxonomy: string
group: string
entry: {label: string; desc: string}
}
export function find_label(label: string): TaxonomyFind | undefined {
const order = label_order(label)
if (order === LabelOrder.NUM) {
return {taxonomy: 'anonymization', group: 'Number', entry: {label, desc: 'number'}}
}
if (order === LabelOrder.TEMP) {
return undefined
}
for (let taxonomy in config.taxonomy) {
for (let group of (config.taxonomy as {[mode: string]: Taxonomy})[taxonomy]) {
let entry = group.entries.find(entry => entry.label == label)
if (entry !== undefined) return {taxonomy, group: group.group, entry}
}
}
}
/** Get the taxonomy domain (editor mode) of a label. */
export function label_taxonomy(label: string): string | null {
return find_label(label) ? find_label(label)!.taxonomy : null
}
/** Does the named taxonomy include the given label? */
export function taxonomy_has_label(taxonomy: string, label: string): boolean {
if (!(taxonomy in config.taxonomy)) return false
const tax: Record<string, TaxonomyGroup[]> = config.taxonomy
return !!tax[taxonomy].find(g => !!g.entries.find(l => l.label == label))
} |
Their was a problem yesteray . // There was a problem yesterday .
| random_line_split |
Config.ts | import {chain_cmps, mkcmp, cmp_order, Comparator} from '../Utils'
const image_ws_url = 'https://ws.spraakbanken.gu.se/ws/swell'
const pseuws_url = 'https://ws.spraakbanken.gu.se/ws/larka/pseuws'
export interface Example {
source: string
target: string
}
const ex = (source: string, target: string): Example => ({source, target})
const examples: Example[] = `
Alice and Bob went to Paris . Alice's wallet was stolen . // Alice:1:'firstname_female' and Bob:2:'firstname_male' went to Paris:3:city . Alice's:1:'firstname_female':gen wallet was stolen .
Their was a problem yesteray . // There was a problem yesterday .
I don't know his lives . // I don't know where he~his lives .
He get to cleaned his son . // He got his~his son~son to:O clean:O the~ room~ .
We wrote down the number . // We wrote the number down~down .
`
.trim()
.split(/\n\n+/gm)
.map(line => ex.apply({}, line.split('//').map(side => side.trim()) as [string, string]))
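// Illustration (assumption, not part of the original config): with the examples separated by
// blank lines, as the split on /\n\n+/ expects, the second entry parses to an Example like
//   {source: "Their was a problem yesteray .", target: "There was a problem yesterday ."}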
const order_changing_labels: Record<string, true> = {
'S-adv': true,
'S-finV': true,
'S-WO': true,
WO: true,
INV: true,
OINV: true,
}
export const label_args: Record<string, number> = {
/*age_string: 1,*/
}
export type TaxonomyGroup = {
group: string
entries: {
label: string
desc: string
}[]
}
export type Taxonomy = TaxonomyGroup[]
const extra = 'gen def pl foreign'.split(' ')
const temporary = 'OBS! Cit-FL Com!'.split(' ')
const digits = /^\d+$/
/** An ordered set of label categories. */
export enum LabelOrder {
BASE,
NUM,
EXTRA,
TEMP,
}
/** Maps a label to a category in LabelOrder. */
export function label_order(label: string): LabelOrder {
if (temporary.includes(label)) {
return LabelOrder.TEMP
} else if (extra.includes(label)) {
return LabelOrder.EXTRA
} else if (digits.test(label)) {
return LabelOrder.NUM
} else {
return LabelOrder.BASE
}
}
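// For illustration (assumption, not in the original file), given the lists defined above:
//   label_order('O-Cap') // => LabelOrder.BASE (not temporary, extra or numeric)
//   label_order('3')     // => LabelOrder.NUM
//   label_order('gen')   // => LabelOrder.EXTRA
//   label_order('OBS!')  // => LabelOrder.TEMP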
/** Sorting function for labels. */
// Sort first by taxonomy, then label type, and finally alphabetically.
export const label_sort: Comparator<string> = chain_cmps(
mkcmp(label_taxonomy),
mkcmp(label_order),
cmp_order
)
const anonymization: Taxonomy = [
{
group: 'Morphology',
entries: [
{label: 'gen', desc: 'genitive'},
{label: 'def', desc: 'definite'},
{label: 'pl', desc: 'plural'},
],
},
{
group: 'Names',
entries: [
{label: 'firstname_male', desc: ''},
{label: 'firstname_female', desc: ''},
{label: 'firstname_unknown', desc: ''},
{label: 'initials', desc: ''},
{label: 'middlename', desc: ''},
{label: 'surname', desc: ''},
],
},
{
group: 'Geographic data',
entries: [
{label: 'foreign', desc: ''},
{label: 'area', desc: ''},
{label: 'city', desc: 'city including villages'},
{label: 'country', desc: 'except Sweden'},
{label: 'geo', desc: 'forest, lake, mountain, etc'},
{label: 'place', desc: ''},
{label: 'region', desc: ''},
{label: 'street_nr', desc: 'street number'},
{label: 'zip_code', desc: ''},
],
},
{
group: 'Institutions',
entries: [
{label: 'school', desc: ''},
{label: 'work', desc: ''},
{label: 'other_institution', desc: ''},
],
},
{
group: 'Transportation',
entries: [
{label: 'transport_name', desc: 'bus, metro, tram, train, express'},
{label: 'transport_nr', desc: 'number, color'},
],
},
{
group: 'Age',
entries: [{label: 'age_digits', desc: ''}, {label: 'age_string', desc: ''}],
},
{
group: 'Dates',
entries: [
{label: 'date_digits', desc: 'numerical date representation, delimiters are retained'},
{label: 'day', desc: ''},
{label: 'month_digit', desc: ''},
{label: 'month_word', desc: ''},
{label: 'year', desc: ''},
],
},
{
group: 'Misc',
entries: [
{label: 'account_nr', desc: ''},
{label: 'email', desc: ''},
{label: 'extra', desc: ''},
{label: 'license_nr', desc: ''},
{label: 'other_nr_seq', desc: 'a sequence of numbers'},
{label: 'phone_nr', desc: ''},
{label: 'personid_nr', desc: ''},
{label: 'url', desc: ''},
],
},
{
group: 'Mark',
entries: [
{label: 'edu', desc: 'education, courses'},
{label: 'fam', desc: 'family members'},
{label: 'prof', desc: 'profession'},
{label: 'sensitive', desc: ''},
],
},
{
group: 'Other',
entries: [
{label: 'Cit-FL', desc: 'Citation for a language'},
{label: 'Com!', desc: 'Comment'},
{label: 'OBS!', desc: 'Attention'},
],
},
]
export const normalization: Taxonomy = [
{
group: 'Other',
entries: [
{
label: 'Cit-FL',
desc: 'Citation for a language'
},
{
label: 'Com!',
desc: 'Comment'
},
{
label: 'OBS!',
desc: 'Attention'
},
{
label: 'X',
desc: 'Impossible to interpret the writer’s intention',
},
],
},
]
// Julia's updated taxonomy 19 April 2018
export const correctannot: Taxonomy = [
{
group: 'Orthographic',
entries: [
{
label: 'O',
desc: 'Spelling',
},
{
label: 'O-Cap',
desc: 'Upper/lower case',
},
{
label: 'O-Comp',
desc: 'Spaces and hyphens between words',
},
],
},
{
group: 'Lexical',
entries: [
{
label: 'L-Der',
desc: 'Word formation (derivation and compounding)',
},
{
label: 'L-FL',
desc: 'Non-Swedish word corrected to Swedish word',
},
{
label: 'L-Ref',
desc: 'Choice of anaphoric expression',
},
{
label: 'L-W',
desc:
'Wrong word or phrase, other',
},
],
},
{
group: 'Morphological',
entries: [
{
label: 'M-Adj/adv',
desc: 'Adjective form of word corrected to adverb form',
},
{
label: 'M-Case',
desc: 'Nominative vs genitive/accusative',
},
{label: 'M-Def', desc: 'Definiteness: articles; forms of nouns and adjectives'},
{label: 'M-F', desc: 'Grammatical category kept, form changed'},
{label: 'M-Gend', desc: 'Gender'},
{label: 'M-Num', desc: 'Number'},
{
label: 'M-Other',
desc:
'Other morphological corrections, including change between different comparational forms of adjectives',
},
{label: 'M-Verb', desc: 'Verb forms; use of ha, komma and skola auxiliaries'},
],
},
{
group: 'Punctuation',
entries: [
{
label: 'P-M',
desc: 'Punctuation missing (added)',
},
{
label: 'P-R',
desc: 'Punctuation redundant (removed)',
},
{
label: 'P-Sent',
desc: 'Sentence segmentation',
},
{
label: 'P-W',
desc: 'Wrong punctuation',
},
],
},
{
group: 'Syntactical',
entries: [
{
label: 'S-Adv',
desc: 'Adverbial placement',
},
{
label: 'S-Comp',
desc: 'Compound vs multi-word expression, and other restructuring of the same lexical morphemes within a phrase',
},
{
label: 'S-Clause',
desc: 'Change of basic clause structure: syntactic function of components, hierarchical clause structure',
},
{
label: 'S-Ext',
desc: 'Extensive and complex correction',
},
{
label: 'S-FinV',
desc: 'Finite verb placement',
},
{
label: 'S-M',
desc:
'Word missing (added)',
},
{
label: 'S-Msubj',
desc: 'Subject missing (added)',
},
{
label: 'S-Other',
desc:
'Other syntactical correction',
},
{
label: 'S-R',
desc: 'Word redundant (removed)',
},
{
label: 'S-Type',
desc: 'Change of phrase type/part of speech',
},
{
label: 'S-WO',
desc: 'Word order, other',
},
],
},
{
group: 'Other',
entries: [
{
label: 'C',
desc: 'Consistency correction, necessitated by other correction',
},
{
label: 'Cit-FL',
desc: 'Non-Swedish word kept, i.e. not corrected',
},
{
label: 'Com!',
desc: 'Comments for the corpus user'
},
{
label: 'OBS!',
desc: 'Internal and temporary comments for the annotators'
},
{
label: 'Unid',
desc: 'Unidentified correction',
},
{
label: 'X',
desc: 'Unintelligible string',
},
],
},
]
function doc_url(title: string): string {
return 'https://spraakbanken.github.io/swell-project/' + title
}
const docs: Record<string, Record<string, string>> = {
anonymization: {
'pseudonymization guidelines': doc_url('Pseudonymization_guidelines'),
},
normalization: {
'normalization guidelines': doc_url('Normalization_guidelines'),
},
correctannot: {
'annotation guidelines': doc_url('Correction-annotation_guidelines'),
},
}
export const config = {
order_changing_labels,
examples,
image_ws_url,
pseuws_url,
taxonomy: {anonymization, normalization, correctannot},
docs,
}
/** What group does this label belong to?
(label_group('country') as TaxonomyGroup).group // => 'Geographic data'
label_group('quux') // => undefined
*/
export function label_group(label: string): TaxonomyGroup | undefined {
return config.taxonomy.anonymization.find(
group => !!group.entries.find(entry => entry.label == label)
)
}
export interface TaxonomyFind {
taxonomy: string
group: string
entry: {label: string; desc: string}
}
export function find_label(label: string): TaxonomyFind | undefined {
const order = label_order(label)
if (order === LabelOrder.NUM) {
return {taxonomy: 'anonymization', group: 'Number', entry: {label, desc: 'number'}}
}
if (order === LabelOrder.TEMP) {
return undefined
}
for (let taxonomy in config.taxonomy) {
for (let group of (config.taxonomy as {[mode: string]: Taxonomy})[taxonomy]) {
let entry = group.entries.find(entry => entry.label == label)
if (entry !== undefined) return {taxonomy, group: group.group, entry}
}
}
}
/** Get the taxonomy domain (editor mode) of a label. */
export function label_taxonomy(label: string): string | null {
return find_label(label) ? find_label(label)!.taxonomy : null
}
/** Does the named taxonomy include the given label? */
export function ta | axonomy: string, label: string): boolean {
if (!(taxonomy in config.taxonomy)) return false
const tax: Record<string, TaxonomyGroup[]> = config.taxonomy
return !!tax[taxonomy].find(g => !!g.entries.find(l => l.label == label))
}
| xonomy_has_label(t | identifier_name |
Config.ts | import {chain_cmps, mkcmp, cmp_order, Comparator} from '../Utils'
const image_ws_url = 'https://ws.spraakbanken.gu.se/ws/swell'
const pseuws_url = 'https://ws.spraakbanken.gu.se/ws/larka/pseuws'
export interface Example {
source: string
target: string
}
const ex = (source: string, target: string): Example => ({source, target})
const examples: Example[] = `
Alice and Bob went to Paris . Alice's wallet was stolen . // Alice:1:'firstname_female' and Bob:2:'firstname_male' went to Paris:3:city . Alice's:1:'firstname_female':gen wallet was stolen .
Their was a problem yesteray . // There was a problem yesterday .
I don't know his lives . // I don't know where he~his lives .
He get to cleaned his son . // He got his~his son~son to:O clean:O the~ room~ .
We wrote down the number . // We wrote the number down~down .
`
.trim()
.split(/\n\n+/gm)
.map(line => ex.apply({}, line.split('//').map(side => side.trim()) as [string, string]))
const order_changing_labels: Record<string, true> = {
'S-adv': true,
'S-finV': true,
'S-WO': true,
WO: true,
INV: true,
OINV: true,
}
export const label_args: Record<string, number> = {
/*age_string: 1,*/
}
export type TaxonomyGroup = {
group: string
entries: {
label: string
desc: string
}[]
}
export type Taxonomy = TaxonomyGroup[]
const extra = 'gen def pl foreign'.split(' ')
const temporary = 'OBS! Cit-FL Com!'.split(' ')
const digits = /^\d+$/
/** An ordered set of label categories. */
export enum LabelOrder {
BASE,
NUM,
EXTRA,
TEMP,
}
/** Maps a label to a category in LabelOrder. */
export function label_order(label: string): LabelOrder {
if (temporary.includes(label)) {
return LabelOrder.TEMP
} else if (extra.includes(label)) {
return LabelOrder.EXTRA
} else if (digits.test(label)) {
return LabelOrder.NUM
} else {
return LabelOrder.BASE
}
}
/** Sorting function for labels. */
// Sort first by taxonomy, then label type, and finally alphabetically.
export const label_sort: Comparator<string> = chain_cmps(
mkcmp(label_taxonomy),
mkcmp(label_order),
cmp_order
)
const anonymization: Taxonomy = [
{
group: 'Morphology',
entries: [
{label: 'gen', desc: 'genitive'},
{label: 'def', desc: 'definite'},
{label: 'pl', desc: 'plural'},
],
},
{
group: 'Names',
entries: [
{label: 'firstname_male', desc: ''},
{label: 'firstname_female', desc: ''},
{label: 'firstname_unknown', desc: ''},
{label: 'initials', desc: ''},
{label: 'middlename', desc: ''},
{label: 'surname', desc: ''},
],
},
{
group: 'Geographic data',
entries: [
{label: 'foreign', desc: ''},
{label: 'area', desc: ''},
{label: 'city', desc: 'city including villages'},
{label: 'country', desc: 'except Sweden'},
{label: 'geo', desc: 'forest, lake, mountain, etc'},
{label: 'place', desc: ''},
{label: 'region', desc: ''},
{label: 'street_nr', desc: 'street number'},
{label: 'zip_code', desc: ''},
],
},
{
group: 'Institutions',
entries: [
{label: 'school', desc: ''},
{label: 'work', desc: ''},
{label: 'other_institution', desc: ''},
],
},
{
group: 'Transportation',
entries: [
{label: 'transport_name', desc: 'bus, metro, tram, train, express'},
{label: 'transport_nr', desc: 'number, color'},
],
},
{
group: 'Age',
entries: [{label: 'age_digits', desc: ''}, {label: 'age_string', desc: ''}],
},
{
group: 'Dates',
entries: [
{label: 'date_digits', desc: 'numerical date representation, delimiters are retained'},
{label: 'day', desc: ''},
{label: 'month_digit', desc: ''},
{label: 'month_word', desc: ''},
{label: 'year', desc: ''},
],
},
{
group: 'Misc',
entries: [
{label: 'account_nr', desc: ''},
{label: 'email', desc: ''},
{label: 'extra', desc: ''},
{label: 'license_nr', desc: ''},
{label: 'other_nr_seq', desc: 'a sequence of numbers'},
{label: 'phone_nr', desc: ''},
{label: 'personid_nr', desc: ''},
{label: 'url', desc: ''},
],
},
{
group: 'Mark',
entries: [
{label: 'edu', desc: 'education, courses'},
{label: 'fam', desc: 'family members'},
{label: 'prof', desc: 'profession'},
{label: 'sensitive', desc: ''},
],
},
{
group: 'Other',
entries: [
{label: 'Cit-FL', desc: 'Citation for a language'},
{label: 'Com!', desc: 'Comment'},
{label: 'OBS!', desc: 'Attention'},
],
},
]
export const normalization: Taxonomy = [
{
group: 'Other',
entries: [
{
label: 'Cit-FL',
desc: 'Citation for a language'
},
{
label: 'Com!',
desc: 'Comment'
},
{
label: 'OBS!',
desc: 'Attention'
},
{
label: 'X',
desc: 'Impossible to interpret the writer’s intention',
},
],
},
]
// Julia's updated taxonomy 19 April 2018
export const correctannot: Taxonomy = [
{
group: 'Orthographic',
entries: [
{
label: 'O',
desc: 'Spelling',
},
{
label: 'O-Cap',
desc: 'Upper/lower case',
},
{
label: 'O-Comp',
desc: 'Spaces and hyphens between words',
},
],
},
{
group: 'Lexical',
entries: [
{
label: 'L-Der',
desc: 'Word formation (derivation and compounding)',
},
{
label: 'L-FL',
desc: 'Non-Swedish word corrected to Swedish word',
},
{
label: 'L-Ref',
desc: 'Choice of anaphoric expression',
},
{
label: 'L-W',
desc:
'Wrong word or phrase, other',
},
],
},
{
group: 'Morphological',
entries: [
{
label: 'M-Adj/adv',
desc: 'Adjective form of word corrected to adverb form',
},
{
label: 'M-Case',
desc: 'Nominative vs genitive/accusative',
},
{label: 'M-Def', desc: 'Definiteness: articles; forms of nouns and adjectives'},
{label: 'M-F', desc: 'Grammatical category kept, form changed'},
{label: 'M-Gend', desc: 'Gender'},
{label: 'M-Num', desc: 'Number'},
{
label: 'M-Other',
desc:
'Other morphological corrections, including change between different comparational forms of adjectives',
},
{label: 'M-Verb', desc: 'Verb forms; use of ha, komma and skola auxiliaries'},
],
},
{
group: 'Punctuation',
entries: [
{
label: 'P-M',
desc: 'Punctuation missing (added)',
},
{
label: 'P-R',
desc: 'Punctuation redundant (removed)',
},
{
label: 'P-Sent',
desc: 'Sentence segmentation',
},
{
label: 'P-W',
desc: 'Wrong punctuation',
},
],
},
{
group: 'Syntactical',
entries: [
{
label: 'S-Adv',
desc: 'Adverbial placement',
},
{
label: 'S-Comp',
desc: 'Compound vs multi-word expression, and other restructuring of the same lexical morphemes within a phrase',
},
{
label: 'S-Clause',
desc: 'Change of basic clause structure: syntactic function of components, hierarchical clause structure',
},
{
label: 'S-Ext',
desc: 'Extensive and complex correction',
},
{
label: 'S-FinV',
desc: 'Finite verb placement',
},
{
label: 'S-M',
desc:
'Word missing (added)',
},
{
label: 'S-Msubj',
desc: 'Subject missing (added)',
},
{
label: 'S-Other',
desc:
'Other syntactical correction',
},
{
label: 'S-R',
desc: 'Word redundant (removed)',
},
{
label: 'S-Type',
desc: 'Change of phrase type/part of speech',
},
{
label: 'S-WO',
desc: 'Word order, other',
},
],
},
{
group: 'Other',
entries: [
{
label: 'C',
desc: 'Consistency correction, necessitated by other correction',
},
{
label: 'Cit-FL',
desc: 'Non-Swedish word kept, i.e. not corrected',
},
{
label: 'Com!',
desc: 'Comments for the corpus user'
},
{
label: 'OBS!',
desc: 'Internal and temporary comments for the annotators'
},
{
label: 'Unid',
desc: 'Unidentified correction',
},
{
label: 'X',
desc: 'Unintelligible string',
},
],
},
]
function doc_url(title: string): string {
| const docs: Record<string, Record<string, string>> = {
anonymization: {
'pseudonymization guidelines': doc_url('Pseudonymization_guidelines'),
},
normalization: {
'normalization guidelines': doc_url('Normalization_guidelines'),
},
correctannot: {
'annotation guidelines': doc_url('Correction-annotation_guidelines'),
},
}
export const config = {
order_changing_labels,
examples,
image_ws_url,
pseuws_url,
taxonomy: {anonymization, normalization, correctannot},
docs,
}
/** What group does this label belong to?
(label_group('country') as TaxonomyGroup).group // => 'Geographic data'
label_group('quux') // => undefined
*/
export function label_group(label: string): TaxonomyGroup | undefined {
return config.taxonomy.anonymization.find(
group => !!group.entries.find(entry => entry.label == label)
)
}
export interface TaxonomyFind {
taxonomy: string
group: string
entry: {label: string; desc: string}
}
export function find_label(label: string): TaxonomyFind | undefined {
const order = label_order(label)
if (order === LabelOrder.NUM) {
return {taxonomy: 'anonymization', group: 'Number', entry: {label, desc: 'number'}}
}
if (order === LabelOrder.TEMP) {
return undefined
}
for (let taxonomy in config.taxonomy) {
for (let group of (config.taxonomy as {[mode: string]: Taxonomy})[taxonomy]) {
let entry = group.entries.find(entry => entry.label == label)
if (entry !== undefined) return {taxonomy, group: group.group, entry}
}
}
}
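// Usage sketch (assumption, not in the original file):
//   find_label('P-Sent') // => {taxonomy: 'correctannot', group: 'Punctuation', entry: {label: 'P-Sent', ...}}
//   find_label('7')      // => {taxonomy: 'anonymization', group: 'Number', entry: {label: '7', desc: 'number'}}
//   find_label('OBS!')   // => undefined, since temporary labels are skipped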
/** Get the taxonomy domain (editor mode) of a label. */
export function label_taxonomy(label: string): string | null {
return find_label(label) ? find_label(label)!.taxonomy : null
}
/** Does the named taxonomy include the given label? */
export function taxonomy_has_label(taxonomy: string, label: string): boolean {
if (!(taxonomy in config.taxonomy)) return false
const tax: Record<string, TaxonomyGroup[]> = config.taxonomy
return !!tax[taxonomy].find(g => !!g.entries.find(l => l.label == label))
}
| return 'https://spraakbanken.github.io/swell-project/' + title
}
| identifier_body |
filesystem.rs | use async_std;
use async_std::io::ReadExt;
use async_std::stream::StreamExt;
use async_std::task::spawn;
use crate::{BoxedError, BoxedErrorResult};
use crate::component_manager::*;
use crate::constants;
use crate::easyhash::{EasyHash, Hex};
use crate::globals;
use crate::heartbeat;
use crate::operation::*;
use serde::{Serialize, Deserialize};
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
use std::fmt;
use std::future::Future;
use std::io::Write;
pub fn get(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: get distributed_filename local_path".into())
}
let distributed_filename = args[0].to_string();
let local_path = args[1].to_string();
async_std::task::block_on(get_distributed_file(distributed_filename, local_path))?;
Ok(())
}
// args[0] = path to local file
// args[1] = distributed filename
pub fn put(args: Vec<&str>, sender: &OperationSender) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: put local_path distributed_filename".into())
}
let local_path = args[0];
let distributed_filename = args[1];
// Figure out who I am giving this file to
let dest_ids = gen_file_owners(&distributed_filename)?;
// Gossip who has the file now
sender.send(
SendableOperation::for_successors(Box::new(NewFileOwnersOperation {
distributed_filename: distributed_filename.to_string(),
new_owners: dest_ids
.iter()
.map(|x| x.to_string())
.collect::<HashSet<_>>()
}))
)?;
// Send them the file
async_std::task::block_on(send_file_to_all(local_path.to_string(),
distributed_filename.to_string(),
&dest_ids))?;
Ok(())
}
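// Usage sketch (assumption, not part of the original source): a command like
// `put notes.txt shared_notes` hashes "shared_notes" to pick the owner nodes,
// gossips a NewFileOwnersOperation to the successors, and then streams the file
// to each chosen owner over TCP.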
pub fn ls(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
let invalid_args: BoxedErrorResult<()> = Err("Usage: ls [distributed_filename]".into());
match args.len() {
0 => {
// All
print_file_owners(None, true)?;
Ok(())
},
1 => {
// Just File
let distributed_filename = args[0];
print_file_owners(Some(distributed_filename), false)?;
Ok(())
},
_ => invalid_args
}
}
// TODO: You wrote this very late - maybe fix
fn print_file_owners(maybe_distributed_filename: Option<&str>, full: bool) -> BoxedErrorResult<()> {
let all_file_owners = globals::ALL_FILE_OWNERS.read();
match (maybe_distributed_filename, full) {
(Some(_), true) => {
Err("Cannot set distributed_filename and full to true when printing owners".into())
},
(Some(distributed_filename), false) => {
// Print the files owners
match all_file_owners.get(distributed_filename) {
Some(owners) => {
println!("{:?}", owners);
},
None => {
// A little suboptimal - change if above format changes
println!("{{}}");
}
}
Ok(())
},
(None, true) => {
// Print the whole map
println!("{:?}", *all_file_owners);
Ok(())
},
(None, false) => {
Err("Cannot print owners of nonexistant distributed_filename with full set to false".into())
}
}
}
async fn get_distributed_file(distributed_filename: String, local_path: String) -> BoxedErrorResult<()> {
// TODO: Find owners
let operation = SendableOperation::for_owners(&distributed_filename, Box::new(GetOperation {
distributed_filename: distributed_filename.clone(),
local_path: local_path
}));
let mut streams = operation
.write_all_tcp_async()
.await?;
// TODO: Redo whatever tf going on here
match streams.len() {
0 => Err(format!("No owners found for file {}", distributed_filename).into()),
_ => {
let (result, source) = streams[0]
.try_read_operation()
.await?;
result.execute(source)?;
Ok(())
}
}
}
async fn read_file_to_buf(local_path: &String) -> BoxedErrorResult<Vec<u8>> {
let mut data_buf: Vec<u8> = Vec::new();
let mut file = async_std::fs::File::open(&local_path).await?;
file.read_to_end(&mut data_buf).await?;
Ok(data_buf)
}
async fn send_file_to_all(local_path: String, distributed_filename: String, dest_ids: &Vec<String>) ->
BoxedErrorResult<()> {
let data_buf = read_file_to_buf(&local_path).await?;
let operation = SendableOperation::for_id_list(dest_ids.clone(), Box::new(SendFileOperation {
filename: distributed_filename,
data: data_buf,
is_distributed: true
}));
operation.write_all_tcp_async().await?;
Ok(())
}
pub async fn file_server<'a>(_sender: &'a OperationSender) -> BoxedErrorResult<()> {
let server = globals::SERVER_SOCKET.read();
let mut incoming = server.incoming();
while let Some(stream) = incoming.next().await {
let connection = stream?;
log(format!("Handling connection from {:?}", connection.peer_addr()));
spawn(handle_connection(connection));
}
Ok(())
}
async fn handle_connection(mut connection: async_std::net::TcpStream) -> BoxedErrorResult<()> {
let (operation, source) = connection.try_read_operation().await?;
// TODO: Think about what standard we want with these
let _generated_operations = operation.execute(source)?;
Ok(())
}
// Helpers
fn gen_file_owners(filename: &str) -> BoxedErrorResult<Vec<String>> {
let file_idx = filename.easyhash();
heartbeat::gen_neighbor_list_from(file_idx as i32, 1, constants::NUM_OWNERS, true)
}
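// Placement sketch (assumption, not in the original file): the distributed filename is
// hashed onto the membership ring and the next NUM_OWNERS nodes after that position
// become the owners, e.g.
//   let owners = gen_file_owners("shared_notes")?; // => e.g. ["node_3", "node_4", "node_5"]
// The concrete ids depend on the current membership list maintained by `heartbeat`.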
// TODO: This function makes the entire system assume there are always at least two nodes in the system
// and the file must have an owner or else the operation will not work correctly. This is fine for now
// but it is worth improving sooner rather than later (make distinct Error types to differentiate, etc).
fn gen_new_file_owner(filename: &str) -> BoxedErrorResult<String> {
match globals::ALL_FILE_OWNERS.read().get(filename) {
Some(owners) => {
let potential_owners = gen_file_owners(filename)?;
for potential_owner in &potential_owners {
if !owners.contains(potential_owner) {
return Ok(potential_owner.clone());
}
}
Err(format!("No new owners available for file {}", filename).into())
},
None => Err(format!("No owner found for file {}", filename).into())
}
}
fn | (filename: &String) -> String {
format!("{}/{}", constants::DATA_DIR, filename)
}
// Returns messages to be gossiped
pub fn handle_failed_node(failed_id: &String) -> BoxedErrorResult<Vec<SendableOperation>> {
match heartbeat::is_master() {
true => {
let mut generated_operations: Vec<SendableOperation> = Vec::new();
let myself_source = Source::myself();
// Find files they owned
let mut lost_files: HashSet<String> = HashSet::new();
for (distributed_filename, owners) in globals::ALL_FILE_OWNERS.read().iter() {
if owners.contains(failed_id) {
lost_files.insert(distributed_filename.clone());
}
}
log("Found lost files".to_string());
// Send that they no longer own those files
// Separate operation so that this acts like a confirmation to fully forget about the node from
// the master. Can be used with a delay later if you want more error resistance.
let lost_file_operation = LostFilesOperation {
failed_owner: failed_id.clone(),
lost_files: lost_files.clone()
};
generated_operations.append(&mut lost_file_operation.execute(myself_source.clone())?);
log("Executed lost files operation locally".to_string());
// Gen new owners of the file and propagate
let mut new_owners: HashMap<String, HashSet<String>> = HashMap::new();
for lost_file in &lost_files {
let new_owner = gen_new_file_owner(&lost_file)?;
// TODO: Maybe optimize this into one fat packet - probably a new operation?
let new_owner_operation = NewFileOwnersOperation {
distributed_filename: lost_file.clone(),
new_owners: vec![new_owner].iter().map(|x| x.to_string()).collect()
};
generated_operations.append(&mut new_owner_operation.execute(myself_source.clone())?);
}
log("Executed all new_owner operations locally".to_string());
Ok(generated_operations)
},
false => {
Ok(vec![])
}
}
}
// Operations
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetOperation {
pub distributed_filename: String,
pub local_path: String
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NewFileOwnersOperation {
pub distributed_filename: String,
pub new_owners: HashSet<String>
}
#[derive(Serialize, Deserialize, Clone)]
pub struct SendFileOperation {
pub filename: String,
pub data: Vec<u8>,
pub is_distributed: bool
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LostFilesOperation {
pub failed_owner: String,
pub lost_files: HashSet<String>
}
// Trait Impls
impl OperationWriteExecute for GetOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("GET ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let local_path = distributed_file_path(&self.distributed_filename);
let data_buf = async_std::task::block_on(read_file_to_buf(&local_path))?;
let operation = SendableOperation::for_single_tcp_stream(
TryInto::<async_std::net::TcpStream>::try_into(source)?,
Box::new(SendFileOperation {
filename: self.local_path.clone(),
data: data_buf,
is_distributed: false
}));
async_std::task::block_on(operation.write_all_tcp_async())?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for NewFileOwnersOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("NFO ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Add this file to your map with the new people that have it
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
let mut file_owners = all_file_owners.entry(self.distributed_filename.clone()).or_insert(HashSet::new());
match (&self.new_owners - file_owners).len() {
0 => {
Ok(vec![])
},
_ => {
*file_owners = &self.new_owners | file_owners;
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
}
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for SendFileOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("FILE")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Check if the file exists before overwriting
let filename = match self.is_distributed {
true => format!("{}/{}", constants::DATA_DIR, self.filename),
false => self.filename.clone()
};
let mut file = std::fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(filename)?;
file.write_all(&self.data)?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl fmt::Debug for SendFileOperation {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let formatted_data = if self.data.len() > 40 {
format!("{:?}...", &self.data[..40])
} else {
format!("{:?}", &self.data)
};
fmt.debug_struct("SendFileOperation")
.field("filename", &self.filename)
.field("data", &formatted_data)
.field("is_distributed", &self.is_distributed)
.finish()
}
}
impl OperationWriteExecute for LostFilesOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("LOST")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let mut did_remove = false;
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
for lost_file in &self.lost_files {
if let Some(owners) = all_file_owners.get_mut(lost_file) {
did_remove |= owners.remove(&self.failed_owner);
}
}
if did_remove {
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
} else {
Ok(vec![])
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
| distributed_file_path | identifier_name |
filesystem.rs | use async_std;
use async_std::io::ReadExt;
use async_std::stream::StreamExt;
use async_std::task::spawn;
use crate::{BoxedError, BoxedErrorResult};
use crate::component_manager::*;
use crate::constants;
use crate::easyhash::{EasyHash, Hex};
use crate::globals;
use crate::heartbeat;
use crate::operation::*;
use serde::{Serialize, Deserialize};
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
use std::fmt;
use std::future::Future;
use std::io::Write;
pub fn get(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: get distributed_filename local_path".into())
}
let distributed_filename = args[0].to_string();
let local_path = args[1].to_string();
async_std::task::block_on(get_distributed_file(distributed_filename, local_path))?;
Ok(())
}
// args[0] = path to local file
// args[1] = distributed filename
pub fn put(args: Vec<&str>, sender: &OperationSender) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: put local_path distributed_filename".into())
}
let local_path = args[0];
let distributed_filename = args[1];
// Figure out who I am giving this file to
let dest_ids = gen_file_owners(&distributed_filename)?;
// Gossip who has the file now
sender.send(
SendableOperation::for_successors(Box::new(NewFileOwnersOperation {
distributed_filename: distributed_filename.to_string(),
new_owners: dest_ids
.iter()
.map(|x| x.to_string())
.collect::<HashSet<_>>()
}))
)?;
// Send them the file
async_std::task::block_on(send_file_to_all(local_path.to_string(),
distributed_filename.to_string(),
&dest_ids))?;
Ok(())
}
pub fn ls(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
let invalid_args: BoxedErrorResult<()> = Err("Usage: ls [distributed_filename]".into());
match args.len() {
0 => {
// All
print_file_owners(None, true)?;
Ok(())
},
1 => {
// Just File
let distributed_filename = args[0];
print_file_owners(Some(distributed_filename), false)?;
Ok(())
},
_ => invalid_args
}
}
// TODO: You wrote this very late - maybe fix
fn print_file_owners(maybe_distributed_filename: Option<&str>, full: bool) -> BoxedErrorResult<()> {
let all_file_owners = globals::ALL_FILE_OWNERS.read();
match (maybe_distributed_filename, full) {
(Some(_), true) => {
Err("Cannot set distributed_filename and full to true when printing owners".into())
},
(Some(distributed_filename), false) => {
// Print the files owners
match all_file_owners.get(distributed_filename) {
Some(owners) => {
println!("{:?}", owners);
},
None => {
// A little suboptimal - change if above format changes
println!("{{}}");
}
}
Ok(())
},
(None, true) => {
// Print the whole map
println!("{:?}", *all_file_owners);
Ok(())
},
(None, false) => {
Err("Cannot print owners of nonexistant distributed_filename with full set to false".into())
}
}
}
async fn get_distributed_file(distributed_filename: String, local_path: String) -> BoxedErrorResult<()> {
// TODO: Find owners
let operation = SendableOperation::for_owners(&distributed_filename, Box::new(GetOperation {
distributed_filename: distributed_filename.clone(),
local_path: local_path
}));
let mut streams = operation
.write_all_tcp_async()
.await?;
// TODO: Redo whatever tf going on here
match streams.len() {
0 => Err(format!("No owners found for file {}", distributed_filename).into()),
_ => {
let (result, source) = streams[0]
.try_read_operation()
.await?;
result.execute(source)?;
Ok(())
}
}
}
async fn read_file_to_buf(local_path: &String) -> BoxedErrorResult<Vec<u8>> {
let mut data_buf: Vec<u8> = Vec::new();
let mut file = async_std::fs::File::open(&local_path).await?;
file.read_to_end(&mut data_buf).await?;
Ok(data_buf)
}
async fn send_file_to_all(local_path: String, distributed_filename: String, dest_ids: &Vec<String>) ->
BoxedErrorResult<()> {
let data_buf = read_file_to_buf(&local_path).await?;
let operation = SendableOperation::for_id_list(dest_ids.clone(), Box::new(SendFileOperation {
filename: distributed_filename,
data: data_buf,
is_distributed: true
}));
operation.write_all_tcp_async().await?;
Ok(())
}
pub async fn file_server<'a>(_sender: &'a OperationSender) -> BoxedErrorResult<()> {
let server = globals::SERVER_SOCKET.read();
let mut incoming = server.incoming();
while let Some(stream) = incoming.next().await {
let connection = stream?;
log(format!("Handling connection from {:?}", connection.peer_addr()));
spawn(handle_connection(connection));
}
Ok(())
}
async fn handle_connection(mut connection: async_std::net::TcpStream) -> BoxedErrorResult<()> {
let (operation, source) = connection.try_read_operation().await?;
// TODO: Think about what standard we want with these
let _generated_operations = operation.execute(source)?;
Ok(())
}
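// Dispatch sketch (assumption, not in the original file): try_read_operation is expected to
// read the 4-byte tag produced by to_bytes ("GET ", "NFO ", "FILE", "LOST"), deserialize the
// matching operation struct, and hand it to execute() above.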
// Helpers
fn gen_file_owners(filename: &str) -> BoxedErrorResult<Vec<String>> {
let file_idx = filename.easyhash();
heartbeat::gen_neighbor_list_from(file_idx as i32, 1, constants::NUM_OWNERS, true)
}
// TODO: This function makes the entire system assume there are always at least two nodes in the system
// and the file must have an owner or else the operation will not work correctly. This is fine for now
// but it is worth improving sooner rather than later (make distinct Error types to differentiate, etc).
fn gen_new_file_owner(filename: &str) -> BoxedErrorResult<String> {
match globals::ALL_FILE_OWNERS.read().get(filename) {
Some(owners) => {
let potential_owners = gen_file_owners(filename)?;
for potential_owner in &potential_owners {
if !owners.contains(potential_owner) |
}
Err(format!("No new owners available for file {}", filename).into())
},
None => Err(format!("No owner found for file {}", filename).into())
}
}
fn distributed_file_path(filename: &String) -> String {
format!("{}/{}", constants::DATA_DIR, filename)
}
// Returns messages to be gossiped
pub fn handle_failed_node(failed_id: &String) -> BoxedErrorResult<Vec<SendableOperation>> {
match heartbeat::is_master() {
true => {
let mut generated_operations: Vec<SendableOperation> = Vec::new();
let myself_source = Source::myself();
// Find files they owned
let mut lost_files: HashSet<String> = HashSet::new();
for (distributed_filename, owners) in globals::ALL_FILE_OWNERS.read().iter() {
if owners.contains(failed_id) {
lost_files.insert(distributed_filename.clone());
}
}
log("Found lost files".to_string());
// Send that they no longer own those files
// Separate operation so that this acts like a confirmation to fully forget about the node from
// the master. Can be used with a delay later if you want more error resistance.
let lost_file_operation = LostFilesOperation {
failed_owner: failed_id.clone(),
lost_files: lost_files.clone()
};
generated_operations.append(&mut lost_file_operation.execute(myself_source.clone())?);
log("Executed lost files operation locally".to_string());
// Gen new owners of the file and propagate
let mut new_owners: HashMap<String, HashSet<String>> = HashMap::new();
for lost_file in &lost_files {
let new_owner = gen_new_file_owner(&lost_file)?;
// TODO: Maybe optimize this into one fat packet - probably a new operation?
let new_owner_operation = NewFileOwnersOperation {
distributed_filename: lost_file.clone(),
new_owners: vec![new_owner].iter().map(|x| x.to_string()).collect()
};
generated_operations.append(&mut new_owner_operation.execute(myself_source.clone())?);
}
log("Executed all new_owner operations locally".to_string());
Ok(generated_operations)
},
false => {
Ok(vec![])
}
}
}
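// Recovery sketch (assumption, not in the original file): on the master the steps above are
//   1. collect every distributed file whose owner set contains the failed id,
//   2. execute and gossip a LostFilesOperation so all nodes drop that owner,
//   3. for each lost file, pick a replacement with gen_new_file_owner and
//      execute and gossip a NewFileOwnersOperation.
// Non-master nodes return no operations and rely on the gossip from the master.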
// Operations
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetOperation {
pub distributed_filename: String,
pub local_path: String
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NewFileOwnersOperation {
pub distributed_filename: String,
pub new_owners: HashSet<String>
}
#[derive(Serialize, Deserialize, Clone)]
pub struct SendFileOperation {
pub filename: String,
pub data: Vec<u8>,
pub is_distributed: bool
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LostFilesOperation {
pub failed_owner: String,
pub lost_files: HashSet<String>
}
// Trait Impls
impl OperationWriteExecute for GetOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("GET ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let local_path = distributed_file_path(&self.distributed_filename);
let data_buf = async_std::task::block_on(read_file_to_buf(&local_path))?;
let operation = SendableOperation::for_single_tcp_stream(
TryInto::<async_std::net::TcpStream>::try_into(source)?,
Box::new(SendFileOperation {
filename: self.local_path.clone(),
data: data_buf,
is_distributed: false
}));
async_std::task::block_on(operation.write_all_tcp_async())?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for NewFileOwnersOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("NFO ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Add this file to your map with the new people that have it
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
let mut file_owners = all_file_owners.entry(self.distributed_filename.clone()).or_insert(HashSet::new());
match (&self.new_owners - file_owners).len() {
0 => {
Ok(vec![])
},
_ => {
*file_owners = &self.new_owners | file_owners;
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
}
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
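// Set-arithmetic example for the execute() above (assumption, not in the original file):
// with current owners {"a", "b"} and incoming new_owners {"b", "c"}, the difference is {"c"},
// so the stored set becomes the union {"a", "b", "c"} and the operation is re-gossiped;
// an empty difference means nothing new was learned and the gossip stops.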
impl OperationWriteExecute for SendFileOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("FILE")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Check if the file exists before overwriting
let filename = match self.is_distributed {
true => format!("{}/{}", constants::DATA_DIR, self.filename),
false => self.filename.clone()
};
let mut file = std::fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(filename)?;
file.write_all(&self.data)?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl fmt::Debug for SendFileOperation {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let formatted_data = if self.data.len() > 40 {
format!("{:?}...", &self.data[..40])
} else {
format!("{:?}", &self.data)
};
fmt.debug_struct("SendFileOperation")
.field("filename", &self.filename)
.field("data", &formatted_data)
.field("is_distributed", &self.is_distributed)
.finish()
}
}
impl OperationWriteExecute for LostFilesOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("LOST")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let mut did_remove = false;
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
for lost_file in &self.lost_files {
if let Some(owners) = all_file_owners.get_mut(lost_file) {
did_remove |= owners.remove(&self.failed_owner);
}
}
if did_remove {
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
} else {
Ok(vec![])
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
| {
return Ok(potential_owner.clone());
} | conditional_block |
filesystem.rs | use async_std;
use async_std::io::ReadExt;
use async_std::stream::StreamExt;
use async_std::task::spawn;
use crate::{BoxedError, BoxedErrorResult};
use crate::component_manager::*;
use crate::constants;
use crate::easyhash::{EasyHash, Hex};
use crate::globals;
use crate::heartbeat;
use crate::operation::*;
use serde::{Serialize, Deserialize};
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
use std::fmt;
use std::future::Future;
use std::io::Write;
pub fn get(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: get distributed_filename local_path".into())
}
let distributed_filename = args[0].to_string();
let local_path = args[1].to_string();
async_std::task::block_on(get_distributed_file(distributed_filename, local_path))?;
Ok(())
}
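// Usage sketch (assumption, not part of the original source): `get shared_notes /tmp/notes.txt`
// sends a GetOperation to the owners of "shared_notes" and writes the first reply to
// /tmp/notes.txt via the SendFileOperation handler further down.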
// args[0] = path to local file
// args[1] = distributed filename
pub fn put(args: Vec<&str>, sender: &OperationSender) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: put local_path distributed_filename".into())
}
let local_path = args[0];
let distributed_filename = args[1];
// Figure out who I am giving this file to
let dest_ids = gen_file_owners(&distributed_filename)?;
// Gossip who has the file now
sender.send(
SendableOperation::for_successors(Box::new(NewFileOwnersOperation {
distributed_filename: distributed_filename.to_string(),
new_owners: dest_ids
.iter()
.map(|x| x.to_string())
.collect::<HashSet<_>>()
}))
)?;
// Send them the file
async_std::task::block_on(send_file_to_all(local_path.to_string(),
distributed_filename.to_string(),
&dest_ids))?;
Ok(())
}
pub fn ls(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
let invalid_args: BoxedErrorResult<()> = Err("Usage: ls [distributed_filename]".into());
match args.len() {
0 => {
// All
print_file_owners(None, true)?;
Ok(())
},
1 => {
// Just File
let distributed_filename = args[0];
print_file_owners(Some(distributed_filename), false)?;
Ok(())
},
_ => invalid_args
}
}
// TODO: You wrote this very late - maybe fix
fn print_file_owners(maybe_distributed_filename: Option<&str>, full: bool) -> BoxedErrorResult<()> {
let all_file_owners = globals::ALL_FILE_OWNERS.read();
match (maybe_distributed_filename, full) {
(Some(_), true) => {
Err("Cannot set distributed_filename and full to true when printing owners".into())
},
(Some(distributed_filename), false) => {
// Print the files owners
match all_file_owners.get(distributed_filename) {
Some(owners) => {
println!("{:?}", owners);
},
None => {
// A little suboptimal - change if above format changes
println!("{{}}");
}
}
Ok(())
},
(None, true) => {
// Print the whole map
println!("{:?}", *all_file_owners);
Ok(())
},
(None, false) => {
Err("Cannot print owners of nonexistant distributed_filename with full set to false".into())
}
}
}
async fn get_distributed_file(distributed_filename: String, local_path: String) -> BoxedErrorResult<()> {
// TODO: Find owners
let operation = SendableOperation::for_owners(&distributed_filename, Box::new(GetOperation {
distributed_filename: distributed_filename.clone(),
local_path: local_path
}));
let mut streams = operation
.write_all_tcp_async()
.await?;
// TODO: Redo whatever tf going on here
match streams.len() {
0 => Err(format!("No owners found for file {}", distributed_filename).into()),
_ => {
let (result, source) = streams[0]
.try_read_operation()
.await?;
result.execute(source)?;
Ok(())
}
}
}
async fn read_file_to_buf(local_path: &String) -> BoxedErrorResult<Vec<u8>> {
let mut data_buf: Vec<u8> = Vec::new();
let mut file = async_std::fs::File::open(&local_path).await?;
file.read_to_end(&mut data_buf).await?;
Ok(data_buf)
}
async fn send_file_to_all(local_path: String, distributed_filename: String, dest_ids: &Vec<String>) ->
BoxedErrorResult<()> {
let data_buf = read_file_to_buf(&local_path).await?;
let operation = SendableOperation::for_id_list(dest_ids.clone(), Box::new(SendFileOperation {
filename: distributed_filename,
data: data_buf,
is_distributed: true
}));
operation.write_all_tcp_async().await?;
Ok(())
}
pub async fn file_server<'a>(_sender: &'a OperationSender) -> BoxedErrorResult<()> {
let server = globals::SERVER_SOCKET.read();
let mut incoming = server.incoming();
while let Some(stream) = incoming.next().await {
let connection = stream?;
log(format!("Handling connection from {:?}", connection.peer_addr()));
spawn(handle_connection(connection));
}
Ok(())
}
async fn handle_connection(mut connection: async_std::net::TcpStream) -> BoxedErrorResult<()> {
let (operation, source) = connection.try_read_operation().await?;
// TODO: Think about what standard we want with these
let _generated_operations = operation.execute(source)?;
Ok(())
}
// Helpers
fn gen_file_owners(filename: &str) -> BoxedErrorResult<Vec<String>> {
let file_idx = filename.easyhash();
heartbeat::gen_neighbor_list_from(file_idx as i32, 1, constants::NUM_OWNERS, true)
} |
// TODO: This function makes the entire system assume there are always at least two nodes in the system
// and the file must have an owner or else the operation will not work correctly. This is fine for now
// but it is worth improving sooner rather than later (make distinct Error types to differentiate, etc).
fn gen_new_file_owner(filename: &str) -> BoxedErrorResult<String> {
match globals::ALL_FILE_OWNERS.read().get(filename) {
Some(owners) => {
let potential_owners = gen_file_owners(filename)?;
for potential_owner in &potential_owners {
if !owners.contains(potential_owner) {
return Ok(potential_owner.clone());
}
}
Err(format!("No new owners available for file {}", filename).into())
},
None => Err(format!("No owner found for file {}", filename).into())
}
}
fn distributed_file_path(filename: &String) -> String {
format!("{}/{}", constants::DATA_DIR, filename)
}
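// For illustration (assumption): if constants::DATA_DIR were "data", then
// distributed_file_path(&"report.txt".to_string()) would return "data/report.txt".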
// Returns messages to be gossiped
pub fn handle_failed_node(failed_id: &String) -> BoxedErrorResult<Vec<SendableOperation>> {
match heartbeat::is_master() {
true => {
let mut generated_operations: Vec<SendableOperation> = Vec::new();
let myself_source = Source::myself();
// Find files they owned
let mut lost_files: HashSet<String> = HashSet::new();
for (distributed_filename, owners) in globals::ALL_FILE_OWNERS.read().iter() {
if owners.contains(failed_id) {
lost_files.insert(distributed_filename.clone());
}
}
log("Found lost files".to_string());
// Send that they no longer own those files
// Separate operation so that this acts like a confirmation to fully forget about the node from
// the master. Can be used with a delay later if you want more error resistance.
let lost_file_operation = LostFilesOperation {
failed_owner: failed_id.clone(),
lost_files: lost_files.clone()
};
generated_operations.append(&mut lost_file_operation.execute(myself_source.clone())?);
log("Executed lost files operation locally".to_string());
// Gen new owners of the file and propagate
let mut new_owners: HashMap<String, HashSet<String>> = HashMap::new();
for lost_file in &lost_files {
let new_owner = gen_new_file_owner(&lost_file)?;
// TODO: Maybe optimize this into one fat packet - probably a new operation?
let new_owner_operation = NewFileOwnersOperation {
distributed_filename: lost_file.clone(),
new_owners: vec![new_owner].iter().map(|x| x.to_string()).collect()
};
generated_operations.append(&mut new_owner_operation.execute(myself_source.clone())?);
}
log("Executed all new_owner operations locally".to_string());
Ok(generated_operations)
},
false => {
Ok(vec![])
}
}
}
// Operations
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetOperation {
pub distributed_filename: String,
pub local_path: String
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NewFileOwnersOperation {
pub distributed_filename: String,
pub new_owners: HashSet<String>
}
#[derive(Serialize, Deserialize, Clone)]
pub struct SendFileOperation {
pub filename: String,
pub data: Vec<u8>,
pub is_distributed: bool
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LostFilesOperation {
pub failed_owner: String,
pub lost_files: HashSet<String>
}
// Trait Impls
impl OperationWriteExecute for GetOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("GET ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let local_path = distributed_file_path(&self.distributed_filename);
let data_buf = async_std::task::block_on(read_file_to_buf(&local_path))?;
let operation = SendableOperation::for_single_tcp_stream(
TryInto::<async_std::net::TcpStream>::try_into(source)?,
Box::new(SendFileOperation {
filename: self.local_path.clone(),
data: data_buf,
is_distributed: false
}));
async_std::task::block_on(operation.write_all_tcp_async())?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for NewFileOwnersOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("NFO ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Add this file to your map with the new people that have it
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
let mut file_owners = all_file_owners.entry(self.distributed_filename.clone()).or_insert(HashSet::new());
match (&self.new_owners - file_owners).len() {
0 => {
Ok(vec![])
},
_ => {
*file_owners = &self.new_owners | file_owners;
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
}
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for SendFileOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("FILE")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Check if the file exists before overwriting
let filename = match self.is_distributed {
true => format!("{}/{}", constants::DATA_DIR, self.filename),
false => self.filename.clone()
};
let mut file = std::fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(filename)?;
file.write_all(&self.data)?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl fmt::Debug for SendFileOperation {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let formatted_data = if self.data.len() > 40 {
format!("{:?}...", &self.data[..40])
} else {
format!("{:?}", &self.data)
};
fmt.debug_struct("SendFileOperation")
.field("filename", &self.filename)
.field("data", &formatted_data)
.field("is_distributed", &self.is_distributed)
.finish()
}
}
impl OperationWriteExecute for LostFilesOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("LOST")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let mut did_remove = false;
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
for lost_file in &self.lost_files {
if let Some(owners) = all_file_owners.get_mut(lost_file) {
did_remove |= owners.remove(&self.failed_owner);
}
}
if did_remove {
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
} else {
Ok(vec![])
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
} | random_line_split |
|
filesystem.rs | use async_std;
use async_std::io::ReadExt;
use async_std::stream::StreamExt;
use async_std::task::spawn;
use crate::{BoxedError, BoxedErrorResult};
use crate::component_manager::*;
use crate::constants;
use crate::easyhash::{EasyHash, Hex};
use crate::globals;
use crate::heartbeat;
use crate::operation::*;
use serde::{Serialize, Deserialize};
use std::collections::{HashMap, HashSet};
use std::convert::TryInto;
use std::fmt;
use std::future::Future;
use std::io::Write;
pub fn get(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: get distributed_filename local_path".into())
}
let distributed_filename = args[0].to_string();
let local_path = args[1].to_string();
async_std::task::block_on(get_distributed_file(distributed_filename, local_path))?;
Ok(())
}
// args[0] = path to local file
// args[1] = distributed filename
pub fn put(args: Vec<&str>, sender: &OperationSender) -> BoxedErrorResult<()> {
check_joined()?;
if args.len() != 2 {
return Err("Usage: put local_path distributed_filename".into())
}
let local_path = args[0];
let distributed_filename = args[1];
// Figure out who I am giving this file to
let dest_ids = gen_file_owners(&distributed_filename)?;
// Gossip who has the file now
sender.send(
SendableOperation::for_successors(Box::new(NewFileOwnersOperation {
distributed_filename: distributed_filename.to_string(),
new_owners: dest_ids
.iter()
.map(|x| x.to_string())
.collect::<HashSet<_>>()
}))
)?;
// Send them the file
async_std::task::block_on(send_file_to_all(local_path.to_string(),
distributed_filename.to_string(),
&dest_ids))?;
Ok(())
}
pub fn ls(args: Vec<&str>) -> BoxedErrorResult<()> {
check_joined()?;
let invalid_args: BoxedErrorResult<()> = Err("Usage: ls [distributed_filename]".into());
match args.len() {
0 => {
// All
print_file_owners(None, true)?;
Ok(())
},
1 => {
// Just File
let distributed_filename = args[0];
print_file_owners(Some(distributed_filename), false)?;
Ok(())
},
_ => invalid_args
}
}
// TODO: You wrote this very late - maybe fix
fn print_file_owners(maybe_distributed_filename: Option<&str>, full: bool) -> BoxedErrorResult<()> {
let all_file_owners = globals::ALL_FILE_OWNERS.read();
match (maybe_distributed_filename, full) {
(Some(_), true) => {
Err("Cannot set distributed_filename and full to true when printing owners".into())
},
(Some(distributed_filename), false) => {
// Print the files owners
match all_file_owners.get(distributed_filename) {
Some(owners) => {
println!("{:?}", owners);
},
None => {
// A little suboptimal - change if above format changes
println!("{{}}");
}
}
Ok(())
},
(None, true) => {
// Print the whole map
println!("{:?}", *all_file_owners);
Ok(())
},
(None, false) => {
Err("Cannot print owners of nonexistant distributed_filename with full set to false".into())
}
}
}
async fn get_distributed_file(distributed_filename: String, local_path: String) -> BoxedErrorResult<()> {
// TODO: Find owners
let operation = SendableOperation::for_owners(&distributed_filename, Box::new(GetOperation {
distributed_filename: distributed_filename.clone(),
local_path: local_path
}));
let mut streams = operation
.write_all_tcp_async()
.await?;
// TODO: Rework this response handling
match streams.len() {
0 => Err(format!("No owners found for file {}", distributed_filename).into()),
_ => {
let (result, source) = streams[0]
.try_read_operation()
.await?;
result.execute(source)?;
Ok(())
}
}
}
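// Note (added commentary, not part of the original source): the owner answers a
// GetOperation with a SendFileOperation whose `filename` is the requester's
// `local_path` and whose `is_distributed` flag is false, so executing that response
// above is what actually writes the fetched bytes to the requested local path.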
async fn read_file_to_buf(local_path: &String) -> BoxedErrorResult<Vec<u8>> {
let mut data_buf: Vec<u8> = Vec::new();
let mut file = async_std::fs::File::open(&local_path).await?;
file.read_to_end(&mut data_buf).await?;
Ok(data_buf)
}
async fn send_file_to_all(local_path: String, distributed_filename: String, dest_ids: &Vec<String>) ->
BoxedErrorResult<()> {
let data_buf = read_file_to_buf(&local_path).await?;
let operation = SendableOperation::for_id_list(dest_ids.clone(), Box::new(SendFileOperation {
filename: distributed_filename,
data: data_buf,
is_distributed: true
}));
operation.write_all_tcp_async().await?;
Ok(())
}
pub async fn file_server<'a>(_sender: &'a OperationSender) -> BoxedErrorResult<()> |
async fn handle_connection(mut connection: async_std::net::TcpStream) -> BoxedErrorResult<()> {
let (operation, source) = connection.try_read_operation().await?;
// TODO: Think about what standard we want with these
let _generated_operations = operation.execute(source)?;
Ok(())
}
// Helpers
fn gen_file_owners(filename: &str) -> BoxedErrorResult<Vec<String>> {
let file_idx = filename.easyhash();
heartbeat::gen_neighbor_list_from(file_idx as i32, 1, constants::NUM_OWNERS, true)
}
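// Note (added commentary, not part of the original source): ownership is decided by
// consistent hashing - the file name is hashed onto the same id space as the nodes
// and `constants::NUM_OWNERS` neighbors starting from that point are selected. The
// exact walk (direction, wrap-around) is defined by heartbeat::gen_neighbor_list_from,
// which is not shown in this file.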
// TODO: This function makes the entire system assume there are always at least two nodes in the system
// and the file must have an owner or else the operation will not work correctly. This is fine for now
// but it is worth improving sooner rather than later (make distinct Error types to differentiate, etc).
fn gen_new_file_owner(filename: &str) -> BoxedErrorResult<String> {
match globals::ALL_FILE_OWNERS.read().get(filename) {
Some(owners) => {
let potential_owners = gen_file_owners(filename)?;
for potential_owner in &potential_owners {
if !owners.contains(potential_owner) {
return Ok(potential_owner.clone());
}
}
Err(format!("No new owners available for file {}", filename).into())
},
None => Err(format!("No owner found for file {}", filename).into())
}
}
fn distributed_file_path(filename: &String) -> String {
format!("{}/{}", constants::DATA_DIR, filename)
}
// Returns messages to be gossiped
pub fn handle_failed_node(failed_id: &String) -> BoxedErrorResult<Vec<SendableOperation>> {
match heartbeat::is_master() {
true => {
let mut generated_operations: Vec<SendableOperation> = Vec::new();
let myself_source = Source::myself();
// Find files they owned
let mut lost_files: HashSet<String> = HashSet::new();
for (distributed_filename, owners) in globals::ALL_FILE_OWNERS.read().iter() {
if owners.contains(failed_id) {
lost_files.insert(distributed_filename.clone());
}
}
log("Found lost files".to_string());
// Send that they no longer own those files
// Separate operation so that this acts like a confirmation to fully forget about the node from
// the master. Can be used with a delay later if you want more error resistance.
let lost_file_operation = LostFilesOperation {
failed_owner: failed_id.clone(),
lost_files: lost_files.clone()
};
generated_operations.append(&mut lost_file_operation.execute(myself_source.clone())?);
log("Executed lost files operation locally".to_string());
// Gen new owners of the file and propagate
let mut new_owners: HashMap<String, HashSet<String>> = HashMap::new();
for lost_file in &lost_files {
let new_owner = gen_new_file_owner(&lost_file)?;
// TODO: Maybe optimize this into one fat packet - probably a new operation?
let new_owner_operation = NewFileOwnersOperation {
distributed_filename: lost_file.clone(),
new_owners: vec![new_owner].iter().map(|x| x.to_string()).collect()
};
generated_operations.append(&mut new_owner_operation.execute(myself_source.clone())?);
}
log("Executed all new_owner operations locally".to_string());
Ok(generated_operations)
},
false => {
Ok(vec![])
}
}
}
// Operations
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct GetOperation {
pub distributed_filename: String,
pub local_path: String
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct NewFileOwnersOperation {
pub distributed_filename: String,
pub new_owners: HashSet<String>
}
#[derive(Serialize, Deserialize, Clone)]
pub struct SendFileOperation {
pub filename: String,
pub data: Vec<u8>,
pub is_distributed: bool
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LostFilesOperation {
pub failed_owner: String,
pub lost_files: HashSet<String>
}
// Trait Impls
impl OperationWriteExecute for GetOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("GET ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let local_path = distributed_file_path(&self.distributed_filename);
let data_buf = async_std::task::block_on(read_file_to_buf(&local_path))?;
let operation = SendableOperation::for_single_tcp_stream(
TryInto::<async_std::net::TcpStream>::try_into(source)?,
Box::new(SendFileOperation {
filename: self.local_path.clone(),
data: data_buf,
is_distributed: false
}));
async_std::task::block_on(operation.write_all_tcp_async())?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for NewFileOwnersOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("NFO ")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Add this file to your map with the new people that have it
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
let mut file_owners = all_file_owners.entry(self.distributed_filename.clone()).or_insert(HashSet::new());
match (&self.new_owners - file_owners).len() {
0 => {
Ok(vec![])
},
_ => {
*file_owners = &self.new_owners | file_owners;
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
}
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl OperationWriteExecute for SendFileOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("FILE")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
// TODO: Check if the file exists before overwriting
let filename = match self.is_distributed {
true => format!("{}/{}", constants::DATA_DIR, self.filename),
false => self.filename.clone()
};
let mut file = std::fs::OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(filename)?;
file.write_all(&self.data)?;
Ok(vec![])
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
impl fmt::Debug for SendFileOperation {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
let formatted_data = if self.data.len() > 40 {
format!("{:?}...", &self.data[..40])
} else {
format!("{:?}", &self.data)
};
fmt.debug_struct("SendFileOperation")
.field("filename", &self.filename)
.field("data", &formatted_data)
.field("is_distributed", &self.is_distributed)
.finish()
}
}
impl OperationWriteExecute for LostFilesOperation {
fn to_bytes(&self) -> BoxedErrorResult<Vec<u8>> {
Ok(create_buf(&self, str_to_vec("LOST")))
}
fn execute(&self, source: Source) -> BoxedErrorResult<Vec<SendableOperation>> {
let mut did_remove = false;
let mut all_file_owners = globals::ALL_FILE_OWNERS.get_mut();
for lost_file in &self.lost_files {
if let Some(owners) = all_file_owners.get_mut(lost_file) {
did_remove |= owners.remove(&self.failed_owner);
}
}
if did_remove {
Ok(vec![SendableOperation::for_successors(Box::new(self.clone()))])
} else {
Ok(vec![])
}
}
fn to_string(&self) -> String { format!("{:?}", self) }
}
| {
let server = globals::SERVER_SOCKET.read();
let mut incoming = server.incoming();
while let Some(stream) = incoming.next().await {
let connection = stream?;
log(format!("Handling connection from {:?}", connection.peer_addr()));
spawn(handle_connection(connection));
}
Ok(())
} | identifier_body |
main.rs | mod utils;
use chrono::{DateTime, Utc, TimeZone};
use actix::{Actor, Handler, Message, AsyncContext};
use actix_web::{http, web, HttpResponse, App};
use actix_web::middleware::cors::Cors;
use futures::{Future};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use log::debug;
use crate::utils::ErrString;
#[derive(Debug, Serialize, Clone)]
struct Item {
pub title: Option<String>,
pub link: Option<String>,
pub content: Option<String>,
pub pub_date: Option<DateTime<Utc>>,
pub guid: String,
pub unread: bool,
}
#[derive(Clone, Debug, Serialize)]
struct Feed {
pub title: String,
pub last_updated: DateTime<Utc>,
pub items: Vec<Item>,
}
impl Feed {
pub fn merge(&mut self, other: Feed) {
self.title = other.title;
self.last_updated = other.last_updated;
let mut items: HashMap<&str, Item> = self.items.iter().map(|item| (item.guid.as_str(), item.clone())).collect();
for item in other.items.iter() {
let guid = &item.guid;
items.entry(&guid).or_insert_with(|| Item {
title: item.title.to_owned(),
link: item.link.to_owned(),
content: item.content.to_owned(),
pub_date: item.pub_date.to_owned(),
guid: guid.to_owned(),
unread: true,
});
}
self.items = items.drain().map(|(_, v)| v).collect();
self.items.sort_by_key(|item| item.pub_date.clone());
}
}
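// Illustrative sketch (added commentary, not part of the original source): a minimal
// check of the merge semantics above - an item that was already marked read keeps its
// state, while an item only present in the freshly downloaded feed arrives unread.
#[cfg(test)]
mod feed_merge_sketch {
    use super::{Feed, Item};
    use chrono::{TimeZone, Utc};

    #[test]
    fn merge_preserves_read_state_and_adds_new_items() {
        let read_item = Item {
            title: Some("seen".to_string()),
            link: None,
            content: None,
            pub_date: Some(Utc.timestamp(1, 0)),
            guid: "a".to_string(),
            unread: false,
        };
        let mut feed = Feed {
            title: "example".to_string(),
            last_updated: Utc.timestamp(1, 0),
            items: vec![read_item.clone()],
        };
        let fresh = Feed {
            title: "example".to_string(),
            last_updated: Utc.timestamp(2, 0),
            items: vec![
                // Same guid as the stored item, but reported as unread by the server.
                Item { unread: true, ..read_item.clone() },
                // A brand new item that should be inserted as unread.
                Item {
                    title: None,
                    link: None,
                    content: None,
                    pub_date: Some(Utc.timestamp(3, 0)),
                    guid: "b".to_string(),
                    unread: true,
                },
            ],
        };
        feed.merge(fresh);
        assert_eq!(feed.items.len(), 2);
        assert!(!feed.items.iter().find(|i| i.guid == "a").unwrap().unread);
        assert!(feed.items.iter().find(|i| i.guid == "b").unwrap().unread);
    }
}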
#[derive(Serialize)]
struct FeedInfo {
pub url: String,
pub title: String,
pub last_updated: DateTime<Utc>,
}
struct DownloadFeed(String);
#[derive(Deserialize)]
struct AddFeed { url: String }
#[derive(Deserialize)]
struct RemoveFeed { url: String }
#[derive(Deserialize, Debug)]
struct GetFeed { url: String }
struct ListFeeds;
#[derive(Deserialize, Debug)]
struct MarkRead { url: String, guid: String }
#[derive(Message)]
struct UpdateFeed { url: String, feed: Feed }
impl Message for DownloadFeed {
type Result = Result<Feed, String>;
}
impl Message for AddFeed {
type Result = Result<(), String>;
}
impl Message for RemoveFeed {
type Result = Result<(), String>;
}
impl Message for GetFeed {
type Result = Result<Feed, String>;
}
impl Message for ListFeeds {
type Result = Result<Vec<FeedInfo>, String>;
}
impl Message for MarkRead {
type Result = Result<bool, String>;
}
struct FeedStorage {
feeds: HashMap<String, Feed>,
downloader: actix::Addr<Downloader>,
}
impl Actor for FeedStorage {
type Context = actix::SyncContext<Self>;
}
impl Handler<DownloadFeed> for FeedStorage {
type Result = <DownloadFeed as Message>::Result;
fn handle(&mut self, msg: DownloadFeed, _: &mut Self::Context) -> Self::Result {
self.downloader.send(msg).wait().or_err("Download failed")?
}
}
impl Handler<AddFeed> for FeedStorage {
type Result = <AddFeed as Message>::Result;
fn handle(&mut self, msg: AddFeed, _: &mut Self::Context) -> Self::Result {
match self.feeds.entry(msg.url.clone()) {
std::collections::hash_map::Entry::Occupied(_) => Err("Feed already exists".into()),
std::collections::hash_map::Entry::Vacant(e) => {
debug!("will download {}", &msg.url);
self.downloader.send(DownloadFeed(msg.url))
.wait()
.or_err("Failed to download")?
.map(|feed| {
debug!("downloaded");
e.insert(feed);
})
}
}
}
}
impl Handler<RemoveFeed> for FeedStorage {
type Result = <RemoveFeed as Message>::Result;
fn handle(&mut self, msg: RemoveFeed, _: &mut Self::Context) -> Self::Result {
self.feeds.remove(&msg.url);
Ok(())
}
}
impl Handler<GetFeed> for FeedStorage {
type Result = <GetFeed as Message>::Result;
fn handle(&mut self, msg: GetFeed, _: &mut Self::Context) -> Self::Result {
match self.feeds.get(&msg.url) {
None => Err("Feed not found".into()),
Some(feed) => Ok(feed.clone()),
}
}
}
impl Handler<ListFeeds> for FeedStorage {
type Result = <ListFeeds as Message>::Result;
fn handle(&mut self, _: ListFeeds, _: &mut Self::Context) -> Self::Result {
Ok(self.feeds.iter().map(|(k, v)| FeedInfo{url: k.clone(), title: v.title.clone(), last_updated: v.last_updated.clone()}).collect())
}
}
impl Handler<MarkRead> for FeedStorage {
type Result = <MarkRead as Message>::Result;
fn handle(&mut self, msg: MarkRead, _: &mut Self::Context) -> Self::Result {
let mut updated = false;
if let Some(feed) = self.feeds.get_mut(&msg.url) {
for item in feed.items.iter_mut().filter(|k| &k.guid == &msg.guid).take(1) {
item.unread = false;
updated = true;
}
}
Ok(updated)
}
}
impl Handler<UpdateFeed> for FeedStorage {
type Result = <UpdateFeed as Message>::Result;
fn handle(&mut self, msg: UpdateFeed, _: &mut Self::Context) -> Self::Result {
if let Some(feed) = self.feeds.get_mut(&msg.url) | ;
}
}
struct Downloader;
impl Actor for Downloader {
type Context = actix::Context<Self>;
}
impl Handler<DownloadFeed> for Downloader {
type Result = <DownloadFeed as Message>::Result;
fn handle(&mut self, msg: DownloadFeed, _: &mut Self::Context) -> Self::Result {
let channel = rss::Channel::from_url(&msg.0).or_err("Channel not downloaded")?;
let mut items = vec![];
for item in channel.items().iter() {
let guid = item.guid().or_err("broken channel")?.value();
items.push(Item {
title: item.title().map(|s| s.to_string()),
link: item.link().map(|s| s.to_string()),
content: item.content().or(item.description()).map(|s| s.to_string()),
pub_date: item.pub_date().and_then(|date| DateTime::parse_from_rfc2822(date).ok().map(|d| d.with_timezone(&Utc))),
guid: guid.to_string(),
unread: true,
});
}
Ok(Feed{
title: channel.title().to_owned(),
last_updated: match channel.last_build_date() {
None => items
.iter()
.map(|item| &item.pub_date)
.max()
.map(|date| date.to_owned())
.unwrap_or(Some(Utc.timestamp(0, 0)))
.unwrap_or(Utc.timestamp(0, 0)),
Some(s) => DateTime::parse_from_rfc2822(s).map(|d| d.with_timezone(&Utc)).unwrap_or(Utc.timestamp(0, 0))
},
items: items
})
}
}
struct Updater {
storage: actix::Addr<FeedStorage>,
downloader: actix::Addr<Downloader>,
handle: Option<actix::SpawnHandle>,
arbiter: actix::Arbiter,
}
impl Actor for Updater {
type Context = actix::Context<Self>;
fn started(&mut self, ctx: &mut <Self as Actor>::Context) {
let storage = self.storage.clone();
let downloader = self.downloader.clone();
let arbiter = self.arbiter.clone();
self.handle = Some(ctx.run_interval(std::time::Duration::new(60, 0), move |_, _| {
let storage = storage.clone();
let downloader = downloader.clone();
let arbiter = arbiter.clone();
arbiter.exec_fn(move || {
if let Ok(Ok(infos)) = storage.send(ListFeeds).wait() {
debug!("got {} feeds, updating", infos.len());
for info in infos {
if let Ok(Ok(new_feed)) = downloader.send(DownloadFeed(info.url.clone())).wait() {
if let Ok(()) = storage.send(UpdateFeed{url: info.url.clone(), feed: new_feed}).wait() {
debug!("successfully updated {}", info.url);
}
}
}
}
});
}));
}
}
fn process_response<T: Serialize, E: Serialize, E2, F: FnOnce(E) -> actix_web::Error>(response: Result<Result<T, E>, E2>, f: F) -> Result<HttpResponse, actix_web::Error> {
match response {
Ok(Ok(data)) => Ok(HttpResponse::Ok().json(data)),
Ok(Err(e)) => Err(f(e)),
_ => Err(actix_web::error::ErrorInternalServerError("Application overload"))
}
}
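// Note (added commentary, not part of the original source): `process_response`
// collapses the two layers of fallibility produced by `web::block(...)`:
// Ok(Ok(data)) becomes a 200 response with `data` serialized as JSON, Ok(Err(e)) is
// mapped through `f` (e.g. 404 for a missing feed, 500 otherwise), and a blocking or
// mailbox failure becomes a generic 500 "Application overload".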
#[derive(Clone)]
struct State { storage: actix::Addr<FeedStorage> }
fn add_feed(url_info: web::Form<AddFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn remove_feed(url_info: web::Form<RemoveFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn get_feed(url_info: web::Query<GetFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorNotFound))
}
fn list_feeds(data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(ListFeeds).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn mark_read(url_info: web::Form<MarkRead>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn actix_main() -> Result<(), std::io::Error> {
let downloader_addr = Downloader.start();
let feed_storage_addr = {
let addr = downloader_addr.clone();
actix::SyncArbiter::start(1, move || FeedStorage{
feeds: HashMap::new(),
downloader: addr.clone(),
})
};
let state = State{storage: feed_storage_addr.clone()};
let updater = Updater{storage: feed_storage_addr, downloader: downloader_addr.clone(), handle: None, arbiter: actix::Arbiter::new()};
updater.start();
let mut server = actix_web::HttpServer::new(move || {
App::new()
.data(state.clone())
.wrap(
Cors::new()
.allowed_methods(vec!["GET", "POST", "OPTIONS"])
.allowed_headers(vec![
http::header::ACCEPT,
http::header::CONTENT_TYPE,
http::header::HeaderName::from_static("x-requested-with")
])
.max_age(3600)
)
.wrap(actix_web::middleware::Logger::default())
.route("/add", web::post().to_async(add_feed))
.route("/remove", web::post().to_async(remove_feed))
.route("/read", web::post().to_async(mark_read))
.route("/list", web::get().to_async(list_feeds))
.route("/get", web::get().to_async(get_feed))
});
let mut listenfd = listenfd::ListenFd::from_env();
server = if let Some(l) = listenfd.take_tcp_listener(0)? {
server.listen(l)?
} else {
server.bind("[::1]:8000")?
};
println!("Started HTTP server on {:?}", server.addrs_with_scheme().iter().map(|(a, s)| format!("{}://{}/", s, a)).collect::<Vec<_>>());
server.start();
Ok(())
}
pub fn main() -> Result<(), std::io::Error> {
std::env::set_var("RUST_LOG", "actix_web=debug,rssreader=debug");
env_logger::init();
actix::System::run(|| {actix_main().expect("App crashed");} )
}
| {
feed.merge(msg.feed)
} | conditional_block |
main.rs | mod utils;
use chrono::{DateTime, Utc, TimeZone};
use actix::{Actor, Handler, Message, AsyncContext};
use actix_web::{http, web, HttpResponse, App};
use actix_web::middleware::cors::Cors;
use futures::{Future};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use log::debug;
use crate::utils::ErrString;
#[derive(Debug, Serialize, Clone)]
struct Item {
pub title: Option<String>,
pub link: Option<String>,
pub content: Option<String>,
pub pub_date: Option<DateTime<Utc>>,
pub guid: String,
pub unread: bool,
}
#[derive(Clone, Debug, Serialize)]
struct Feed {
pub title: String,
pub last_updated: DateTime<Utc>,
pub items: Vec<Item>,
}
impl Feed {
pub fn merge(&mut self, other: Feed) {
self.title = other.title;
self.last_updated = other.last_updated;
let mut items: HashMap<&str, Item> = self.items.iter().map(|item| (item.guid.as_str(), item.clone())).collect();
for item in other.items.iter() {
let guid = &item.guid;
items.entry(&guid).or_insert_with(|| Item {
title: item.title.to_owned(),
link: item.link.to_owned(),
content: item.content.to_owned(),
pub_date: item.pub_date.to_owned(),
guid: guid.to_owned(),
unread: true,
});
}
self.items = items.drain().map(|(_, v)| v).collect();
self.items.sort_by_key(|item| item.pub_date.clone());
}
}
#[derive(Serialize)]
struct FeedInfo {
pub url: String,
pub title: String,
pub last_updated: DateTime<Utc>,
}
struct DownloadFeed(String);
#[derive(Deserialize)]
struct AddFeed { url: String }
#[derive(Deserialize)]
struct RemoveFeed { url: String }
#[derive(Deserialize, Debug)]
struct GetFeed { url: String }
struct ListFeeds;
#[derive(Deserialize, Debug)]
struct MarkRead { url: String, guid: String }
#[derive(Message)]
struct UpdateFeed { url: String, feed: Feed }
impl Message for DownloadFeed {
type Result = Result<Feed, String>;
}
impl Message for AddFeed {
type Result = Result<(), String>;
}
impl Message for RemoveFeed {
type Result = Result<(), String>;
}
impl Message for GetFeed {
type Result = Result<Feed, String>;
}
impl Message for ListFeeds {
type Result = Result<Vec<FeedInfo>, String>;
}
impl Message for MarkRead {
type Result = Result<bool, String>;
}
struct FeedStorage {
feeds: HashMap<String, Feed>,
downloader: actix::Addr<Downloader>,
}
impl Actor for FeedStorage {
type Context = actix::SyncContext<Self>;
}
impl Handler<DownloadFeed> for FeedStorage {
type Result = <DownloadFeed as Message>::Result;
fn handle(&mut self, msg: DownloadFeed, _: &mut Self::Context) -> Self::Result {
self.downloader.send(msg).wait().or_err("Download failed")?
}
}
impl Handler<AddFeed> for FeedStorage {
type Result = <AddFeed as Message>::Result;
fn handle(&mut self, msg: AddFeed, _: &mut Self::Context) -> Self::Result {
match self.feeds.entry(msg.url.clone()) {
std::collections::hash_map::Entry::Occupied(_) => Err("Feed already exists".into()),
std::collections::hash_map::Entry::Vacant(e) => {
debug!("will download {}", &msg.url);
self.downloader.send(DownloadFeed(msg.url))
.wait()
.or_err("Failed to download")?
.map(|feed| {
debug!("downloaded");
e.insert(feed);
})
}
}
}
}
impl Handler<RemoveFeed> for FeedStorage {
type Result = <RemoveFeed as Message>::Result;
fn handle(&mut self, msg: RemoveFeed, _: &mut Self::Context) -> Self::Result {
self.feeds.remove(&msg.url);
Ok(())
}
}
impl Handler<GetFeed> for FeedStorage {
type Result = <GetFeed as Message>::Result;
fn handle(&mut self, msg: GetFeed, _: &mut Self::Context) -> Self::Result {
match self.feeds.get(&msg.url) {
None => Err("Feed not found".into()),
Some(feed) => Ok(feed.clone()),
}
}
}
impl Handler<ListFeeds> for FeedStorage {
type Result = <ListFeeds as Message>::Result;
fn handle(&mut self, _: ListFeeds, _: &mut Self::Context) -> Self::Result {
Ok(self.feeds.iter().map(|(k, v)| FeedInfo{url: k.clone(), title: v.title.clone(), last_updated: v.last_updated.clone()}).collect())
}
}
impl Handler<MarkRead> for FeedStorage {
type Result = <MarkRead as Message>::Result;
fn handle(&mut self, msg: MarkRead, _: &mut Self::Context) -> Self::Result {
let mut updated = false;
if let Some(feed) = self.feeds.get_mut(&msg.url) {
for item in feed.items.iter_mut().filter(|k| &k.guid == &msg.guid).take(1) {
item.unread = false;
updated = true;
}
}
Ok(updated)
}
}
impl Handler<UpdateFeed> for FeedStorage {
type Result = <UpdateFeed as Message>::Result;
fn handle(&mut self, msg: UpdateFeed, _: &mut Self::Context) -> Self::Result {
if let Some(feed) = self.feeds.get_mut(&msg.url) {
feed.merge(msg.feed)
};
}
}
struct Downloader;
impl Actor for Downloader {
type Context = actix::Context<Self>;
}
impl Handler<DownloadFeed> for Downloader {
type Result = <DownloadFeed as Message>::Result;
fn handle(&mut self, msg: DownloadFeed, _: &mut Self::Context) -> Self::Result {
let channel = rss::Channel::from_url(&msg.0).or_err("Channel not downloaded")?;
let mut items = vec![];
for item in channel.items().iter() {
let guid = item.guid().or_err("broken channel")?.value();
items.push(Item {
title: item.title().map(|s| s.to_string()),
link: item.link().map(|s| s.to_string()),
content: item.content().or(item.description()).map(|s| s.to_string()),
pub_date: item.pub_date().and_then(|date| DateTime::parse_from_rfc2822(date).ok().map(|d| d.with_timezone(&Utc))),
guid: guid.to_string(),
unread: true,
});
}
Ok(Feed{
title: channel.title().to_owned(),
last_updated: match channel.last_build_date() {
None => items
.iter()
.map(|item| &item.pub_date)
.max()
.map(|date| date.to_owned())
.unwrap_or(Some(Utc.timestamp(0, 0)))
.unwrap_or(Utc.timestamp(0, 0)),
Some(s) => DateTime::parse_from_rfc2822(s).map(|d| d.with_timezone(&Utc)).unwrap_or(Utc.timestamp(0, 0))
},
items: items
})
}
}
struct Updater {
storage: actix::Addr<FeedStorage>,
downloader: actix::Addr<Downloader>,
handle: Option<actix::SpawnHandle>,
arbiter: actix::Arbiter,
}
impl Actor for Updater {
type Context = actix::Context<Self>;
fn started(&mut self, ctx: &mut <Self as Actor>::Context) {
let storage = self.storage.clone();
let downloader = self.downloader.clone();
let arbiter = self.arbiter.clone();
self.handle = Some(ctx.run_interval(std::time::Duration::new(60, 0), move |_, _| {
let storage = storage.clone();
let downloader = downloader.clone();
let arbiter = arbiter.clone();
arbiter.exec_fn(move || {
if let Ok(Ok(infos)) = storage.send(ListFeeds).wait() {
debug!("got {} feeds, updating", infos.len());
for info in infos {
if let Ok(Ok(new_feed)) = downloader.send(DownloadFeed(info.url.clone())).wait() {
if let Ok(()) = storage.send(UpdateFeed{url: info.url.clone(), feed: new_feed}).wait() {
debug!("successfully updated {}", info.url);
}
}
}
}
});
}));
}
}
fn process_response<T: Serialize, E: Serialize, E2, F: FnOnce(E) -> actix_web::Error>(response: Result<Result<T, E>, E2>, f: F) -> Result<HttpResponse, actix_web::Error> {
match response {
Ok(Ok(data)) => Ok(HttpResponse::Ok().json(data)),
Ok(Err(e)) => Err(f(e)),
_ => Err(actix_web::error::ErrorInternalServerError("Application overload"))
}
}
#[derive(Clone)]
struct State { storage: actix::Addr<FeedStorage> }
fn add_feed(url_info: web::Form<AddFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn remove_feed(url_info: web::Form<RemoveFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn get_feed(url_info: web::Query<GetFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorNotFound))
}
fn list_feeds(data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(ListFeeds).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn mark_read(url_info: web::Form<MarkRead>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn actix_main() -> Result<(), std::io::Error> {
let downloader_addr = Downloader.start();
let feed_storage_addr = {
let addr = downloader_addr.clone();
actix::SyncArbiter::start(1, move || FeedStorage{
feeds: HashMap::new(),
downloader: addr.clone(),
})
};
let state = State{storage: feed_storage_addr.clone()};
let updater = Updater{storage: feed_storage_addr, downloader: downloader_addr.clone(), handle: None, arbiter: actix::Arbiter::new()};
updater.start();
let mut server = actix_web::HttpServer::new(move || {
App::new()
.data(state.clone())
.wrap(
Cors::new()
.allowed_methods(vec!["GET", "POST", "OPTIONS"])
.allowed_headers(vec![
http::header::ACCEPT,
http::header::CONTENT_TYPE,
http::header::HeaderName::from_static("x-requested-with")
])
.max_age(3600)
)
.wrap(actix_web::middleware::Logger::default())
.route("/add", web::post().to_async(add_feed))
.route("/remove", web::post().to_async(remove_feed))
.route("/read", web::post().to_async(mark_read))
.route("/list", web::get().to_async(list_feeds))
.route("/get", web::get().to_async(get_feed))
});
let mut listenfd = listenfd::ListenFd::from_env();
server = if let Some(l) = listenfd.take_tcp_listener(0)? { | server.start();
Ok(())
}
pub fn main() -> Result<(), std::io::Error> {
std::env::set_var("RUST_LOG", "actix_web=debug,rssreader=debug");
env_logger::init();
actix::System::run(|| {actix_main().expect("App crashed");} )
} | server.listen(l)?
} else {
server.bind("[::1]:8000")?
};
println!("Started HTTP server on {:?}", server.addrs_with_scheme().iter().map(|(a, s)| format!("{}://{}/", s, a)).collect::<Vec<_>>()); | random_line_split |
main.rs | mod utils;
use chrono::{DateTime, Utc, TimeZone};
use actix::{Actor, Handler, Message, AsyncContext};
use actix_web::{http, web, HttpResponse, App};
use actix_web::middleware::cors::Cors;
use futures::{Future};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use log::debug;
use crate::utils::ErrString;
#[derive(Debug, Serialize, Clone)]
struct | {
pub title: Option<String>,
pub link: Option<String>,
pub content: Option<String>,
pub pub_date: Option<DateTime<Utc>>,
pub guid: String,
pub unread: bool,
}
#[derive(Clone, Debug, Serialize)]
struct Feed {
pub title: String,
pub last_updated: DateTime<Utc>,
pub items: Vec<Item>,
}
impl Feed {
pub fn merge(&mut self, other: Feed) {
self.title = other.title;
self.last_updated = other.last_updated;
let mut items: HashMap<&str, Item> = self.items.iter().map(|item| (item.guid.as_str(), item.clone())).collect();
for item in other.items.iter() {
let guid = &item.guid;
items.entry(&guid).or_insert_with(|| Item {
title: item.title.to_owned(),
link: item.link.to_owned(),
content: item.content.to_owned(),
pub_date: item.pub_date.to_owned(),
guid: guid.to_owned(),
unread: true,
});
}
self.items = items.drain().map(|(_, v)| v).collect();
self.items.sort_by_key(|item| item.pub_date.clone());
}
}
#[derive(Serialize)]
struct FeedInfo {
pub url: String,
pub title: String,
pub last_updated: DateTime<Utc>,
}
struct DownloadFeed(String);
#[derive(Deserialize)]
struct AddFeed { url: String }
#[derive(Deserialize)]
struct RemoveFeed { url: String }
#[derive(Deserialize, Debug)]
struct GetFeed { url: String }
struct ListFeeds;
#[derive(Deserialize, Debug)]
struct MarkRead { url: String, guid: String }
#[derive(Message)]
struct UpdateFeed { url: String, feed: Feed }
impl Message for DownloadFeed {
type Result = Result<Feed, String>;
}
impl Message for AddFeed {
type Result = Result<(), String>;
}
impl Message for RemoveFeed {
type Result = Result<(), String>;
}
impl Message for GetFeed {
type Result = Result<Feed, String>;
}
impl Message for ListFeeds {
type Result = Result<Vec<FeedInfo>, String>;
}
impl Message for MarkRead {
type Result = Result<bool, String>;
}
struct FeedStorage {
feeds: HashMap<String, Feed>,
downloader: actix::Addr<Downloader>,
}
impl Actor for FeedStorage {
type Context = actix::SyncContext<Self>;
}
impl Handler<DownloadFeed> for FeedStorage {
type Result = <DownloadFeed as Message>::Result;
fn handle(&mut self, msg: DownloadFeed, _: &mut Self::Context) -> Self::Result {
self.downloader.send(msg).wait().or_err("Download failed")?
}
}
impl Handler<AddFeed> for FeedStorage {
type Result = <AddFeed as Message>::Result;
fn handle(&mut self, msg: AddFeed, _: &mut Self::Context) -> Self::Result {
match self.feeds.entry(msg.url.clone()) {
std::collections::hash_map::Entry::Occupied(_) => Err("Feed already exists".into()),
std::collections::hash_map::Entry::Vacant(e) => {
debug!("will download {}", &msg.url);
self.downloader.send(DownloadFeed(msg.url))
.wait()
.or_err("Failed to download")?
.map(|feed| {
debug!("downloaded");
e.insert(feed);
})
}
}
}
}
impl Handler<RemoveFeed> for FeedStorage {
type Result = <RemoveFeed as Message>::Result;
fn handle(&mut self, msg: RemoveFeed, _: &mut Self::Context) -> Self::Result {
self.feeds.remove(&msg.url);
Ok(())
}
}
impl Handler<GetFeed> for FeedStorage {
type Result = <GetFeed as Message>::Result;
fn handle(&mut self, msg: GetFeed, _: &mut Self::Context) -> Self::Result {
match self.feeds.get(&msg.url) {
None => Err("Feed not found".into()),
Some(feed) => Ok(feed.clone()),
}
}
}
impl Handler<ListFeeds> for FeedStorage {
type Result = <ListFeeds as Message>::Result;
fn handle(&mut self, _: ListFeeds, _: &mut Self::Context) -> Self::Result {
Ok(self.feeds.iter().map(|(k, v)| FeedInfo{url: k.clone(), title: v.title.clone(), last_updated: v.last_updated.clone()}).collect())
}
}
impl Handler<MarkRead> for FeedStorage {
type Result = <MarkRead as Message>::Result;
fn handle(&mut self, msg: MarkRead, _: &mut Self::Context) -> Self::Result {
let mut updated = false;
if let Some(feed) = self.feeds.get_mut(&msg.url) {
for item in feed.items.iter_mut().filter(|k| &k.guid == &msg.guid).take(1) {
item.unread = false;
updated = true;
}
}
Ok(updated)
}
}
impl Handler<UpdateFeed> for FeedStorage {
type Result = <UpdateFeed as Message>::Result;
fn handle(&mut self, msg: UpdateFeed, _: &mut Self::Context) -> Self::Result {
if let Some(feed) = self.feeds.get_mut(&msg.url) {
feed.merge(msg.feed)
};
}
}
struct Downloader;
impl Actor for Downloader {
type Context = actix::Context<Self>;
}
impl Handler<DownloadFeed> for Downloader {
type Result = <DownloadFeed as Message>::Result;
fn handle(&mut self, msg: DownloadFeed, _: &mut Self::Context) -> Self::Result {
let channel = rss::Channel::from_url(&msg.0).or_err("Channel not downloaded")?;
let mut items = vec![];
for item in channel.items().iter() {
let guid = item.guid().or_err("broken channel")?.value();
items.push(Item {
title: item.title().map(|s| s.to_string()),
link: item.link().map(|s| s.to_string()),
content: item.content().or(item.description()).map(|s| s.to_string()),
pub_date: item.pub_date().and_then(|date| DateTime::parse_from_rfc2822(date).ok().map(|d| d.with_timezone(&Utc))),
guid: guid.to_string(),
unread: true,
});
}
Ok(Feed{
title: channel.title().to_owned(),
last_updated: match channel.last_build_date() {
None => items
.iter()
.map(|item| &item.pub_date)
.max()
.map(|date| date.to_owned())
.unwrap_or(Some(Utc.timestamp(0, 0)))
.unwrap_or(Utc.timestamp(0, 0)),
Some(s) => DateTime::parse_from_rfc2822(s).map(|d| d.with_timezone(&Utc)).unwrap_or(Utc.timestamp(0, 0))
},
items: items
})
}
}
struct Updater {
storage: actix::Addr<FeedStorage>,
downloader: actix::Addr<Downloader>,
handle: Option<actix::SpawnHandle>,
arbiter: actix::Arbiter,
}
impl Actor for Updater {
type Context = actix::Context<Self>;
fn started(&mut self, ctx: &mut <Self as Actor>::Context) {
let storage = self.storage.clone();
let downloader = self.downloader.clone();
let arbiter = self.arbiter.clone();
self.handle = Some(ctx.run_interval(std::time::Duration::new(60, 0), move |_, _| {
let storage = storage.clone();
let downloader = downloader.clone();
let arbiter = arbiter.clone();
arbiter.exec_fn(move || {
if let Ok(Ok(infos)) = storage.send(ListFeeds).wait() {
debug!("got {} feeds, updating", infos.len());
for info in infos {
if let Ok(Ok(new_feed)) = downloader.send(DownloadFeed(info.url.clone())).wait() {
if let Ok(()) = storage.send(UpdateFeed{url: info.url.clone(), feed: new_feed}).wait() {
debug!("successfully updated {}", info.url);
}
}
}
}
});
}));
}
}
fn process_response<T: Serialize, E: Serialize, E2, F: FnOnce(E) -> actix_web::Error>(response: Result<Result<T, E>, E2>, f: F) -> Result<HttpResponse, actix_web::Error> {
match response {
Ok(Ok(data)) => Ok(HttpResponse::Ok().json(data)),
Ok(Err(e)) => Err(f(e)),
_ => Err(actix_web::error::ErrorInternalServerError("Application overload"))
}
}
#[derive(Clone)]
struct State { storage: actix::Addr<FeedStorage> }
fn add_feed(url_info: web::Form<AddFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn remove_feed(url_info: web::Form<RemoveFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn get_feed(url_info: web::Query<GetFeed>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorNotFound))
}
fn list_feeds(data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(ListFeeds).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn mark_read(url_info: web::Form<MarkRead>, data: web::Data<State>) -> impl Future<Item = HttpResponse, Error = actix_web::Error> {
web::block(move || data.storage.send(url_info.into_inner()).wait())
.then(|res| process_response(res, actix_web::error::ErrorInternalServerError))
}
fn actix_main() -> Result<(), std::io::Error> {
let downloader_addr = Downloader.start();
let feed_storage_addr = {
let addr = downloader_addr.clone();
actix::SyncArbiter::start(1, move || FeedStorage{
feeds: HashMap::new(),
downloader: addr.clone(),
})
};
let state = State{storage: feed_storage_addr.clone()};
let updater = Updater{storage: feed_storage_addr, downloader: downloader_addr.clone(), handle: None, arbiter: actix::Arbiter::new()};
updater.start();
let mut server = actix_web::HttpServer::new(move || {
App::new()
.data(state.clone())
.wrap(
Cors::new()
.allowed_methods(vec!["GET", "POST", "OPTIONS"])
.allowed_headers(vec![
http::header::ACCEPT,
http::header::CONTENT_TYPE,
http::header::HeaderName::from_static("x-requested-with")
])
.max_age(3600)
)
.wrap(actix_web::middleware::Logger::default())
.route("/add", web::post().to_async(add_feed))
.route("/remove", web::post().to_async(remove_feed))
.route("/read", web::post().to_async(mark_read))
.route("/list", web::get().to_async(list_feeds))
.route("/get", web::get().to_async(get_feed))
});
let mut listenfd = listenfd::ListenFd::from_env();
server = if let Some(l) = listenfd.take_tcp_listener(0)? {
server.listen(l)?
} else {
server.bind("[::1]:8000")?
};
println!("Started HTTP server on {:?}", server.addrs_with_scheme().iter().map(|(a, s)| format!("{}://{}/", s, a)).collect::<Vec<_>>());
server.start();
Ok(())
}
pub fn main() -> Result<(), std::io::Error> {
std::env::set_var("RUST_LOG", "actix_web=debug,rssreader=debug");
env_logger::init();
actix::System::run(|| {actix_main().expect("App crashed");} )
}
| Item | identifier_name |
sssmc39_scheme.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions and structs that specifically define the SLIPS-0039 scheme
use super::{Share, Splitter};
use crate::error::{Error, ErrorKind};
use std::collections::BTreeMap;
use std::fmt;
use crate::util;
/// Struct for returned shares
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct GroupShare {
/// Group id
pub group_id: u16,
/// iteration exponent
pub iteration_exponent: u8,
/// group index
pub group_index: u8,
/// group threshold
pub group_threshold: u8,
/// number of group shares
pub group_count: u8,
/// member threshold:
pub member_threshold: u8,
/// Member shares for the group
pub member_shares: Vec<Share>,
}
impl fmt::Display for GroupShare {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Group {} of {} - {} of {} shares required: ",
self.group_index + 1,
self.group_count,
self.member_threshold,
self.member_shares.len()
)?;
for s in &self.member_shares {
for w in s.to_mnemonic().unwrap() {
write!(f, "{} ", w)?;
}
writeln!(f)?;
}
Ok(())
}
}
impl GroupShare {
/// return list of mnemonics
pub fn mnemonic_list(&self) -> Result<Vec<Vec<String>>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?);
}
Ok(ret_vec)
}
/// return list of mnemonics as space separated strings
pub fn mnemonic_list_flat(&self) -> Result<Vec<String>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?.iter().fold(String::new(), |mut acc, s| {
acc.push_str(s);
acc.push(' ');
acc
}))
}
Ok(ret_vec)
}
/// decode member shares to single share
pub fn decode_shares(&mut self) -> Result<Share, Error> {
let sp = Splitter::new(None);
sp.recover_secret(&self.member_shares, self.member_threshold)
}
}
/// Split a master secret into mnemonic shares
/// group_threshold: The number of groups required to reconstruct the master secret
/// groups: A list of (member_threshold, member_count) pairs for each group, where member_count
/// is the number of shares to generate for the group and member_threshold is the number of
/// members required to reconstruct the group secret.
/// master_secret: The master secret to split.
/// passphrase: The passphrase used to encrypt the master secret.
/// iteration_exponent: The iteration exponent.
/// return: List of mnemonics.
pub fn generate_mnemonics(
group_threshold: u8,
groups: &[(u8, u8)],
master_secret: &[u8],
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
// Generate a 'proto share' so to speak, with identifier generated and group data filled
let mut proto_share = Share::new()?;
proto_share.group_threshold = group_threshold;
proto_share.group_count = groups.len() as u8;
if master_secret.len() * 8 < proto_share.config.min_strength_bits as usize {
return Err(ErrorKind::Value(format!(
"The length of the master secret ({} bytes) must be at least {} bytes.",
master_secret.len(),
(f64::from(proto_share.config.min_strength_bits) / 8f64).ceil(),
)))?;
}
if master_secret.len() % 2 != 0 {
return Err(ErrorKind::Value(
"The length of the master secret in bytes must be an even number".to_string(),
))?;
}
if group_threshold as usize > groups.len() {
return Err(ErrorKind::Value(format!(
"The requested group threshold ({}) must not exceed the number of groups ({}).",
group_threshold,
groups.len()
)))?;
}
let encoder = util::encrypt::MasterSecretEnc::new()?;
let encrypted_master_secret = encoder.encrypt(
master_secret,
passphrase,
iteration_exponent,
proto_share.identifier,
);
let sp = Splitter::new(None);
let group_shares = sp.split_secret(
&proto_share,
group_threshold,
groups.len() as u8,
&encrypted_master_secret,
)?;
let mut retval: Vec<GroupShare> = vec![];
let gs_len = group_shares.len();
for (i, elem) in group_shares.into_iter().enumerate() {
proto_share.group_index = i as u8;
proto_share.group_threshold = group_threshold;
proto_share.group_count = gs_len as u8;
let (member_threshold, member_count) = groups[i];
let member_shares = sp.split_secret(
&proto_share,
member_threshold,
member_count,
&elem.share_value,
)?;
retval.push(GroupShare {
group_id: proto_share.identifier,
iteration_exponent,
group_index: i as u8,
group_threshold,
group_count: gs_len as u8,
member_threshold,
member_shares,
});
}
Ok(retval)
}
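// Illustrative sketch (added commentary, not part of the original source): a minimal
// 2-of-3 round trip through `generate_mnemonics`/`combine_mnemonics`. The secret bytes
// and passphrase are arbitrary example values; the full test suite at the bottom of
// this file exercises larger group configurations.
#[cfg(test)]
mod roundtrip_sketch {
    use super::*;

    #[test]
    fn two_of_three_roundtrip() -> Result<(), Error> {
        let master_secret = vec![7u8; 16];
        // One group, any 2 of its 3 member shares required.
        let groups = generate_mnemonics(1, &[(2, 3)], &master_secret, "pass", 0)?;
        let mnemonics = groups[0].mnemonic_list()?;
        // Two of the three shares are sufficient to recover the secret.
        let recovered = combine_mnemonics(&mnemonics[0..2], "pass")?;
        assert_eq!(recovered, master_secret);
        Ok(())
    }
}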
pub fn generate_mnemonics_random(
group_threshold: u8,
groups: &[(u8, u8)],
strength_bits: u16,
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
let proto_share = Share::new()?;
if strength_bits < proto_share.config.min_strength_bits {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be at least {} bits.",
strength_bits, proto_share.config.min_strength_bits,
)))?;
}
if strength_bits % 16 != 0 {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be a multiple of 16 bits.",
strength_bits,
)))?;
}
generate_mnemonics(
group_threshold,
groups,
&util::fill_vec_rand(strength_bits as usize / 8),
passphrase,
iteration_exponent,
)
}
/// Combines mnemonic shares to obtain the master secret which was previously split using
/// Shamir's secret sharing scheme.
/// mnemonics: List of mnemonics.
/// passphrase: The passphrase used to encrypt the master secret.
/// return: The master secret.
pub fn combine_mnemonics(mnemonics: &[Vec<String>], passphrase: &str) -> Result<Vec<u8>, Error> {
let group_shares = decode_mnemonics(mnemonics)?;
let mut shares = vec![];
for mut gs in group_shares {
shares.push(gs.decode_shares()?);
}
let sp = Splitter::new(None);
// restore proper member index for groups
let shares = shares
.into_iter()
.map(|mut s| {
s.member_index = s.group_index;
s
})
.collect::<Vec<_>>();
let ems = sp.recover_secret(&shares, shares[0].group_threshold)?;
let encoder = util::encrypt::MasterSecretEnc::new()?;
let dms = encoder.decrypt( | );
Ok(dms)
}
/// Decodes all Mnemonics to a list of shares and performs error checking
fn decode_mnemonics(mnemonics: &[Vec<String>]) -> Result<Vec<GroupShare>, Error> {
let mut shares = vec![];
if mnemonics.is_empty() {
return Err(ErrorKind::Mnemonic(
"List of mnemonics is empty.".to_string(),
))?;
}
let check_len = mnemonics[0].len();
for m in mnemonics {
if m.len() != check_len {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same length.".to_string(),
))?;
}
shares.push(Share::from_mnemonic(m)?);
}
let check_share = shares[0].clone();
for s in shares.iter() {
if s.identifier != check_share.identifier
|| s.iteration_exponent != check_share.iteration_exponent
{
return Err(ErrorKind::Mnemonic(format!(
"Invalid set of mnemonics. All mnemonics must begin with the same {} words. \
(Identifier and iteration exponent must be the same).",
s.config.id_exp_length_words,
)))?;
}
if s.group_threshold != check_share.group_threshold {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group threshold"
.to_string(),
))?;
}
if s.group_count != check_share.group_count {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group count"
.to_string(),
))?;
}
}
let mut group_index_map = BTreeMap::new();
for s in shares {
if !group_index_map.contains_key(&s.group_index) {
let group_share = GroupShare {
group_id: s.identifier,
group_index: s.group_index,
group_threshold: s.group_threshold,
iteration_exponent: s.iteration_exponent,
group_count: s.group_count,
member_shares: vec![s.clone()],
member_threshold: s.member_threshold,
};
group_index_map.insert(group_share.group_index, group_share);
} else {
let e = group_index_map.get_mut(&s.group_index).unwrap();
e.member_shares.push(s);
}
}
if group_index_map.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonic groups ({}). The required number \
of groups is {}.",
group_index_map.len(),
check_share.group_threshold,
)))?;
}
let groups: Vec<GroupShare> = group_index_map
.into_iter()
.map(|g| g.1)
// remove groups where number of shares is below the member threshold
.filter(|g| g.member_shares.len() >= g.member_threshold as usize)
.collect();
if groups.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(
"Insufficient number of groups with member counts that meet member threshold."
.to_string(),
))?;
}
// TODO: Should probably return info making problem mnemonics easier to identify
for g in groups.iter() {
if g.member_shares.len() < g.member_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonics (Group {}). At least {} mnemonics \
are required.",
g.group_index, g.member_threshold,
)))?;
}
let test_share = g.member_shares[0].clone();
for ms in g.member_shares.iter() {
if test_share.member_threshold != ms.member_threshold {
return Err(ErrorKind::Mnemonic(
"Mismatching member thresholds".to_string(),
))?;
}
}
}
Ok(groups)
}
#[cfg(test)]
mod tests {
use super::*;
fn flatten_mnemonics(nms: &[GroupShare]) -> Result<Vec<Vec<String>>, Error> {
let mut ret = vec![];
for m in nms {
for s in m.member_shares.iter() {
ret.push(s.to_mnemonic()?);
}
}
Ok(ret)
}
#[test]
fn generate_mnemonics_test() -> Result<(), Error> {
let master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
// single 3 of 5 test, splat out all mnemonics
println!("Single 3 of 5 Encoded: {:?}", master_secret);
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// Test a few distinct groups
let mns = generate_mnemonics(
2,
&[(3, 5), (2, 5), (3, 3), (13, 16)],
&master_secret,
"",
0,
)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// work through some varying sized secrets
let mut master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
for _ in 0..32 {
master_secret.push(0);
master_secret.push(1);
println!("Single 3 of 5 Encoded: {:?}", master_secret);
println!("master secret length: {}", master_secret.len());
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
}
// Test case for a particular input which failed with different threshold lengths
// TODO: Fold this into the other tests
let one = "slavery flea acrobat eclipse cultural emission yield invasion seafood says insect square bucket orbit leaves closet heat ugly database decorate";
let two = "slavery flea acrobat emerald aviation escape year axle method forget rebound burden museum game suitable brave texture deploy together flash";
let three = "slavery flea acrobat envelope best ceiling dragon threaten isolate headset decrease organize crunch fiction sniff carbon museum username glasses plunge";
let four = "slavery flea beard echo cradle rebound penalty minister literary object have hazard elephant meaning enemy empty result capture peanut believe";
let five = "slavery flea beard email blind lips evaluate repair decent rich mortgage swimming branch decision unkind ultimate military sugar prepare airport";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let _result = combine_mnemonics(&input, "TREZOR")?;
Ok(())
}
// For temporary use as we have no command-line at present
#[test]
fn split_master_secret() -> Result<(), Error> {
let master_secret = b"fdd99010e03f3141662adb33644d5fd2bea0238fa805a2d21e396a22b926558c";
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret.to_vec(), "", 0)?;
for s in &mns {
println!("{}", s);
}
let one = "ending senior academic acne acne lizard armed wrist fancy center blimp broken branch ceiling type bishop senior window mother dominant humidity kidney flip leader cover pupal swimming quarter findings picture much impulse answer threaten bishop express brother sharp unwrap bulge leaves guest ladybug imply thumb dress brave orbit orbit garbage vexed brave deploy tofu regular unusual hunting carbon year";
let two = "ending senior academic agree acid grill magazine trip impact diagnose headset year puny adorn swimming knife aquatic airline prayer hairy unfold forbid diminish sweater brave column holy spit superior replace script oasis firefly scared goat divorce oral laundry violence merit golden founder unusual taste preach ruin lying bumpy single glasses fitness argue daisy secret loud squeeze theater husky already";
let three = "ending senior academic amazing academic carbon sheriff march ordinary advocate climate quarter explain view glasses distance scandal modify maiden welcome include webcam snapshot lilac finance faint facility quantity daughter trash formal failure execute grasp necklace trust bishop privacy library infant slim envy parcel boring mixture deploy dough deny patrol evening brave idea blessing slush lizard woman teaspoon news exclude";
let four = "ending senior academic arcade acquire work exceed network revenue blanket force fiber ting standard fatigue extend acid holiday raspy pink vegan survive river step golden scandal tendency spray parcel vintage amuse remove best else unknown overall mild breathe nuclear wrist criminal jury deal rescue symbolic slow predator railroad verify involve require graduate ambition unknown repair scandal hobo voice railroad";
let five = "ending senior academic axle acquire golden velvet depart swing endorse champion estate slush alien burning painting obesity surprise punish gasoline elephant educate declare rebuild plains making unkind carve exotic unfold counter cowboy extra fantasy cleanup pickup increase type deliver together fumes nylon acrobat fatigue listen elder toxic losing paper image aide satisfy award axis evoke capital academic violence canyon";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let result = combine_mnemonics(&input, "")?;
println!("Result: {}", String::from_utf8(result).unwrap());
Ok(())
}
} | &ems.share_value,
passphrase,
ems.iteration_exponent,
ems.identifier, | random_line_split |
sssmc39_scheme.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions and structs that specifically define the SLIPS-0039 scheme
use super::{Share, Splitter};
use crate::error::{Error, ErrorKind};
use std::collections::BTreeMap;
use std::fmt;
use crate::util;
/// Struct for returned shares
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct GroupShare {
/// Group id
pub group_id: u16,
/// iteration exponent
pub iteration_exponent: u8,
/// group index
pub group_index: u8,
/// group threshold
pub group_threshold: u8,
/// number of group shares
pub group_count: u8,
/// member threshold:
pub member_threshold: u8,
/// Member shares for the group
pub member_shares: Vec<Share>,
}
impl fmt::Display for GroupShare {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Group {} of {} - {} of {} shares required: ",
self.group_index + 1,
self.group_count,
self.member_threshold,
self.member_shares.len()
)?;
for s in &self.member_shares {
for w in s.to_mnemonic().unwrap() {
write!(f, "{} ", w)?;
}
writeln!(f)?;
}
Ok(())
}
}
impl GroupShare {
/// return list of mnemonics
pub fn mnemonic_list(&self) -> Result<Vec<Vec<String>>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?);
}
Ok(ret_vec)
}
/// return list of mnemonics as space separated strings
pub fn mnemonic_list_flat(&self) -> Result<Vec<String>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?.iter().fold(String::new(), |mut acc, s| {
acc.push_str(s);
acc.push(' ');
acc
}))
}
Ok(ret_vec)
}
/// decode member shares to single share
pub fn decode_shares(&mut self) -> Result<Share, Error> {
let sp = Splitter::new(None);
sp.recover_secret(&self.member_shares, self.member_threshold)
}
}
/// Split a master secret into mnemonic shares
/// group_threshold: The number of groups required to reconstruct the master secret
/// groups: A list of (member_threshold, member_count) pairs for each group, where member_count
/// is the number of shares to generate for the group and member_threshold is the number of
/// members required to reconstruct the group secret.
/// master_secret: The master secret to split.
/// passphrase: The passphrase used to encrypt the master secret.
/// iteration_exponent: The iteration exponent.
/// return: List of mnemonics.
pub fn generate_mnemonics(
group_threshold: u8,
groups: &[(u8, u8)],
master_secret: &[u8],
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
// Generate a 'proto share' so to speak, with identifier generated and group data filled
let mut proto_share = Share::new()?;
proto_share.group_threshold = group_threshold;
proto_share.group_count = groups.len() as u8;
if master_secret.len() * 8 < proto_share.config.min_strength_bits as usize {
return Err(ErrorKind::Value(format!(
"The length of the master secret ({} bytes) must be at least {} bytes.",
master_secret.len(),
(f64::from(proto_share.config.min_strength_bits) / 8f64).ceil(),
)))?;
}
if master_secret.len() % 2 != 0 {
return Err(ErrorKind::Value(
"The length of the master secret in bytes must be an even number".to_string(),
))?;
}
if group_threshold as usize > groups.len() {
return Err(ErrorKind::Value(format!(
"The requested group threshold ({}) must not exceed the number of groups ({}).",
group_threshold,
groups.len()
)))?;
}
let encoder = util::encrypt::MasterSecretEnc::new()?;
let encrypted_master_secret = encoder.encrypt(
master_secret,
passphrase,
iteration_exponent,
proto_share.identifier,
);
let sp = Splitter::new(None);
let group_shares = sp.split_secret(
&proto_share,
group_threshold,
groups.len() as u8,
&encrypted_master_secret,
)?;
let mut retval: Vec<GroupShare> = vec![];
let gs_len = group_shares.len();
for (i, elem) in group_shares.into_iter().enumerate() {
proto_share.group_index = i as u8;
proto_share.group_threshold = group_threshold;
proto_share.group_count = gs_len as u8;
let (member_threshold, member_count) = groups[i];
let member_shares = sp.split_secret(
&proto_share,
member_threshold,
member_count,
&elem.share_value,
)?;
retval.push(GroupShare {
group_id: proto_share.identifier,
iteration_exponent,
group_index: i as u8,
group_threshold,
group_count: gs_len as u8,
member_threshold,
member_shares,
});
}
Ok(retval)
}
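/// Convenience wrapper around `generate_mnemonics` that first draws a random master
/// secret of `strength_bits` bits (must be a multiple of 16 and at least the configured
/// minimum strength), then splits it with the same group/threshold semantics.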
pub fn generate_mnemonics_random(
group_threshold: u8,
groups: &[(u8, u8)],
strength_bits: u16,
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
let proto_share = Share::new()?;
if strength_bits < proto_share.config.min_strength_bits {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be at least {} bits.",
strength_bits, proto_share.config.min_strength_bits,
)))?;
}
if strength_bits % 16 != 0 {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be a multiple of 16 bits.",
strength_bits,
)))?;
}
generate_mnemonics(
group_threshold,
groups,
&util::fill_vec_rand(strength_bits as usize / 8),
passphrase,
iteration_exponent,
)
}
/// Combines mnemonic shares to obtain the master secret which was previously split using
/// Shamir's secret sharing scheme.
/// mnemonics: List of mnemonics.
/// passphrase: The passphrase used to encrypt the master secret.
/// return: The master secret.
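/// # Example
/// A minimal sketch: recombine the mnemonics produced by `generate_mnemonics` above
/// (any `member_threshold` shares from each of `group_threshold` groups would also do).
/// ```ignore
/// let mnemonics: Vec<Vec<String>> = group_shares
///     .iter()
///     .flat_map(|gs| gs.mnemonic_list().unwrap())
///     .collect();
/// let recovered = combine_mnemonics(&mnemonics, "")?;
/// assert_eq!(recovered, master_secret);
/// ```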
pub fn combine_mnemonics(mnemonics: &[Vec<String>], passphrase: &str) -> Result<Vec<u8>, Error> {
let group_shares = decode_mnemonics(mnemonics)?;
let mut shares = vec![];
for mut gs in group_shares {
shares.push(gs.decode_shares()?);
}
let sp = Splitter::new(None);
// restore proper member index for groups
let shares = shares
.into_iter()
.map(|mut s| {
s.member_index = s.group_index;
s
})
.collect::<Vec<_>>();
let ems = sp.recover_secret(&shares, shares[0].group_threshold)?;
let encoder = util::encrypt::MasterSecretEnc::new()?;
let dms = encoder.decrypt(
&ems.share_value,
passphrase,
ems.iteration_exponent,
ems.identifier,
);
Ok(dms)
}
/// Decodes all Mnemonics to a list of shares and performs error checking
fn decode_mnemonics(mnemonics: &[Vec<String>]) -> Result<Vec<GroupShare>, Error> {
let mut shares = vec![];
if mnemonics.is_empty() {
return Err(ErrorKind::Mnemonic(
"List of mnemonics is empty.".to_string(),
))?;
}
let check_len = mnemonics[0].len();
for m in mnemonics {
if m.len() != check_len {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same length.".to_string(),
))?;
}
shares.push(Share::from_mnemonic(m)?);
}
let check_share = shares[0].clone();
for s in shares.iter() {
if s.identifier != check_share.identifier
|| s.iteration_exponent != check_share.iteration_exponent
{
return Err(ErrorKind::Mnemonic(format!(
"Invalid set of mnemonics. All mnemonics must begin with the same {} words. \
(Identifier and iteration exponent must be the same).",
s.config.id_exp_length_words,
)))?;
}
if s.group_threshold != check_share.group_threshold {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group threshold"
.to_string(),
))?;
}
if s.group_count != check_share.group_count {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group count"
.to_string(),
))?;
}
}
let mut group_index_map = BTreeMap::new();
for s in shares {
if !group_index_map.contains_key(&s.group_index) {
let group_share = GroupShare {
group_id: s.identifier,
group_index: s.group_index,
group_threshold: s.group_threshold,
iteration_exponent: s.iteration_exponent,
group_count: s.group_count,
member_shares: vec![s.clone()],
member_threshold: s.member_threshold,
};
group_index_map.insert(group_share.group_index, group_share);
} else {
let e = group_index_map.get_mut(&s.group_index).unwrap();
e.member_shares.push(s);
}
}
if group_index_map.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonic groups ({}). The required number \
of groups is {}.",
group_index_map.len(),
check_share.group_threshold,
)))?;
}
let groups: Vec<GroupShare> = group_index_map
.into_iter()
.map(|g| g.1)
// remove groups where number of shares is below the member threshold
.filter(|g| g.member_shares.len() >= g.member_threshold as usize)
.collect();
if groups.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(
"Insufficient number of groups with member counts that meet member threshold."
.to_string(),
))?;
}
// TODO: Should probably return info making problem mnemonics easier to identify
for g in groups.iter() {
if g.member_shares.len() < g.member_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonics (Group {}). At least {} mnemonics \
are required.",
g.group_index, g.member_threshold,
)))?;
}
let test_share = g.member_shares[0].clone();
for ms in g.member_shares.iter() {
if test_share.member_threshold != ms.member_threshold {
return Err(ErrorKind::Mnemonic(
"Mismatching member thresholds".to_string(),
))?;
}
}
}
Ok(groups)
}
#[cfg(test)]
mod tests {
use super::*;
fn flatten_mnemonics(nms: &[GroupShare]) -> Result<Vec<Vec<String>>, Error> {
let mut ret = vec![];
for m in nms {
for s in m.member_shares.iter() {
ret.push(s.to_mnemonic()?);
}
}
Ok(ret)
}
#[test]
fn generate_mnemonics_test() -> Result<(), Error> |
// For temporary use as we have no command-line at present
#[test]
fn split_master_secret() -> Result<(), Error> {
let master_secret = b"fdd99010e03f3141662adb33644d5fd2bea0238fa805a2d21e396a22b926558c";
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret.to_vec(), "", 0)?;
for s in &mns {
println!("{}", s);
}
let one = "ending senior academic acne acne lizard armed wrist fancy center blimp broken branch ceiling type bishop senior window mother dominant humidity kidney flip leader cover pupal swimming quarter findings picture much impulse answer threaten bishop express brother sharp unwrap bulge leaves guest ladybug imply thumb dress brave orbit orbit garbage vexed brave deploy tofu regular unusual hunting carbon year";
let two = "ending senior academic agree acid grill magazine trip impact diagnose headset year puny adorn swimming knife aquatic airline prayer hairy unfold forbid diminish sweater brave column holy spit superior replace script oasis firefly scared goat divorce oral laundry violence merit golden founder unusual taste preach ruin lying bumpy single glasses fitness argue daisy secret loud squeeze theater husky already";
let three = "ending senior academic amazing academic carbon sheriff march ordinary advocate climate quarter explain view glasses distance scandal modify maiden welcome include webcam snapshot lilac finance faint facility quantity daughter trash formal failure execute grasp necklace trust bishop privacy library infant slim envy parcel boring mixture deploy dough deny patrol evening brave idea blessing slush lizard woman teaspoon news exclude";
let four = "ending senior academic arcade acquire work exceed network revenue blanket force fiber ting standard fatigue extend acid holiday raspy pink vegan survive river step golden scandal tendency spray parcel vintage amuse remove best else unknown overall mild breathe nuclear wrist criminal jury deal rescue symbolic slow predator railroad verify involve require graduate ambition unknown repair scandal hobo voice railroad";
let five = "ending senior academic axle acquire golden velvet depart swing endorse champion estate slush alien burning painting obesity surprise punish gasoline elephant educate declare rebuild plains making unkind carve exotic unfold counter cowboy extra fantasy cleanup pickup increase type deliver together fumes nylon acrobat fatigue listen elder toxic losing paper image aide satisfy award axis evoke capital academic violence canyon";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let result = combine_mnemonics(&input, "")?;
println!("Result: {}", String::from_utf8(result).unwrap());
Ok(())
}
}
| {
let master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
// single 3 of 5 test, splat out all mnemonics
println!("Single 3 of 5 Encoded: {:?}", master_secret);
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// Test a few distinct groups
let mns = generate_mnemonics(
2,
&[(3, 5), (2, 5), (3, 3), (13, 16)],
&master_secret,
"",
0,
)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// work through some varying sized secrets
let mut master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
for _ in 0..32 {
master_secret.push(0);
master_secret.push(1);
println!("Single 3 of 5 Encoded: {:?}", master_secret);
println!("master secret length: {}", master_secret.len());
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
}
// Test case for a particular scenario which failed with different threshold lengths
// TODO: Fold this into the other tests
let one = "slavery flea acrobat eclipse cultural emission yield invasion seafood says insect square bucket orbit leaves closet heat ugly database decorate";
let two = "slavery flea acrobat emerald aviation escape year axle method forget rebound burden museum game suitable brave texture deploy together flash";
let three = "slavery flea acrobat envelope best ceiling dragon threaten isolate headset decrease organize crunch fiction sniff carbon museum username glasses plunge";
let four = "slavery flea beard echo cradle rebound penalty minister literary object have hazard elephant meaning enemy empty result capture peanut believe";
let five = "slavery flea beard email blind lips evaluate repair decent rich mortgage swimming branch decision unkind ultimate military sugar prepare airport";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let _result = combine_mnemonics(&input, "TREZOR")?;
Ok(())
} | identifier_body |
sssmc39_scheme.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions and structs that specifically define the SLIP-0039 scheme
use super::{Share, Splitter};
use crate::error::{Error, ErrorKind};
use std::collections::BTreeMap;
use std::fmt;
use crate::util;
/// Struct for returned shares
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct GroupShare {
/// Group id
pub group_id: u16,
/// iteration exponent
pub iteration_exponent: u8,
/// group index
pub group_index: u8,
/// group threshold
pub group_threshold: u8,
/// number of group shares
pub group_count: u8,
/// member threshold:
pub member_threshold: u8,
/// Member shares for the group
pub member_shares: Vec<Share>,
}
impl fmt::Display for GroupShare {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Group {} of {} - {} of {} shares required: ",
self.group_index + 1,
self.group_count,
self.member_threshold,
self.member_shares.len()
)?;
for s in &self.member_shares {
for w in s.to_mnemonic().unwrap() {
write!(f, "{} ", w)?;
}
writeln!(f)?;
}
Ok(())
}
}
impl GroupShare {
/// return list of mnemonics
pub fn mnemonic_list(&self) -> Result<Vec<Vec<String>>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?);
}
Ok(ret_vec)
}
/// return list of mnemonics as space separated strings
pub fn mnemonic_list_flat(&self) -> Result<Vec<String>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?.iter().fold(String::new(), |mut acc, s| {
acc.push_str(s);
acc.push(' ');
acc
}))
}
Ok(ret_vec)
}
/// decode member shares to single share
pub fn decode_shares(&mut self) -> Result<Share, Error> {
let sp = Splitter::new(None);
sp.recover_secret(&self.member_shares, self.member_threshold)
}
}
/// Split a master secret into mnemonic shares
/// group_threshold: The number of groups required to reconstruct the master secret
/// groups: A list of (member_threshold, member_count) pairs for each group, where member_count
/// is the number of shares to generate for the group and member_threshold is the number of
/// members required to reconstruct the group secret.
/// master_secret: The master secret to split.
/// passphrase: The passphrase used to encrypt the master secret.
/// iteration_exponent: The iteration exponent.
/// return: List of mnemonics.
pub fn generate_mnemonics(
group_threshold: u8,
groups: &[(u8, u8)],
master_secret: &[u8],
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
// Generate a 'proto share' so to speak, with the identifier generated and group data filled
let mut proto_share = Share::new()?;
proto_share.group_threshold = group_threshold;
proto_share.group_count = groups.len() as u8;
if master_secret.len() * 8 < proto_share.config.min_strength_bits as usize {
return Err(ErrorKind::Value(format!(
"The length of the master secret ({} bytes) must be at least {} bytes.",
master_secret.len(),
(f64::from(proto_share.config.min_strength_bits) / 8f64).ceil(),
)))?;
}
if master_secret.len() % 2 != 0 {
return Err(ErrorKind::Value(
"The length of the master secret in bytes must be an even number".to_string(),
))?;
}
if group_threshold as usize > groups.len() {
return Err(ErrorKind::Value(format!(
"The requested group threshold ({}) must not exceed the number of groups ({}).",
group_threshold,
groups.len()
)))?;
}
let encoder = util::encrypt::MasterSecretEnc::new()?;
let encrypted_master_secret = encoder.encrypt(
master_secret,
passphrase,
iteration_exponent,
proto_share.identifier,
);
let sp = Splitter::new(None);
let group_shares = sp.split_secret(
&proto_share,
group_threshold,
groups.len() as u8,
&encrypted_master_secret,
)?;
let mut retval: Vec<GroupShare> = vec![];
let gs_len = group_shares.len();
for (i, elem) in group_shares.into_iter().enumerate() {
proto_share.group_index = i as u8;
proto_share.group_threshold = group_threshold;
proto_share.group_count = gs_len as u8;
let (member_threshold, member_count) = groups[i];
let member_shares = sp.split_secret(
&proto_share,
member_threshold,
member_count,
&elem.share_value,
)?;
retval.push(GroupShare {
group_id: proto_share.identifier,
iteration_exponent,
group_index: i as u8,
group_threshold,
group_count: gs_len as u8,
member_threshold,
member_shares,
});
}
Ok(retval)
}
pub fn generate_mnemonics_random(
group_threshold: u8,
groups: &[(u8, u8)],
strength_bits: u16,
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
let proto_share = Share::new()?;
if strength_bits < proto_share.config.min_strength_bits {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be at least {} bits.",
strength_bits, proto_share.config.min_strength_bits,
)))?;
}
if strength_bits % 16 != 0 {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be a multiple of 16 bits.",
strength_bits,
)))?;
}
generate_mnemonics(
group_threshold,
groups,
&util::fill_vec_rand(strength_bits as usize / 8),
passphrase,
iteration_exponent,
)
}
/// Combines mnemonic shares to obtain the master secret which was previously split using
/// Shamir's secret sharing scheme.
/// mnemonics: List of mnemonics.
/// passphrase: The passphrase used to encrypt the master secret.
/// return: The master secret.
pub fn combine_mnemonics(mnemonics: &[Vec<String>], passphrase: &str) -> Result<Vec<u8>, Error> {
let group_shares = decode_mnemonics(mnemonics)?;
let mut shares = vec![];
for mut gs in group_shares {
shares.push(gs.decode_shares()?);
}
let sp = Splitter::new(None);
// restore proper member index for groups
let shares = shares
.into_iter()
.map(|mut s| {
s.member_index = s.group_index;
s
})
.collect::<Vec<_>>();
let ems = sp.recover_secret(&shares, shares[0].group_threshold)?;
let encoder = util::encrypt::MasterSecretEnc::new()?;
let dms = encoder.decrypt(
&ems.share_value,
passphrase,
ems.iteration_exponent,
ems.identifier,
);
Ok(dms)
}
/// Decodes all Mnemonics to a list of shares and performs error checking
fn decode_mnemonics(mnemonics: &[Vec<String>]) -> Result<Vec<GroupShare>, Error> {
let mut shares = vec![];
if mnemonics.is_empty() |
let check_len = mnemonics[0].len();
for m in mnemonics {
if m.len() != check_len {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same length.".to_string(),
))?;
}
shares.push(Share::from_mnemonic(m)?);
}
let check_share = shares[0].clone();
for s in shares.iter() {
if s.identifier != check_share.identifier
|| s.iteration_exponent != check_share.iteration_exponent
{
return Err(ErrorKind::Mnemonic(format!(
"Invalid set of mnemonics. All mnemonics must begin with the same {} words. \
(Identifier and iteration exponent must be the same).",
s.config.id_exp_length_words,
)))?;
}
if s.group_threshold != check_share.group_threshold {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group threshold"
.to_string(),
))?;
}
if s.group_count != check_share.group_count {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group count"
.to_string(),
))?;
}
}
let mut group_index_map = BTreeMap::new();
for s in shares {
if !group_index_map.contains_key(&s.group_index) {
let group_share = GroupShare {
group_id: s.identifier,
group_index: s.group_index,
group_threshold: s.group_threshold,
iteration_exponent: s.iteration_exponent,
group_count: s.group_count,
member_shares: vec![s.clone()],
member_threshold: s.member_threshold,
};
group_index_map.insert(group_share.group_index, group_share);
} else {
let e = group_index_map.get_mut(&s.group_index).unwrap();
e.member_shares.push(s);
}
}
if group_index_map.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonic groups ({}). The required number \
of groups is {}.",
group_index_map.len(),
check_share.group_threshold,
)))?;
}
let groups: Vec<GroupShare> = group_index_map
.into_iter()
.map(|g| g.1)
// remove groups where number of shares is below the member threshold
.filter(|g| g.member_shares.len() >= g.member_threshold as usize)
.collect();
if groups.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(
"Insufficient number of groups with member counts that meet member threshold."
.to_string(),
))?;
}
// TODO: Should probably return info making problem mnemonics easier to identify
for g in groups.iter() {
if g.member_shares.len() < g.member_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonics (Group {}). At least {} mnemonics \
are required.",
g.group_index, g.member_threshold,
)))?;
}
let test_share = g.member_shares[0].clone();
for ms in g.member_shares.iter() {
if test_share.member_threshold != ms.member_threshold {
return Err(ErrorKind::Mnemonic(
"Mismatching member thresholds".to_string(),
))?;
}
}
}
Ok(groups)
}
#[cfg(test)]
mod tests {
use super::*;
fn flatten_mnemonics(nms: &[GroupShare]) -> Result<Vec<Vec<String>>, Error> {
let mut ret = vec![];
for m in nms {
for s in m.member_shares.iter() {
ret.push(s.to_mnemonic()?);
}
}
Ok(ret)
}
#[test]
fn generate_mnemonics_test() -> Result<(), Error> {
let master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
// single 3 of 5 test, splat out all mnemonics
println!("Single 3 of 5 Encoded: {:?}", master_secret);
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// Test a few distinct groups
let mns = generate_mnemonics(
2,
&[(3, 5), (2, 5), (3, 3), (13, 16)],
&master_secret,
"",
0,
)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// work through some varying sized secrets
let mut master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
for _ in 0..32 {
master_secret.push(0);
master_secret.push(1);
println!("Single 3 of 5 Encoded: {:?}", master_secret);
println!("master secret length: {}", master_secret.len());
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
}
// Test case for a particular scenario which failed with different threshold lengths
// TODO: Fold this into the other tests
let one = "slavery flea acrobat eclipse cultural emission yield invasion seafood says insect square bucket orbit leaves closet heat ugly database decorate";
let two = "slavery flea acrobat emerald aviation escape year axle method forget rebound burden museum game suitable brave texture deploy together flash";
let three = "slavery flea acrobat envelope best ceiling dragon threaten isolate headset decrease organize crunch fiction sniff carbon museum username glasses plunge";
let four = "slavery flea beard echo cradle rebound penalty minister literary object have hazard elephant meaning enemy empty result capture peanut believe";
let five = "slavery flea beard email blind lips evaluate repair decent rich mortgage swimming branch decision unkind ultimate military sugar prepare airport";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let _result = combine_mnemonics(&input, "TREZOR")?;
Ok(())
}
// For temporary use as we have no command-line at present
#[test]
fn split_master_secret() -> Result<(), Error> {
let master_secret = b"fdd99010e03f3141662adb33644d5fd2bea0238fa805a2d21e396a22b926558c";
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret.to_vec(), "", 0)?;
for s in &mns {
println!("{}", s);
}
let one = "ending senior academic acne acne lizard armed wrist fancy center blimp broken branch ceiling type bishop senior window mother dominant humidity kidney flip leader cover pupal swimming quarter findings picture much impulse answer threaten bishop express brother sharp unwrap bulge leaves guest ladybug imply thumb dress brave orbit orbit garbage vexed brave deploy tofu regular unusual hunting carbon year";
let two = "ending senior academic agree acid grill magazine trip impact diagnose headset year puny adorn swimming knife aquatic airline prayer hairy unfold forbid diminish sweater brave column holy spit superior replace script oasis firefly scared goat divorce oral laundry violence merit golden founder unusual taste preach ruin lying bumpy single glasses fitness argue daisy secret loud squeeze theater husky already";
let three = "ending senior academic amazing academic carbon sheriff march ordinary advocate climate quarter explain view glasses distance scandal modify maiden welcome include webcam snapshot lilac finance faint facility quantity daughter trash formal failure execute grasp necklace trust bishop privacy library infant slim envy parcel boring mixture deploy dough deny patrol evening brave idea blessing slush lizard woman teaspoon news exclude";
let four = "ending senior academic arcade acquire work exceed network revenue blanket force fiber ting standard fatigue extend acid holiday raspy pink vegan survive river step golden scandal tendency spray parcel vintage amuse remove best else unknown overall mild breathe nuclear wrist criminal jury deal rescue symbolic slow predator railroad verify involve require graduate ambition unknown repair scandal hobo voice railroad";
let five = "ending senior academic axle acquire golden velvet depart swing endorse champion estate slush alien burning painting obesity surprise punish gasoline elephant educate declare rebuild plains making unkind carve exotic unfold counter cowboy extra fantasy cleanup pickup increase type deliver together fumes nylon acrobat fatigue listen elder toxic losing paper image aide satisfy award axis evoke capital academic violence canyon";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let result = combine_mnemonics(&input, "")?;
println!("Result: {}", String::from_utf8(result).unwrap());
Ok(())
}
}
| {
return Err(ErrorKind::Mnemonic(
"List of mnemonics is empty.".to_string(),
))?;
} | conditional_block |
sssmc39_scheme.rs | // Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions and structs that specifically define the SLIP-0039 scheme
use super::{Share, Splitter};
use crate::error::{Error, ErrorKind};
use std::collections::BTreeMap;
use std::fmt;
use crate::util;
/// Struct for returned shares
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct GroupShare {
/// Group id
pub group_id: u16,
/// iteration exponent
pub iteration_exponent: u8,
/// group index
pub group_index: u8,
/// group threshold
pub group_threshold: u8,
/// number of group shares
pub group_count: u8,
/// member threshold:
pub member_threshold: u8,
/// Member shares for the group
pub member_shares: Vec<Share>,
}
impl fmt::Display for GroupShare {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"Group {} of {} - {} of {} shares required: ",
self.group_index + 1,
self.group_count,
self.member_threshold,
self.member_shares.len()
)?;
for s in &self.member_shares {
for w in s.to_mnemonic().unwrap() {
write!(f, "{} ", w)?;
}
writeln!(f)?;
}
Ok(())
}
}
impl GroupShare {
/// return list of mnemonics
pub fn mnemonic_list(&self) -> Result<Vec<Vec<String>>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?);
}
Ok(ret_vec)
}
/// return list of mnemonics as space separated strings
pub fn mnemonic_list_flat(&self) -> Result<Vec<String>, Error> {
let mut ret_vec = vec![];
for s in &self.member_shares {
ret_vec.push(s.to_mnemonic()?.iter().fold(String::new(), |mut acc, s| {
acc.push_str(s);
acc.push(' ');
acc
}))
}
Ok(ret_vec)
}
/// decode member shares to single share
pub fn decode_shares(&mut self) -> Result<Share, Error> {
let sp = Splitter::new(None);
sp.recover_secret(&self.member_shares, self.member_threshold)
}
}
/// Split a master secret into mnemonic shares
/// group_threshold: The number of groups required to reconstruct the master secret
/// groups: A list of (member_threshold, member_count) pairs for each group, where member_count
/// is the number of shares to generate for the group and member_threshold is the number of
/// members required to reconstruct the group secret.
/// master_secret: The master secret to split.
/// passphrase: The passphrase used to encrypt the master secret.
/// iteration_exponent: The iteration exponent.
/// return: List of mnemonics.
pub fn generate_mnemonics(
group_threshold: u8,
groups: &[(u8, u8)],
master_secret: &[u8],
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
// Generate a 'proto share' so to speak, with the identifier generated and group data filled
let mut proto_share = Share::new()?;
proto_share.group_threshold = group_threshold;
proto_share.group_count = groups.len() as u8;
if master_secret.len() * 8 < proto_share.config.min_strength_bits as usize {
return Err(ErrorKind::Value(format!(
"The length of the master secret ({} bytes) must be at least {} bytes.",
master_secret.len(),
(f64::from(proto_share.config.min_strength_bits) / 8f64).ceil(),
)))?;
}
if master_secret.len() % 2 != 0 {
return Err(ErrorKind::Value(
"The length of the master secret in bytes must be an even number".to_string(),
))?;
}
if group_threshold as usize > groups.len() {
return Err(ErrorKind::Value(format!(
"The requested group threshold ({}) must not exceed the number of groups ({}).",
group_threshold,
groups.len()
)))?;
}
let encoder = util::encrypt::MasterSecretEnc::new()?;
let encrypted_master_secret = encoder.encrypt(
master_secret,
passphrase,
iteration_exponent,
proto_share.identifier,
);
let sp = Splitter::new(None);
let group_shares = sp.split_secret(
&proto_share,
group_threshold,
groups.len() as u8,
&encrypted_master_secret,
)?;
let mut retval: Vec<GroupShare> = vec![];
let gs_len = group_shares.len();
for (i, elem) in group_shares.into_iter().enumerate() {
proto_share.group_index = i as u8;
proto_share.group_threshold = group_threshold;
proto_share.group_count = gs_len as u8;
let (member_threshold, member_count) = groups[i];
let member_shares = sp.split_secret(
&proto_share,
member_threshold,
member_count,
&elem.share_value,
)?;
retval.push(GroupShare {
group_id: proto_share.identifier,
iteration_exponent,
group_index: i as u8,
group_threshold,
group_count: gs_len as u8,
member_threshold,
member_shares,
});
}
Ok(retval)
}
pub fn generate_mnemonics_random(
group_threshold: u8,
groups: &[(u8, u8)],
strength_bits: u16,
passphrase: &str,
iteration_exponent: u8,
) -> Result<Vec<GroupShare>, Error> {
let proto_share = Share::new()?;
if strength_bits < proto_share.config.min_strength_bits {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be at least {} bits.",
strength_bits, proto_share.config.min_strength_bits,
)))?;
}
if strength_bits % 16 != 0 {
return Err(ErrorKind::Value(format!(
"The requested strength of the master secret({} bits) must be a multiple of 16 bits.",
strength_bits,
)))?;
}
generate_mnemonics(
group_threshold,
groups,
&util::fill_vec_rand(strength_bits as usize / 8),
passphrase,
iteration_exponent,
)
}
/// Combines mnemonic shares to obtain the master secret which was previously split using
/// Shamir's secret sharing scheme.
/// mnemonics: List of mnemonics.
/// passphrase: The passphrase used to encrypt the master secret.
/// return: The master secret.
pub fn combine_mnemonics(mnemonics: &[Vec<String>], passphrase: &str) -> Result<Vec<u8>, Error> {
let group_shares = decode_mnemonics(mnemonics)?;
let mut shares = vec![];
for mut gs in group_shares {
shares.push(gs.decode_shares()?);
}
let sp = Splitter::new(None);
// restore proper member index for groups
let shares = shares
.into_iter()
.map(|mut s| {
s.member_index = s.group_index;
s
})
.collect::<Vec<_>>();
let ems = sp.recover_secret(&shares, shares[0].group_threshold)?;
let encoder = util::encrypt::MasterSecretEnc::new()?;
let dms = encoder.decrypt(
&ems.share_value,
passphrase,
ems.iteration_exponent,
ems.identifier,
);
Ok(dms)
}
/// Decodes all Mnemonics to a list of shares and performs error checking
fn | (mnemonics: &[Vec<String>]) -> Result<Vec<GroupShare>, Error> {
let mut shares = vec![];
if mnemonics.is_empty() {
return Err(ErrorKind::Mnemonic(
"List of mnemonics is empty.".to_string(),
))?;
}
let check_len = mnemonics[0].len();
for m in mnemonics {
if m.len() != check_len {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same length.".to_string(),
))?;
}
shares.push(Share::from_mnemonic(m)?);
}
let check_share = shares[0].clone();
for s in shares.iter() {
if s.identifier != check_share.identifier
|| s.iteration_exponent != check_share.iteration_exponent
{
return Err(ErrorKind::Mnemonic(format!(
"Invalid set of mnemonics. All mnemonics must begin with the same {} words. \
(Identifier and iteration exponent must be the same).",
s.config.id_exp_length_words,
)))?;
}
if s.group_threshold != check_share.group_threshold {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group threshold"
.to_string(),
))?;
}
if s.group_count != check_share.group_count {
return Err(ErrorKind::Mnemonic(
"Invalid set of mnemonics. All mnemonics must have the same group count"
.to_string(),
))?;
}
}
let mut group_index_map = BTreeMap::new();
for s in shares {
if !group_index_map.contains_key(&s.group_index) {
let group_share = GroupShare {
group_id: s.identifier,
group_index: s.group_index,
group_threshold: s.group_threshold,
iteration_exponent: s.iteration_exponent,
group_count: s.group_count,
member_shares: vec![s.clone()],
member_threshold: s.member_threshold,
};
group_index_map.insert(group_share.group_index, group_share);
} else {
let e = group_index_map.get_mut(&s.group_index).unwrap();
e.member_shares.push(s);
}
}
if group_index_map.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonic groups ({}). The required number \
of groups is {}.",
group_index_map.len(),
check_share.group_threshold,
)))?;
}
let groups: Vec<GroupShare> = group_index_map
.into_iter()
.map(|g| g.1)
// remove groups where number of shares is below the member threshold
.filter(|g| g.member_shares.len() >= g.member_threshold as usize)
.collect();
if groups.len() < check_share.group_threshold as usize {
return Err(ErrorKind::Mnemonic(
"Insufficient number of groups with member counts that meet member threshold."
.to_string(),
))?;
}
// TODO: Should probably return info making problem mnemonics easier to identify
for g in groups.iter() {
if g.member_shares.len() < g.member_threshold as usize {
return Err(ErrorKind::Mnemonic(format!(
"Insufficient number of mnemonics (Group {}). At least {} mnemonics \
are required.",
g.group_index, g.member_threshold,
)))?;
}
let test_share = g.member_shares[0].clone();
for ms in g.member_shares.iter() {
if test_share.member_threshold != ms.member_threshold {
return Err(ErrorKind::Mnemonic(
"Mismatching member thresholds".to_string(),
))?;
}
}
}
Ok(groups)
}
#[cfg(test)]
mod tests {
use super::*;
fn flatten_mnemonics(nms: &[GroupShare]) -> Result<Vec<Vec<String>>, Error> {
let mut ret = vec![];
for m in nms {
for s in m.member_shares.iter() {
ret.push(s.to_mnemonic()?);
}
}
Ok(ret)
}
#[test]
fn generate_mnemonics_test() -> Result<(), Error> {
let master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
// single 3 of 5 test, splat out all mnemonics
println!("Single 3 of 5 Encoded: {:?}", master_secret);
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// Test a few distinct groups
let mns = generate_mnemonics(
2,
&[(3, 5), (2, 5), (3, 3), (13, 16)],
&master_secret,
"",
0,
)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
// work through some varying sized secrets
let mut master_secret = b"\x0c\x94\x90\xbcn\xd6\xbc\xbf\xac>\xbe}\xeeV\xf2P".to_vec();
for _ in 0..32 {
master_secret.push(0);
master_secret.push(1);
println!("Single 3 of 5 Encoded: {:?}", master_secret);
println!("master secret length: {}", master_secret.len());
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret, "", 0)?;
for s in &mns {
println!("{}", s);
}
let result = combine_mnemonics(&flatten_mnemonics(&mns)?, "")?;
println!("Single 3 of 5 Decoded: {:?}", result);
assert_eq!(result, master_secret);
}
// Test case for a particular scenario which failed with different threshold lengths
// TODO: Fold this into the other tests
let one = "slavery flea acrobat eclipse cultural emission yield invasion seafood says insect square bucket orbit leaves closet heat ugly database decorate";
let two = "slavery flea acrobat emerald aviation escape year axle method forget rebound burden museum game suitable brave texture deploy together flash";
let three = "slavery flea acrobat envelope best ceiling dragon threaten isolate headset decrease organize crunch fiction sniff carbon museum username glasses plunge";
let four = "slavery flea beard echo cradle rebound penalty minister literary object have hazard elephant meaning enemy empty result capture peanut believe";
let five = "slavery flea beard email blind lips evaluate repair decent rich mortgage swimming branch decision unkind ultimate military sugar prepare airport";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let _result = combine_mnemonics(&input, "TREZOR")?;
Ok(())
}
// For temporary use as we have no command-line at present
#[test]
fn split_master_secret() -> Result<(), Error> {
let master_secret = b"fdd99010e03f3141662adb33644d5fd2bea0238fa805a2d21e396a22b926558c";
let mns = generate_mnemonics(1, &[(3, 5)], &master_secret.to_vec(), "", 0)?;
for s in &mns {
println!("{}", s);
}
let one = "ending senior academic acne acne lizard armed wrist fancy center blimp broken branch ceiling type bishop senior window mother dominant humidity kidney flip leader cover pupal swimming quarter findings picture much impulse answer threaten bishop express brother sharp unwrap bulge leaves guest ladybug imply thumb dress brave orbit orbit garbage vexed brave deploy tofu regular unusual hunting carbon year";
let two = "ending senior academic agree acid grill magazine trip impact diagnose headset year puny adorn swimming knife aquatic airline prayer hairy unfold forbid diminish sweater brave column holy spit superior replace script oasis firefly scared goat divorce oral laundry violence merit golden founder unusual taste preach ruin lying bumpy single glasses fitness argue daisy secret loud squeeze theater husky already";
let three = "ending senior academic amazing academic carbon sheriff march ordinary advocate climate quarter explain view glasses distance scandal modify maiden welcome include webcam snapshot lilac finance faint facility quantity daughter trash formal failure execute grasp necklace trust bishop privacy library infant slim envy parcel boring mixture deploy dough deny patrol evening brave idea blessing slush lizard woman teaspoon news exclude";
let four = "ending senior academic arcade acquire work exceed network revenue blanket force fiber ting standard fatigue extend acid holiday raspy pink vegan survive river step golden scandal tendency spray parcel vintage amuse remove best else unknown overall mild breathe nuclear wrist criminal jury deal rescue symbolic slow predator railroad verify involve require graduate ambition unknown repair scandal hobo voice railroad";
let five = "ending senior academic axle acquire golden velvet depart swing endorse champion estate slush alien burning painting obesity surprise punish gasoline elephant educate declare rebuild plains making unkind carve exotic unfold counter cowboy extra fantasy cleanup pickup increase type deliver together fumes nylon acrobat fatigue listen elder toxic losing paper image aide satisfy award axis evoke capital academic violence canyon";
let mut input = vec![];
input.push(one.split(' ').map(|s| s.to_owned()).collect());
input.push(two.split(' ').map(|s| s.to_owned()).collect());
input.push(three.split(' ').map(|s| s.to_owned()).collect());
input.push(four.split(' ').map(|s| s.to_owned()).collect());
input.push(five.split(' ').map(|s| s.to_owned()).collect());
let result = combine_mnemonics(&input, "")?;
println!("Result: {}", String::from_utf8(result).unwrap());
Ok(())
}
}
| decode_mnemonics | identifier_name |
project.py | #!/usr/bin/env python
# Import modules
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
import rospy
import tf
from geometry_msgs.msg import Pose
from std_msgs.msg import Float64
from std_msgs.msg import Int32
from std_msgs.msg import String
from pr2_robot.srv import *
from rospy_message_converter import message_converter
import yaml
# Helper function to get surface normals
def get_normals(cloud):
get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
return get_normals_prox(cloud).cluster
# Helper function to create a yaml friendly dictionary from ROS messages
def make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):
yaml_dict = {}
yaml_dict["test_scene_num"] = test_scene_num.data
yaml_dict["arm_name"] = arm_name.data
yaml_dict["object_name"] = object_name.data
yaml_dict["pick_pose"] = message_converter.convert_ros_message_to_dictionary(pick_pose)
yaml_dict["place_pose"] = message_converter.convert_ros_message_to_dictionary(place_pose)
return yaml_dict
# Helper function to output to yaml file
def send_to_yaml(yaml_filename, dict_list):
print "sending to yaml", dict_list
for d in dict_list:
print "test_scene_num", type(d["test_scene_num"]), "arm_name", type(d["arm_name"]), "object_name", type(d["object_name"]), "pick_pose", type(d["pick_pose"]), "place_pose", type(d["place_pose"])
data_dict = {"object_list": dict_list}
with open(yaml_filename, 'w') as outfile:
yaml.dump(data_dict, outfile, default_flow_style=False)
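# The dumped YAML (block style, keys sorted alphabetically by yaml.dump) is expected to
# look roughly like:
#   object_list:
#   - arm_name: <left|right>
#     object_name: <model name from the pick list>
#     pick_pose: <position/orientation built from the detected object's centroid>
#     place_pose: <position/orientation of the target dropbox>
#     test_scene_num: <1|2|3>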
def statistical_outlier_removal(cloud):
# Much like the previous filters, we start by creating a filter object:
outlier_filter = cloud.make_statistical_outlier_filter()
# Set the number of neighboring points to analyze for any given point
outlier_filter.set_mean_k(50)
# Set threshold scale factor
x = 0.9
# Any point with a mean distance larger than the global (mean distance + x * std_dev) will be considered an outlier
outlier_filter.set_std_dev_mul_thresh(x)
# Finally call the filter function for magic
cloud_filtered = outlier_filter.filter()
return cloud_filtered
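# Illustrative usage (assuming `pcl_data` is the raw cloud obtained via ros_to_pcl):
# cloud_denoised = statistical_outlier_removal(pcl_data)
# Points whose mean distance to their 50 nearest neighbors exceeds the global
# mean distance + 0.9 * std_dev are treated as noise and dropped.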
def voxel_downsample(cloud): | cloud (PointCloud_PointXYZRGB): A point cloud
Returns:
PointCloud_PointXYZRGB: A downsampled point cloud
"""
# Create a VoxelGrid filter object for our input point cloud
vox = cloud.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
LEAF_SIZE = 0.005
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
return cloud_filtered
def apply_passthrough_filter(cloud, axis, axis_min, axis_max):
""" Apply a passthrough filter to a cloud
Args:
cloud (PointCloud_PointXYZRGB): A point cloud
Returns:
PointCloud_PointXYZRGB: A filtered point cloud
"""
# Create a PassThrough filter object.
passthrough = cloud.make_passthrough_filter()
# Assign axis and range to the passthrough filter object.
filter_axis = axis
passthrough.set_filter_field_name(filter_axis)
#axis_min = 0.6
#axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
# Finally use the filter function to obtain the resultant point cloud.
cloud_filtered = passthrough.filter()
return cloud_filtered
def ransac(cloud, sacmodel):
""" Segments a cloud using a sac model
Args:
cloud (PointCloud_PointXYZRGB): A point cloud
sacmodel (pcl.SACMODEL): A model points will be fit to
Returns:
A set of inliers and coefficients
"""
# Create the segmentation object
seg = cloud.make_segmenter()
# Set the model you wish to fit
seg.set_model_type(sacmodel)
seg.set_method_type(pcl.SAC_RANSAC)
# Max distance for a point to be considered fitting the model
# Experiment with different values for max_distance
# for segmenting the table
max_distance = 0.01
seg.set_distance_threshold(max_distance)
# Call the segment function to obtain set of inlier indices and model coefficients
inliers, coefficients = seg.segment()
return inliers, coefficients
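# Illustrative usage (assuming `cloud_filtered` is the passthrough-filtered cloud):
# inliers, _coefficients = ransac(cloud_filtered, pcl.SACMODEL_PLANE)
# cloud_table = cloud_filtered.extract(inliers, negative=False)
# cloud_objects = cloud_filtered.extract(inliers, negative=True)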
def euclidean_clustering(cloud):
white_cloud = XYZRGB_to_XYZ(cloud)
tree = white_cloud.make_kdtree()
# Create a cluster extraction object
ec = white_cloud.make_EuclideanClusterExtraction()
# Set tolerances for distance threshold
# as well as minimum and maximum cluster size (in points)
ec.set_ClusterTolerance(0.01)
ec.set_MinClusterSize(10)
ec.set_MaxClusterSize(25000)
# Search the k-d tree for clusters
ec.set_SearchMethod(tree)
# Extract indices for each of the discovered clusters
cluster_indices = ec.Extract()
return cluster_indices, white_cloud
def color_clusters(cluster_indices, white_cloud):
#Assign a color corresponding to each segmented object in scene
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])])
#Create new cloud containing all clusters, each with unique color
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
return cluster_cloud
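# Illustrative usage (assuming `cloud_objects` holds the object points left after RANSAC):
# cluster_indices, white_cloud = euclidean_clustering(cloud_objects)
# cluster_cloud = color_clusters(cluster_indices, white_cloud)
# ros_cluster_cloud = pcl_to_ros(cluster_cloud)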
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# Exercise-2 TODOs:
# Convert ROS msg to PCL data
pcl_data = ros_to_pcl(pcl_msg)
#filename = 'pcl_data.pcd'
#pcl.save(pcl_data, filename)
# Statistical Outlier Filtering
cloud_filtered = statistical_outlier_removal(pcl_data)
#filename = 'statistical_outlier_removal.pcd'
#pcl.save(cloud_filtered, filename)
# Voxel Grid Downsampling
cloud_filtered = voxel_downsample(cloud_filtered)
#filename = 'voxel_downsampled.pcd'
#pcl.save(cloud_filtered, filename)
# PassThrough Filter along z
axis_min = 0.6
axis_max = 1.1
cloud_filtered = apply_passthrough_filter(cloud_filtered, 'z', axis_min, axis_max)
#filename = 'pass_through_filtered.pcd'
#pcl.save(cloud_filtered, filename)
# PassThrough Filter along y
axis_min = -0.5
axis_max = 0.5
cloud_filtered = apply_passthrough_filter(cloud_filtered, 'y', axis_min, axis_max)
filename = 'pass_through_filtered_y.pcd'
pcl.save(cloud_filtered, filename)
# RANSAC Plane Segmentation
inliers, coefficients = ransac(cloud_filtered, pcl.SACMODEL_PLANE)
# Extract inliers and outliers
extracted_inliers = cloud_filtered.extract(inliers, negative=False)
#filename = 'extracted_inliers.pcd'
#pcl.save(extracted_inliers, filename)
extracted_outliers = cloud_filtered.extract(inliers, negative=True)
#filename = 'extracted_outliers.pcd'
#pcl.save(extracted_outliers, filename)
cloud_table = extracted_inliers
cloud_objects = extracted_outliers
# Euclidean Clustering
cluster_indices, white_cloud = euclidean_clustering(cloud_objects)
# Create Cluster-Mask Point Cloud to visualize each cluster separately
cluster_cloud = color_clusters(cluster_indices, white_cloud)
filename = 'colored_cluster_cloud.pcd'
pcl.save(cluster_cloud, filename)
# Convert PCL data to ROS messages
ros_cloud_table = pcl_to_ros(cloud_table)
ros_cloud_objects = pcl_to_ros(cloud_objects)
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
# Publish ROS messages
pcl_objects_pub.publish(ros_cloud_objects)
pcl_table_pub.publish(ros_cloud_table)
pcl_cluster_pub.publish(ros_cluster_cloud)
# Exercise-3 TODOs:
# Classify the clusters! (loop through each detected cluster one at a time)
detected_objects_labels = []
detected_objects = []
for index, pts_list in enumerate(cluster_indices):
# Grab the points for the cluster
pcl_cluster = cloud_objects.extract(pts_list)
# Compute the associated feature vector
ros_cluster = pcl_to_ros(pcl_cluster)
sample_cloud = ros_cluster
chists = compute_color_histograms(sample_cloud, using_hsv=True)
normals = get_normals(sample_cloud)
nhists = compute_normal_histograms(normals)
feature = np.concatenate((chists, nhists))
# Make the prediction
prediction = clf.predict(scaler.transform(feature.reshape(1,-1)))
label = encoder.inverse_transform(prediction)[0]
detected_objects_labels.append(label)
# Publish a label into RViz
label_pos = list(white_cloud[pts_list[0]])
label_pos[2] += .4
object_markers_pub.publish(make_label(label,label_pos, index))
# Add the detected object to the list of detected objects.
do = DetectedObject()
do.label = label
do.cloud = ros_cluster
detected_objects.append(do)
rospy.loginfo('Detected {} objects: {}'.format(len(detected_objects_labels), detected_objects_labels))
# Publish the list of detected objects
detected_objects_pub.publish(detected_objects)
# Suggested location for where to invoke your pr2_mover() function within pcl_callback()
# Could add some logic to determine whether or not your object detections are robust
# before calling pr2_mover()
detected_objects_list = detected_objects
try:
pr2_mover(detected_objects_list)
except rospy.ROSInterruptException:
pass
# function to load parameters and request PickPlace service
def pr2_mover(object_list):
# TODO: Initialize variables
dict_list = []
# TODO: Get/Read parameters
object_list_param = rospy.get_param('/object_list')
dropbox_list_param = rospy.get_param('/dropbox')
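# Expected parameter layout (loaded from the project's pick list / dropbox YAML files):
# object_list: [{name: <model name>, group: <dropbox group, e.g. 'red'/'green'>}, ...]
# dropbox: [{name: <'left'|'right'>, group: <dropbox group>, position: [x, y, z]}, ...]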
# TODO: Parse parameters into individual variables
test_scene_num = Int32()
test_scene_num.data = 3
""" labels = []
centroids = [] # to be list of tuples (x, y, z)
for object in object_list:
labels.append(object.label)
points_arr = ros_to_pcl(object.cloud).to_array()
centroids.append(np.mean(points_arr, axis=0)[:3])
"""
# TODO: Rotate PR2 in place to capture side tables for the collision map
# TODO: Loop through the pick list
for object in object_list:
# TODO: Get the PointCloud for a given object and obtain its centroid
points_arr = ros_to_pcl(object.cloud).to_array()
c = np.mean(points_arr, axis=0)[:3]
centroid = map(np.asscalar, c)
print "type(centroid)", type(centroid), "[0]", type(centroid[0]), "[1]", type(centroid[1]), "[2]", type(centroid[2])
#centroids.append(np.mean(points_arr, axis=0)[:3])
object_name = String()
object_name.data = str(object.label)
# TODO: Create 'place_pose' for the object
group = None
for o in object_list_param:
if object.label == o['name']:
group = o['group']
print "for ", o['name'], "group found",group
object_arm_name = String()
place_pose = Pose()
for box in dropbox_list_param:
if group == box['group']:
# Assign the arm to be used for pick_place
object_arm_name.data = box['name']
place_pose.position.x = box['position'][0]
place_pose.position.y = box['position'][1]
place_pose.position.z = box['position'][2]
print "type(place_pose.x,y,z): (", type(place_pose.position.x), ",", type(place_pose.position.y), ",", type(place_pose.position.z), ")"
pick_pose = Pose()
pick_pose.position.x = centroid[0]
pick_pose.position.y = centroid[1]
pick_pose.position.z = centroid[2]
# TODO: Create a list of dictionaries (made with make_yaml_dict()) for later output to yaml format
yaml_dict = make_yaml_dict(test_scene_num, object_arm_name, object_name, pick_pose, place_pose)
dict_list.append(yaml_dict)
# Wait for 'pick_place_routine' service to come up
rospy.wait_for_service('pick_place_routine')
try:
pick_place_routine = rospy.ServiceProxy('pick_place_routine', PickPlace)
# Insert your message variables to be sent as a service request
resp = pick_place_routine(test_scene_num, object_name, object_arm_name, pick_pose, place_pose)
print ("Response: ",resp.success)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
# Output your request parameters into output yaml file
yaml_filename = "output_" + str(test_scene_num.data) + ".yaml"
send_to_yaml(yaml_filename, dict_list)
if __name__ == '__main__':
# ROS node initialization
rospy.init_node('clustering', anonymous=True)
# Create Subscribers
#pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud", pc2.PointCloud2, pcl_callback, queue_size=1)
pcl_cam_sub = rospy.Subscriber("/pr2/world/points", pc2.PointCloud2, pcl_callback, queue_size=1)
# Create Publishers
pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2, queue_size=1)
pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1)
pcl_cluster_pub = rospy.Publisher("/pcl_cluster", PointCloud2, queue_size=1)
object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=1)
detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
# Load Model From disk
model = pickle.load(open('model.sav', 'rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# Initialize color_list
get_color_list.color_list = []
# Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin() | """ Voxel Grid filter
Args: | random_line_split |
project.py | #!/usr/bin/env python
# Import modules
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
import rospy
import tf
from geometry_msgs.msg import Pose
from std_msgs.msg import Float64
from std_msgs.msg import Int32
from std_msgs.msg import String
from pr2_robot.srv import *
from rospy_message_converter import message_converter
import yaml
# Helper function to get surface normals
def get_normals(cloud):
get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
return get_normals_prox(cloud).cluster
# Helper function to create a yaml friendly dictionary from ROS messages
def make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):
yaml_dict = {}
yaml_dict["test_scene_num"] = test_scene_num.data
yaml_dict["arm_name"] = arm_name.data
yaml_dict["object_name"] = object_name.data
yaml_dict["pick_pose"] = message_converter.convert_ros_message_to_dictionary(pick_pose)
yaml_dict["place_pose"] = message_converter.convert_ros_message_to_dictionary(place_pose)
return yaml_dict
# Helper function to output to yaml file
def send_to_yaml(yaml_filename, dict_list):
print "sending to yaml", dict_list
for d in dict_list:
print "test_scene_num", type(d["test_scene_num"]), "arm_name", type(d["arm_name"]), "object_name", type(d["object_name"]), "pick_pose", type(d["pick_pose"]), "place_pose", type(d["place_pose"])
data_dict = {"object_list": dict_list}
with open(yaml_filename, 'w') as outfile:
yaml.dump(data_dict, outfile, default_flow_style=False)
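# Illustration only (hypothetical object name and values, not from a real run):
# with the dictionaries produced by make_yaml_dict(), the file written above is
# expected to look roughly like the following block-style YAML:
#
# object_list:
# - arm_name: right
#   object_name: biscuits
#   pick_pose:
#     orientation: {...}
#     position:
#       x: 0.54
#       y: -0.24
#       z: 0.7
#   place_pose: {...}
#   test_scene_num: 3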
def statistical_outlier_removal(cloud):
# Much like the previous filters, we start by creating a filter object:
outlier_filter = cloud.make_statistical_outlier_filter()
# Set the number of neighboring points to analyze for any given point
outlier_filter.set_mean_k(50)
# Set threshold scale factor
x = 0.9
# Any point whose mean neighbor distance is larger than (global mean distance + x * std_dev) will be considered an outlier
outlier_filter.set_std_dev_mul_thresh(x)
# Call the filter function to apply the outlier removal
cloud_filtered = outlier_filter.filter()
return cloud_filtered
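# Rough numeric illustration of the cutoff above (numbers are made up): each
# point gets the mean distance to its 50 nearest neighbors (set_mean_k(50));
# if the global mean of those distances were 0.02 and their std deviation 0.01,
# the threshold would be 0.02 + 0.9 * 0.01 = 0.029, and any point whose mean
# neighbor distance exceeds 0.029 would be removed as noise.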
def voxel_downsample(cloud):
""" Voxel Grid filter
Args:
cloud (PointCloud_PointXYZRGB): A point cloud
Returns:
PointCloud_PointXYZRGB: A downsampled point cloud
"""
# Create a VoxelGrid filter object for our input point cloud
vox = cloud.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
LEAF_SIZE = 0.005
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
return cloud_filtered
def apply_passthrough_filter(cloud, axis, axis_min, axis_max):
""" Apply a passthrough filter to a cloud
Args:
cloud (PointCloud_PointXYZRGB): A point cloud
Returns:
PointCloud_PointXYZRGB: A filtered point cloud
"""
# Create a PassThrough filter object.
passthrough = cloud.make_passthrough_filter()
# Assign axis and range to the passthrough filter object.
filter_axis = axis
passthrough.set_filter_field_name(filter_axis)
#axis_min = 0.6
#axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
# Finally use the filter function to obtain the resultant point cloud.
cloud_filtered = passthrough.filter()
return cloud_filtered
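# Usage sketch, mirroring the calls made later in pcl_callback(): first crop
# to the table height along z, then trim the table edges along y.
#   cloud = apply_passthrough_filter(cloud, 'z', 0.6, 1.1)
#   cloud = apply_passthrough_filter(cloud, 'y', -0.5, 0.5)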
def ransac(cloud, sacmodel):
|
def euclidean_clustering(cloud):
white_cloud = XYZRGB_to_XYZ(cloud)
tree = white_cloud.make_kdtree()
# Create a cluster extraction object
ec = white_cloud.make_EuclideanClusterExtraction()
# Set tolerances for distance threshold
# as well as minimum and maximum cluster size (in points)
ec.set_ClusterTolerance(0.01)
ec.set_MinClusterSize(10)
ec.set_MaxClusterSize(25000)
# Search the k-d tree for clusters
ec.set_SearchMethod(tree)
# Extract indices for each of the discovered clusters
cluster_indices = ec.Extract()
return cluster_indices, white_cloud
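# Note on the return values: cluster_indices is a plain Python list with one
# list of point indices per detected cluster, e.g. (hypothetical sizes)
#   [[0, 1, 2, ..., 412], [413, 414, ...], ...]
# where each index refers to a point in white_cloud (and hence in the input cloud).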
def color_clusters(cluster_indices, white_cloud):
#Assign a color corresponding to each segmented object in scene
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])])
#Create new cloud containing all clusters, each with unique color
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
return cluster_cloud
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# Exercise-2 TODOs:
# Convert ROS msg to PCL data
pcl_data = ros_to_pcl(pcl_msg)
#filename = 'pcl_data.pcd'
#pcl.save(pcl_data, filename)
# Statistical Outlier Filtering
cloud_filtered = statistical_outlier_removal(pcl_data)
#filename = 'statistical_outlier_removal.pcd'
#pcl.save(cloud_filtered, filename)
# Voxel Grid Downsampling
cloud_filtered = voxel_downsample(cloud_filtered)
#filename = 'voxel_downsampled.pcd'
#pcl.save(cloud_filtered, filename)
# PassThrough Filter along z
axis_min = 0.6
axis_max = 1.1
cloud_filtered = apply_passthrough_filter(cloud_filtered, 'z', axis_min, axis_max)
#filename = 'pass_through_filtered.pcd'
#pcl.save(cloud_filtered, filename)
# PassThrough Filter along y
axis_min = -0.5
axis_max = 0.5
cloud_filtered = apply_passthrough_filter(cloud_filtered, 'y', axis_min, axis_max)
filename = 'pass_through_filtered_y.pcd'
pcl.save(cloud_filtered, filename)
# RANSAC Plane Segmentation
inliers, coefficients = ransac(cloud_filtered, pcl.SACMODEL_PLANE)
# Extract inliers and outliers
extracted_inliers = cloud_filtered.extract(inliers, negative=False)
#filename = 'extracted_inliers.pcd'
#pcl.save(extracted_inliers, filename)
extracted_outliers = cloud_filtered.extract(inliers, negative=True)
#filename = 'extracted_outliers.pcd'
#pcl.save(extracted_outliers, filename)
cloud_table = extracted_inliers
cloud_objects = extracted_outliers
# Euclidean Clustering
cluster_indices, white_cloud = euclidean_clustering(cloud_objects)
# Create Cluster-Mask Point Cloud to visualize each cluster separately
cluster_cloud = color_clusters(cluster_indices, white_cloud)
filename = 'colored_cluster_cloud.pcd'
pcl.save(cluster_cloud, filename)
# Convert PCL data to ROS messages
ros_cloud_table = pcl_to_ros(cloud_table)
ros_cloud_objects = pcl_to_ros(cloud_objects)
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
# Publish ROS messages
pcl_objects_pub.publish(ros_cloud_objects)
pcl_table_pub.publish(ros_cloud_table)
pcl_cluster_pub.publish(ros_cluster_cloud)
# Exercise-3 TODOs:
# Classify the clusters! (loop through each detected cluster one at a time)
detected_objects_labels = []
detected_objects = []
for index, pts_list in enumerate(cluster_indices):
# Grab the points for the cluster
pcl_cluster = cloud_objects.extract(pts_list)
# Compute the associated feature vector
ros_cluster = pcl_to_ros(pcl_cluster)
sample_cloud = ros_cluster
chists = compute_color_histograms(sample_cloud, using_hsv=True)
normals = get_normals(sample_cloud)
nhists = compute_normal_histograms(normals)
feature = np.concatenate((chists, nhists))
# Make the prediction
prediction = clf.predict(scaler.transform(feature.reshape(1,-1)))
label = encoder.inverse_transform(prediction)[0]
detected_objects_labels.append(label)
# Publish a label into RViz
label_pos = list(white_cloud[pts_list[0]])
label_pos[2] += .4
object_markers_pub.publish(make_label(label,label_pos, index))
# Add the detected object to the list of detected objects.
do = DetectedObject()
do.label = label
do.cloud = ros_cluster
detected_objects.append(do)
rospy.loginfo('Detected {} objects: {}'.format(len(detected_objects_labels), detected_objects_labels))
# Publish the list of detected objects
detected_objects_pub.publish(detected_objects)
# Suggested location for where to invoke your pr2_mover() function within pcl_callback()
# Could add some logic to determine whether or not your object detections are robust
# before calling pr2_mover()
detected_objects_list = detected_objects
try:
pr2_mover(detected_objects_list)
except rospy.ROSInterruptException:
pass
# function to load parameters and request PickPlace service
def pr2_mover(object_list):
# TODO: Initialize variables
dict_list = []
# TODO: Get/Read parameters
object_list_param = rospy.get_param('/object_list')
dropbox_list_param = rospy.get_param('/dropbox')
# TODO: Parse parameters into individual variables
test_scene_num = Int32()
test_scene_num.data = 3
""" labels = []
centroids = [] # to be list of tuples (x, y, z)
for object in object_list:
labels.append(object.label)
points_arr = ros_to_pcl(object.cloud).to_array()
centroids.append(np.mean(points_arr, axis=0)[:3])
"""
# TODO: Rotate PR2 in place to capture side tables for the collision map
# TODO: Loop through the pick list
for object in object_list:
# TODO: Get the PointCloud for a given object and obtain its centroid
points_arr = ros_to_pcl(object.cloud).to_array()
c = np.mean(points_arr, axis=0)[:3]
centroid = map(np.asscalar, c)
print "type(centroid)", type(centroid), "[0]", type(centroid[0]), "[1]", type(centroid[1]), "[2]", type(centroid[2])
#centroids.append(np.mean(points_arr, axis=0)[:3])
object_name = String()
object_name.data = str(object.label)
# TODO: Create 'place_pose' for the object
group = None
for o in object_list_param:
if object.label == o['name']:
group = o['group']
print "for ", o['name'], "group found",group
object_arm_name = String()
place_pose = Pose()
for box in dropbox_list_param:
if group == box['group']:
# Assign the arm to be used for pick_place
object_arm_name.data = box['name']
place_pose.position.x = box['position'][0]
place_pose.position.y = box['position'][1]
place_pose.position.z = box['position'][2]
print "type(place_pose.x,y,z): (", type(place_pose.position.x), ",", type(place_pose.position.y), ",", type(place_pose.position.z), ")"
pick_pose = Pose()
pick_pose.position.x = centroid[0]
pick_pose.position.y = centroid[1]
pick_pose.position.z = centroid[2]
# TODO: Create a list of dictionaries (made with make_yaml_dict()) for later output to yaml format
yaml_dict = make_yaml_dict(test_scene_num, object_arm_name, object_name, pick_pose, place_pose)
dict_list.append(yaml_dict)
# Wait for 'pick_place_routine' service to come up
rospy.wait_for_service('pick_place_routine')
try:
pick_place_routine = rospy.ServiceProxy('pick_place_routine', PickPlace)
# Insert your message variables to be sent as a service request
resp = pick_place_routine(test_scene_num, object_name, object_arm_name, pick_pose, place_pose)
print ("Response: ",resp.success)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
# Output your request parameters into output yaml file
yaml_filename = "output_" + str(test_scene_num.data) + ".yaml"
send_to_yaml(yaml_filename, dict_list)
if __name__ == '__main__':
# ROS node initialization
rospy.init_node('clustering', anonymous=True)
# Create Subscribers
#pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud", pc2.PointCloud2, pcl_callback, queue_size=1)
pcl_cam_sub = rospy.Subscriber("/pr2/world/points", pc2.PointCloud2, pcl_callback, queue_size=1)
# Create Publishers
pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2, queue_size=1)
pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1)
pcl_cluster_pub = rospy.Publisher("/pcl_cluster", PointCloud2, queue_size=1)
object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=1)
detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
# Load Model From disk
model = pickle.load(open('model.sav', 'rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# Initialize color_list
get_color_list.color_list = []
# Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin()
| """ Segments a cloud using a sac model
Args:
cloud (PointCloud_PointXYZRGB): A point cloud
sacmodel (pcl.SACMODEL): A model points will be fit to
Returns:
A set of inliers and coefficients
"""
# Create the segmentation object
seg = cloud.make_segmenter()
# Set the model you wish to fit
seg.set_model_type(sacmodel)
seg.set_method_type(pcl.SAC_RANSAC)
# Max distance for a point to be considered fitting the model
# Experiment with different values for max_distance
# for segmenting the table
max_distance = 0.01
seg.set_distance_threshold(max_distance)
# Call the segment function to obtain set of inlier indices and model coefficients
inliers, coefficients = seg.segment()
return inliers, coefficients | identifier_body |
project.py | #!/usr/bin/env python
# Import modules
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
import rospy
import tf
from geometry_msgs.msg import Pose
from std_msgs.msg import Float64
from std_msgs.msg import Int32
from std_msgs.msg import String
from pr2_robot.srv import *
from rospy_message_converter import message_converter
import yaml
# Helper function to get surface normals
def get_normals(cloud):
get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
return get_normals_prox(cloud).cluster
# Helper function to create a yaml friendly dictionary from ROS messages
def make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):
yaml_dict = {}
yaml_dict["test_scene_num"] = test_scene_num.data
yaml_dict["arm_name"] = arm_name.data
yaml_dict["object_name"] = object_name.data
yaml_dict["pick_pose"] = message_converter.convert_ros_message_to_dictionary(pick_pose)
yaml_dict["place_pose"] = message_converter.convert_ros_message_to_dictionary(place_pose)
return yaml_dict
# Helper function to output to yaml file
def send_to_yaml(yaml_filename, dict_list):
print "sending to yaml", dict_list
for d in dict_list:
print "test_scene_num", type(d["test_scene_num"]), "arm_name", type(d["arm_name"]), "object_name", type(d["object_name"]), "pick_pose", type(d["pick_pose"]), "place_pose", type(d["place_pose"])
data_dict = {"object_list": dict_list}
with open(yaml_filename, 'w') as outfile:
yaml.dump(data_dict, outfile, default_flow_style=False)
def statistical_outlier_removal(cloud):
# Much like the previous filters, we start by creating a filter object:
outlier_filter = cloud.make_statistical_outlier_filter()
# Set the number of neighboring points to analyze for any given point
outlier_filter.set_mean_k(50)
# Set threshold scale factor
x = 0.9
# Any point whose mean neighbor distance is larger than (global mean distance + x * std_dev) will be considered an outlier
outlier_filter.set_std_dev_mul_thresh(x)
# Call the filter function to apply the outlier removal
cloud_filtered = outlier_filter.filter()
return cloud_filtered
def voxel_downsample(cloud):
""" Voxel Grid filter
Args:
cloud (PointCloud_PointXYZRGB): A point cloud
Returns:
PointCloud_PointXYZRGB: A downsampled point cloud
"""
# Create a VoxelGrid filter object for our input point cloud
vox = cloud.make_voxel_grid_filter()
# Choose a voxel (also known as leaf) size
LEAF_SIZE = 0.005
# Set the voxel (or leaf) size
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
# Call the filter function to obtain the resultant downsampled point cloud
cloud_filtered = vox.filter()
return cloud_filtered
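# Scale note: LEAF_SIZE is in meters, so 0.005 gives 5 mm voxels; the filter
# replaces all points falling inside an occupied voxel with a single
# representative point (their centroid), which typically shrinks the RGB-D
# cloud by one to two orders of magnitude (exact numbers depend on the scene).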
def apply_passthrough_filter(cloud, axis, axis_min, axis_max):
""" Apply a passthrough filter to a cloud
Args:
cloud (PointCloud_PointXYZRGB): A point cloud
Returns:
PointCloud_PointXYZRGB: A filtered point cloud
"""
# Create a PassThrough filter object.
passthrough = cloud.make_passthrough_filter()
# Assign axis and range to the passthrough filter object.
filter_axis = axis
passthrough.set_filter_field_name(filter_axis)
#axis_min = 0.6
#axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
# Finally use the filter function to obtain the resultant point cloud.
cloud_filtered = passthrough.filter()
return cloud_filtered
def ransac(cloud, sacmodel):
""" Segments a cloud using a sac model
Args:
cloud (PointCloud_PointXYZRGB): A point cloud
sacmodel (pcl.SACMODEL): A model points will be fit to
Returns:
A set of inliers and coefficients
"""
# Create the segmentation object
seg = cloud.make_segmenter()
# Set the model you wish to fit
seg.set_model_type(sacmodel)
seg.set_method_type(pcl.SAC_RANSAC)
# Max distance for a point to be considered fitting the model
# Experiment with different values for max_distance
# for segmenting the table
max_distance = 0.01
seg.set_distance_threshold(max_distance)
# Call the segment function to obtain set of inlier indices and model coefficients
inliers, coefficients = seg.segment()
return inliers, coefficients
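# Note on the outputs when called with pcl.SACMODEL_PLANE (as done below):
# inliers is the list of indices of points that fit the plane (the table top),
# and coefficients holds the fitted plane parameters [a, b, c, d] of
# a*x + b*y + c*z + d = 0.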
def euclidean_clustering(cloud):
white_cloud = XYZRGB_to_XYZ(cloud)
tree = white_cloud.make_kdtree()
# Create a cluster extraction object
ec = white_cloud.make_EuclideanClusterExtraction()
# Set tolerances for distance threshold
# as well as minimum and maximum cluster size (in points)
ec.set_ClusterTolerance(0.01)
ec.set_MinClusterSize(10)
ec.set_MaxClusterSize(25000)
# Search the k-d tree for clusters
ec.set_SearchMethod(tree)
# Extract indices for each of the discovered clusters
cluster_indices = ec.Extract()
return cluster_indices, white_cloud
def color_clusters(cluster_indices, white_cloud):
#Assign a color corresponding to each segmented object in scene
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])])
#Create new cloud containing all clusters, each with unique color
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
return cluster_cloud
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# Exercise-2 TODOs:
# Convert ROS msg to PCL data
pcl_data = ros_to_pcl(pcl_msg)
#filename = 'pcl_data.pcd'
#pcl.save(pcl_data, filename)
# Statistical Outlier Filtering
cloud_filtered = statistical_outlier_removal(pcl_data)
#filename = 'statistical_outlier_removal.pcd'
#pcl.save(cloud_filtered, filename)
# Voxel Grid Downsampling
cloud_filtered = voxel_downsample(cloud_filtered)
#filename = 'voxel_downsampled.pcd'
#pcl.save(cloud_filtered, filename)
# PassThrough Filter along z
axis_min = 0.6
axis_max = 1.1
cloud_filtered = apply_passthrough_filter(cloud_filtered, 'z', axis_min, axis_max)
#filename = 'pass_through_filtered.pcd'
#pcl.save(cloud_filtered, filename)
# PassThrough Filter along y
axis_min = -0.5
axis_max = 0.5
cloud_filtered = apply_passthrough_filter(cloud_filtered, 'y', axis_min, axis_max)
filename = 'pass_through_filtered_y.pcd'
pcl.save(cloud_filtered, filename)
# RANSAC Plane Segmentation
inliers, coefficients = ransac(cloud_filtered, pcl.SACMODEL_PLANE)
# Extract inliers and outliers
extracted_inliers = cloud_filtered.extract(inliers, negative=False)
#filename = 'extracted_inliers.pcd'
#pcl.save(extracted_inliers, filename)
extracted_outliers = cloud_filtered.extract(inliers, negative=True)
#filename = 'extracted_outliers.pcd'
#pcl.save(extracted_outliers, filename)
cloud_table = extracted_inliers
cloud_objects = extracted_outliers
# Euclidean Clustering
cluster_indices, white_cloud = euclidean_clustering(cloud_objects)
# Create Cluster-Mask Point Cloud to visualize each cluster separately
cluster_cloud = color_clusters(cluster_indices, white_cloud)
filename = 'colored_cluster_cloud.pcd'
pcl.save(cluster_cloud, filename)
# Convert PCL data to ROS messages
ros_cloud_table = pcl_to_ros(cloud_table)
ros_cloud_objects = pcl_to_ros(cloud_objects)
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
# Publish ROS messages
pcl_objects_pub.publish(ros_cloud_objects)
pcl_table_pub.publish(ros_cloud_table)
pcl_cluster_pub.publish(ros_cluster_cloud)
# Exercise-3 TODOs:
# Classify the clusters! (loop through each detected cluster one at a time)
detected_objects_labels = []
detected_objects = []
for index, pts_list in enumerate(cluster_indices):
# Grab the points for the cluster
pcl_cluster = cloud_objects.extract(pts_list)
# Compute the associated feature vector
ros_cluster = pcl_to_ros(pcl_cluster)
sample_cloud = ros_cluster
chists = compute_color_histograms(sample_cloud, using_hsv=True)
normals = get_normals(sample_cloud)
nhists = compute_normal_histograms(normals)
feature = np.concatenate((chists, nhists))
# Make the prediction
prediction = clf.predict(scaler.transform(feature.reshape(1,-1)))
label = encoder.inverse_transform(prediction)[0]
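# scaler.transform() and clf.predict() expect a 2-D array of shape
# (n_samples, n_features), so the 1-D histogram vector is reshaped to (1, -1)
# for a single sample; encoder.inverse_transform() then maps the predicted
# class index back to the object-name string used during training.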
detected_objects_labels.append(label)
# Publish a label into RViz
label_pos = list(white_cloud[pts_list[0]])
label_pos[2] += .4
object_markers_pub.publish(make_label(label,label_pos, index))
# Add the detected object to the list of detected objects.
do = DetectedObject()
do.label = label
do.cloud = ros_cluster
detected_objects.append(do)
rospy.loginfo('Detected {} objects: {}'.format(len(detected_objects_labels), detected_objects_labels))
# Publish the list of detected objects
detected_objects_pub.publish(detected_objects)
# Suggested location for where to invoke your pr2_mover() function within pcl_callback()
# Could add some logic to determine whether or not your object detections are robust
# before calling pr2_mover()
detected_objects_list = detected_objects
try:
pr2_mover(detected_objects_list)
except rospy.ROSInterruptException:
pass
# function to load parameters and request PickPlace service
def pr2_mover(object_list):
# TODO: Initialize variables
dict_list = []
# TODO: Get/Read parameters
object_list_param = rospy.get_param('/object_list')
dropbox_list_param = rospy.get_param('/dropbox')
# TODO: Parse parameters into individual variables
test_scene_num = Int32()
test_scene_num.data = 3
""" labels = []
centroids = [] # to be list of tuples (x, y, z)
for object in object_list:
labels.append(object.label)
points_arr = ros_to_pcl(object.cloud).to_array()
centroids.append(np.mean(points_arr, axis=0)[:3])
"""
# TODO: Rotate PR2 in place to capture side tables for the collision map
# TODO: Loop through the pick list
for object in object_list:
# TODO: Get the PointCloud for a given object and obtain its centroid
points_arr = ros_to_pcl(object.cloud).to_array()
c = np.mean(points_arr, axis=0)[:3]
centroid = map(np.asscalar, c)
print "type(centroid)", type(centroid), "[0]", type(centroid[0]), "[1]", type(centroid[1]), "[2]", type(centroid[2])
#centroids.append(np.mean(points_arr, axis=0)[:3])
object_name = String()
object_name.data = str(object.label)
# TODO: Create 'place_pose' for the object
group = None
for o in object_list_param:
if object.label == o['name']:
group = o['group']
print "for ", o['name'], "group found",group
object_arm_name = String()
place_pose = Pose()
for box in dropbox_list_param:
if group == box['group']:
# Assign the arm to be used for pick_place
object_arm_name.data = box['name']
place_pose.position.x = box['position'][0]
place_pose.position.y = box['position'][1]
place_pose.position.z = box['position'][2]
print "type(place_pose.x,y,z): (", type(place_pose.position.x), ",", type(place_pose.position.y), ",", type(place_pose.position.z), ")"
pick_pose = Pose()
pick_pose.position.x = centroid[0]
pick_pose.position.y = centroid[1]
pick_pose.position.z = centroid[2]
# TODO: Create a list of dictionaries (made with make_yaml_dict()) for later output to yaml format
yaml_dict = make_yaml_dict(test_scene_num, object_arm_name, object_name, pick_pose, place_pose)
dict_list.append(yaml_dict)
# Wait for 'pick_place_routine' service to come up
rospy.wait_for_service('pick_place_routine')
try:
pick_place_routine = rospy.ServiceProxy('pick_place_routine', PickPlace)
# Insert your message variables to be sent as a service request
resp = pick_place_routine(test_scene_num, object_name, object_arm_name, pick_pose, place_pose)
print ("Response: ",resp.success)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
# Output your request parameters into output yaml file
yaml_filename = "output_" + str(test_scene_num.data) + ".yaml"
send_to_yaml(yaml_filename, dict_list)
if __name__ == '__main__':
# ROS node initialization
rospy.init_node('clustering', anonymous=True)
# Create Subscribers
#pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud", pc2.PointCloud2, pcl_callback, queue_size=1)
pcl_cam_sub = rospy.Subscriber("/pr2/world/points", pc2.PointCloud2, pcl_callback, queue_size=1)
# Create Publishers
pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2, queue_size=1)
pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1)
pcl_cluster_pub = rospy.Publisher("/pcl_cluster", PointCloud2, queue_size=1)
object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=1)
detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
# Load Model From disk
model = pickle.load(open('model.sav', 'rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# Initialize color_list
get_color_list.color_list = []
# Spin while node is not shutdown
while not rospy.is_shutdown():
| rospy.spin() | conditional_block |