Dataset columns (name, type, length/class stats as shown in the viewer):

  file_name   large_string   lengths 4 to 140
  prefix      large_string   lengths 0 to 12.1k
  suffix      large_string   lengths 0 to 12k
  middle      large_string   lengths 0 to 7.51k
  fim_type    large_string   4 distinct values (random_line_split, identifier_body, identifier_name, conditional_block)
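Each row below is one fill-in-the-middle (FIM) training example: a source file is cut into a prefix, a masked middle, and a suffix, and fim_type records the strategy used to choose the masked span. A minimal sketch for loading and sanity-checking such a dataset, assuming it is published in the Hugging Face `datasets` format ("user/fim-examples" is a placeholder path, not the real dataset name):

```python
# Minimal sketch, assuming a Hugging Face dataset with the columns above.
# "user/fim-examples" is a placeholder path, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/fim-examples", split="train")

row = ds[0]
print(row["file_name"], row["fim_type"])

# Sanity check: prefix + middle + suffix should reassemble the original
# file (or file excerpt) the example was cut from.
reassembled = row["prefix"] + row["middle"] + row["suffix"]
print(len(reassembled), "characters reassembled")
```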
gulpfile.js
server: { baseDir: settings.dist }, ghostMode: { clicks: true, location: true, forms: true, scroll: true }, open: "external", injectChanges: true, // inject CSS changes (false force a reload) browser: ["google chrome"], scrollProportionally: true, // Sync viewports to TOP position scrollThrottle: 50, }); }); /** * Build and copy all styles, scripts, images and fonts. * Depends on: clean */ gulp.task('build', ['info', 'clean'], function() { gulp.start('styles', 'scripts', 'images', 'copy', 'todo'); }); /** * Cleans the `dist` folder and other generated files */ gulp.task('clean', ['clear-cache'], function(cb) { del([settings.dist, 'todo.md', 'todo.json'], cb); }); /** * Clears the cache used by gulp-cache */ gulp.task('clear-cache', function() { // Or, just call this for everything cache.clearAll(); }); /** * Copies all to dist/ */ gulp.task('copy', ['copy-fonts', 'copy-template', 'copy-index'], function() {}); /** * Task for copying fonts only */ gulp.task('copy-fonts', function() { var deferred = q.defer(); // copy all fonts setTimeout(function() { gulp.src( settings.src + 'fonts/**') .pipe(gulp.dest(settings.dist + 'fonts')); deferred.resolve(); }, 1); return deferred.promise; }); /** * task for copying templates only */ gulp.task('copy-template', function() { // copy all html && json return gulp.src( [settings.src + 'js/app/**/*.html', settings.src + 'js/app/**/*.json']) .pipe(cache(gulp.dest('dist/js/app'))); }); /** * Task for copying index page only. Optionally add live reload script to it */ gulp.task('copy-index', function() { // copy the index.html return gulp.src(settings.src + 'index.html') .pipe(gulpif(argv.dev, replace(/app.min.js/g, 'app.js'))) .pipe(gulpif(argv.nohuna, replace('<script src=\'js/huna.min.js\'></script>', ''))) .pipe(gulpif(settings.liveReload, replace(/(\<\/body\>)/g, "<script>document.write('<script src=\"http://' + (location.host || 'localhost').split(':')[0] + ':35729/livereload.js?snipver=1\"></' + 'script>')</script>$1"))) .pipe(cache(gulp.dest(settings.dist))); }); /** * Default task. * Depends on: build */ gulp.task('default', ['build']); /** * Create Javascript documentation */ gulp.task('docs-js', ['todo'], function(){ var gulpDoxx = require('gulp-doxx'); gulp.src([settings.src + '/js/**/*.js', 'README.md', settings.reports + '/TODO.md']) .pipe(gulpDoxx({ title: config.name, urlPrefix: "file:///"+__dirname+settings.reports })) .pipe(gulp.dest(settings.reports)); }); /** * Task to optimize and deploy all images found in folder `src/img/**`. 
Result is copied to `dist/img` */ gulp.task('images', function() { var imagemin = require('gulp-imagemin'); var deferred = q.defer(); setTimeout(function() { gulp.src(settings.src + 'img/**/*') .pipe(plumber(settings.plumberConfig())) .pipe(cache(imagemin({ optimizationLevel: 5, progressive: true, interlaced: true }))) .pipe(gulp.dest(settings.dist + 'img')); deferred.resolve(); }, 1); return deferred.promise; }); /** * log some info */ gulp.task('info',function(){ // log project details gutil.log( gutil.colors.cyan("Running gulp on project "+config.name+" v"+ config.version) ); gutil.log( gutil.colors.cyan("Author: " + config.author[0].name) ); gutil.log( gutil.colors.cyan("Email : " + config.author[0].email) ); gutil.log( gutil.colors.cyan("Site : " + config.author[0].url) ); gutil.log( gutil.colors.cyan("Author: " + config.author[1].name) ); gutil.log( gutil.colors.cyan("Email : " + config.author[1].email) ); gutil.log( gutil.colors.cyan("Site : " + config.author[1].url) ); // log info gutil.log("If you have an enhancement or encounter a bug, please report it on", gutil.colors.magenta(config.bugs.url)); }); /** * Start the live reload server. Live reload will be triggered when a file in the `dist` folder changes. This will add a live-reload script to the index.html page, which makes it all happen. * Depends on: watch */ gulp.task('live-reload', ['watch'], function() { var livereload = require('gulp-livereload'); settings.liveReload = true; // first, delete the index.html from the dist folder as we will copy it later del([settings.dist + 'index.html']); // add livereload script to the index.html gulp.src([settings.src + 'index.html']) .pipe(gulpif(argv.dev, replace(/app.min.js/g, 'app.js'))) .pipe(gulpif(argv.nohuna, replace('<script src=\'js/huna.min.js\'></script>', ''))) .pipe(replace(/(\<\/body\>)/g, "<script>document.write('<script src=\"http://' + (location.host || 'localhost').split(':')[0] + ':35729/livereload.js?snipver=1\"></' + 'script>')</script>$1")) .pipe(gulp.dest(settings.dist)); // Create LiveReload server livereload.listen(); // Watch any files in dist/*, reload on change gulp.watch([settings.dist + '**']).on('change', livereload.changed); }); /** * Task to handle and deploy all javascript, application & vendor * * Depends on: scripts-app, scripts-vendor */ gulp.task('scripts', ['scripts-app','scripts-vendor']); /** * Removes the node_modules */ gulp.task('remove',['clean'], function(cb){ del('node_modules', cb); }); /** * Minifies all javascript found in the `src/js/**` folder. All files will be concatenated into `app.js`. Minified and non-minified versions are copied to the dist folder. * This will also generate sourcemaps for the minified version. 
* * Depends on: docs-js */ gulp.task('scripts-app', ['docs-js'], function() { var jshint = require('gulp-jshint'), ngannotate = require('gulp-ng-annotate'), stripDebug = require('gulp-strip-debug'), stylish = require('jshint-stylish'), sourcemaps = require('gulp-sourcemaps'), uglify = require('gulp-uglify'); // gulpify the huna library gulp.src([settings.src + 'js/app/huna.js']) .pipe(plumber(settings.plumberConfig())) .pipe(ngannotate({gulpWarnings: false})) .pipe(jshint()) .pipe(jshint.reporter(stylish)) .pipe(gulp.dest(settings.dist + 'js')) // make minified .pipe(rename({suffix: '.min'})) .pipe(gulpif(!argv.dev, stripDebug())) .pipe(sourcemaps.init()) .pipe(gulpif(!argv.dev, uglify())) .pipe(sourcemaps.write()) .pipe(gulp.dest(settings.dist + 'js')); return gulp.src(['!'+settings.src + 'js/app/huna.js', settings.src + 'js/app/**/*.js']) .pipe(plumber(settings.plumberConfig())) .pipe(ngannotate({gulpWarnings: false})) .pipe(jshint()) .pipe(jshint.reporter(stylish)) .pipe(concat('app.js')) .pipe(gulp.dest(settings.dist + 'js')) // make minified .pipe(rename({suffix: '.min'})) .pipe(gulpif(!argv.dev, stripDebug())) .pipe(sourcemaps.init()) .pipe(gulpif(!argv.dev, uglify())) .pipe(sourcemaps.write()) .pipe(gulp.dest(settings.dist + 'js')); }); /** * Task to handle all vendor specific javascript. All vendor javascript will be copied to the dist directory. Also a concatenated version will be made, available in \dist\js\vendor\vendor.js */ gulp.task('scripts-vendor', ['scripts-vendor-maps'], function() { // script must be included in the right order. First include angular, then angular-route return gulp.src([settings.src + 'js/vendor/*/**/angular.min.js',settings.src + 'js/vendor/**/*.js']) .pipe(gulp.dest(settings.dist + 'js/vendor')) .pipe(concat('vendor.js')) .pipe(gulp.dest(settings.dist + 'js/vendor')); }); /** * Copy all vendor .js.map files to the vendor location */ gulp.task('scripts-vendor-maps', function(){ var flatten = require('gulp
gulp.watch([settings.dist + '**']).on('change', function(){browserSync.reload({});notify({ message: 'Reload browser' });}); return browserSync({
random_line_split
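A `random_line_split` example cuts the file at arbitrary line boundaries; the middle above is a single source line from the gulpfile. A minimal sketch of what such a splitter could look like (the actual code that produced this dataset is not shown here):

```python
import random

def random_line_split(text, rng=None):
    """Cut non-empty text at two random line boundaries; the lines between
    the cuts become the middle, the rest the prefix and suffix."""
    rng = rng or random.Random()
    lines = text.splitlines(keepends=True)
    i = rng.randrange(len(lines))
    j = rng.randrange(i, len(lines))
    prefix = "".join(lines[:i])
    middle = "".join(lines[i:j + 1])
    suffix = "".join(lines[j + 1:])
    return prefix, middle, suffix
```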
gulpfile.js
settings.liveReload = true; // first, delete the index.html from the dist folder as we will copy it later del([settings.dist + 'index.html']); // add livereload script to the index.html gulp.src([settings.src + 'index.html']) .pipe(gulpif(argv.dev, replace(/app.min.js/g, 'app.js'))) .pipe(gulpif(argv.nohuna, replace('<script src=\'js/huna.min.js\'></script>', ''))) .pipe(replace(/(\<\/body\>)/g, "<script>document.write('<script src=\"http://' + (location.host || 'localhost').split(':')[0] + ':35729/livereload.js?snipver=1\"></' + 'script>')</script>$1")) .pipe(gulp.dest(settings.dist)); // Create LiveReload server livereload.listen(); // Watch any files in dist/*, reload on change gulp.watch([settings.dist + '**']).on('change', livereload.changed); }); /** * Task to handle and deploy all javascript, application & vendor * * Depends on: scripts-app, scripts-vendor */ gulp.task('scripts', ['scripts-app','scripts-vendor']); /** * Removes the node_modules */ gulp.task('remove',['clean'], function(cb){ del('node_modules', cb); }); /** * Minifies all javascript found in the `src/js/**` folder. All files will be concatenated into `app.js`. Minified and non-minified versions are copied to the dist folder. * This will also generate sourcemaps for the minified version. * * Depends on: docs-js */ gulp.task('scripts-app', ['docs-js'], function() { var jshint = require('gulp-jshint'), ngannotate = require('gulp-ng-annotate'), stripDebug = require('gulp-strip-debug'), stylish = require('jshint-stylish'), sourcemaps = require('gulp-sourcemaps'), uglify = require('gulp-uglify'); // gulpify the huna library gulp.src([settings.src + 'js/app/huna.js']) .pipe(plumber(settings.plumberConfig())) .pipe(ngannotate({gulpWarnings: false})) .pipe(jshint()) .pipe(jshint.reporter(stylish)) .pipe(gulp.dest(settings.dist + 'js')) // make minified .pipe(rename({suffix: '.min'})) .pipe(gulpif(!argv.dev, stripDebug())) .pipe(sourcemaps.init()) .pipe(gulpif(!argv.dev, uglify())) .pipe(sourcemaps.write()) .pipe(gulp.dest(settings.dist + 'js')); return gulp.src(['!'+settings.src + 'js/app/huna.js', settings.src + 'js/app/**/*.js']) .pipe(plumber(settings.plumberConfig())) .pipe(ngannotate({gulpWarnings: false})) .pipe(jshint()) .pipe(jshint.reporter(stylish)) .pipe(concat('app.js')) .pipe(gulp.dest(settings.dist + 'js')) // make minified .pipe(rename({suffix: '.min'})) .pipe(gulpif(!argv.dev, stripDebug())) .pipe(sourcemaps.init()) .pipe(gulpif(!argv.dev, uglify())) .pipe(sourcemaps.write()) .pipe(gulp.dest(settings.dist + 'js')); }); /** * Task to handle all vendor specific javascript. All vendor javascript will be copied to the dist directory. Also a concatenated version will be made, available in \dist\js\vendor\vendor.js */ gulp.task('scripts-vendor', ['scripts-vendor-maps'], function() { // script must be included in the right order. First include angular, then angular-route return gulp.src([settings.src + 'js/vendor/*/**/angular.min.js',settings.src + 'js/vendor/**/*.js']) .pipe(gulp.dest(settings.dist + 'js/vendor')) .pipe(concat('vendor.js')) .pipe(gulp.dest(settings.dist + 'js/vendor')); }); /** * Copy all vendor .js.map files to the vendor location */ gulp.task('scripts-vendor-maps', function(){ var flatten = require('gulp-flatten'); return gulp.src(settings.src + 'js/vendor/**/*.js.map') .pipe(flatten()) .pipe(gulp.dest(settings.dist + 'js/vendor')); }); /** * Task to start a server on port 4000. 
*/ gulp.task('server', function(){ var express = require('express'), app = express(), url = require('url'), port = argv.port||settings.serverport, proxy = require('proxy-middleware'); app.use(express.static(__dirname + "/dist")); if (argv.remote) { app.use('/api', proxy(url.parse('http://huna.tuvok.nl:1337/api'))); } else { app.use('/api', proxy(url.parse('http://localhost:1337/api'))); } app.listen(port); gutil.log('Server started. Port', port,"baseDir",__dirname+"/"+settings.dist); }); gulp.task('nodemon', function(cb) { var nodemon = require('gulp-nodemon'); // We use this `called` variable to make sure the callback is only executed once var called = false; return nodemon({ script: 'app.js', watch: ['app.js', 'api/**/*.*', 'config/**/*.*'] }) .on('start', function onStart() { if (!called) { cb(); } called = true; }) .on('restart', function onRestart() { // Also reload the browsers after a slight delay setTimeout(function reload() { browserSync.reload({ stream: false }); }, 500); }); }); /** * Task to start the backend servers. * Depends on: backend-mongo, backend-server */ gulp.task('backend', ['backend-mongo', 'backend-server'], function () {}); /** * Task to start the backend mongo server * should be running before the backend-server */ gulp.task('backend-mongo', function () { var exec = require('child_process').exec; exec('mongod', function (err, stdout, stderr) { console.log(stdout); console.log(stderr); onError(err); }); }); /** * Task to start up the backend server * run the mongo db first */ gulp.task('backend-server', function () { var exec = require('child_process').exec; exec('node app.js', function (err, stdout, stderr) { console.log(stdout); console.log(stderr); onError(err); }); }); /** * Task to start a server on port 4000 and use the live reload functionality. * Depends on: server, live-reload */ gulp.task('start', ['live-reload', 'server'], function(){}); /** * Compile Sass into CSS and minify it. Minified and non-minified versions are copied to the dist folder. * This will also auto-prefix vendor specific rules. */ gulp.task('styles', function() { var autoprefixer = require('gulp-autoprefixer'), minifycss = require('gulp-minify-css'), sass = require('gulp-sass'); return gulp.src([settings.src + 'styles/main.scss', settings.src + '/js/vendor/**/c3.min.css']) .pipe(plumber(settings.plumberConfig())) .pipe(sass({ style: 'expanded' })) // .pipe(autoprefixer('last 2 version', 'safari 5', 'ie 8', 'ie 9', 'opera 12.1', 'ios 6', 'android 4')) .pipe(gulp.dest(settings.dist + 'css')) .pipe(rename({suffix: '.min'})) .pipe(minifycss()) .pipe(gulp.dest(settings.dist + 'css')); }); /** * Output TODO's & FIXME's in markdown and json files as well */ gulp.task('todo', function() { var todo = require('gulp-todo'); gulp.src([settings.src + 'js/app/**/*.js',settings.src + 'styles/app/**/*.scss']) .pipe(plumber(settings.plumberConfig())) .pipe(todo()) .pipe(gulp.dest(settings.reports)) //output todo.md as markdown .pipe(todo.reporter('json', {fileName: 'todo.json'})) .pipe(gulp.dest(settings.reports)) //output todo.json as json }); /** * Watches changes to template, Sass, javascript and image files. On change this will run the appropriate task, either: copy styles, scripts or images. 
*/ gulp.task('watch', function() { // watch index.html gulp.watch(settings.src + 'index.html', ['copy-index']); // watch html files gulp.watch(settings.src + '**/*.html', ['copy-template']); // watch fonts gulp.watch(settings.src + 'fonts/**', ['copy-fonts']); // Watch .scss files gulp.watch(settings.src + 'styles/**/*.scss', ['styles']); // Watch app .js files gulp.watch(settings.src + 'js/app/**/*.js', ['scripts-app']); // Watch vendor .js files gulp.watch(settings.src + 'js/vendor/**/*.js', ['scripts-vendor']); // Watch image files gulp.watch(settings.src + 'img/**/*', ['images']); }); function onError(error)
{ // TODO log error with gutil notify.onError(function (error) { return error.message; }); this.emit('end'); }
identifier_body
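An `identifier_body` example masks the body of a definition: here the middle is the body of `onError`, while the prefix stops at its signature. For Python sources this can be sketched with the standard `ast` module (Python 3.8+ for `end_lineno`); a brace language like the JavaScript above would need a language-aware parser such as tree-sitter, which this sketch does not attempt:

```python
import ast

def identifier_body_split(source, func_name):
    """Mask the body of one Python function: the prefix ends at the `def`
    line, the middle is the body, the suffix is everything after it."""
    lines = source.splitlines(keepends=True)
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.FunctionDef) and node.name == func_name:
            start = node.body[0].lineno - 1   # first body line (0-based)
            end = node.body[-1].end_lineno    # last body line (1-based)
            return ("".join(lines[:start]),
                    "".join(lines[start:end]),
                    "".join(lines[end:]))
    raise ValueError("no function named %r" % func_name)
```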
transaction.rs
tokens paid for processing and storage of this transaction. pub fee: u64, /// Keys identifying programs in the instructions vector. pub program_ids: Vec<Pubkey>, /// Programs that will be executed in sequence and committed in one atomic transaction if all /// succeed. pub instructions: Vec<Instruction>, } impl Transaction { pub fn
<T: Serialize>( from_keypair: &Keypair, transaction_keys: &[Pubkey], program_id: Pubkey, userdata: &T, last_id: Hash, fee: u64, ) -> Self { let program_ids = vec![program_id]; let accounts = (0..=transaction_keys.len() as u8).collect(); let instructions = vec![Instruction::new(0, userdata, accounts)]; Self::new_with_instructions( from_keypair, transaction_keys, last_id, fee, program_ids, instructions, ) } /// Create a signed transaction /// * `from_keypair` - The key used to sign the transaction. This key is stored as keys[0] /// * `account_keys` - The keys for the transaction. These are the program state /// instances or token recipient keys. /// * `last_id` - The PoH hash. /// * `fee` - The transaction fee. /// * `program_ids` - The keys that identify programs used in the `instruction` vector. /// * `instructions` - The programs and their arguments that the transaction will execute atomically pub fn new_with_instructions( from_keypair: &Keypair, keys: &[Pubkey], last_id: Hash, fee: u64, program_ids: Vec<Pubkey>, instructions: Vec<Instruction>, ) -> Self { let from = from_keypair.pubkey(); let mut account_keys = vec![from]; account_keys.extend_from_slice(keys); let mut tx = Transaction { signature: Signature::default(), account_keys, last_id: Hash::default(), fee, program_ids, instructions, }; tx.sign(from_keypair, last_id); tx } pub fn userdata(&self, instruction_index: usize) -> &[u8] { &self.instructions[instruction_index].userdata } fn key_index(&self, instruction_index: usize, accounts_index: usize) -> Option<usize> { self.instructions .get(instruction_index) .and_then(|instruction| instruction.accounts.get(accounts_index)) .map(|&account_keys_index| account_keys_index as usize) } pub fn key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> { self.key_index(instruction_index, accounts_index) .and_then(|account_keys_index| self.account_keys.get(account_keys_index)) } pub fn signed_key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> { match self.key_index(instruction_index, accounts_index) { None => None, Some(0) => self.account_keys.get(0), Some(_) => None, } } pub fn program_id(&self, instruction_index: usize) -> &Pubkey { let program_ids_index = self.instructions[instruction_index].program_ids_index; &self.program_ids[program_ids_index as usize] } /// Get the transaction data to sign. pub fn get_sign_data(&self) -> Vec<u8> { let mut data = serialize(&self.account_keys).expect("serialize account_keys"); let last_id_data = serialize(&self.last_id).expect("serialize last_id"); data.extend_from_slice(&last_id_data); let fee_data = serialize(&self.fee).expect("serialize fee"); data.extend_from_slice(&fee_data); let program_ids = serialize(&self.program_ids).expect("serialize program_ids"); data.extend_from_slice(&program_ids); let instructions = serialize(&self.instructions).expect("serialize instructions"); data.extend_from_slice(&instructions); data } /// Sign this transaction. pub fn sign(&mut self, keypair: &Keypair, last_id: Hash) { self.last_id = last_id; let sign_data = self.get_sign_data(); self.signature = Signature::new(&keypair.sign::<Sha512>(&sign_data).to_bytes()); } /// Verify only the transaction signature. 
pub fn verify_signature(&self) -> bool { warn!("transaction signature verification called"); self.signature .verify(&self.from().as_ref(), &self.get_sign_data()) } /// Verify that references in the instructions are valid pub fn verify_refs(&self) -> bool { for instruction in &self.instructions { if (instruction.program_ids_index as usize) >= self.program_ids.len() { return false; } for account_index in &instruction.accounts { if (*account_index as usize) >= self.account_keys.len() { return false; } } } true } pub fn from(&self) -> &Pubkey { &self.account_keys[0] } // a hash of a slice of transactions only needs to hash the signatures pub fn hash(transactions: &[Transaction]) -> Hash { let mut hasher = Hasher::default(); transactions .iter() .for_each(|tx| hasher.hash(&tx.signature.as_ref())); hasher.result() } } #[cfg(test)] mod tests { use super::*; use bincode::serialize; use signature::GenKeys; #[test] fn test_refs() { let key = Keypair::new(); let key1 = Keypair::new().pubkey(); let key2 = Keypair::new().pubkey(); let prog1 = Keypair::new().pubkey(); let prog2 = Keypair::new().pubkey(); let instructions = vec![ Instruction::new(0, &(), vec![0, 1]), Instruction::new(1, &(), vec![0, 2]), ]; let tx = Transaction::new_with_instructions( &key, &[key1, key2], Default::default(), 0, vec![prog1, prog2], instructions, ); assert!(tx.verify_refs()); assert_eq!(tx.key(0, 0), Some(&key.pubkey())); assert_eq!(tx.signed_key(0, 0), Some(&key.pubkey())); assert_eq!(tx.key(1, 0), Some(&key.pubkey())); assert_eq!(tx.signed_key(1, 0), Some(&key.pubkey())); assert_eq!(tx.key(0, 1), Some(&key1)); assert_eq!(tx.signed_key(0, 1), None); assert_eq!(tx.key(1, 1), Some(&key2)); assert_eq!(tx.signed_key(1, 1), None); assert_eq!(tx.key(2, 0), None); assert_eq!(tx.signed_key(2, 0), None); assert_eq!(tx.key(0, 2), None); assert_eq!(tx.signed_key(0, 2), None); assert_eq!(*tx.program_id(0), prog1); assert_eq!(*tx.program_id(1), prog2); } #[test] fn test_refs_invalid_program_id() { let key = Keypair::new(); let instructions = vec![Instruction::new(1, &(), vec![])]; let tx = Transaction::new_with_instructions( &key, &[], Default::default(), 0, vec![], instructions, ); assert!(!tx.verify_refs()); } #[test] fn test_refs_invalid_account() { let key = Keypair::new(); let instructions = vec![Instruction::new(0, &(), vec![1])]; let tx = Transaction::new_with_instructions( &key, &[], Default::default(), 0, vec![Default::default()], instructions, ); assert_eq!(*tx.program_id(0), Default::default()); assert!(!tx.verify_refs()); } /// Detect binary changes in the serialized contract userdata, which could have a downstream /// effect on SDKs and DApps #[test] fn test_sdk_serialize() { let keypair = &GenKeys::new([0u8; 32]).gen_n_keypairs(1)[0]; let to = Pubkey::new(&[ 1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4, 1, 1, 1, ]); let program_id = Pubkey::new(&[ 2, 2, 2, 4, 5, 6, 7, 8, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
new
identifier_name
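An `identifier_name` example masks only a name: the prefix above ends at `pub fn` and the middle is the single token `new`, so the model must infer the function's name from its signature and body. A sketch of the idea (a real pipeline would presumably locate the declaration site with a parser rather than taking the first regex match):

```python
import re

def identifier_name_split(source, name):
    """Mask one identifier occurrence: the middle is just the name itself."""
    match = re.search(r"\b%s\b" % re.escape(name), source)
    if match is None:
        raise ValueError("identifier not found")
    return (source[:match.start()],
            source[match.start():match.end()],
            source[match.end():])
```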
transaction.rs
of tokens paid for processing and storage of this transaction. pub fee: u64, /// Keys identifying programs in the instructions vector. pub program_ids: Vec<Pubkey>, /// Programs that will be executed in sequence and committed in one atomic transaction if all /// succeed. pub instructions: Vec<Instruction>, } impl Transaction { pub fn new<T: Serialize>( from_keypair: &Keypair, transaction_keys: &[Pubkey], program_id: Pubkey, userdata: &T, last_id: Hash, fee: u64, ) -> Self { let program_ids = vec![program_id]; let accounts = (0..=transaction_keys.len() as u8).collect(); let instructions = vec![Instruction::new(0, userdata, accounts)]; Self::new_with_instructions( from_keypair, transaction_keys, last_id, fee, program_ids, instructions, ) } /// Create a signed transaction /// * `from_keypair` - The key used to sign the transaction. This key is stored as keys[0] /// * `account_keys` - The keys for the transaction. These are the program state /// instances or token recipient keys. /// * `last_id` - The PoH hash. /// * `fee` - The transaction fee. /// * `program_ids` - The keys that identify programs used in the `instruction` vector. /// * `instructions` - The programs and their arguments that the transaction will execute atomically pub fn new_with_instructions( from_keypair: &Keypair, keys: &[Pubkey], last_id: Hash, fee: u64, program_ids: Vec<Pubkey>, instructions: Vec<Instruction>, ) -> Self { let from = from_keypair.pubkey(); let mut account_keys = vec![from]; account_keys.extend_from_slice(keys); let mut tx = Transaction { signature: Signature::default(), account_keys, last_id: Hash::default(), fee, program_ids, instructions, }; tx.sign(from_keypair, last_id); tx } pub fn userdata(&self, instruction_index: usize) -> &[u8] { &self.instructions[instruction_index].userdata } fn key_index(&self, instruction_index: usize, accounts_index: usize) -> Option<usize> { self.instructions .get(instruction_index) .and_then(|instruction| instruction.accounts.get(accounts_index)) .map(|&account_keys_index| account_keys_index as usize) } pub fn key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> { self.key_index(instruction_index, accounts_index) .and_then(|account_keys_index| self.account_keys.get(account_keys_index)) } pub fn signed_key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> { match self.key_index(instruction_index, accounts_index) { None => None, Some(0) => self.account_keys.get(0), Some(_) => None, } } pub fn program_id(&self, instruction_index: usize) -> &Pubkey { let program_ids_index = self.instructions[instruction_index].program_ids_index; &self.program_ids[program_ids_index as usize] } /// Get the transaction data to sign. pub fn get_sign_data(&self) -> Vec<u8> { let mut data = serialize(&self.account_keys).expect("serialize account_keys"); let last_id_data = serialize(&self.last_id).expect("serialize last_id");
data.extend_from_slice(&fee_data); let program_ids = serialize(&self.program_ids).expect("serialize program_ids"); data.extend_from_slice(&program_ids); let instructions = serialize(&self.instructions).expect("serialize instructions"); data.extend_from_slice(&instructions); data } /// Sign this transaction. pub fn sign(&mut self, keypair: &Keypair, last_id: Hash) { self.last_id = last_id; let sign_data = self.get_sign_data(); self.signature = Signature::new(&keypair.sign::<Sha512>(&sign_data).to_bytes()); } /// Verify only the transaction signature. pub fn verify_signature(&self) -> bool { warn!("transaction signature verification called"); self.signature .verify(&self.from().as_ref(), &self.get_sign_data()) } /// Verify that references in the instructions are valid pub fn verify_refs(&self) -> bool { for instruction in &self.instructions { if (instruction.program_ids_index as usize) >= self.program_ids.len() { return false; } for account_index in &instruction.accounts { if (*account_index as usize) >= self.account_keys.len() { return false; } } } true } pub fn from(&self) -> &Pubkey { &self.account_keys[0] } // a hash of a slice of transactions only needs to hash the signatures pub fn hash(transactions: &[Transaction]) -> Hash { let mut hasher = Hasher::default(); transactions .iter() .for_each(|tx| hasher.hash(&tx.signature.as_ref())); hasher.result() } } #[cfg(test)] mod tests { use super::*; use bincode::serialize; use signature::GenKeys; #[test] fn test_refs() { let key = Keypair::new(); let key1 = Keypair::new().pubkey(); let key2 = Keypair::new().pubkey(); let prog1 = Keypair::new().pubkey(); let prog2 = Keypair::new().pubkey(); let instructions = vec![ Instruction::new(0, &(), vec![0, 1]), Instruction::new(1, &(), vec![0, 2]), ]; let tx = Transaction::new_with_instructions( &key, &[key1, key2], Default::default(), 0, vec![prog1, prog2], instructions, ); assert!(tx.verify_refs()); assert_eq!(tx.key(0, 0), Some(&key.pubkey())); assert_eq!(tx.signed_key(0, 0), Some(&key.pubkey())); assert_eq!(tx.key(1, 0), Some(&key.pubkey())); assert_eq!(tx.signed_key(1, 0), Some(&key.pubkey())); assert_eq!(tx.key(0, 1), Some(&key1)); assert_eq!(tx.signed_key(0, 1), None); assert_eq!(tx.key(1, 1), Some(&key2)); assert_eq!(tx.signed_key(1, 1), None); assert_eq!(tx.key(2, 0), None); assert_eq!(tx.signed_key(2, 0), None); assert_eq!(tx.key(0, 2), None); assert_eq!(tx.signed_key(0, 2), None); assert_eq!(*tx.program_id(0), prog1); assert_eq!(*tx.program_id(1), prog2); } #[test] fn test_refs_invalid_program_id() { let key = Keypair::new(); let instructions = vec![Instruction::new(1, &(), vec![])]; let tx = Transaction::new_with_instructions( &key, &[], Default::default(), 0, vec![], instructions, ); assert!(!tx.verify_refs()); } #[test] fn test_refs_invalid_account() { let key = Keypair::new(); let instructions = vec![Instruction::new(0, &(), vec![1])]; let tx = Transaction::new_with_instructions( &key, &[], Default::default(), 0, vec![Default::default()], instructions, ); assert_eq!(*tx.program_id(0), Default::default()); assert!(!tx.verify_refs()); } /// Detect binary changes in the serialized contract userdata, which could have a downstream /// effect on SDKs and DApps #[test] fn test_sdk_serialize() { let keypair = &GenKeys::new([0u8; 32]).gen_n_keypairs(1)[0]; let to = Pubkey::new(&[ 1, 1, 1, 4, 5, 6, 7, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 8, 7, 6, 5, 4, 1, 1, 1, ]); let program_id = Pubkey::new(&[ 2, 2, 2, 4, 5, 6, 7, 8, 9, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
data.extend_from_slice(&last_id_data); let fee_data = serialize(&self.fee).expect("serialize fee");
random_line_split
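Rows like this one are typically serialized into a single training string with sentinel tokens. A sketch in the common prefix-suffix-middle (PSM) layout; the sentinel spellings below follow a convention used by several open code models and are an assumption, not necessarily the tokens used with this dataset:

```python
def to_psm_string(row):
    """Arrange one example in prefix-suffix-middle (PSM) order, the layout
    in which the model sees both context halves before predicting the middle."""
    return ("<fim_prefix>" + row["prefix"]
            + "<fim_suffix>" + row["suffix"]
            + "<fim_middle>" + row["middle"])
```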
transaction.rs
} /// An atomic transaction #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] pub struct Transaction { /// A digital signature of `account_keys`, `program_ids`, `last_id`, `fee` and `instructions`, signed by `Pubkey`. pub signature: Signature, /// The `Pubkeys` that are executing this transaction userdata. The meaning of each key is /// program-specific. /// * account_keys[0] - Typically this is the `caller` public key. `signature` is verified with account_keys[0]. /// In the future which key pays the fee and which keys have signatures would be configurable. /// * account_keys[1] - Typically this is the program context or the recipient of the tokens pub account_keys: Vec<Pubkey>, /// The ID of a recent ledger entry. pub last_id: Hash, /// The number of tokens paid for processing and storage of this transaction. pub fee: u64, /// Keys identifying programs in the instructions vector. pub program_ids: Vec<Pubkey>, /// Programs that will be executed in sequence and committed in one atomic transaction if all /// succeed. pub instructions: Vec<Instruction>, } impl Transaction { pub fn new<T: Serialize>( from_keypair: &Keypair, transaction_keys: &[Pubkey], program_id: Pubkey, userdata: &T, last_id: Hash, fee: u64, ) -> Self { let program_ids = vec![program_id]; let accounts = (0..=transaction_keys.len() as u8).collect(); let instructions = vec![Instruction::new(0, userdata, accounts)]; Self::new_with_instructions( from_keypair, transaction_keys, last_id, fee, program_ids, instructions, ) } /// Create a signed transaction /// * `from_keypair` - The key used to sign the transaction. This key is stored as keys[0] /// * `account_keys` - The keys for the transaction. These are the program state /// instances or token recipient keys. /// * `last_id` - The PoH hash. /// * `fee` - The transaction fee. /// * `program_ids` - The keys that identify programs used in the `instruction` vector. /// * `instructions` - The programs and their arguments that the transaction will execute atomically pub fn new_with_instructions( from_keypair: &Keypair, keys: &[Pubkey], last_id: Hash, fee: u64, program_ids: Vec<Pubkey>, instructions: Vec<Instruction>, ) -> Self { let from = from_keypair.pubkey(); let mut account_keys = vec![from]; account_keys.extend_from_slice(keys); let mut tx = Transaction { signature: Signature::default(), account_keys, last_id: Hash::default(), fee, program_ids, instructions, }; tx.sign(from_keypair, last_id); tx } pub fn userdata(&self, instruction_index: usize) -> &[u8] { &self.instructions[instruction_index].userdata } fn key_index(&self, instruction_index: usize, accounts_index: usize) -> Option<usize> { self.instructions .get(instruction_index) .and_then(|instruction| instruction.accounts.get(accounts_index)) .map(|&account_keys_index| account_keys_index as usize) } pub fn key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> { self.key_index(instruction_index, accounts_index) .and_then(|account_keys_index| self.account_keys.get(account_keys_index)) } pub fn signed_key(&self, instruction_index: usize, accounts_index: usize) -> Option<&Pubkey> { match self.key_index(instruction_index, accounts_index) { None => None, Some(0) => self.account_keys.get(0), Some(_) => None, } } pub fn program_id(&self, instruction_index: usize) -> &Pubkey { let program_ids_index = self.instructions[instruction_index].program_ids_index; &self.program_ids[program_ids_index as usize] } /// Get the transaction data to sign. 
pub fn get_sign_data(&self) -> Vec<u8> { let mut data = serialize(&self.account_keys).expect("serialize account_keys"); let last_id_data = serialize(&self.last_id).expect("serialize last_id"); data.extend_from_slice(&last_id_data); let fee_data = serialize(&self.fee).expect("serialize fee"); data.extend_from_slice(&fee_data); let program_ids = serialize(&self.program_ids).expect("serialize program_ids"); data.extend_from_slice(&program_ids); let instructions = serialize(&self.instructions).expect("serialize instructions"); data.extend_from_slice(&instructions); data } /// Sign this transaction. pub fn sign(&mut self, keypair: &Keypair, last_id: Hash) { self.last_id = last_id; let sign_data = self.get_sign_data(); self.signature = Signature::new(&keypair.sign::<Sha512>(&sign_data).to_bytes()); } /// Verify only the transaction signature. pub fn verify_signature(&self) -> bool { warn!("transaction signature verification called"); self.signature .verify(&self.from().as_ref(), &self.get_sign_data()) } /// Verify that references in the instructions are valid pub fn verify_refs(&self) -> bool { for instruction in &self.instructions { if (instruction.program_ids_index as usize) >= self.program_ids.len() { return false; } for account_index in &instruction.accounts { if (*account_index as usize) >= self.account_keys.len() { return false; } } } true } pub fn from(&self) -> &Pubkey { &self.account_keys[0] } // a hash of a slice of transactions only needs to hash the signatures pub fn hash(transactions: &[Transaction]) -> Hash { let mut hasher = Hasher::default(); transactions .iter() .for_each(|tx| hasher.hash(&tx.signature.as_ref())); hasher.result() } } #[cfg(test)] mod tests { use super::*; use bincode::serialize; use signature::GenKeys; #[test] fn test_refs() { let key = Keypair::new(); let key1 = Keypair::new().pubkey(); let key2 = Keypair::new().pubkey(); let prog1 = Keypair::new().pubkey(); let prog2 = Keypair::new().pubkey(); let instructions = vec![ Instruction::new(0, &(), vec![0, 1]), Instruction::new(1, &(), vec![0, 2]), ]; let tx = Transaction::new_with_instructions( &key, &[key1, key2], Default::default(), 0, vec![prog1, prog2], instructions, ); assert!(tx.verify_refs()); assert_eq!(tx.key(0, 0), Some(&key.pubkey())); assert_eq!(tx.signed_key(0, 0), Some(&key.pubkey())); assert_eq!(tx.key(1, 0), Some(&key.pubkey())); assert_eq!(tx.signed_key(1, 0), Some(&key.pubkey())); assert_eq!(tx.key(0, 1), Some(&key1)); assert_eq!(tx.signed_key(0, 1), None); assert_eq!(tx.key(1, 1), Some(&key2)); assert_eq!(tx.signed_key(1, 1), None); assert_eq!(tx.key(2, 0), None); assert_eq!(tx.signed_key(2, 0), None); assert_eq!(tx.key(0, 2), None); assert_eq!(tx.signed_key(0, 2), None); assert_eq!(*tx.program_id(0), prog1); assert_eq!(*tx.program_id(1), prog2); } #[test] fn test_refs_invalid_program_id() { let key = Keypair::new(); let instructions = vec![Instruction::new(1, &(), vec![])]; let tx = Transaction::new_with_instructions( &key, &[], Default::default(), 0, vec![], instructions, ); assert!(!tx.verify_refs()); } #[test] fn test_refs_invalid_account() { let key = Keypair::new(); let instructions = vec![Instruction::new(0, &(), vec![1])]; let tx = Transaction::new_with_instructions( &key, &[], Default::default(), 0, vec![Default::default()], instructions, ); assert_eq!(*tx.program_id(0), Default::default()); assert!(!tx.verify_refs()); } /// Detect binary changes in the serialized contract userdata, which could have a downstream /// effect on SDKs and DApps
{ let userdata = serialize(userdata).unwrap(); Instruction { program_ids_index, userdata, accounts, } }
identifier_body
getqf.py
= symbol_string.split(",") symbol_file.close() path = file.split('/') name = path[len(path)-1] name = name.split('.') index_lists[name[0]] = symbol_list return index_lists def get_data(symbollist, index_name, ext = ''): """ Takes a list of symbols, and requests the key statistics page from yahoo for that company. \ Searches for all the table data for that company and returns a dictionary of symbols for keys mapped to\ a list of statistical data for that information. """ data_lists = {} for index, symbol in enumerate(symbollist): symbollist[index] = (symbol + ext, data_lists, index_name) pool = Pool(5) ##map symbol list to _get_data() fn. return tuple, with (symbol, statlist). pool.map(_get_data, symbollist) return data_lists def _get_data(param):
if index in keystatrows: stat_atom = stat.get_text() if stat_atom is None or stat_atom == 'N/A': stat_atom = 'NaN' table_data_list.append(stat_atom) if len(table_data_list) < 2: print full_symbol, "Not found" else: print full_symbol, "Got data" data_lists[full_symbol] = table_data_list else: print resp.getcode(), symbol except: print "Timed out for {}".format(symbol) def add_labels(data_lists, table_labels): """ Takes a dictionary with symbols for keys mapped to a list of statistic values.\ Returns a dictionary with symbols for keys mapped to a dictionary with the statistic label \ for keys, and the associated figure for the value. The new dictionary is returned """ labeled_dictionary_collection = {} for symbol, data_list in data_lists.iteritems(): if len(data_list) > 1: labeled_dictionary_collection[symbol] = dict(zip(table_labels,data_list)) return labeled_dictionary_collection def remove_number_symbols(data_lists): """ Takes a dictionary with symbols for keys mapped to a list of statistic values, and looks for placeholder values,\ replacing them with the appropriate number value. The function then returns a new dictionary with the changes made. """ billion = 'B' million = 'M' thousand = 'K' percent = '%' dot = '\.' comma = ',' B = re.compile(billion) M = re.compile(million) K = re.compile(thousand) Perc = re.compile(percent) Dot = re.compile(dot) Comm = re.compile(comma) fltpoint_dict = {} for symbol, datalist in data_lists.iteritems(): new_data_list = [] if len(datalist) > 1: for statistic in datalist: if percent in statistic: statistic = Perc.sub('', statistic) statistic = Comm.sub('', statistic) new_data_list.append(float(statistic)) elif comma in statistic or 'May' in statistic or 'Mar' in statistic: statistic = Comm.sub('', statistic) try: statistic = float(statistic) except: pass new_data_list.append(statistic) elif billion in statistic or million in statistic or thousand in statistic: statistic = B.sub('0000000', statistic) statistic = M.sub('00000', statistic) statistic = K.sub('0', statistic) statistic = Dot.sub('', statistic) new_data_list.append(float(statistic)) else: try: statistic = float(statistic) except: pass new_data_list.append(statistic) fltpoint_dict[symbol] = new_data_list else: fltpoint_dict[symbol] = ['N/A'] return fltpoint_dict def run_stats(index_dicts): stat_dataframes = {} for index, company_dict in index_dicts.iteritems(): index_df = pd.DataFrame.from_dict(company_dict, orient = 'index') stat_list = [] for header, column in index_df.iteritems(): try: stat_list.append(column.mean()) except: stat_list.append('No mean available') stat_dataframes[index] = stat_list NA_stat_frame = pd.DataFrame.from_dict(stat_dataframes, orient = 'index') return NA_stat_frame def combine_indexes(index_list): NA_companies = index_list[0] for index in xrange(1,len(index_list)): NA_companies.update(index_list[index]) NA_companies_frame = pd.DataFrame.from_dict(NA_companies, orient = 'index') return NA_companies_frame def scraper(): indexlist = [] indexlist.append('dataFiles/nsdqct.csv') indexlist.append('dataFiles/nsdqe.csv') indexlist.append('dataFiles/nye.csv') indexlist.append('dataFiles/tsxog.csv') indexlist.append('dataFiles/tsxct.csv') indexlist.append('dataFiles/tsxvct.csv') indexlist.append('dataFiles/tsxvog.csv') table_labels = [ "Date Time Gathered", "Market Cap (intraday)5", "Enterprise Value 3", "Trailing P/E (ttm, intraday)", # "Forward P/E 1","PEG Ratio (5 yr expected) 1", # "Price/Sales (ttm)", # "Price/Book (mrq)", # "Enterprise Value/Revenue (ttm) 3", # "Enterprise 
Value/EBITDA (ttm) 6", # "Fiscal Year Ends", # "Most Recent Quarter (mrq)", "Profit Margin (ttm)", # "Operating Margin (ttm)", # "Return on Assets (ttm)", # "Return on Equity (ttm)", "Revenue (ttm)", # "Revenue Per Share (ttm)", # "Qtrly Revenue Growth (yoy)", "Gross Profit (ttm)", "EBITDA (ttm) 6", # "Net Income Avl to Common (ttm)", "Diluted EPS (ttm)", # "Qtrly Earnings Growth (yoy)", # "Total Cash (mrq)", # "Total Cash Per Share (mrq)", # "Total Debt (mrq)", "Total Debt/Equity (mrq)", # "Current Ratio (mrq)", # "Book Value Per Share (mrq)", # "Operating Cash Flow (ttm)", "Levered Free Cash Flow (ttm)", # "Beta", "52-Week Change3", # "S&P500 52-Week Change3", "52-Week High 3", "52-Week Low 3", "50-Day Moving Average 3", "200-Day Moving Average 3", # "Avg Vol (3 month) 3", # "Avg Vol (10 day) 3", # "Shares Outstanding 5", # "Float", "% Held by Insiders 1", "% Held by Institutions 1", # "Shares Short 3", # "Short Ratio 3", # "Short % of Float 3", # "Shares Short (prior month) 3", # "Forward Annual Dividend Rate 4", # "Forward Annual Dividend Yield 4", "Trailing Annual Dividend Yield 3", "Trailing Annual Dividend Yield3", "5 Year Average Dividend Yield 4", # "Payout Ratio 4", # "Dividend Date 3", # "Ex-Dividend Date 4", "Last Split Factor 2", "Last Split Date 3",] indexlist = load_files(indexlist) # # ##test grab## # testlist = ['dataFiles/nsdqct.csv'] # testindexlist = load_files(testlist) # ##end here## ##Get the data and store it in a dict of indexes to company symbols to lists of values ## qfindexdict = {} for index, symbollist in indexlist.iteritems(): if index == 'tsxvct' or index == 'tsxvog': qfindex
symbol = param[0] data_lists = param[1] index = param[2] url = "http://finance.yahoo.com/q/ks?s={}+Key+Statistics".format(symbol) try: resp = urllib2.urlopen(url, timeout = 10) if resp.getcode() == 200: htmltext = BeautifulSoup(resp.read()) data_table_pattern = "yfnc_tabledata1" result_list = htmltext.findAll(class_= data_table_pattern) current_date_time = datetime.datetime.now() formatted_date_stamp = current_date_time.strftime("%A %B %d, %Y") result_list.insert(0,formatted_date_stamp) full_symbol = symbol + ':' + index table_data_list = [] table_data_list.append(formatted_date_stamp) keystatrows = set([1,2,3,12,16,19,20,22,27,31,33,35,36,37,38,42,43,50,51,52,56,57]) for index, stat in enumerate(result_list):
identifier_body
getqf.py
(symbolfilepaths, csvdelim = ","): """ Takes a list of symbol file paths to respective csv files, and loads their content into a \ dictionary of file-paths to lists containing the content of the csv. The delimiter of the csv files \ defaults to a ','. """ index_lists = {} for file in symbolfilepaths: symbol_file = open(file) symbol_string = symbol_file.read() symbol_list = symbol_string.split(",") symbol_file.close() path = file.split('/') name = path[len(path)-1] name = name.split('.') index_lists[name[0]] = symbol_list return index_lists def get_data(symbollist, index_name, ext = ''): """ Takes a list of symbols, and requests the key statistics page from yahoo for that company. \ Searches for all the table data for that company and returns a dictionary of symbols for keys mapped to\ a list of statistical data for that information. """ data_lists = {} for index, symbol in enumerate(symbollist): symbollist[index] = (symbol + ext, data_lists, index_name) pool = Pool(5) ##map symbol list to _get_data() fn. return tuple, with (symbol, statlist). pool.map(_get_data, symbollist) return data_lists def _get_data(param): symbol = param[0] data_lists = param[1] index = param[2] url = "http://finance.yahoo.com/q/ks?s={}+Key+Statistics".format(symbol) try: resp = urllib2.urlopen(url, timeout = 10) if resp.getcode() == 200: htmltext = BeautifulSoup(resp.read()) data_table_pattern = "yfnc_tabledata1" result_list = htmltext.findAll(class_= data_table_pattern) current_date_time = datetime.datetime.now() formatted_date_stamp = current_date_time.strftime("%A %B %d, %Y") result_list.insert(0,formatted_date_stamp) full_symbol = symbol + ':' + index table_data_list = [] table_data_list.append(formatted_date_stamp) keystatrows = set([1,2,3,12,16,19,20,22,27,31,33,35,36,37,38,42,43,50,51,52,56,57]) for index, stat in enumerate(result_list): if index in keystatrows: stat_atom = stat.get_text() if stat_atom is None or stat_atom == 'N/A': stat_atom = 'NaN' table_data_list.append(stat_atom) if len(table_data_list) < 2: print full_symbol, "Not found" else: print full_symbol, "Got data" data_lists[full_symbol] = table_data_list else: print resp.getcode(), symbol except: print "Timed out for {}".format(symbol) def add_labels(data_lists, table_labels): """ Takes a dictionary with symbols for keys mapped to a list of statistic values.\ Returns a dictionary with symbols for keys mapped to a dictionary with the statistic label \ for keys, and the associated figure for the value. The new dictionary is returned """ labeled_dictionary_collection = {} for symbol, data_list in data_lists.iteritems(): if len(data_list) > 1: labeled_dictionary_collection[symbol] = dict(zip(table_labels,data_list)) return labeled_dictionary_collection def remove_number_symbols(data_lists): """ Takes a dictionary with symbols for keys mapped to a list of statistic values, and looks for placeholder values,\ replacing them with the appropriate number value. The function then returns a new dictionary with the changes made. """ billion = 'B' million = 'M' thousand = 'K' percent = '%' dot = '\.' 
comma = ',' B = re.compile(billion) M = re.compile(million) K = re.compile(thousand) Perc = re.compile(percent) Dot = re.compile(dot) Comm = re.compile(comma) fltpoint_dict = {} for symbol, datalist in data_lists.iteritems(): new_data_list = [] if len(datalist) > 1: for statistic in datalist: if percent in statistic: statistic = Perc.sub('', statistic) statistic = Comm.sub('', statistic) new_data_list.append(float(statistic)) elif comma in statistic or 'May' in statistic or 'Mar' in statistic: statistic = Comm.sub('', statistic) try: statistic = float(statistic) except: pass new_data_list.append(statistic) elif billion in statistic or million in statistic or thousand in statistic: statistic = B.sub('0000000', statistic) statistic = M.sub('00000', statistic) statistic = K.sub('0', statistic) statistic = Dot.sub('', statistic) new_data_list.append(float(statistic)) else: try: statistic = float(statistic) except: pass new_data_list.append(statistic) fltpoint_dict[symbol] = new_data_list else: fltpoint_dict[symbol] = ['N/A'] return fltpoint_dict def run_stats(index_dicts): stat_dataframes = {} for index, company_dict in index_dicts.iteritems(): index_df = pd.DataFrame.from_dict(company_dict, orient = 'index') stat_list = [] for header, column in index_df.iteritems(): try: stat_list.append(column.mean()) except: stat_list.append('No mean available') stat_dataframes[index] = stat_list NA_stat_frame = pd.DataFrame.from_dict(stat_dataframes, orient = 'index') return NA_stat_frame def combine_indexes(index_list): NA_companies = index_list[0] for index in xrange(1,len(index_list)): NA_companies.update(index_list[index]) NA_companies_frame = pd.DataFrame.from_dict(NA_companies, orient = 'index') return NA_companies_frame def scraper(): indexlist = [] indexlist.append('dataFiles/nsdqct.csv') indexlist.append('dataFiles/nsdqe.csv') indexlist.append('dataFiles/nye.csv') indexlist.append('dataFiles/tsxog.csv') indexlist.append('dataFiles/tsxct.csv') indexlist.append('dataFiles/tsxvct.csv') indexlist.append('dataFiles/tsxvog.csv') table_labels = [ "Date Time Gathered", "Market Cap (intraday)5", "Enterprise Value 3", "Trailing P/E (ttm, intraday)", # "Forward P/E 1","PEG Ratio (5 yr expected) 1", # "Price/Sales (ttm)", # "Price/Book (mrq)", # "Enterprise Value/Revenue (ttm) 3", # "Enterprise Value/EBITDA (ttm) 6", # "Fiscal Year Ends", # "Most Recent Quarter (mrq)", "Profit Margin (ttm)", # "Operating Margin (ttm)", # "Return on Assets (ttm)", # "Return on Equity (ttm)", "Revenue (ttm)", # "Revenue Per Share (ttm)", # "Qtrly Revenue Growth (yoy)", "Gross Profit (ttm)", "EBITDA (ttm) 6", # "Net Income Avl to Common (ttm)", "Diluted EPS (ttm)", # "Qtrly Earnings Growth (yoy)", # "Total Cash (mrq)", # "Total Cash Per Share (mrq)", # "Total Debt (mrq)", "Total Debt/Equity (mrq)", # "Current Ratio (mrq)", # "Book Value Per Share (mrq)", # "Operating Cash Flow (ttm)", "Levered Free Cash Flow (ttm)", # "Beta", "52-Week Change3", # "S&P500 52-Week Change3", "52-Week High 3", "52-Week Low 3", "50-Day Moving Average 3", "200-Day Moving Average 3", # "Avg Vol (3 month) 3", # "Avg Vol (10 day) 3", # "Shares Outstanding 5", # "Float", "% Held by Insiders 1", "% Held by Institutions 1", # "Shares Short 3", # "Short Ratio 3", # "Short % of Float 3", # "Shares Short (prior month) 3", # "Forward Annual Dividend Rate 4", # "Forward Annual Dividend Yield 4", "Trailing Annual Dividend Yield 3", "Trailing Annual Dividend Yield3", "5 Year Average Dividend Yield 4", # "Payout Ratio 4", # "Dividend Date 3", # "Ex-Dividend Date 
4", "Last Split Factor 2", "Last Split Date 3",] indexlist = load_files(indexlist) # #
load_files
identifier_name
getqf.py
and requests the key statistics page from yahoo for that company. \ Searches for all the table data for that company and returns a dictionary of symbols for keys mapped to\ a list of statistical data for that information. """ data_lists = {} for index, symbol in enumerate(symbollist): symbollist[index] = (symbol + ext, data_lists, index_name) pool = Pool(5) ##map symbol list to _get_data() fn. return tuple, with (symbol, statlist). pool.map(_get_data, symbollist) return data_lists def _get_data(param): symbol = param[0] data_lists = param[1] index = param[2] url = "http://finance.yahoo.com/q/ks?s={}+Key+Statistics".format(symbol) try: resp = urllib2.urlopen(url, timeout = 10) if resp.getcode() == 200: htmltext = BeautifulSoup(resp.read()) data_table_pattern = "yfnc_tabledata1" result_list = htmltext.findAll(class_= data_table_pattern) current_date_time = datetime.datetime.now() formatted_date_stamp = current_date_time.strftime("%A %B %d, %Y") result_list.insert(0,formatted_date_stamp) full_symbol = symbol + ':' + index table_data_list = [] table_data_list.append(formatted_date_stamp) keystatrows = set([1,2,3,12,16,19,20,22,27,31,33,35,36,37,38,42,43,50,51,52,56,57]) for index, stat in enumerate(result_list): if index in keystatrows: stat_atom = stat.get_text() if stat_atom is None or stat_atom == 'N/A': stat_atom = 'NaN' table_data_list.append(stat_atom) if len(table_data_list) < 2: print full_symbol, "Not found" else: print full_symbol, "Got data" data_lists[full_symbol] = table_data_list else: print resp.getcode(), symbol except: print "Timed out for {}".format(symbol) def add_labels(data_lists, table_labels): """ Takes a dictionary with symbols for keys mapped to a list of statistic values.\ Returns a dictionary with symbols for keys mapped to a dictionary with the statistic label \ for keys, and the associated figure for the value. The new dictionary is returned """ labeled_dictionary_collection = {} for symbol, data_list in data_lists.iteritems(): if len(data_list) > 1: labeled_dictionary_collection[symbol] = dict(zip(table_labels,data_list)) return labeled_dictionary_collection def remove_number_symbols(data_lists): """ Takes a dictionary with symbols for keys mapped to a list of statistic values, and looks for placeholder values,\ replacing them with the appropriate number value. The function then returns a new dictionary with the changes made. """ billion = 'B' million = 'M' thousand = 'K' percent = '%' dot = '\.' 
comma = ',' B = re.compile(billion) M = re.compile(million) K = re.compile(thousand) Perc = re.compile(percent) Dot = re.compile(dot) Comm = re.compile(comma) fltpoint_dict = {} for symbol, datalist in data_lists.iteritems(): new_data_list = [] if len(datalist) > 1: for statistic in datalist: if percent in statistic: statistic = Perc.sub('', statistic) statistic = Comm.sub('', statistic) new_data_list.append(float(statistic)) elif comma in statistic or 'May' in statistic or 'Mar' in statistic: statistic = Comm.sub('', statistic) try: statistic = float(statistic) except: pass new_data_list.append(statistic) elif billion in statistic or million in statistic or thousand in statistic: statistic = B.sub('0000000', statistic) statistic = M.sub('00000', statistic) statistic = K.sub('0', statistic) statistic = Dot.sub('', statistic) new_data_list.append(float(statistic)) else: try: statistic = float(statistic) except: pass new_data_list.append(statistic) fltpoint_dict[symbol] = new_data_list else: fltpoint_dict[symbol] = ['N/A'] return fltpoint_dict def run_stats(index_dicts): stat_dataframes = {} for index, company_dict in index_dicts.iteritems(): index_df = pd.DataFrame.from_dict(company_dict, orient = 'index') stat_list = [] for header, column in index_df.iteritems(): try: stat_list.append(column.mean()) except: stat_list.append('No mean available') stat_dataframes[index] = stat_list NA_stat_frame = pd.DataFrame.from_dict(stat_dataframes, orient = 'index') return NA_stat_frame def combine_indexes(index_list): NA_companies = index_list[0] for index in xrange(1,len(index_list)): NA_companies.update(index_list[index]) NA_companies_frame = pd.DataFrame.from_dict(NA_companies, orient = 'index') return NA_companies_frame def scraper(): indexlist = [] indexlist.append('dataFiles/nsdqct.csv') indexlist.append('dataFiles/nsdqe.csv') indexlist.append('dataFiles/nye.csv') indexlist.append('dataFiles/tsxog.csv') indexlist.append('dataFiles/tsxct.csv') indexlist.append('dataFiles/tsxvct.csv') indexlist.append('dataFiles/tsxvog.csv') table_labels = [ "Date Time Gathered", "Market Cap (intraday)5", "Enterprise Value 3", "Trailing P/E (ttm, intraday)", # "Forward P/E 1","PEG Ratio (5 yr expected) 1", # "Price/Sales (ttm)", # "Price/Book (mrq)", # "Enterprise Value/Revenue (ttm) 3", # "Enterprise Value/EBITDA (ttm) 6", # "Fiscal Year Ends", # "Most Recent Quarter (mrq)", "Profit Margin (ttm)", # "Operating Margin (ttm)", # "Return on Assets (ttm)", # "Return on Equity (ttm)", "Revenue (ttm)", # "Revenue Per Share (ttm)", # "Qtrly Revenue Growth (yoy)", "Gross Profit (ttm)", "EBITDA (ttm) 6", # "Net Income Avl to Common (ttm)", "Diluted EPS (ttm)", # "Qtrly Earnings Growth (yoy)", # "Total Cash (mrq)", # "Total Cash Per Share (mrq)", # "Total Debt (mrq)", "Total Debt/Equity (mrq)", # "Current Ratio (mrq)", # "Book Value Per Share (mrq)", # "Operating Cash Flow (ttm)", "Levered Free Cash Flow (ttm)", # "Beta", "52-Week Change3", # "S&P500 52-Week Change3", "52-Week High 3", "52-Week Low 3", "50-Day Moving Average 3", "200-Day Moving Average 3", # "Avg Vol (3 month) 3", # "Avg Vol (10 day) 3", # "Shares Outstanding 5", # "Float", "% Held by Insiders 1", "% Held by Institutions 1", # "Shares Short 3", # "Short Ratio 3", # "Short % of Float 3", # "Shares Short (prior month) 3", # "Forward Annual Dividend Rate 4", # "Forward Annual Dividend Yield 4", "Trailing Annual Dividend Yield 3", "Trailing Annual Dividend Yield3", "5 Year Average Dividend Yield 4", # "Payout Ratio 4", # "Dividend Date 3", # "Ex-Dividend Date 
4", "Last Split Factor 2", "Last Split Date 3",] indexlist = load_files(indexlist) # # ##test grab## # testlist = ['dataFiles/nsdqct.csv'] # testindexlist = load_files(testlist) # ##end here## ##Get the data and store it in a dict of indexes to company symbols to lists of values ## qfindexdict = {} for index, symbollist in indexlist.iteritems():
if index == 'tsxvct' or index == 'tsxvog': qfindexdict[index] = get_data(symbollist, index, '.V') elif index == 'tsxct' or index == 'tsxog': qfindexdict[index] = get_data(symbollist, index, '.TO') else: qfindexdict[index] = get_data(symbollist, index)
conditional_block
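A `conditional_block` example masks an entire branch construct: the middle above is the full if/elif/else chain from `scraper()`. For indentation-based code the block can be delimited by indent level; a rough heuristic sketch, not the dataset's actual extractor:

```python
def conditional_block_split(source, start):
    """Mask the conditional block beginning at line index `start`: the `if`
    line plus every following line indented deeper than it, including
    chained elif/else clauses at the same indent."""
    lines = source.splitlines(keepends=True)
    indent = len(lines[start]) - len(lines[start].lstrip())
    end = start + 1
    while end < len(lines):
        line = lines[end]
        cur = len(line) - len(line.lstrip())
        if not line.strip():        # blank line inside the block: keep going
            end += 1
        elif cur > indent:          # body of the block
            end += 1
        elif cur == indent and line.lstrip().startswith(("elif", "else")):
            end += 1                # chained clause at the same indent
        else:
            break
    return ("".join(lines[:start]),
            "".join(lines[start:end]),
            "".join(lines[end:]))
```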
getqf.py
= symbol_string.split(",") symbol_file.close() path = file.split('/') name = path[len(path)-1] name = name.split('.') index_lists[name[0]] = symbol_list return index_lists def get_data(symbollist, index_name, ext = ''): """ Takes a list of symbols, and requests the key statistics page from yahoo for that company. \ Searches for all the table data for that company and returns a dictionary of symbols for keys mapped to\ a list of statistical data for that information. """ data_lists = {} for index, symbol in enumerate(symbollist): symbollist[index] = (symbol + ext, data_lists, index_name) pool = Pool(5) ##map symbol list to _get_data() fn. return tuple, with (symbol, statlist). pool.map(_get_data, symbollist) return data_lists def _get_data(param): symbol = param[0] data_lists = param[1] index = param[2] url = "http://finance.yahoo.com/q/ks?s={}+Key+Statistics".format(symbol) try: resp = urllib2.urlopen(url, timeout = 10) if resp.getcode() == 200: htmltext = BeautifulSoup(resp.read()) data_table_pattern = "yfnc_tabledata1" result_list = htmltext.findAll(class_= data_table_pattern) current_date_time = datetime.datetime.now() formatted_date_stamp = current_date_time.strftime("%A %B %d, %Y") result_list.insert(0,formatted_date_stamp) full_symbol = symbol + ':' + index table_data_list = [] table_data_list.append(formatted_date_stamp) keystatrows = set([1,2,3,12,16,19,20,22,27,31,33,35,36,37,38,42,43,50,51,52,56,57]) for index, stat in enumerate(result_list): if index in keystatrows: stat_atom = stat.get_text() if stat_atom is None or stat_atom == 'N/A': stat_atom = 'NaN' table_data_list.append(stat_atom) if len(table_data_list) < 2: print full_symbol, "Not found" else: print full_symbol, "Got data" data_lists[full_symbol] = table_data_list else: print resp.getcode(), symbol except: print "Timed out for {}".format(symbol) def add_labels(data_lists, table_labels): """ Takes a dictionary with symbols for keys mapped to a list of statistic values.\ Returns a dictionary with symbols for keys mapped to a dictionary with the statistic label \ for keys, and the associated figure for the value. The new dictionary is returned """ labeled_dictionary_collection = {} for symbol, data_list in data_lists.iteritems(): if len(data_list) > 1: labeled_dictionary_collection[symbol] = dict(zip(table_labels,data_list)) return labeled_dictionary_collection def remove_number_symbols(data_lists): """ Takes a dictionary with symbols for keys mapped to a list of statistic values, and looks for placeholder values,\ replacing them with the appropriate number value. The function then returns a new dictionary with the changes made. """ billion = 'B' million = 'M' thousand = 'K' percent = '%' dot = '\.' 
comma = ',' B = re.compile(billion) M = re.compile(million) K = re.compile(thousand) Perc = re.compile(percent) Dot = re.compile(dot) Comm = re.compile(comma) fltpoint_dict = {} for symbol, datalist in data_lists.iteritems(): new_data_list = [] if len(datalist) > 1: for statistic in datalist: if percent in statistic: statistic = Perc.sub('', statistic) statistic = Comm.sub('', statistic) new_data_list.append(float(statistic)) elif comma in statistic or 'May' in statistic or 'Mar' in statistic: statistic = Comm.sub('', statistic) try: statistic = float(statistic) except: pass new_data_list.append(statistic) elif billion in statistic or million in statistic or thousand in statistic: statistic = B.sub('0000000', statistic) statistic = M.sub('00000', statistic) statistic = K.sub('0', statistic) statistic = Dot.sub('', statistic) new_data_list.append(float(statistic)) else: try: statistic = float(statistic) except: pass new_data_list.append(statistic) fltpoint_dict[symbol] = new_data_list else: fltpoint_dict[symbol] = ['N/A'] return fltpoint_dict def run_stats(index_dicts): stat_dataframes = {} for index, company_dict in index_dicts.iteritems(): index_df = pd.DataFrame.from_dict(company_dict, orient = 'index') stat_list = [] for header, column in index_df.iteritems(): try: stat_list.append(column.mean()) except: stat_list.append('No mean available') stat_dataframes[index] = stat_list NA_stat_frame = pd.DataFrame.from_dict(stat_dataframes, orient = 'index') return NA_stat_frame def combine_indexes(index_list): NA_companies = index_list[0] for index in xrange(1,len(index_list)): NA_companies.update(index_list[index]) NA_companies_frame = pd.DataFrame.from_dict(NA_companies, orient = 'index') return NA_companies_frame def scraper(): indexlist = [] indexlist.append('dataFiles/nsdqct.csv') indexlist.append('dataFiles/nsdqe.csv') indexlist.append('dataFiles/nye.csv') indexlist.append('dataFiles/tsxog.csv') indexlist.append('dataFiles/tsxct.csv') indexlist.append('dataFiles/tsxvct.csv') indexlist.append('dataFiles/tsxvog.csv') table_labels = [ "Date Time Gathered", "Market Cap (intraday)5", "Enterprise Value 3", "Trailing P/E (ttm, intraday)", # "Forward P/E 1","PEG Ratio (5 yr expected) 1", # "Price/Sales (ttm)", # "Price/Book (mrq)", # "Enterprise Value/Revenue (ttm) 3", # "Enterprise Value/EBITDA (ttm) 6", # "Fiscal Year Ends", # "Most Recent Quarter (mrq)", "Profit Margin (ttm)", # "Operating Margin (ttm)", # "Return on Assets (ttm)", # "Return on Equity (ttm)", "Revenue (ttm)", # "Revenue Per Share (ttm)", # "Qtrly Revenue Growth (yoy)", "Gross Profit (ttm)", "EBITDA (ttm) 6", # "Net Income Avl to Common (ttm)", "Diluted EPS (ttm)", # "Qtrly Earnings Growth (yoy)", # "Total Cash (mrq)", # "Total Cash Per Share (mrq)", # "Total Debt (mrq)", "Total Debt/Equity (mrq)", # "Current Ratio (mrq)", # "Book Value Per Share (mrq)", # "Operating Cash Flow (ttm)", "Levered Free Cash Flow (ttm)", # "Beta", "52-Week Change3", # "S&P500 52-Week Change3", "52-Week High 3", "52-Week Low 3", "50-Day Moving Average 3", "200-Day Moving Average 3", # "Avg Vol (3 month) 3", # "Avg Vol (10 day) 3", # "Shares Outstanding 5", # "Float", "% Held by Insiders 1", "% Held by Institutions 1", # "Shares Short 3", # "Short Ratio 3", # "Short % of Float 3", # "Shares Short (prior month) 3", # "Forward Annual Dividend Rate 4", # "Forward Annual Dividend Yield 4", "Trailing Annual Dividend Yield 3", "Trailing Annual Dividend Yield3", "5 Year Average Dividend Yield 4", # "Payout Ratio 4", # "Dividend Date 3", # "Ex-Dividend Date 
4", "Last Split Factor 2", "Last Split Date 3",] indexlist = load_files(indexlist)
# testindexlist = load_files(testlist) # ##end here## ##Get the data and store it in a dict of indexes to company symbols to lists of values ## qfindexdict = {} for index, symbollist in indexlist.iteritems(): if index == 'tsxvct' or index == 'tsxvog': qfindex
# # ##test grab## # testlist = ['dataFiles/nsdqct.csv']
random_line_split
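A side note on get_data above: it rewrites each entry of symbollist into a (symbol, data_lists, index_name) tuple so pool.map can hand shared state to _get_data, which then mutates the shared dict. That pattern only works if Pool is a thread pool; with a process-based multiprocessing.Pool each worker would mutate its own copy of data_lists and the results would be lost. A minimal sketch that sidesteps the question by returning results instead of mutating shared state; fetch_symbol is a hypothetical stand-in for the request-and-parse logic.

from functools import partial
from multiprocessing.dummy import Pool  # thread pool; results are collected from return values

def fetch_symbol(symbol, index_name):
    # Stand-in for the urllib2/BeautifulSoup scraping done in _get_data above.
    stats = ['placeholder stats for ' + symbol]
    return symbol + ':' + index_name, stats

def get_data(symbollist, index_name, ext=''):
    worker = partial(fetch_symbol, index_name=index_name)
    pool = Pool(5)
    try:
        # map returns (full_symbol, stats) pairs; no shared mutable dict needed.
        pairs = pool.map(worker, [s + ext for s in symbollist])
    finally:
        pool.close()
        pool.join()
    return dict(pairs)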
main.rs
= Options::new(); // Create the file argument opts.optopt("d", "", "Specify destination file", "NAME"); // Create help flag (-h or --help) opts.optflag("h", "help", "Print this help menu"); // Create version l opts.optflag("v", "version", "Check the version you're running"); // Use the innate parse() method // https://doc.rust-lang.org/1.2.0/book/match.html // https://doc.rust-lang.org/std/macro.panic.html let matches = match opts.parse(&commandline_args[1..]){ Ok(m) => { m } Err(f) => {panic!(f.to_string())} }; // Handle help flags if matches.opt_present("h"){ let brief = format!("Usage: {} FILE [options]", program); print!("{}", opts.usage(&brief)); return; } else if matches.opt_present("v"){ println!("Version: v{}", VERSION.unwrap_or("unknown")); return; } // Check if the input file has been specified let input = if !matches.free.is_empty(){ matches.free[0].clone() } else { let brief = format!("Usage: {} FILE [options]", program); print!("{}", opts.usage(&brief)); return; }; // Check if the destination is empty - if so, we extract the name from given source path let dest = match matches.opt_str("d") { Some(x) => x, None => extract_file_name_if_empty_string(input.clone()), }; // Get URL to see what type of protocol we're dealing with let url = input.clone(); let url = url.parse::<hyper::Uri>().unwrap(); // Depending on the protocol - call appropriate functions match url.scheme(){ Some("http") => http_download_single_file(url, &dest[..]), Some("https") => https_download_single_file(url, &dest[..]), Some("ftp") => ftp_download_single_file(input, &dest[..]), // Some("ftps") => ftps_download_single_file(input, &dest[..]), Some(&_) => panic!("Sorry, unknown protocol!"), None => panic!("Sorry, no protocol given!"), } } // Download a single file form FTP server // fn ftps_download_single_file(input: std::string::String, destination: &str){ // } // Download a single file form FTP server fn ftp_download_single_file(input: std::string::String, destination: &str){ let (host, directory, file) = parse_data_from_ftp_fullpath(input.clone()); // Create a connection to an FTP server and authenticate to it. let mut ftp_stream = FtpStream::connect(host).unwrap_or_else(|err| panic!("{}", err) ); // Set transfer_type to binary so we can properly transport images let _ = ftp_stream.transfer_type(ftp::types::FileType::Binary); let (user, password) = parse_userdata_from_ftp_fullpath(input); let _ = ftp_stream.login(&user[..], &password[..]).unwrap(); // Change into a new directory, relative to the one we are currently in. 
let _ = ftp_stream.cwd(&directory[..]).unwrap(); let path = Path::new(destination); let display = path.display(); let reader = ftp_stream.get(&file).unwrap(); let iterator = reader.bytes(); //Open a file in write-only mode, returns `io::Result<File>` let mut local_file = match File::create(&path) { Err(why) => panic!("couldn't create {}: {}", display, why.description()), Ok(file) => file, }; for byte in iterator { // println!("{}", byte.unwrap()); match local_file.write(&[byte.unwrap()]) { Err(why) => { panic!("couldn't write to {}: {}", display, why.description()) }, Ok(_) => (), }; } let _ = local_file.flush(); // -- BufReader, iteracja po byte'ach -- // let mut reader = ftp_stream.get(file).unwrap(); // //Open a file in write-only mode, returns `io::Result<File>` // let mut local_file = match File::create(&path) { // Err(why) => panic!("couldn't create {}: {}", // display, // why.description()), // Ok(file) => file, // }; // loop{ // let chunk = read_n(&mut reader, 5); // match chunk { // Ok(v) => match io::stdout().write_all(&v) { // Err(why) => { // panic!("couldn't write to {}: {}", display, // why.description()) // }, // Ok(_) => (), // }, // Err(0) => return, // Err(_) => panic!("OMG!"), // }; // } // -- simple_retr -- // let remote_file = ftp_stream.simple_retr("file").unwrap(); // println!("Read file with contents\n{}\n", str::from_utf8(&remote_file.into_inner()).unwrap()); // Terminate the connection to the server. let _ = ftp_stream.quit(); } #[allow(dead_code)] fn read_n<R>(reader: R, bytes_to_read: u64) -> Result<Vec<u8>, i32> where R: Read, { let mut buf = vec![]; let mut chunk = reader.take(bytes_to_read); let status = chunk.read_to_end(&mut buf); // Do appropriate error handling match status { Ok(0) => Err(0), Ok(_) => Ok(buf), _ => panic!("Didn't read enough"), } } // Function that uses futures #[allow(dead_code)] #[allow(unused_variables, unused_mut)] fn http_download_single_file_work(url: hyper::Uri, destination: &str){ let mut core = tokio_core::reactor::Core::new().unwrap(); let handle = core.handle(); let client = Client::new(&handle); let work = client.get(url).and_then(|res| { println!("Response: {}", res.status()); println!("Headers: \n{}", res.headers()); res.body().for_each(|chunk| { io::stdout().write_all(&chunk).map_err(From::from) }) }).map(|_| { println!("\n\nDone."); }); core.run(work).unwrap(); } // Function that downloads a single file // It doesnt user futures - blocking and not very effective fn http_download_single_file(url: hyper::Uri, destination: &str){ let mut core = tokio_core::reactor::Core::new().unwrap(); let handle = core.handle(); let client = Client::new(&handle); let work = client.get(url); let reponse = core.run(work).unwrap(); let buf2 = reponse.body().collect(); let finally = match core.run(buf2){ Ok(res) => res, Err(_) => panic!("OMG"), }; let path = Path::new(destination); let display = path.display(); // Open a file in write-only mode, returns `io::Result<File>` let mut file = match File::create(&path) { Err(why) => panic!("couldn't create {}: {}", display, why.description()), Ok(file) => file, }; for x in &finally { match file.write_all(&x) { Err(why) => { panic!("couldn't write to {}: {}", display, why.description()) }, Ok(_) => (), } } println!("successfully wrote to {}", display); } // Function that downloads a single file // It doesnt user futures - blocking and not very effective fn https_download_single_file(url: hyper::Uri, destination: &str){ let mut core = tokio_core::reactor::Core::new().unwrap(); let client = 
Client::configure().connector(::hyper_tls::HttpsConnector::new(4, &core.handle()).unwrap()).build(&core.handle()); let work = client.get(url); let reponse = core.run(work).unwrap(); let buf2 = reponse.body().collect(); let finally = match core.run(buf2){ Ok(res) => res, Err(_) => panic!("OMG"), }; let path = Path::new(destination); let display = path.display(); // Open a file in write-only mode, returns `io::Result<File>` let mut file = match File::create(&path) { Err(why) => panic!("couldn't create {}: {}", display, why.description()), Ok(file) => file, };
match file.write_all(&x) { Err(why) => { panic!("couldn't write to {}: {}", display, why.description()) }, Ok(_) => (), } } println!("successfully wrote to {}", display); } fn extract_file_name_if_empty_string(fullpath: std::string::String)
for x in &finally {
random_line_split
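One performance note on the FTP branch above: the download is written one byte per write call (local_file.write(&[byte.unwrap()])), which issues a separate write for every byte. A minimal sketch of the buffered alternative using std::io::copy; save_stream is a hypothetical helper, and reader stands in for the value returned by ftp_stream.get.

use std::fs::File;
use std::io::{self, BufWriter, Write};

// Hypothetical helper: stream any reader to a file in large chunks
// instead of one byte at a time.
fn save_stream<R: io::Read>(mut reader: R, destination: &str) -> io::Result<u64> {
    let file = File::create(destination)?;
    let mut writer = BufWriter::new(file);
    let bytes = io::copy(&mut reader, &mut writer)?;
    writer.flush()?; // surface flush errors instead of losing them on drop
    Ok(bytes)
}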
main.rs
// https://doc.rust-lang.org/std/macro.panic.html let matches = match opts.parse(&commandline_args[1..]){ Ok(m) => { m } Err(f) => {panic!(f.to_string())} }; // Handle help flags if matches.opt_present("h"){ let brief = format!("Usage: {} FILE [options]", program); print!("{}", opts.usage(&brief)); return; } else if matches.opt_present("v"){ println!("Version: v{}", VERSION.unwrap_or("unknown")); return; } // Check if the input file has been specified let input = if !matches.free.is_empty(){ matches.free[0].clone() } else { let brief = format!("Usage: {} FILE [options]", program); print!("{}", opts.usage(&brief)); return; }; // Check if the destination is empty - if so, we extract the name from given source path let dest = match matches.opt_str("d") { Some(x) => x, None => extract_file_name_if_empty_string(input.clone()), }; // Get URL to see what type of protocol we're dealing with let url = input.clone(); let url = url.parse::<hyper::Uri>().unwrap(); // Depending on the protocol - call appropriate functions match url.scheme(){ Some("http") => http_download_single_file(url, &dest[..]), Some("https") => https_download_single_file(url, &dest[..]), Some("ftp") => ftp_download_single_file(input, &dest[..]), // Some("ftps") => ftps_download_single_file(input, &dest[..]), Some(&_) => panic!("Sorry, unknown protocol!"), None => panic!("Sorry, no protocol given!"), } } // Download a single file form FTP server // fn ftps_download_single_file(input: std::string::String, destination: &str){ // } // Download a single file form FTP server fn ftp_download_single_file(input: std::string::String, destination: &str){ let (host, directory, file) = parse_data_from_ftp_fullpath(input.clone()); // Create a connection to an FTP server and authenticate to it. let mut ftp_stream = FtpStream::connect(host).unwrap_or_else(|err| panic!("{}", err) ); // Set transfer_type to binary so we can properly transport images let _ = ftp_stream.transfer_type(ftp::types::FileType::Binary); let (user, password) = parse_userdata_from_ftp_fullpath(input); let _ = ftp_stream.login(&user[..], &password[..]).unwrap(); // Change into a new directory, relative to the one we are currently in. 
let _ = ftp_stream.cwd(&directory[..]).unwrap(); let path = Path::new(destination); let display = path.display(); let reader = ftp_stream.get(&file).unwrap(); let iterator = reader.bytes(); //Open a file in write-only mode, returns `io::Result<File>` let mut local_file = match File::create(&path) { Err(why) => panic!("couldn't create {}: {}", display, why.description()), Ok(file) => file, }; for byte in iterator { // println!("{}", byte.unwrap()); match local_file.write(&[byte.unwrap()]) { Err(why) => { panic!("couldn't write to {}: {}", display, why.description()) }, Ok(_) => (), }; } let _ = local_file.flush(); // -- BufReader, iteracja po byte'ach -- // let mut reader = ftp_stream.get(file).unwrap(); // //Open a file in write-only mode, returns `io::Result<File>` // let mut local_file = match File::create(&path) { // Err(why) => panic!("couldn't create {}: {}", // display, // why.description()), // Ok(file) => file, // }; // loop{ // let chunk = read_n(&mut reader, 5); // match chunk { // Ok(v) => match io::stdout().write_all(&v) { // Err(why) => { // panic!("couldn't write to {}: {}", display, // why.description()) // }, // Ok(_) => (), // }, // Err(0) => return, // Err(_) => panic!("OMG!"), // }; // } // -- simple_retr -- // let remote_file = ftp_stream.simple_retr("file").unwrap(); // println!("Read file with contents\n{}\n", str::from_utf8(&remote_file.into_inner()).unwrap()); // Terminate the connection to the server. let _ = ftp_stream.quit(); } #[allow(dead_code)] fn read_n<R>(reader: R, bytes_to_read: u64) -> Result<Vec<u8>, i32> where R: Read, { let mut buf = vec![]; let mut chunk = reader.take(bytes_to_read); let status = chunk.read_to_end(&mut buf); // Do appropriate error handling match status { Ok(0) => Err(0), Ok(_) => Ok(buf), _ => panic!("Didn't read enough"), } } // Function that uses futures #[allow(dead_code)] #[allow(unused_variables, unused_mut)] fn http_download_single_file_work(url: hyper::Uri, destination: &str){ let mut core = tokio_core::reactor::Core::new().unwrap(); let handle = core.handle(); let client = Client::new(&handle); let work = client.get(url).and_then(|res| { println!("Response: {}", res.status()); println!("Headers: \n{}", res.headers()); res.body().for_each(|chunk| { io::stdout().write_all(&chunk).map_err(From::from) }) }).map(|_| { println!("\n\nDone."); }); core.run(work).unwrap(); } // Function that downloads a single file // It doesnt user futures - blocking and not very effective fn http_download_single_file(url: hyper::Uri, destination: &str){ let mut core = tokio_core::reactor::Core::new().unwrap(); let handle = core.handle(); let client = Client::new(&handle); let work = client.get(url); let reponse = core.run(work).unwrap(); let buf2 = reponse.body().collect(); let finally = match core.run(buf2){ Ok(res) => res, Err(_) => panic!("OMG"), }; let path = Path::new(destination); let display = path.display(); // Open a file in write-only mode, returns `io::Result<File>` let mut file = match File::create(&path) { Err(why) => panic!("couldn't create {}: {}", display, why.description()), Ok(file) => file, }; for x in &finally { match file.write_all(&x) { Err(why) => { panic!("couldn't write to {}: {}", display, why.description()) }, Ok(_) => (), } } println!("successfully wrote to {}", display); } // Function that downloads a single file // It doesnt user futures - blocking and not very effective fn https_download_single_file(url: hyper::Uri, destination: &str){ let mut core = tokio_core::reactor::Core::new().unwrap(); let client = 
Client::configure().connector(::hyper_tls::HttpsConnector::new(4, &core.handle()).unwrap()).build(&core.handle()); let work = client.get(url); let reponse = core.run(work).unwrap(); let buf2 = reponse.body().collect(); let finally = match core.run(buf2){ Ok(res) => res, Err(_) => panic!("OMG"), }; let path = Path::new(destination); let display = path.display(); // Open a file in write-only mode, returns `io::Result<File>` let mut file = match File::create(&path) { Err(why) => panic!("couldn't create {}: {}", display, why
{ const VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION"); pretty_env_logger::init().unwrap(); // Using args() for simplicity; note that args() panics on invalid Unicode, whereas args_os() never panics let commandline_args: Vec<_> = env::args().collect(); let program = commandline_args[0].clone(); // Use the getopts package Options structure let mut opts = Options::new(); // Create the file argument opts.optopt("d", "", "Specify destination file", "NAME"); // Create help flag (-h or --help) opts.optflag("h", "help", "Print this help menu"); // Create version flag (-v or --version) opts.optflag("v", "version", "Check the version you're running"); // Use the innate parse() method // https://doc.rust-lang.org/1.2.0/book/match.html
identifier_body
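The body above aborts with panic!(f.to_string()) when option parsing fails, which prints a backtrace instead of a usage hint. A small self-contained sketch of the same getopts flow (optopt, optflag, parse, opt_present, and usage are the same calls used above) that reports the error and exits cleanly.

use getopts::Options;
use std::{env, process};

fn main() {
    let args: Vec<String> = env::args().collect();
    let program = args[0].clone();

    let mut opts = Options::new();
    opts.optopt("d", "", "Specify destination file", "NAME");
    opts.optflag("h", "help", "Print this help menu");

    // Report parse errors and exit instead of panicking with a backtrace.
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(err) => {
            eprintln!("error: {}", err);
            process::exit(1);
        }
    };

    if matches.opt_present("h") || matches.free.is_empty() {
        let brief = format!("Usage: {} FILE [options]", program);
        print!("{}", opts.usage(&brief));
        return;
    }
    println!("input: {}", matches.free[0]);
}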
main.rs
= Options::new(); // Create the file argument opts.optopt("d", "", "Specify destination file", "NAME"); // Create help flag (-h or --help) opts.optflag("h", "help", "Print this help menu"); // Create version l opts.optflag("v", "version", "Check the version you're running"); // Use the innate parse() method // https://doc.rust-lang.org/1.2.0/book/match.html // https://doc.rust-lang.org/std/macro.panic.html let matches = match opts.parse(&commandline_args[1..]){ Ok(m) => { m } Err(f) => {panic!(f.to_string())} }; // Handle help flags if matches.opt_present("h"){ let brief = format!("Usage: {} FILE [options]", program); print!("{}", opts.usage(&brief)); return; } else if matches.opt_present("v"){ println!("Version: v{}", VERSION.unwrap_or("unknown")); return; } // Check if the input file has been specified let input = if !matches.free.is_empty(){ matches.free[0].clone() } else { let brief = format!("Usage: {} FILE [options]", program); print!("{}", opts.usage(&brief)); return; }; // Check if the destination is empty - if so, we extract the name from given source path let dest = match matches.opt_str("d") { Some(x) => x, None => extract_file_name_if_empty_string(input.clone()), }; // Get URL to see what type of protocol we're dealing with let url = input.clone(); let url = url.parse::<hyper::Uri>().unwrap(); // Depending on the protocol - call appropriate functions match url.scheme(){ Some("http") => http_download_single_file(url, &dest[..]), Some("https") => https_download_single_file(url, &dest[..]), Some("ftp") => ftp_download_single_file(input, &dest[..]), // Some("ftps") => ftps_download_single_file(input, &dest[..]), Some(&_) => panic!("Sorry, unknown protocol!"), None => panic!("Sorry, no protocol given!"), } } // Download a single file form FTP server // fn ftps_download_single_file(input: std::string::String, destination: &str){ // } // Download a single file form FTP server fn ftp_download_single_file(input: std::string::String, destination: &str){ let (host, directory, file) = parse_data_from_ftp_fullpath(input.clone()); // Create a connection to an FTP server and authenticate to it. let mut ftp_stream = FtpStream::connect(host).unwrap_or_else(|err| panic!("{}", err) ); // Set transfer_type to binary so we can properly transport images let _ = ftp_stream.transfer_type(ftp::types::FileType::Binary); let (user, password) = parse_userdata_from_ftp_fullpath(input); let _ = ftp_stream.login(&user[..], &password[..]).unwrap(); // Change into a new directory, relative to the one we are currently in. 
let _ = ftp_stream.cwd(&directory[..]).unwrap(); let path = Path::new(destination); let display = path.display(); let reader = ftp_stream.get(&file).unwrap(); let iterator = reader.bytes(); //Open a file in write-only mode, returns `io::Result<File>` let mut local_file = match File::create(&path) { Err(why) => panic!("couldn't create {}: {}", display, why.description()), Ok(file) => file, }; for byte in iterator { // println!("{}", byte.unwrap()); match local_file.write(&[byte.unwrap()]) { Err(why) => { panic!("couldn't write to {}: {}", display, why.description()) }, Ok(_) => (), }; } let _ = local_file.flush(); // -- BufReader, iteracja po byte'ach -- // let mut reader = ftp_stream.get(file).unwrap(); // //Open a file in write-only mode, returns `io::Result<File>` // let mut local_file = match File::create(&path) { // Err(why) => panic!("couldn't create {}: {}", // display, // why.description()), // Ok(file) => file, // }; // loop{ // let chunk = read_n(&mut reader, 5); // match chunk { // Ok(v) => match io::stdout().write_all(&v) { // Err(why) => { // panic!("couldn't write to {}: {}", display, // why.description()) // }, // Ok(_) => (), // }, // Err(0) => return, // Err(_) => panic!("OMG!"), // }; // } // -- simple_retr -- // let remote_file = ftp_stream.simple_retr("file").unwrap(); // println!("Read file with contents\n{}\n", str::from_utf8(&remote_file.into_inner()).unwrap()); // Terminate the connection to the server. let _ = ftp_stream.quit(); } #[allow(dead_code)] fn read_n<R>(reader: R, bytes_to_read: u64) -> Result<Vec<u8>, i32> where R: Read, { let mut buf = vec![]; let mut chunk = reader.take(bytes_to_read); let status = chunk.read_to_end(&mut buf); // Do appropriate error handling match status { Ok(0) => Err(0), Ok(_) => Ok(buf), _ => panic!("Didn't read enough"), } } // Function that uses futures #[allow(dead_code)] #[allow(unused_variables, unused_mut)] fn http_download_single_file_work(url: hyper::Uri, destination: &str){ let mut core = tokio_core::reactor::Core::new().unwrap(); let handle = core.handle(); let client = Client::new(&handle); let work = client.get(url).and_then(|res| { println!("Response: {}", res.status()); println!("Headers: \n{}", res.headers()); res.body().for_each(|chunk| { io::stdout().write_all(&chunk).map_err(From::from) }) }).map(|_| { println!("\n\nDone."); }); core.run(work).unwrap(); } // Function that downloads a single file // It doesnt user futures - blocking and not very effective fn http_download_single_file(url: hyper::Uri, destination: &str){ let mut core = tokio_core::reactor::Core::new().unwrap(); let handle = core.handle(); let client = Client::new(&handle); let work = client.get(url); let reponse = core.run(work).unwrap(); let buf2 = reponse.body().collect(); let finally = match core.run(buf2){ Ok(res) => res, Err(_) => panic!("OMG"), }; let path = Path::new(destination); let display = path.display(); // Open a file in write-only mode, returns `io::Result<File>` let mut file = match File::create(&path) { Err(why) => panic!("couldn't create {}: {}", display, why.description()), Ok(file) => file, }; for x in &finally { match file.write_all(&x) { Err(why) => { panic!("couldn't write to {}: {}", display, why.description()) }, Ok(_) => (), } } println!("successfully wrote to {}", display); } // Function that downloads a single file // It doesnt user futures - blocking and not very effective fn
(url: hyper::Uri, destination: &str){ let mut core = tokio_core::reactor::Core::new().unwrap(); let client = Client::configure().connector(::hyper_tls::HttpsConnector::new(4, &core.handle()).unwrap()).build(&core.handle()); let work = client.get(url); let reponse = core.run(work).unwrap(); let buf2 = reponse.body().collect(); let finally = match core.run(buf2){ Ok(res) => res, Err(_) => panic!("OMG"), }; let path = Path::new(destination); let display = path.display(); // Open a file in write-only mode, returns `io::Result<File>` let mut file = match File::create(&path) { Err(why) => panic!("couldn't create {}: {}", display, why.description()), Ok(file) => file, }; for x in &finally { match file.write_all(&x) { Err(why) => { panic!("couldn't write to {}: {}", display, why.description()) }, Ok(_) => (), } } println!("successfully wrote to {}", display); } fn extract_file_name_if_empty_string(fullpath: std::string::String)
https_download_single_file
identifier_name
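http_download_single_file and https_download_single_file above are identical apart from how the hyper client is built. A hedged sketch of one way to share the common part, written against the same hyper 0.11-era calls the file already uses; save_body is a hypothetical name, and the Connect bound is an assumption about how to stay generic over both client types.

use std::fs::File;
use std::io::Write;
use futures::Stream;
use hyper::client::{Client, Connect};
use tokio_core::reactor::Core;

// Hypothetical shared helper: run the GET on the given core, collect the
// body chunks, and write them to `destination`. Intended to serve both the
// plain client and the HttpsConnector-configured one.
fn save_body<C: Connect>(
    core: &mut Core,
    client: &Client<C>,
    url: hyper::Uri,
    destination: &str,
) -> Result<(), Box<std::error::Error>> {
    let response = core.run(client.get(url))?;
    let chunks = core.run(response.body().collect())?;
    let mut file = File::create(destination)?;
    for chunk in &chunks {
        file.write_all(chunk)?;
    }
    Ok(())
}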
update_webhook_message.rs
( self, ) -> ( UpdateWebhookMessageErrorType, Option<Box<dyn Error + Send + Sync>>, ) { (self.kind, self.source) } } impl Display for UpdateWebhookMessageError { fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult { match &self.kind { UpdateWebhookMessageErrorType::ComponentCount { count } => { Display::fmt(count, f)?; f.write_str(" components were provided, but only ")?; Display::fmt(&ComponentValidationError::COMPONENT_COUNT, f)?; f.write_str(" root components are allowed") } UpdateWebhookMessageErrorType::ComponentInvalid { .. } => { f.write_str("a provided component is invalid") } UpdateWebhookMessageErrorType::ContentInvalid => { f.write_str("message content is invalid") } UpdateWebhookMessageErrorType::EmbedTooLarge { .. } => { f.write_str("length of one of the embeds is too large") } UpdateWebhookMessageErrorType::TooManyEmbeds => { f.write_str("only 10 embeds may be provided") } } } } impl Error for UpdateWebhookMessageError { fn source(&self) -> Option<&(dyn Error + 'static)> { self.source .as_ref() .map(|source| &**source as &(dyn Error + 'static)) } } /// Type of [`UpdateWebhookMessageError`] that occurred. #[derive(Debug)] #[non_exhaustive] pub enum UpdateWebhookMessageErrorType { /// Content is over 2000 UTF-16 characters. ContentInvalid, /// Length of one of the embeds is over 6000 characters. EmbedTooLarge { /// Index of the embed that was too large. /// /// This can be used to index into the provided embeds to retrieve the /// invalid embed. index: usize, }, /// An invalid message component was provided. ComponentInvalid { /// Additional details about the validation failure type. kind: ComponentValidationErrorType, }, /// Too many message components were provided. ComponentCount { /// Number of components that were provided. count: usize, }, /// Too many embeds were provided. /// /// A webhook can have up to 10 embeds. TooManyEmbeds, } #[derive(Serialize)] struct UpdateWebhookMessageFields<'a> { #[serde(skip_serializing_if = "Option::is_none")] allowed_mentions: Option<AllowedMentions>, #[serde(skip_serializing_if = "request::slice_is_empty")] attachments: &'a [Attachment], #[serde(skip_serializing_if = "Option::is_none")] components: Option<NullableField<&'a [Component]>>, #[serde(skip_serializing_if = "Option::is_none")] content: Option<NullableField<&'a str>>, #[serde(skip_serializing_if = "Option::is_none")] embeds: Option<NullableField<&'a [Embed]>>, #[serde(skip_serializing_if = "Option::is_none")] payload_json: Option<&'a [u8]>, } /// Update a message created by a webhook. /// /// A webhook's message must always have at least one embed or some amount of /// content. If you wish to delete a webhook's message refer to /// [`DeleteWebhookMessage`]. /// /// # Examples /// /// Update a webhook's message by setting the content to `test <@3>` - /// attempting to mention user ID 3 - and specifying that only that the user may /// not be mentioned. /// /// ```no_run /// # use twilight_http::Client; /// use twilight_model::{ /// channel::message::AllowedMentions, /// id::{MessageId, WebhookId} /// }; /// /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn std::error::Error>> { /// # let client = Client::new("token".to_owned()); /// client.update_webhook_message(WebhookId(1), "token here", MessageId(2)) /// // By creating a default set of allowed mentions, no entity can be /// // mentioned. /// .allowed_mentions(AllowedMentions::default()) /// .content(Some("test <@3>"))? 
/// .exec() /// .await?; /// # Ok(()) } /// ``` /// /// [`DeleteWebhookMessage`]: super::DeleteWebhookMessage #[must_use = "requests must be configured and executed"] pub struct UpdateWebhookMessage<'a> { fields: UpdateWebhookMessageFields<'a>, files: &'a [(&'a str, &'a [u8])], http: &'a Client, message_id: MessageId, reason: Option<&'a str>, token: &'a str, webhook_id: WebhookId, } impl<'a> UpdateWebhookMessage<'a> { /// Maximum number of embeds that a webhook's message may have. pub const EMBED_COUNT_LIMIT: usize = 10; pub(crate) const fn new( http: &'a Client, webhook_id: WebhookId, token: &'a str, message_id: MessageId, ) -> Self { Self { fields: UpdateWebhookMessageFields { allowed_mentions: None, attachments: &[], components: None, content: None, embeds: None, payload_json: None, }, files: &[], http, message_id, reason: None, token, webhook_id, } } /// Set the allowed mentions in the message. pub fn allowed_mentions(mut self, allowed: AllowedMentions) -> Self { self.fields.allowed_mentions.replace(allowed); self } /// Specify multiple attachments already present in the target message to keep. /// /// If called, all unspecified attachments will be removed from the message. /// If not called, all attachments will be kept. pub const fn attachments(mut self, attachments: &'a [Attachment]) -> Self { self.fields.attachments = attachments; self } /// Add multiple [`Component`]s to a message. /// /// Calling this method multiple times will clear previous calls. /// /// Pass `None` to clear existing components. /// /// # Errors /// /// Returns an [`UpdateWebhookMessageErrorType::ComponentCount`] error /// type if too many components are provided. /// /// Returns an [`UpdateWebhookMessageErrorType::ComponentInvalid`] error /// type if one of the provided components is invalid. pub fn components( mut self, components: Option<&'a [Component]>, ) -> Result<Self, UpdateWebhookMessageError> { if let Some(components) = components.as_ref() { validate_inner::components(components).map_err(|source| { let (kind, inner_source) = source.into_parts(); match kind { ComponentValidationErrorType::ComponentCount { count } => { UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ComponentCount { count }, source: inner_source, } } other => UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ComponentInvalid { kind: other }, source: inner_source, }, } })?; } self.fields.components = Some(NullableField(components)); Ok(self) } /// Set the content of the message. /// /// Pass `None` if you want to remove the message content. /// /// Note that if there is are no embeds then you will not be able to remove /// the content of the message. /// /// The maximum length is 2000 UTF-16 characters. /// /// # Errors /// /// Returns an [`UpdateWebhookMessageErrorType::ContentInvalid`] error type if /// the content length is too long. pub fn content(mut self, content: Option<&'a str>) -> Result<Self, UpdateWebhookMessageError> { if let Some(content_ref) = content { if !validate_inner::content_limit(content_ref) { return Err(UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ContentInvalid, source: None, }); } } self.fields.content = Some(NullableField(content)); Ok(self) } /// Set the list of embeds of the webhook's message. /// /// Pass `None` to remove all of the embeds. /// /// The maximum number of allowed embeds is defined by /// [`EMBED_COUNT_LIMIT`]. /// /// The total character length of each embed must not exceed 6000 /// characters. 
Additionally, the internal fields also have character /// limits. Refer to [the discord docs] for more information. /// /// # Examples /// /// Create an embed and update the message with the new embed. The content /// of the original message is unaffected and only the embed(s) are /// modified. /// /// ```no_run /// # use twilight_http::Client; /// use twilight_embed_builder::EmbedBuilder; /// use twilight_model::id::{MessageId, WebhookId}; /// /// # #[tokio::main] async fn main() -> Result<(), Box<dyn std
into_parts
identifier_name
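The block above illustrates a recurring twilight error shape: an opaque struct pairing a non-exhaustive kind enum with an optional boxed source, plus an into_parts method that takes ownership of both. A minimal self-contained sketch of the same pattern under hypothetical names (UploadError and UploadErrorType are not from the crate):

use std::error::Error;
use std::fmt::{self, Display, Formatter};

#[derive(Debug)]
pub struct UploadError {
    kind: UploadErrorType,
    source: Option<Box<dyn Error + Send + Sync>>,
}

#[derive(Debug)]
#[non_exhaustive]
pub enum UploadErrorType {
    /// The payload exceeded the allowed size.
    TooLarge { size: usize },
}

impl UploadError {
    /// Consume the error, returning its kind and source, as into_parts does above.
    pub fn into_parts(self) -> (UploadErrorType, Option<Box<dyn Error + Send + Sync>>) {
        (self.kind, self.source)
    }
}

impl Display for UploadError {
    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
        match &self.kind {
            UploadErrorType::TooLarge { size } => {
                write!(f, "upload of {} bytes is too large", size)
            }
        }
    }
}

impl Error for UploadError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        self.source.as_ref().map(|source| &**source as &(dyn Error + 'static))
    }
}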
update_webhook_message.rs
embeds: Option<NullableField<&'a [Embed]>>, #[serde(skip_serializing_if = "Option::is_none")] payload_json: Option<&'a [u8]>, } /// Update a message created by a webhook. /// /// A webhook's message must always have at least one embed or some amount of /// content. If you wish to delete a webhook's message refer to /// [`DeleteWebhookMessage`]. /// /// # Examples /// /// Update a webhook's message by setting the content to `test <@3>` - /// attempting to mention user ID 3 - and specifying that only that the user may /// not be mentioned. /// /// ```no_run /// # use twilight_http::Client; /// use twilight_model::{ /// channel::message::AllowedMentions, /// id::{MessageId, WebhookId} /// }; /// /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn std::error::Error>> { /// # let client = Client::new("token".to_owned()); /// client.update_webhook_message(WebhookId(1), "token here", MessageId(2)) /// // By creating a default set of allowed mentions, no entity can be /// // mentioned. /// .allowed_mentions(AllowedMentions::default()) /// .content(Some("test <@3>"))? /// .exec() /// .await?; /// # Ok(()) } /// ``` /// /// [`DeleteWebhookMessage`]: super::DeleteWebhookMessage #[must_use = "requests must be configured and executed"] pub struct UpdateWebhookMessage<'a> { fields: UpdateWebhookMessageFields<'a>, files: &'a [(&'a str, &'a [u8])], http: &'a Client, message_id: MessageId, reason: Option<&'a str>, token: &'a str, webhook_id: WebhookId, } impl<'a> UpdateWebhookMessage<'a> { /// Maximum number of embeds that a webhook's message may have. pub const EMBED_COUNT_LIMIT: usize = 10; pub(crate) const fn new( http: &'a Client, webhook_id: WebhookId, token: &'a str, message_id: MessageId, ) -> Self { Self { fields: UpdateWebhookMessageFields { allowed_mentions: None, attachments: &[], components: None, content: None, embeds: None, payload_json: None, }, files: &[], http, message_id, reason: None, token, webhook_id, } } /// Set the allowed mentions in the message. pub fn allowed_mentions(mut self, allowed: AllowedMentions) -> Self { self.fields.allowed_mentions.replace(allowed); self } /// Specify multiple attachments already present in the target message to keep. /// /// If called, all unspecified attachments will be removed from the message. /// If not called, all attachments will be kept. pub const fn attachments(mut self, attachments: &'a [Attachment]) -> Self { self.fields.attachments = attachments; self } /// Add multiple [`Component`]s to a message. /// /// Calling this method multiple times will clear previous calls. /// /// Pass `None` to clear existing components. /// /// # Errors /// /// Returns an [`UpdateWebhookMessageErrorType::ComponentCount`] error /// type if too many components are provided. /// /// Returns an [`UpdateWebhookMessageErrorType::ComponentInvalid`] error /// type if one of the provided components is invalid. 
pub fn components( mut self, components: Option<&'a [Component]>, ) -> Result<Self, UpdateWebhookMessageError> { if let Some(components) = components.as_ref() { validate_inner::components(components).map_err(|source| { let (kind, inner_source) = source.into_parts(); match kind { ComponentValidationErrorType::ComponentCount { count } => { UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ComponentCount { count }, source: inner_source, } } other => UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ComponentInvalid { kind: other }, source: inner_source, }, } })?; } self.fields.components = Some(NullableField(components)); Ok(self) } /// Set the content of the message. /// /// Pass `None` if you want to remove the message content. /// /// Note that if there is are no embeds then you will not be able to remove /// the content of the message. /// /// The maximum length is 2000 UTF-16 characters. /// /// # Errors /// /// Returns an [`UpdateWebhookMessageErrorType::ContentInvalid`] error type if /// the content length is too long. pub fn content(mut self, content: Option<&'a str>) -> Result<Self, UpdateWebhookMessageError> { if let Some(content_ref) = content { if !validate_inner::content_limit(content_ref) { return Err(UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ContentInvalid, source: None, }); } } self.fields.content = Some(NullableField(content)); Ok(self) } /// Set the list of embeds of the webhook's message. /// /// Pass `None` to remove all of the embeds. /// /// The maximum number of allowed embeds is defined by /// [`EMBED_COUNT_LIMIT`]. /// /// The total character length of each embed must not exceed 6000 /// characters. Additionally, the internal fields also have character /// limits. Refer to [the discord docs] for more information. /// /// # Examples /// /// Create an embed and update the message with the new embed. The content /// of the original message is unaffected and only the embed(s) are /// modified. /// /// ```no_run /// # use twilight_http::Client; /// use twilight_embed_builder::EmbedBuilder; /// use twilight_model::id::{MessageId, WebhookId}; /// /// # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { /// # let client = Client::new("token".to_owned()); /// let embed = EmbedBuilder::new() /// .description("Powerful, flexible, and scalable ecosystem of Rust libraries for the Discord API.") /// .title("Twilight") /// .url("https://twilight.rs") /// .build()?; /// /// client.update_webhook_message(WebhookId(1), "token", MessageId(2)) /// .embeds(Some(&[embed]))? /// .exec() /// .await?; /// # Ok(()) } /// ``` /// /// # Errors /// /// Returns an [`UpdateWebhookMessageErrorType::EmbedTooLarge`] error type /// if one of the embeds are too large. /// /// Returns an [`UpdateWebhookMessageErrorType::TooManyEmbeds`] error type /// if more than 10 embeds are provided. 
/// /// [the discord docs]: https://discord.com/developers/docs/resources/channel#embed-limits /// [`EMBED_COUNT_LIMIT`]: Self::EMBED_COUNT_LIMIT pub fn embeds( mut self, embeds: Option<&'a [Embed]>, ) -> Result<Self, UpdateWebhookMessageError> { if let Some(embeds_present) = embeds.as_deref() { if embeds_present.len() > Self::EMBED_COUNT_LIMIT { return Err(UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::TooManyEmbeds, source: None, }); } for (idx, embed) in embeds_present.iter().enumerate() { if let Err(source) = validate_inner::embed(embed) { return Err(UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::EmbedTooLarge { index: idx }, source: Some(Box::new(source)), }); } } } self.fields.embeds = Some(NullableField(embeds)); Ok(self) } /// Attach multiple files to the webhook. /// /// Calling this method will clear any previous calls. pub const fn files(mut self, files: &'a [(&'a str, &'a [u8])]) -> Self { self.files = files; self } /// JSON encoded body of any additional request fields. /// /// If this method is called, all other fields are ignored, except for /// [`files`]. See [Discord Docs/Create Message] and /// [`ExecuteWebhook::payload_json`]. /// /// [`files`]: Self::files /// [`ExecuteWebhook::payload_json`]: super::ExecuteWebhook::payload_json /// [Discord Docs/Create Message]: https://discord.com/developers/docs/resources/channel#create-message-params pub const fn payload_json(mut self, payload_json: &'a [u8]) -> Self
{ self.fields.payload_json = Some(payload_json); self }
identifier_body
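A note on the fields struct above: wrapping each field in Option<NullableField<T>> distinguishes three update states. Omitting the key leaves the field unchanged, sending JSON null clears it, and sending a value sets it. A hedged sketch of how that layering serializes, using our own minimal NullableField; it assumes serde with the derive feature plus serde_json, and the crate's real type may differ.

use serde::Serialize;

// Our own stand-in for the crate's NullableField: a newtype around Option
// serializes as its inner value, so NullableField(None) becomes JSON null.
#[derive(Serialize)]
struct NullableField<T>(Option<T>);

#[derive(Serialize)]
struct Update<'a> {
    #[serde(skip_serializing_if = "Option::is_none")]
    content: Option<NullableField<&'a str>>,
}

fn main() {
    let unchanged = Update { content: None };
    let cleared = Update { content: Some(NullableField(None)) };
    let set = Update { content: Some(NullableField(Some("hi"))) };
    println!("{}", serde_json::to_string(&unchanged).unwrap()); // {}
    println!("{}", serde_json::to_string(&cleared).unwrap());   // {"content":null}
    println!("{}", serde_json::to_string(&set).unwrap());       // {"content":"hi"}
}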
update_webhook_message.rs
ContentInvalid => { f.write_str("message content is invalid") } UpdateWebhookMessageErrorType::EmbedTooLarge { .. } => { f.write_str("length of one of the embeds is too large") } UpdateWebhookMessageErrorType::TooManyEmbeds => { f.write_str("only 10 embeds may be provided") } } } } impl Error for UpdateWebhookMessageError { fn source(&self) -> Option<&(dyn Error + 'static)> { self.source .as_ref() .map(|source| &**source as &(dyn Error + 'static)) } } /// Type of [`UpdateWebhookMessageError`] that occurred. #[derive(Debug)] #[non_exhaustive] pub enum UpdateWebhookMessageErrorType { /// Content is over 2000 UTF-16 characters. ContentInvalid, /// Length of one of the embeds is over 6000 characters. EmbedTooLarge { /// Index of the embed that was too large. /// /// This can be used to index into the provided embeds to retrieve the /// invalid embed. index: usize, }, /// An invalid message component was provided. ComponentInvalid { /// Additional details about the validation failure type. kind: ComponentValidationErrorType, }, /// Too many message components were provided. ComponentCount { /// Number of components that were provided. count: usize, }, /// Too many embeds were provided. /// /// A webhook can have up to 10 embeds. TooManyEmbeds, } #[derive(Serialize)] struct UpdateWebhookMessageFields<'a> { #[serde(skip_serializing_if = "Option::is_none")] allowed_mentions: Option<AllowedMentions>, #[serde(skip_serializing_if = "request::slice_is_empty")] attachments: &'a [Attachment], #[serde(skip_serializing_if = "Option::is_none")] components: Option<NullableField<&'a [Component]>>, #[serde(skip_serializing_if = "Option::is_none")] content: Option<NullableField<&'a str>>, #[serde(skip_serializing_if = "Option::is_none")] embeds: Option<NullableField<&'a [Embed]>>, #[serde(skip_serializing_if = "Option::is_none")] payload_json: Option<&'a [u8]>, } /// Update a message created by a webhook. /// /// A webhook's message must always have at least one embed or some amount of /// content. If you wish to delete a webhook's message refer to /// [`DeleteWebhookMessage`]. /// /// # Examples /// /// Update a webhook's message by setting the content to `test <@3>` - /// attempting to mention user ID 3 - and specifying that only that the user may /// not be mentioned. /// /// ```no_run /// # use twilight_http::Client; /// use twilight_model::{ /// channel::message::AllowedMentions, /// id::{MessageId, WebhookId} /// }; /// /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn std::error::Error>> { /// # let client = Client::new("token".to_owned()); /// client.update_webhook_message(WebhookId(1), "token here", MessageId(2)) /// // By creating a default set of allowed mentions, no entity can be /// // mentioned. /// .allowed_mentions(AllowedMentions::default()) /// .content(Some("test <@3>"))? /// .exec() /// .await?; /// # Ok(()) } /// ``` /// /// [`DeleteWebhookMessage`]: super::DeleteWebhookMessage #[must_use = "requests must be configured and executed"] pub struct UpdateWebhookMessage<'a> { fields: UpdateWebhookMessageFields<'a>, files: &'a [(&'a str, &'a [u8])], http: &'a Client, message_id: MessageId, reason: Option<&'a str>, token: &'a str, webhook_id: WebhookId, } impl<'a> UpdateWebhookMessage<'a> { /// Maximum number of embeds that a webhook's message may have. 
pub const EMBED_COUNT_LIMIT: usize = 10; pub(crate) const fn new( http: &'a Client, webhook_id: WebhookId, token: &'a str, message_id: MessageId, ) -> Self { Self { fields: UpdateWebhookMessageFields { allowed_mentions: None, attachments: &[], components: None, content: None, embeds: None, payload_json: None, }, files: &[], http, message_id, reason: None, token, webhook_id, } } /// Set the allowed mentions in the message. pub fn allowed_mentions(mut self, allowed: AllowedMentions) -> Self { self.fields.allowed_mentions.replace(allowed); self } /// Specify multiple attachments already present in the target message to keep. /// /// If called, all unspecified attachments will be removed from the message. /// If not called, all attachments will be kept. pub const fn attachments(mut self, attachments: &'a [Attachment]) -> Self { self.fields.attachments = attachments; self } /// Add multiple [`Component`]s to a message. /// /// Calling this method multiple times will clear previous calls. /// /// Pass `None` to clear existing components. /// /// # Errors /// /// Returns an [`UpdateWebhookMessageErrorType::ComponentCount`] error /// type if too many components are provided. /// /// Returns an [`UpdateWebhookMessageErrorType::ComponentInvalid`] error /// type if one of the provided components is invalid. pub fn components( mut self, components: Option<&'a [Component]>, ) -> Result<Self, UpdateWebhookMessageError> { if let Some(components) = components.as_ref() { validate_inner::components(components).map_err(|source| { let (kind, inner_source) = source.into_parts(); match kind { ComponentValidationErrorType::ComponentCount { count } => { UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ComponentCount { count }, source: inner_source, } } other => UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ComponentInvalid { kind: other }, source: inner_source, }, } })?; } self.fields.components = Some(NullableField(components)); Ok(self) } /// Set the content of the message. /// /// Pass `None` if you want to remove the message content. /// /// Note that if there is are no embeds then you will not be able to remove /// the content of the message. /// /// The maximum length is 2000 UTF-16 characters. /// /// # Errors /// /// Returns an [`UpdateWebhookMessageErrorType::ContentInvalid`] error type if /// the content length is too long. pub fn content(mut self, content: Option<&'a str>) -> Result<Self, UpdateWebhookMessageError> { if let Some(content_ref) = content { if !validate_inner::content_limit(content_ref) { return Err(UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ContentInvalid, source: None, }); } } self.fields.content = Some(NullableField(content)); Ok(self) } /// Set the list of embeds of the webhook's message. /// /// Pass `None` to remove all of the embeds. /// /// The maximum number of allowed embeds is defined by /// [`EMBED_COUNT_LIMIT`]. /// /// The total character length of each embed must not exceed 6000 /// characters. Additionally, the internal fields also have character /// limits. Refer to [the discord docs] for more information. /// /// # Examples ///
/// of the original message is unaffected and only the embed(s) are /// modified. /// /// ```no_run /// # use twilight_http::Client; /// use twilight_embed_builder::EmbedBuilder; /// use twilight_model::id::{MessageId, WebhookId}; /// /// # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { /// # let client = Client::new("token".to_owned()); /// let embed = EmbedBuilder::new() /// .description("Powerful, flexible, and scalable ecosystem of Rust libraries for the Discord API.") /// .title("Twilight") /// .url("https://twilight.rs") /// .build()?; /// /// client.update_webhook_message(WebhookId(1), "token", MessageId(2)) /// .embeds(Some(&[embed]))? /// .exec() /// .await?; /// # Ok(()) } /// ``` /// /// # Errors /// /// Returns an [`UpdateWebhookMessageErrorType::EmbedTooLarge`] error type /// if one of the embeds is too
/// Create an embed and update the message with the new embed. The content
random_line_split
update_webhook_message.rs
Invalid => { f.write_str("message content is invalid") } UpdateWebhookMessageErrorType::EmbedTooLarge { .. } => { f.write_str("length of one of the embeds is too large") } UpdateWebhookMessageErrorType::TooManyEmbeds => { f.write_str("only 10 embeds may be provided") } } } } impl Error for UpdateWebhookMessageError { fn source(&self) -> Option<&(dyn Error + 'static)> { self.source .as_ref() .map(|source| &**source as &(dyn Error + 'static)) } } /// Type of [`UpdateWebhookMessageError`] that occurred. #[derive(Debug)] #[non_exhaustive] pub enum UpdateWebhookMessageErrorType { /// Content is over 2000 UTF-16 characters. ContentInvalid, /// Length of one of the embeds is over 6000 characters. EmbedTooLarge { /// Index of the embed that was too large. /// /// This can be used to index into the provided embeds to retrieve the /// invalid embed. index: usize, }, /// An invalid message component was provided. ComponentInvalid { /// Additional details about the validation failure type. kind: ComponentValidationErrorType, }, /// Too many message components were provided. ComponentCount { /// Number of components that were provided. count: usize, }, /// Too many embeds were provided. /// /// A webhook can have up to 10 embeds. TooManyEmbeds, } #[derive(Serialize)] struct UpdateWebhookMessageFields<'a> { #[serde(skip_serializing_if = "Option::is_none")] allowed_mentions: Option<AllowedMentions>, #[serde(skip_serializing_if = "request::slice_is_empty")] attachments: &'a [Attachment], #[serde(skip_serializing_if = "Option::is_none")] components: Option<NullableField<&'a [Component]>>, #[serde(skip_serializing_if = "Option::is_none")] content: Option<NullableField<&'a str>>, #[serde(skip_serializing_if = "Option::is_none")] embeds: Option<NullableField<&'a [Embed]>>, #[serde(skip_serializing_if = "Option::is_none")] payload_json: Option<&'a [u8]>, } /// Update a message created by a webhook. /// /// A webhook's message must always have at least one embed or some amount of /// content. If you wish to delete a webhook's message refer to /// [`DeleteWebhookMessage`]. /// /// # Examples /// /// Update a webhook's message by setting the content to `test <@3>` - /// attempting to mention user ID 3 - and specifying that only that the user may /// not be mentioned. /// /// ```no_run /// # use twilight_http::Client; /// use twilight_model::{ /// channel::message::AllowedMentions, /// id::{MessageId, WebhookId} /// }; /// /// # #[tokio::main] /// # async fn main() -> Result<(), Box<dyn std::error::Error>> { /// # let client = Client::new("token".to_owned()); /// client.update_webhook_message(WebhookId(1), "token here", MessageId(2)) /// // By creating a default set of allowed mentions, no entity can be /// // mentioned. /// .allowed_mentions(AllowedMentions::default()) /// .content(Some("test <@3>"))? /// .exec() /// .await?; /// # Ok(()) } /// ``` /// /// [`DeleteWebhookMessage`]: super::DeleteWebhookMessage #[must_use = "requests must be configured and executed"] pub struct UpdateWebhookMessage<'a> { fields: UpdateWebhookMessageFields<'a>, files: &'a [(&'a str, &'a [u8])], http: &'a Client, message_id: MessageId, reason: Option<&'a str>, token: &'a str, webhook_id: WebhookId, } impl<'a> UpdateWebhookMessage<'a> { /// Maximum number of embeds that a webhook's message may have. 
pub const EMBED_COUNT_LIMIT: usize = 10; pub(crate) const fn new( http: &'a Client, webhook_id: WebhookId, token: &'a str, message_id: MessageId, ) -> Self { Self { fields: UpdateWebhookMessageFields { allowed_mentions: None, attachments: &[], components: None, content: None, embeds: None, payload_json: None, }, files: &[], http, message_id, reason: None, token, webhook_id, } } /// Set the allowed mentions in the message. pub fn allowed_mentions(mut self, allowed: AllowedMentions) -> Self { self.fields.allowed_mentions.replace(allowed); self } /// Specify multiple attachments already present in the target message to keep. /// /// If called, all unspecified attachments will be removed from the message. /// If not called, all attachments will be kept. pub const fn attachments(mut self, attachments: &'a [Attachment]) -> Self { self.fields.attachments = attachments; self } /// Add multiple [`Component`]s to a message. /// /// Calling this method multiple times will clear previous calls. /// /// Pass `None` to clear existing components. /// /// # Errors /// /// Returns an [`UpdateWebhookMessageErrorType::ComponentCount`] error /// type if too many components are provided. /// /// Returns an [`UpdateWebhookMessageErrorType::ComponentInvalid`] error /// type if one of the provided components is invalid. pub fn components( mut self, components: Option<&'a [Component]>, ) -> Result<Self, UpdateWebhookMessageError> { if let Some(components) = components.as_ref()
self.fields.components = Some(NullableField(components)); Ok(self) } /// Set the content of the message. /// /// Pass `None` if you want to remove the message content. /// /// Note that if there is are no embeds then you will not be able to remove /// the content of the message. /// /// The maximum length is 2000 UTF-16 characters. /// /// # Errors /// /// Returns an [`UpdateWebhookMessageErrorType::ContentInvalid`] error type if /// the content length is too long. pub fn content(mut self, content: Option<&'a str>) -> Result<Self, UpdateWebhookMessageError> { if let Some(content_ref) = content { if !validate_inner::content_limit(content_ref) { return Err(UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ContentInvalid, source: None, }); } } self.fields.content = Some(NullableField(content)); Ok(self) } /// Set the list of embeds of the webhook's message. /// /// Pass `None` to remove all of the embeds. /// /// The maximum number of allowed embeds is defined by /// [`EMBED_COUNT_LIMIT`]. /// /// The total character length of each embed must not exceed 6000 /// characters. Additionally, the internal fields also have character /// limits. Refer to [the discord docs] for more information. /// /// # Examples /// /// Create an embed and update the message with the new embed. The content /// of the original message is unaffected and only the embed(s) are /// modified. /// /// ```no_run /// # use twilight_http::Client; /// use twilight_embed_builder::EmbedBuilder; /// use twilight_model::id::{MessageId, WebhookId}; /// /// # #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { /// # let client = Client::new("token".to_owned()); /// let embed = EmbedBuilder::new() /// .description("Powerful, flexible, and scalable ecosystem of Rust libraries for the Discord API.") /// .title("Twilight") /// .url("https://twilight.rs") /// .build()?; /// /// client.update_webhook_message(WebhookId(1), "token", MessageId(2)) /// .embeds(Some(&[embed]))? /// .exec() /// .await?; /// # Ok(()) } /// ``` /// /// # Errors /// /// Returns an [`UpdateWebhookMessageErrorType::EmbedTooLarge`] error type /// if one of the embeds
{ validate_inner::components(components).map_err(|source| { let (kind, inner_source) = source.into_parts(); match kind { ComponentValidationErrorType::ComponentCount { count } => { UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ComponentCount { count }, source: inner_source, } } other => UpdateWebhookMessageError { kind: UpdateWebhookMessageErrorType::ComponentInvalid { kind: other }, source: inner_source, }, } })?; }
conditional_block
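The content check referenced above enforces the 2000 UTF-16 character limit that the docs mention. Rust strings are UTF-8, so such a check has to count UTF-16 code units rather than bytes or chars. A minimal sketch follows; content_limit here is our own stand-in, since the crate's validator (validate_inner) is internal.

// Stand-in for the crate's private validator: Discord's limit is counted
// in UTF-16 code units, not bytes or Unicode scalar values.
fn content_limit(content: &str) -> bool {
    content.chars().map(char::len_utf16).sum::<usize>() <= 2000
}

fn main() {
    assert!(content_limit("hello"));
    // U+1D11E (musical symbol G clef) is one char but two UTF-16 code units.
    assert_eq!("\u{1D11E}".chars().map(char::len_utf16).sum::<usize>(), 2);
}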
131. Custom Exceptions - Coding.py
# %% ''' Now someone using our library can expect to trap **any** exception we raise by catching the `WebScraperException` type, or anything more specific if they prefer: ''' # %% try: raise PingTimeoutException('Ping to www.... timed out') except HTTPException as ex: print(repr(ex)) # %% ''' or more broadly: ''' # %% try: raise PingTimeoutException('Ping time out') except WebScraperException as ex: print(repr(ex)) # %% ''' So this is very useful when we write modules or packages and want to keep our exception hierarchy neatly contained with some base exception class. This way, users of our class are not forced to use `except Exception` to trap exceptions we might raise from inside our library. ''' # %% ''' Custom exception classes are like any custom class, which means we can add custom attributes, properties and methods to the class. ''' # %% ''' This might be useful to provide additional context and functionality to our exceptions. ''' # %% ''' For example, suppose we are writing a REST API. When we raise a custom exception, we'll also want to return an HTTP exception response to the API caller. We could write code like this in our API calls: ''' # %% ''' Suppose we need to retrieve an account (by ID) from a database. Here I'm just going to mock this: ''' # %% class APIException(Exception): """Base API exception""" # %% class ApplicationException(APIException): """Indicates an application error (not user caused) - 5xx HTTP type errors""" class DBException(ApplicationException): """General database exception""" class DBConnectionError(DBException): """Indicates an error connecting to database""" class ClientException(APIException): """Indicates exception that was caused by user, not an internal error""" class NotFoundError(ClientException): """Indicates resource was not found""" class NotAuthorizedError(ClientException): """User is not authorized to perform requested action on resource""" class Account: def __init__(self, account_id, account_type): self.account_id = account_id self.account_type = account_type # %% ''' So we have this exception hierarchy: ``` APIException - ApplicationException (5xx errors) - DBException - DBConnectionError - ClientException - NotFoundError - NotAuthorizedError ``` ''' # %% def lookup_account_by_id(account_id): # mock of various exceptions that could be raised getting an account from database if not isinstance(account_id, int) or account_id <= 0: raise ClientException(f'Account number {account_id} is invalid.') if account_id < 100: raise DBConnectionError('Permanent failure connecting to database.') elif account_id < 200: raise NotAuthorizedError('User does not have permissions to read this account') elif account_id < 300: raise NotFoundError(f'Account not found.') else: return Account(account_id, 'Savings') # %% ''' Now suppose we have this endpoint for a **GET** on the **Account** resource, and we need to return the appropriate HTTP exception, and message to the user. ''' # %% ''' We're going to make use of the `HTTPStatus` enumeration we have seen before. ''' # %% from http import HTTPStatus # %% def get_account(account_id): try: account = lookup_account_by_id(account_id) except ApplicationException as ex: return HTTPStatus.INTERNAL_SERVER_ERROR, str(ex) except NotFoundError as ex: return HTTPStatus.NOT_FOUND, 'The account {} does not exist.'.format(account_id) except NotAuthorizedError as ex: return HTTPStatus.UNAUTHORIZED, 'You do not have the proper authorization.' 
except ClientException as ex: return HTTPStatus.BAD_REQUEST, str(ex) else: return HTTPStatus.OK, {"id": account.account_id, "type": account.account_type} # %% ''' Now when we call our end point with different account numbers: ''' # %% get_account('abc') # %% get_account(50) # %% get_account(150) # %% get_account(250) # %% get_account(350) # %% ''' As you can see this was quite a lot of exception handling we had to do. And really, the HTTP status and message should remain consistent with any exception type. ''' # %% ''' So instead of dealing with it the way we did, we are going to do the work in the exception classes themselves. ''' # %% ''' First we know we need an `HTTPStatus` for each exception, as well as an error message to present to our user that may need to be different from the internal error message we would want to log for example. ''' # %% class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." # %% ''' Now having the default `internal_err_msg` and `user_err_msg` is great, but what if we ever wanted to override it for some reason? ''' # %% ''' Let's create an `__init__` to take care of that: ''' # %% class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg # %% ''' And we can use this exception quite easily: ''' # %% try: raise APIException() except APIException as ex: print(repr(ex)) print(ex.user_err_msg) # %% ''' Or with a custom (internal) message: ''' # %% try: raise APIException('custom message...', 10, 20) except APIException as ex: print(repr(ex)) # %% ''' And of course, the user message can be customized too: ''' # %% try: raise APIException('custom message...', 10, 20, user_err_msg='custom user message') except APIException as ex: print(repr(ex)) print(ex.user_err_msg) # %% ''' While we're at it, we know that we'll need to return the same JSON format when an exception occurs - so let's write it into our base exception class: ''' # %% import json class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg def to_json(self): err_object = {'status': self.http_status, 'message': self.user_err_msg} return json.dumps(err_object) # %% ''' Now we can easily use this base class, and get consistent results: ''' # %% try: raise APIException() except APIException as ex: print(repr(ex), ex.to_json()) # %% ''' And because we'll want to log exceptions, let's also write a logger directly into our base class: ''' # %% from datetime import datetime class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end."
def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg def to_json(self): err_object = {'status': self.http_status, 'message': self.user_err_msg} return json.dumps(err_object) def log_exception(self): exception = { "type": type(self).__name__, "http_status": self.http_status, "message": self.args[0] if self.args else self.internal_err_msg, "args": self.args[1:] } print(f'EXCEPTION: {datetime.utcnow().isoformat()}: {exception}') # %% try: raise APIException() except APIException as ex: ex.log_exception() print(ex.to_json()) # %% ''' Now let's finish up our hierarchy: ''' # %% class ApplicationException(APIException): """Indicates an application error (not user caused) - 5xx HTTP type errors""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = "Generic server side exception." user_err_msg = "We are sorry. An unexpected error occurred on our end." class DBException(ApplicationException):
"""General database exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = "Database exception." user_err_msg = "We are sorry. An unexpected error occurred on our end."
identifier_body
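Putting the pieces of this record together: because `http_status`, `internal_err_msg` and `user_err_msg` are class attributes, a subclass such as `DBException` only has to override those three lines, while `__init__`, `to_json()` and `log_exception()` are inherited unchanged. A short usage sketch (it assumes the `APIException` and `DBException` definitions from the cells above have been executed):

```
try:
    raise DBException()
except APIException as ex:      # one handler covers the entire hierarchy
    ex.log_exception()          # logs internal_err_msg = 'Database exception.'
    print(ex.to_json())         # JSON carries the 500 status + generic user message
```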
131. Custom Exceptions - Coding.py
HTTPStatus.OK, {"id": account.account_id, "type": account.account_type} # %% ''' Now when we call our end point with different account numbers: ''' # %% get_account('abc') # %% get_account(50) # %% get_account(150) # %% get_account(250) # %% get_account(350) # %% ''' As you can see this was quite a lot of exception handling we had to do. And really, the HTTP status and message should remain consistent with any exception type. ''' # %% ''' So instead of dealing with it the way we did, we are going to do the work in the exception classes themselves. ''' # %% ''' First we know we need an `HTTPStatus` for each exception, as well as an error message to present to our user that may need to be different from the internal error message we would want to log for example. ''' # %% class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." # %% ''' Now having the default `internal_err_msg` and `user_err_msg` is great, but what if we ever wanted to override it for some reason? ''' # %% ''' Let's create an `__init__` to take care of that: ''' # %% class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg # %% ''' And we can use this exception quite easily: ''' # %% try: raise APIException() except APIException as ex: print(repr(ex)) print(ex.user_err_msg) # %% ''' Or with a custom (internal) message: ''' # %% try: raise APIException('custom message...', 10, 20) except APIException as ex: print(repr(ex)) # %% ''' And of course, the user message can be customized too: ''' # %% try: raise APIException('custom message...', 10, 20, user_err_msg='custom user message') except APIException as ex: print(repr(ex)) print(ex.user_err_msg) # %% ''' While we're at it, we know that we'll need to return the same JSON format when an exception occurs - so let's write it into our base exception class: ''' # %% import json class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg def to_json(self): err_object = {'status': self.http_status, 'message': self.user_err_msg} return json.dumps(err_object) # %% ''' Now we can easily use this base class, and get consistent results: ''' # %% try: raise APIException() except APIException as ex: print(repr(ex), ex.to_json()) # %% ''' And because we'll want to log exceptions, let's also write a logger directly into our base class: ''' # %% from datetime import datetime class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end."
def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg def to_json(self): err_object = {'status': self.http_status, 'message': self.user_err_msg} return json.dumps(err_object) def log_exception(self): exception = { "type": type(self).__name__, "http_status": self.http_status, "message": self.args[0] if self.args else self.internal_err_msg, "args": self.args[1:] } print(f'EXCEPTION: {datetime.utcnow().isoformat()}: {exception}') # %% try: raise APIException() except APIException as ex: ex.log_exception() print(ex.to_json()) # %% ''' Now let's finish up our hierarchy: ''' # %% class ApplicationException(APIException): """Indicates an application error (not user caused) - 5xx HTTP type errors""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = "Generic server side exception." user_err_msg = "We are sorry. An unexpected error occurred on our end." class DBException(ApplicationException): """General database exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = "Database exception." user_err_msg = "We are sorry. An unexpected error occurred on our end." class DBConnectionError(DBException): """Indicates an error connecting to database""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = "DB connection error." user_err_msg = "We are sorry. An unexpected error occurred on our end." class ClientException(APIException): """Indicates exception that was caused by user, not an internal error""" http_status = HTTPStatus.BAD_REQUEST internal_err_msg = "Client submitted bad request." user_err_msg = "A bad request was received." class NotFoundError(ClientException): """Indicates resource was not found""" http_status = HTTPStatus.NOT_FOUND internal_err_msg = "Resource was not found." user_err_msg = "Requested resource was not found." class NotAuthorizedError(ClientException): """User is not authorized to perform requested action on resource""" http_status = HTTPStatus.UNAUTHORIZED internal_err_msg = "Client not authorized to perform operation." user_err_msg = "You are not authorized to perform this request." 
# %% ''' Also, since we now have a bit more functionality available to us with our exceptions, let's refine the function that raises these exceptions: ''' # %% def lookup_account_by_id(account_id): # mock of various exceptions that could be raised getting an account from database if not isinstance(account_id, int) or account_id <= 0: raise ClientException(f'Account number {account_id} is invalid.', f'account_id = {account_id}', 'type error - account number not an integer') if account_id < 100: raise DBConnectionError('Permanent failure connecting to database.', 'db=db01') elif account_id < 200: raise NotAuthorizedError('User does not have permissions to read this account', f'account_id={account_id}') elif account_id < 300: raise NotFoundError(f'Account not found.', f'account_id={account_id}') else: return Account(account_id, 'Savings') # %% ''' Now we can re-write our API endpoint and very easily handle those exceptions: ''' # %% def get_account(account_id): try: account = lookup_account_by_id(account_id) except APIException as ex: ex.log_exception() return ex.to_json() else: return HTTPStatus.OK, {"id": account.account_id, "type": account.account_type} # %% get_account('ABC') # %% get_account(50) # %% get_account(150) # %% get_account(250) # %% get_account(350) # %% ''' #### Inheriting from Multiple Exceptions ''' # %% ''' We haven't covered multiple inheritance yet, but Python supports it, and it is very easy to use to solve a specific problem we may encounter with exceptions, so I want to mention it here. ''' # %% ''' Although we may want to raise a custom exception for some specific error, sometimes we may wonder whether a built-in exception would work just as well as a custom one. ''' # %% ''' Here's an example of where this might occur: ''' # %% ''' Suppose we have a custom exception we use to tell a user of our function/library that the value they provided to some function is not the right value - maybe it needs to be an integer greater than or equal to 0. ''' # %% ''' We might have a custom exception just for that - remember what we discussed earlier, we might want our application to raise custom exceptions for everything, based on some application base exception our users could broadly trap. ''' # %% class AppException(Exception): """generic application exception""" class NegativeIntegerError(AppException): """Used to indicate an error when an integer is negative.""" # %% def set_age(age): if age < 0:
raise NegativeIntegerError('age cannot be negative')
conditional_block
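The record above breaks off just as it sets up the custom-versus-built-in dilemma: `NegativeIntegerError` is clearly a kind of `ValueError`, yet making it one would pull it out of our `AppException` hierarchy. A common resolution, sketched here under the assumption that this is where the tutorial is heading, is multiple inheritance, so the exception belongs to both hierarchies at once:

```
class AppException(Exception):
    """generic application exception"""

class NegativeIntegerError(AppException, ValueError):
    """Used to indicate an error when an integer is negative."""

def set_age(age):
    if age < 0:
        raise NegativeIntegerError('age cannot be negative')

# Callers can now trap it either way:
try:
    set_age(-10)
except ValueError as ex:        # for users who expect built-in exceptions
    print('ValueError handler:', repr(ex))

try:
    set_age(-10)
except AppException as ex:      # and for users of our application hierarchy
    print('AppException handler:', repr(ex))
```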
131. Custom Exceptions - Coding.py
class TimeoutException(HTTPException): """Indicates a general timeout exception in http connectivity""" class PingTimeoutException(TimeoutException): """Ping time out""" class LoadTimeoutException(TimeoutException): """Page load time out""" class ParserException(WebScraperException): """General page parsing exception""" # %% ''' As you can see we have this hierarchy: ''' # %% ''' ``` WebScraperException - HTTPException - InvalidUrlException - TimeoutException - PingTimeoutException - LoadTimeoutException - ParserException ``` ''' # %% ''' Now someone using our library can expect to trap **any** exception we raise by catching the `WebScraperException` type, or anything more specific if they prefer: ''' # %% try: raise PingTimeoutException('Ping to www.... timed out') except HTTPException as ex: print(repr(ex)) # %% ''' or more broadly: ''' # %% try: raise PingTimeoutException('Ping time out') except WebScraperException as ex: print(repr(ex)) # %% ''' So this is very useful when we write modules or packages and want to keep our exception hierarchy neatly contained with some base exception class. This way, users of our class are not forced to use `except Exception` to trap exceptions we might raise from inside our library. ''' # %% ''' Custom exception classes are like any custom class, which means we can add custom attributes, properties and methods to the class. ''' # %% ''' This might be useful to provide additional context and functionality to our exceptions. ''' # %% ''' For example, suppose we are writing a REST API. When we raise a custom exception, we'll also want to return an HTTP exception response to the API caller. We could write code like this in our API calls: ''' # %% ''' Suppose we need to retrieve an account (by ID) from a database. Here I'm just going to mock this: ''' # %% class APIException(Exception): """Base API exception""" # %% class ApplicationException(APIException): """Indicates an application error (not user caused) - 5xx HTTP type errors""" class DBException(ApplicationException): """General database exception""" class DBConnectionError(DBException): """Indicates an error connecting to database""" class ClientException(APIException): """Indicates exception that was caused by user, not an internal error""" class NotFoundError(ClientException): """Indicates resource was not found""" class NotAuthorizedError(ClientException): """User is not authorized to perform requested action on resource""" class Account: def __init__(self, account_id, account_type): self.account_id = account_id self.account_type = account_type # %% ''' So we have this exception hierarchy: ``` APIException - ApplicationException (5xx errors) - DBException - DBConnectionError - ClientException - NotFoundError - NotAuthorizedError ``` ''' # %% def lookup_account_by_id(account_id): # mock of various exceptions that could be raised getting an account from database if not isinstance(account_id, int) or account_id <= 0: raise ClientException(f'Account number {account_id} is invalid.') if account_id < 100: raise DBConnectionError('Permanent failure connecting to database.') elif account_id < 200: raise NotAuthorizedError('User does not have permissions to read this account') elif account_id < 300: raise NotFoundError(f'Account not found.') else: return Account(account_id, 'Savings') # %% ''' Now suppose we have this endpoint for a **GET** on the **Account** resource, and we need to return the appropriate HTTP exception, and message to the user. 
''' # %% ''' We're going to make use of the `HTTPStatus` enumeration we have seen before. ''' # %% from http import HTTPStatus # %% def get_account(account_id): try: account = lookup_account_by_id(account_id) except ApplicationException as ex: return HTTPStatus.INTERNAL_SERVER_ERROR, str(ex) except NotFoundError as ex: return HTTPStatus.NOT_FOUND, 'The account {} does not exist.'.format(account_id) except NotAuthorizedError as ex: return HTTPStatus.UNAUTHORIZED, 'You do not have the proper authorization.' except ClientException as ex: return HTTPStatus.BAD_REQUEST, str(ex) else: return HTTPStatus.OK, {"id": account.account_id, "type": account.account_type} # %% ''' Now when we call our end point with different account numbers: ''' # %% get_account('abc') # %% get_account(50) # %% get_account(150) # %% get_account(250) # %% get_account(350) # %% ''' As you can see this was quite a lot of exception handling we had to do. And really, the HTTP status and message should remain consistent with any exception type. ''' # %% ''' So instead of dealing with it the way we did, we are going to do the work in the exception classes themselves. ''' # %% ''' First we know we need an `HTTPStatus` for each exception, as well as an error message to present to our user that may need to be different from the internal error message we would want to log for example. ''' # %% class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." # %% ''' Now having the default `internal_err_msg` and `user_err_msg` is great, but what if we ever wanted to override it for some reason? ''' # %% ''' Let's create an `__init__` to take care of that: ''' # %% class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg # %% ''' And we can use this exception quite easily: ''' # %% try: raise APIException() except APIException as ex: print(repr(ex)) print(ex.user_err_msg) # %% ''' Or with a custom (internal) message: ''' # %% try: raise APIException('custom message...', 10, 20) except APIException as ex: print(repr(ex)) # %% ''' And of course, the user message can be customized too: ''' # %% try: raise APIException('custom message...', 10, 20, user_err_msg='custom user message') except APIException as ex: print(repr(ex)) print(ex.user_err_msg) # %% ''' While we're at it, we know that we'll need to return the same JSON format when an exception occurs - so let's write it into our base exception class: ''' # %% import json class APIException(Exception):
"""Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg def to_json(self): err_object = {'status': self.http_status, 'message': self.user_err_msg} return json.dumps(err_object) # %% ''' Now we can easily use this base class, and get consistent results: ''' # %% try: raise APIException() except APIException as ex: print(repr(ex), ex.to_json()) # %% ''' And because we'll want to log exceptions, let's also write a logger directly into our base class: ''' # %% from datetime import datetime class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg def to_json(self): err_object = {'status': self.http_status, 'message': self.user_err_msg} return json.dumps(err_object) def log_exception(self): exception = { "type": type(self).__name__, "http_status": self.http_status, "message": self.args[0] if self.args else self.internal_err_msg, "args": self.args[1:] } print(f'EXCEPTION: {datetime.utcnow().isoformat()}: {exception}') # %% try: raise APIException() except APIException as ex: ex.log_exception() print(ex.to_json()) # %% '''
random_line_split
131. Custom Exceptions - Coding.py
(HTTPException): """Indicates the url is invalid (dns lookup fails)""" class TimeoutException(HTTPException): """Indicates a general timeout exception in http connectivity""" class PingTimeoutException(TimeoutException): """Ping time out""" class LoadTimeoutException(TimeoutException): """Page load time out""" class ParserException(WebScraperException): """General page parsing exception""" # %% ''' As you can see we have this hierarchy: ''' # %% ''' ``` WebScraperException - HTTPException - InvalidUrlException - TimeoutException - PingTimeoutException - LoadTimeoutException - ParserException ``` ''' # %% ''' Now someone using our library can expect to trap **any** exception we raise by catching the `WebScraperException` type, or anything more specific if they prefer: ''' # %% try: raise PingTimeoutException('Ping to www.... timed out') except HTTPException as ex: print(repr(ex)) # %% ''' or more broadly: ''' # %% try: raise PingTimeoutException('Ping time out') except WebScraperException as ex: print(repr(ex)) # %% ''' So this is very useful when we write modules or packages and want to keep our exception hierarchy neatly contained with some base exception class. This way, users of our class are not forced to use `except Exception` to trap exceptions we might raise from inside our library. ''' # %% ''' Custom exception classes are like any custom class, which means we can add custom attributes, properties and methods to the class. ''' # %% ''' This might be useful to provide additional context and functionality to our exceptions. ''' # %% ''' For example, suppose we are writing a REST API. When we raise a custom exception, we'll also want to return an HTTP exception response to the API caller. We could write code like this in our API calls: ''' # %% ''' Suppose we need to retrieve an account (by ID) from a database. 
Here I'm just going to mock this: ''' # %% class APIException(Exception): """Base API exception""" # %% class ApplicationException(APIException): """Indicates an application error (not user caused) - 5xx HTTP type errors""" class DBException(ApplicationException): """General database exception""" class DBConnectionError(DBException): """Indicates an error connecting to database""" class ClientException(APIException): """Indicates exception that was caused by user, not an internal error""" class NotFoundError(ClientException): """Indicates resource was not found""" class NotAuthorizedError(ClientException): """User is not authorized to perform requested action on resource""" class Account: def __init__(self, account_id, account_type): self.account_id = account_id self.account_type = account_type # %% ''' So we have this exception hierarchy: ``` APIException - ApplicationException (5xx errors) - DBException - DBConnectionError - ClientException - NotFoundError - NotAuthorizedError ``` ''' # %% def lookup_account_by_id(account_id): # mock of various exceptions that could be raised getting an account from database if not isinstance(account_id, int) or account_id <= 0: raise ClientException(f'Account number {account_id} is invalid.') if account_id < 100: raise DBConnectionError('Permanent failure connecting to database.') elif account_id < 200: raise NotAuthorizedError('User does not have permissions to read this account') elif account_id < 300: raise NotFoundError(f'Account not found.') else: return Account(account_id, 'Savings') # %% ''' Now suppose we have this endpoint for a **GET** on the **Account** resource, and we need to return the appropriate HTTP exception, and message to the user. ''' # %% ''' We're going to make use of the `HTTPStatus` enumeration we have seen before. ''' # %% from http import HTTPStatus # %% def get_account(account_id): try: account = lookup_account_by_id(account_id) except ApplicationException as ex: return HTTPStatus.INTERNAL_SERVER_ERROR, str(ex) except NotFoundError as ex: return HTTPStatus.NOT_FOUND, 'The account {} does not exist.'.format(account_id) except NotAuthorizedError as ex: return HTTPStatus.UNAUTHORIZED, 'You do not have the proper authorization.' except ClientException as ex: return HTTPStatus.BAD_REQUEST, str(ex) else: return HTTPStatus.OK, {"id": account.account_id, "type": account.account_type} # %% ''' Now when we call our end point with different account numbers: ''' # %% get_account('abc') # %% get_account(50) # %% get_account(150) # %% get_account(250) # %% get_account(350) # %% ''' As you can see this was quite a lot of exception handling we had to do. And really, the HTTP status and message should remain consistent with any exception type. ''' # %% ''' So instead of dealing with it the way we did, we are going to do the work in the exception classes themselves. ''' # %% ''' First we know we need an `HTTPStatus` for each exception, as well as an error message to present to our user that may need to be different from the internal error message we would want to log for example. ''' # %% class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." # %% ''' Now having the default `internal_err_msg` and `user_err_msg` is great, but what if we ever wanted to override it for some reason?
''' # %% ''' Let's create an `__init__` to take care of that: ''' # %% class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg # %% ''' And we can use this exception quite easily: ''' # %% try: raise APIException() except APIException as ex: print(repr(ex)) print(ex.user_err_msg) # %% ''' Or with a custom (internal) message: ''' # %% try: raise APIException('custom message...', 10, 20) except APIException as ex: print(repr(ex)) # %% ''' And of course, the user message can be customized too: ''' # %% try: raise APIException('custom message...', 10, 20, user_err_msg='custom user message') except APIException as ex: print(repr(ex)) print(ex.user_err_msg) # %% ''' While we're at it, we know that we'll need to return the same JSON format when an exception occurs - so let's write it into our base exception class: ''' # %% import json class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg def to_json(self): err_object = {'status': self.http_status, 'message': self.user_err_msg} return json.dumps(err_object) # %% ''' Now we can easily use this base class, and get consistent results: ''' # %% try: raise APIException() except APIException as ex: print(repr(ex), ex.to_json()) # %% ''' And because we'll want to log exceptions, let's also write a logger directly into our base class: ''' # %% from datetime import datetime class APIException(Exception): """Base API exception""" http_status = HTTPStatus.INTERNAL_SERVER_ERROR internal_err_msg = 'API exception occurred.' user_err_msg = "We are sorry. An unexpected error occurred on our end." def __init__(self, *args, user_err_msg = None): if args: self.internal_err_msg = args[0] super().__init__(*args) else: super().__init__(self.internal_err_msg) if user_err_msg is not None: self.user_err_msg = user_err_msg def to_json(self): err_object = {'status': self.http_status, 'message': self.user_err_msg} return json.dumps(err_object) def log_exception(self): exception = { "type": type(self).__name__, "http_status": self.http_status, "message": self.args[0] if self.args else self.internal_err_msg, "args": self.args[1:] } print(f'EXCEPTION: {datetime.utcnow().isoformat()}: {exception}') # %% try: raise APIException()
InvalidUrlException
identifier_name
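One thing the web-scraper record shows only implicitly is how a library keeps its hierarchy "neatly contained" while lower layers raise built-in errors. A small sketch, not taken from the original text, that wraps a DNS failure into `InvalidUrlException` with `raise ... from`, so the original error survives as `__cause__`:

```
import socket

class WebScraperException(Exception):
    """Base exception for the scraper library."""

class HTTPException(WebScraperException):
    """General HTTP exception."""

class InvalidUrlException(HTTPException):
    """Indicates the url is invalid (dns lookup fails)"""

def resolve_host(host):
    try:
        return socket.gethostbyname(host)
    except socket.gaierror as ex:
        # chain the low-level error onto our library exception
        raise InvalidUrlException(f'DNS lookup failed for {host!r}') from ex

try:
    resolve_host('definitely-not-a-real-host.invalid')
except WebScraperException as ex:
    print(repr(ex), '| caused by:', repr(ex.__cause__))
```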
mode.rs
spawn(move || get_default_modes(verbose)); let currents = currents_handle.join().unwrap()?; let defaults = defaults_handle.join().unwrap()?; let displays = match d { Some(disps) => disps, None => { let mut tmp: Vec<String> = Vec::with_capacity(currents.len()); for mode in &currents { tmp.push(mode.display.clone()); } tmp } }; println!("{:?}",&currents); // these loops are because xrandr doesn't let you update modes or delete them while in use for disp in displays { for default in &defaults { if default.display == disp { if verbose { println!("Switching to default mode to allow updating of the current mode"); } switch_mode(&default.name, &disp, verbose)?; // switch the display to its default mode to enable deletion of in-use mode } } if verbose { println!("Removing mode {} from display {}",&n,&disp); } let mut cmd = process::Command::new("xrandr"); cmd.arg("--delmode").arg(disp.clone()).arg(n.clone()); println!("{:?}",cmd.output().unwrap()); } Ok(()) } */ pub fn add_mode(w: Option<&str>, h: Option<&str>, r: Option<&str>, d: Option<&str>, n: Option<&str>, t: Option<&str>, f: Option<&str>, test: bool, save: bool, verbose: bool) -> Result<(),Error> { let current_modes = get_current_modes(verbose)?; // Use first current display mode for parameters not supplied // and as the fallback if test option is used let width = w.unwrap_or(&current_modes[0].width).to_string(); let height = h.unwrap_or(&current_modes[0].height).to_string(); let rate = r.unwrap_or(&current_modes[0].rate).to_string(); let display = d.unwrap_or(&current_modes[0].display).to_string(); let tmp = format!("{}x{}_{}",width,height,rate); // default test timeout is 10 seconds. let name = match n { Some(nm) => String::from(nm), None => { tmp } }; let i_mode = InputMode { width, height, rate, display: String::from(&display), name: name.clone() }; let mut d_vec: Vec<String> = Vec::with_capacity(1); d_vec.push(display.clone()); // compute CVT timings and delete xrandr mode concurrently; wait for deletion before adding to xrandr //let del_handle = thread::spawn(move || delete_mode_xrandr(&name, Some(d_vec), verbose)); let cvt_handle = thread::spawn(move || gen_cvt_mode(&i_mode, verbose)); let fallback_cvt_handle = thread::spawn(move || gen_cvt_mode(&current_modes[0], verbose)); //let _ = del_handle.join().unwrap(); let cvt = cvt_handle.join().unwrap(); let fallback_cvt = fallback_cvt_handle.join().unwrap(); new_mode(&cvt, &display, verbose)?; if test { test_mode(&cvt, &fallback_cvt, &display, t, verbose)?; } if save { fileio::save_mode(&cvt,f,verbose)? } Ok(()) } pub fn apply_mode(n: &str, d: &str, t: Option<&str>, test: bool, persist: bool, verbose: bool) -> Result<(), io::Error> { println!("Applying mode {} to display {}.",n,d); let mode = fileio::get_mode(n, None, verbose).unwrap(); if test { let default_modes = get_default_modes(verbose)?; let default_mode = gen_cvt_mode(&default_modes[0],verbose); test_mode(&mode, &default_mode, d, t, verbose)?; println!("Keep the mode you just tested? 
y/n"); let mut input = String::new(); while !(input.contains("y") || input.contains("n")) { let _ = io::stdin().read_line(&mut input); if input.contains("n") { return Ok(()); } } } switch_mode(n, d, verbose)?; if persist { fileio::save_mode_persistent(&mode, verbose)?; } Ok(()) } fn test_mode(mode: &CvtMode, default_mode: &CvtMode, display: &str, t: Option<&str>, verbose: bool) -> Result<(), io::Error> { let name = &mode.get_name(); let default_name = &default_mode.get_name(); let timeout: u64 = match t { Some(time) => { let tmp = match time.parse() { Ok(kk) => kk, Err(_) => { eprintln!("Error: timeout must be an integer greater than zero. Using default timeout of 10 seconds."); 10 // just default to 10 secs if invalid timeout provided rather than returning an error } }; if tmp > 0 { tmp } else { 10 // default to 10 secs if none given } } None => 10 }; let delay = time::Duration::from_secs(timeout); if verbose { println!("Testing mode {} on display {} for {} secs.", name, display, timeout); thread::sleep(time::Duration::from_secs(1)); } if verbose { let _ = thread::spawn(move || util::print_countdown(timeout)); // this should maybe print regardless of verbose option, idk } let handle = thread::spawn(move || thread::sleep(delay)); switch_mode(name, display, verbose)?; handle.join().expect("Timer thread had an error."); if verbose { println!("Reverting to mode {} on display {}.", default_name, display); } switch_mode(default_name, display, verbose)?; Ok(()) } fn gen_cvt_mode(input: &InputMode, verbose: bool) -> CvtMode { if verbose { println!("Generating coordinated video timings for mode {}",input.name); } let mut cmd = process::Command::new("cvt"); cmd.arg(&input.width).arg(&input.height).arg(&input.rate); let output = cmd.output().unwrap(); let out = str::from_utf8(&output.stdout).unwrap(); let lines: Vec<_> = out.split('"').collect(); let mut t: Vec<_> = lines[2][2..lines[2].len()-1].split(" ").collect(); let mut i=0; while i < t.len() { if t[i] == "" || t[i] == "\t" { t.remove(i); } else { i += 1; } } let tmp = CvtMode { name: input.name.to_owned(), clock: String::from(t[0]), h_disp: String::from(t[1]), h_sync_start: String::from(t[2]), h_sync_end: String::from(t[3]), h_total: String::from(t[4]), v_disp: String::from(t[5]), v_sync_start: String::from(t[6]), v_sync_end: String::from(t[7]), v_total: String::from(t[8]), flags: format!("{} {}",t[9],t[10]), }; if verbose { println!("{:?}",tmp); } tmp } // Retrieves modes which are currently in use fn get_current_modes(verbose: bool) -> Result<Vec<InputMode>, Error> { if verbose { println!("Retrieving current display configuration."); } let re = Regex::new(r"(\S+)\s+connected.*\n[[a-zA-Z0-9\.]*\n]*\s*([0-9]+)x([0-9]+)\s*([0-9]+\.[0-9]+)\*").unwrap(); util::get_modes_helper(&re, verbose) } // Retrieves the default modes for each display fn get_default_modes(verbose: bool) -> Result<Vec<InputMode>, Error> { if verbose { println!("Retrieving current display configuration."); } let re = Regex::new(r"(\S+)\s+connected.*\n[[a-zA-Z0-9\.]*\n]*\s*([0-9]+)x([0-9]+)\s*([0-9]+\.[0-9]+)[\*]?\+").unwrap(); util::get_modes_helper(&re, verbose) } fn switch_mode(name: &str, display: &str, verbose: bool) -> Result<(), io::Error> { let mut cmd = process::Command::new("xrandr"); cmd.arg("--output").arg(&display).arg("--mode").arg(name); if verbose { println!("Applying mode {} to display {}",name,&display); } cmd.output()?; if verbose { println!("Successfully applied mode {} to display {}",name, &display); } Ok(()) } // Adds the newly created mode to xrandr fn 
new_mode(mode: &CvtMode, display: &str, verbose: bool) -> Result<(), io::Error> { let mut cmd = process::Command::new("xrandr");
cmd.arg("--newmode")
random_line_split
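The `gen_cvt_mode` function in this record recovers the mode name and the eleven timing fields by splitting the stdout of the `cvt` tool on the quote characters and then on whitespace. To make that parsing concrete, here is a rough Python equivalent run against a typical `cvt` output line (Python is used to keep all added examples in this document in one language; the sample output is illustrative):

```
sample = ('# 1920x1080 59.96 Hz (CVT 2.07M9) ...\n'
          'Modeline "1920x1080_60.00"  173.00  1920 2048 2248 2576'
          '  1080 1083 1088 1120 -hsync +vsync\n')

name = sample.split('"')[1]              # text between the quotes: the mode name
fields = sample.split('"')[2].split()    # clock, 8 timing numbers, 2 flags
clock, timings, flags = fields[0], fields[1:9], fields[9:]
print(name, clock, timings, flags)
```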
mode.rs
if default.display == disp { if verbose { println!("Switching to default mode to allow updating of the current mode"); } switch_mode(&default.name, &disp, verbose)?; // switch the display to its default mode to enable deletion of in-use mode } } if verbose { println!("Removing mode {} from display {}",&n,&disp); } let mut cmd = process::Command::new("xrandr"); cmd.arg("--delmode").arg(disp.clone()).arg(n.clone()); println!("{:?}",cmd.output().unwrap()); } Ok(()) } */ pub fn add_mode(w: Option<&str>, h: Option<&str>, r: Option<&str>, d: Option<&str>, n: Option<&str>, t: Option<&str>, f: Option<&str>, test: bool, save: bool, verbose: bool) -> Result<(),Error> { let current_modes = get_current_modes(verbose)?; // Use first current display mode for parameters not supplied // and as the fallback if test option is used let width = w.unwrap_or(&current_modes[0].width).to_string(); let height = h.unwrap_or(&current_modes[0].height).to_string(); let rate = r.unwrap_or(&current_modes[0].rate).to_string(); let display = d.unwrap_or(&current_modes[0].display).to_string(); let tmp = format!("{}x{}_{}",width,height,rate); // default test timeout is 10 seconds. let name = match n { Some(nm) => String::from(nm), None => { tmp } }; let i_mode = InputMode { width, height, rate, display: String::from(&display), name: name.clone() }; let mut d_vec: Vec<String> = Vec::with_capacity(1); d_vec.push(display.clone()); // compute CVT timings and delete xrandr mode concurrently; wait for deletion before adding to xrandr //let del_handle = thread::spawn(move || delete_mode_xrandr(&name, Some(d_vec), verbose)); let cvt_handle = thread::spawn(move || gen_cvt_mode(&i_mode, verbose)); let fallback_cvt_handle = thread::spawn(move || gen_cvt_mode(&current_modes[0], verbose)); //let _ = del_handle.join().unwrap(); let cvt = cvt_handle.join().unwrap(); let fallback_cvt = fallback_cvt_handle.join().unwrap(); new_mode(&cvt, &display, verbose)?; if test { test_mode(&cvt, &fallback_cvt, &display, t, verbose)?; } if save { fileio::save_mode(&cvt,f,verbose)? } Ok(()) } pub fn apply_mode(n: &str, d: &str, t: Option<&str>, test: bool, persist: bool, verbose: bool) -> Result<(), io::Error> { println!("Applying mode {} to display {}.",n,d); let mode = fileio::get_mode(n, None, verbose).unwrap(); if test { let default_modes = get_default_modes(verbose)?; let default_mode = gen_cvt_mode(&default_modes[0],verbose); test_mode(&mode, &default_mode, d, t, verbose)?; println!("Keep the mode you just tested? y/n"); let mut input = String::new(); while !(input.contains("y") || input.contains("n")) { let _ = io::stdin().read_line(&mut input); if input.contains("n") { return Ok(()); } } } switch_mode(n, d, verbose)?; if persist { fileio::save_mode_persistent(&mode, verbose)?; } Ok(()) } fn test_mode(mode: &CvtMode, default_mode: &CvtMode, display: &str, t: Option<&str>, verbose: bool) -> Result<(), io::Error> { let name = &mode.get_name(); let default_name = &default_mode.get_name(); let timeout: u64 = match t { Some(time) => { let tmp = match time.parse() { Ok(kk) => kk, Err(_) => { eprintln!("Error: timeout must be an integer greater than zero. 
Using default timeout of 10 seconds."); 10 // just default to 10 secs if invalid timeout provided rather than returning an error } }; if tmp > 0 { tmp } else { 10 // default to 10 secs if none given } } None => 10 }; let delay = time::Duration::from_secs(timeout); if verbose { println!("Testing mode {} on display {} for {} secs.", name, display, timeout); thread::sleep(time::Duration::from_secs(1)); } if verbose { let _ = thread::spawn(move || util::print_countdown(timeout)); // this should maybe print regardless of verbose option, idk } let handle = thread::spawn(move || thread::sleep(delay)); switch_mode(name, display, verbose)?; handle.join().expect("Timer thread had an error."); if verbose { println!("Reverting to mode {} on display {}.", default_name, display); } switch_mode(default_name, display, verbose)?; Ok(()) } fn gen_cvt_mode(input: &InputMode, verbose: bool) -> CvtMode { if verbose { println!("Generating coordinated video timings for mode {}",input.name); } let mut cmd = process::Command::new("cvt"); cmd.arg(&input.width).arg(&input.height).arg(&input.rate); let output = cmd.output().unwrap(); let out = str::from_utf8(&output.stdout).unwrap(); let lines: Vec<_> = out.split('"').collect(); let mut t: Vec<_> = lines[2][2..lines[2].len()-1].split(" ").collect(); let mut i=0; while i < t.len() { if t[i] == "" || t[i] == "\t" { t.remove(i); } else { i += 1; } } let tmp = CvtMode { name: input.name.to_owned(), clock: String::from(t[0]), h_disp: String::from(t[1]), h_sync_start: String::from(t[2]), h_sync_end: String::from(t[3]), h_total: String::from(t[4]), v_disp: String::from(t[5]), v_sync_start: String::from(t[6]), v_sync_end: String::from(t[7]), v_total: String::from(t[8]), flags: format!("{} {}",t[9],t[10]), }; if verbose { println!("{:?}",tmp); } tmp } // Retrieves modes which are currently in use fn get_current_modes(verbose: bool) -> Result<Vec<InputMode>, Error> { if verbose { println!("Retrieving current display configuration."); } let re = Regex::new(r"(\S+)\s+connected.*\n[[a-zA-Z0-9\.]*\n]*\s*([0-9]+)x([0-9]+)\s*([0-9]+\.[0-9]+)\*").unwrap(); util::get_modes_helper(&re, verbose) } // Retrieves the default modes for each display fn get_default_modes(verbose: bool) -> Result<Vec<InputMode>, Error> { if verbose { println!("Retrieving current display configuration."); } let re = Regex::new(r"(\S+)\s+connected.*\n[[a-zA-Z0-9\.]*\n]*\s*([0-9]+)x([0-9]+)\s*([0-9]+\.[0-9]+)[\*]?\+").unwrap(); util::get_modes_helper(&re, verbose) } fn switch_mode(name: &str, display: &str, verbose: bool) -> Result<(), io::Error> { let mut cmd = process::Command::new("xrandr"); cmd.arg("--output").arg(&display).arg("--mode").arg(name); if verbose { println!("Applying mode {} to display {}",name,&display); } cmd.output()?; if verbose { println!("Successfully applied mode {} to display {}",name, &display); } Ok(()) } // Adds the newly created mode to xrandr fn new_mode(mode: &CvtMode, display: &str, verbose: bool) -> Result<(), io::Error>
{ let mut cmd = process::Command::new("xrandr"); cmd.arg("--newmode") .arg(&mode.name) .arg(&mode.clock) .arg(&mode.h_disp) .arg(&mode.h_sync_start) .arg(&mode.h_sync_end) .arg(&mode.h_total) .arg(&mode.v_disp) .arg(&mode.v_sync_start) .arg(&mode.v_sync_end) .arg(&mode.v_total) .arg(&mode.flags); if verbose { println!("Creating xrandr mode {}",&mode.name); } cmd.output()?; if verbose { println!("Adding mode {} for display {}.",&mode.name,display);
identifier_body
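`new_mode` in this record only issues `xrandr --newmode`, which registers the timings globally; the truncated log line ("Adding mode {} for display {}") suggests the function goes on to attach the mode to an output, which is what `xrandr --addmode` does before `--output ... --mode` can apply it. A hedged Python sketch of those two steps (assumes `xrandr` is installed; the sample values are illustrative):

```
import subprocess

def new_mode(name, modeline_fields, display):
    # register the timings, then attach the mode to the output
    subprocess.run(['xrandr', '--newmode', name, *modeline_fields], check=True)
    subprocess.run(['xrandr', '--addmode', display, name], check=True)

# e.g. new_mode('1920x1080_60.00',
#               ['173.00', '1920', '2048', '2248', '2576',
#                '1080', '1083', '1088', '1120', '-hsync', '+vsync'],
#               'HDMI-1')
```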
mode.rs
String, v_sync_start: String, v_sync_end: String, v_total: String, flags: String, } impl CvtMode { pub fn get_name(&self) -> &str { &self.name } /* pub fn new_empty() -> CvtMode { CvtMode { name: String::new(), clock: String::new(), h_disp: String::new(), h_sync_start: String::new(), h_sync_end: String::new(), h_total: String::new(), v_disp: String::new(), v_sync_start: String::new(), v_sync_end: String::new(), v_total: String::new(), flags: String::new(), } } */ } // Some(d) would be a vec of the displays for which to delete the mode; if d is None, the mode will be removed from all connected displays // xrandr doesn't seem to think the program has access to user-created modes for deletion; // could run as root but would rather not. // TODO: address deletion permission issue /* fn delete_mode_xrandr(n: &str, d: Option<Vec<String>>, verbose: bool) -> Result<(),Error> { for display in d.unwrap() { delete_mode(&n,&display); } let currents_handle = thread::spawn(move || get_current_modes(verbose)); let defaults_handle = thread::spawn(move || get_default_modes(verbose)); let currents = currents_handle.join().unwrap()?; let defaults = defaults_handle.join().unwrap()?; let displays = match d { Some(disps) => disps, None => { let mut tmp: Vec<String> = Vec::with_capacity(currents.len()); for mode in &currents { tmp.push(mode.display.clone()); } tmp } }; println!("{:?}",&currents); // these loops are because xrandr doesn't let you update modes or delete them while in use for disp in displays { for default in &defaults { if default.display == disp { if verbose { println!("Switching to default mode to allow updating of the current mode"); } switch_mode(&default.name, &disp, verbose)?; // switch the display to its default mode to enable deletion of in-use mode } } if verbose { println!("Removing mode {} from display {}",&n,&disp); } let mut cmd = process::Command::new("xrandr"); cmd.arg("--delmode").arg(disp.clone()).arg(n.clone()); println!("{:?}",cmd.output().unwrap()); } Ok(()) } */ pub fn
(w: Option<&str>, h: Option<&str>, r: Option<&str>, d: Option<&str>, n: Option<&str>, t: Option<&str>, f: Option<&str>, test: bool, save: bool, verbose: bool) -> Result<(),Error> { let current_modes = get_current_modes(verbose)?; // Use first current display mode for parameters not supplied // and as the fallback if test option is used let width = w.unwrap_or(&current_modes[0].width).to_string(); let height = h.unwrap_or(&current_modes[0].height).to_string(); let rate = r.unwrap_or(&current_modes[0].rate).to_string(); let display = d.unwrap_or(&current_modes[0].display).to_string(); let tmp = format!("{}x{}_{}",width,height,rate); // default test timeout is 10 seconds. let name = match n { Some(nm) => String::from(nm), None => { tmp } }; let i_mode = InputMode { width, height, rate, display: String::from(&display), name: name.clone() }; let mut d_vec: Vec<String> = Vec::with_capacity(1); d_vec.push(display.clone()); // compute CVT timings and delete xrandr mode concurrently; wait for deletion before adding to xrandr //let del_handle = thread::spawn(move || delete_mode_xrandr(&name, Some(d_vec), verbose)); let cvt_handle = thread::spawn(move || gen_cvt_mode(&i_mode, verbose)); let fallback_cvt_handle = thread::spawn(move || gen_cvt_mode(&current_modes[0], verbose)); //let _ = del_handle.join().unwrap(); let cvt = cvt_handle.join().unwrap(); let fallback_cvt = fallback_cvt_handle.join().unwrap(); new_mode(&cvt, &display, verbose)?; if test { test_mode(&cvt, &fallback_cvt, &display, t, verbose)?; } if save { fileio::save_mode(&cvt,f,verbose)? } Ok(()) } pub fn apply_mode(n: &str, d: &str, t: Option<&str>, test: bool, persist: bool, verbose: bool) -> Result<(), io::Error> { println!("Applying mode {} to display {}.",n,d); let mode = fileio::get_mode(n, None, verbose).unwrap(); if test { let default_modes = get_default_modes(verbose)?; let default_mode = gen_cvt_mode(&default_modes[0],verbose); test_mode(&mode, &default_mode, d, t, verbose)?; println!("Keep the mode you just tested? y/n"); let mut input = String::new(); while !(input.contains("y") || input.contains("n")) { let _ = io::stdin().read_line(&mut input); if input.contains("n") { return Ok(()); } } } switch_mode(n, d, verbose)?; if persist { fileio::save_mode_persistent(&mode, verbose)?; } Ok(()) } fn test_mode(mode: &CvtMode, default_mode: &CvtMode, display: &str, t: Option<&str>, verbose: bool) -> Result<(), io::Error> { let name = &mode.get_name(); let default_name = &default_mode.get_name(); let timeout: u64 = match t { Some(time) => { let tmp = match time.parse() { Ok(kk) => kk, Err(_) => { eprintln!("Error: timeout must be an integer greater than zero. 
Using default timeout of 10 seconds."); 10 // just default to 10 secs if invalid timeout provided rather than returning an error } }; if tmp > 0 { tmp } else { 10 // default to 10 secs if none given } } None => 10 }; let delay = time::Duration::from_secs(timeout); if verbose { println!("Testing mode {} on display {} for {} secs.", name, display, timeout); thread::sleep(time::Duration::from_secs(1)); } if verbose { let _ = thread::spawn(move || util::print_countdown(timeout)); // this should maybe print regardless of verbose option, idk } let handle = thread::spawn(move || thread::sleep(delay)); switch_mode(name, display, verbose)?; handle.join().expect("Timer thread had an error."); if verbose { println!("Reverting to mode {} on display {}.", default_name, display); } switch_mode(default_name, display, verbose)?; Ok(()) } fn gen_cvt_mode(input: &InputMode, verbose: bool) -> CvtMode { if verbose { println!("Generating coordinated video timings for mode {}",input.name); } let mut cmd = process::Command::new("cvt"); cmd.arg(&input.width).arg(&input.height).arg(&input.rate); let output = cmd.output().unwrap(); let out = str::from_utf8(&output.stdout).unwrap(); let lines: Vec<_> = out.split('"').collect(); let mut t: Vec<_> = lines[2][2..lines[2].len()-1].split(" ").collect(); let mut i=0; while i < t.len() { if t[i] == "" || t[i] == "\t" { t.remove(i); } else { i += 1; } } let tmp = CvtMode { name: input.name.to_owned(), clock: String::from(t[0]), h_disp: String::from(t[1]), h_sync_start: String::from(t[2]), h_sync_end: String::from(t[3]), h_total: String::from(t[4]), v_disp: String::from(t[5]), v_sync_start: String::from(t[6]), v_sync_end: String::from(t[7]), v_total: String::from(t[8]), flags: format!("{} {}",t[9],t[10]), }; if verbose { println!("{:?}",tmp); } tmp } // Retrieves modes which are currently in use fn get_current_modes(verbose: bool) -> Result<Vec<InputMode>, Error> { if verbose { println!("Retrieving current display configuration."); } let re = Regex::new(r"(\S+)\s+connected.*\n[[a-zA-Z0-9\.]*\n]*\s*([0-9]+)x([0-9]+)\s*([0-9]+\.[0
add_mode
identifier_name
model.py
=Event) # Store underlying data model self._data_types = ('image', 'coords') self._data_type = None # Save the line style params self._width = width self._color = color self._colors = get_color_names() # averaging and length attributes self._averaging = averaging self._length = length # update flags self._need_display_update = False self._need_visual_update = False # assign vector data and establish default behavior self._raw_data = None self._original_data = vectors self._current_data = vectors self._vectors = self._convert_to_vector_type(vectors) vertices, triangles = self._generate_meshes(self._vectors, self.width) self._mesh_vertices = vertices self._mesh_triangles = triangles if name is None:
else: self.name = name self._qt_properties = QtVectorsLayer(self) # ====================== Property getter and setters ===================== @property def _original_data(self) -> np.ndarray: return self._raw_data @_original_data.setter def _original_data(self, data: np.ndarray): """Must preserve data used at construction. Specifically for default averaging/length adjustments. averaging/length adjustments recalculate the underlying data Parameters ---------- data : np.ndarray """ if self._raw_data is None: self._raw_data = data @property def vectors(self) -> np.ndarray: return self._vectors @vectors.setter def vectors(self, vectors: np.ndarray): """Can accept two data types: 1) (N, 4) array with elements (y, x, v, u), where x-y are position (center) and u-v are x-y projections of the vector 2) (N, M, 2) array with elements (v, u) where u-v are x-y projections of the vector vector position is one per-pixel in the NxM array Parameters ---------- vectors : np.ndarray """ self._original_data = vectors self._current_data = vectors self._vectors = self._convert_to_vector_type(self._current_data) vertices, triangles = self._generate_meshes(self._vectors, self.width) self._mesh_vertices = vertices self._mesh_triangles = triangles self.viewer._child_layer_changed = True self.refresh() def _convert_to_vector_type(self, vectors): """Check on input data for proper shape and dtype Parameters ---------- vectors : np.ndarray """ if vectors.shape[-1] == 4 and vectors.ndim == 2: coord_list = self._convert_coords_to_coordinates(vectors) self._data_type = self._data_types[1] elif vectors.shape[-1] == 2 and vectors.ndim == 3: coord_list = self._convert_image_to_coordinates(vectors) self._data_type = self._data_types[0] else: raise TypeError( "Vector data of shape %s is not supported" % str(vectors.shape)) return coord_list def _convert_image_to_coordinates(self, vect) -> np.ndarray: """To convert an image-like array with elements (y-proj, x-proj) into a position list of coordinates Every pixel position (n, m) results in two output coordinates of (N,2) Parameters ---------- vect : np.ndarray of shape (N, M, 2) """ xdim = vect.shape[0] ydim = vect.shape[1] # stride is used during averaging and length adjustment stride_x, stride_y = self._averaging, self._averaging # create empty vector of necessary shape # every "pixel" has 2 coordinates pos = np.empty((2 * xdim * ydim, 2), dtype=np.float32) # create coordinate spacing for x-y # double the num of elements by doubling x sampling xspace = np.linspace(0, stride_x*xdim, 2 * xdim, endpoint=False) yspace = np.linspace(0, stride_y*ydim, ydim, endpoint=False) xv, yv = np.meshgrid(xspace, yspace) # assign coordinates (pos) to all pixels pos[:, 0] = xv.flatten() pos[:, 1] = yv.flatten() # pixel midpoints are the first x-values of positions midpt = np.zeros((xdim * ydim, 2), dtype=np.float32) midpt[:, 0] = pos[0::2, 0]+(stride_x-1)/2 midpt[:, 1] = pos[0::2, 1]+(stride_y-1)/2 # rotate coordinates about midpoint to represent angle and length pos[0::2, 0] = midpt[:, 0] - (stride_x / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 0] pos[0::2, 1] = midpt[:, 1] - (stride_y / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 1] pos[1::2, 0] = midpt[:, 0] + (stride_x / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 0] pos[1::2, 1] = midpt[:, 1] + (stride_y / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 1] return pos def _convert_coords_to_coordinates(self, vect) -> np.ndarray: """To convert a list of coordinates of shape (y-center, 
x-center, y-proj, x-proj) into a list of coordinates Input coordinate of (N,4) becomes two output coordinates of (N,2) Parameters ---------- vect : np.ndarray of shape (N, 4) """ # create empty vector of necessary shape # one coordinate for each endpoint of the vector pos = np.empty((2 * len(vect), 2), dtype=np.float32) # create pairs of points pos[0::2, 0] = vect[:, 0] pos[1::2, 0] = vect[:, 0] pos[0::2, 1] = vect[:, 1] pos[1::2, 1] = vect[:, 1] # adjust second of each pair according to x-y projection pos[1::2, 0] += vect[:, 2] pos[1::2, 1] += vect[:, 3] return pos @property def averaging(self) -> int: return self._averaging @averaging.setter def averaging(self, value: int): """Calculates an average vector over a kernel Parameters ---------- value : int that defines (int, int) kernel """ self._averaging = value self.events.averaging() self._update_avg() self.refresh() def _update_avg(self): """Method for calculating average Implemented ONLY for image-like vector data """ if self._data_type == 'coords': # default averaging is supported only for 'matrix' dataTypes return elif self._data_type == 'image': x, y = self._averaging, self._averaging if (x,y) == (1, 1): self.vectors = self._original_data # calling original data return tempdat = self._original_data range_x = tempdat.shape[0] range_y = tempdat.shape[1] x_offset = int((x - 1) / 2) y_offset = int((y - 1) / 2) kernel = np.ones(shape=(x, y)) / (x*y) output_mat = np.zeros_like(tempdat) output_mat_x = signal.convolve2d(tempdat[:, :, 0], kernel, mode='same', boundary='wrap') output_mat_y = signal.convolve2d(tempdat[:, :, 1], kernel, mode='same', boundary='wrap') output_mat[:, :, 0] = output_mat_x output_mat[:, :, 1] = output_mat_y self.vectors = (output_mat[x_offset:range_x-x_offset:x, y_offset:range_y-y_offset:y]) @property def width(self) -> Union[int, float]: return self._width @width.setter def width(self, width: Union[int, float]): """width of the line in pixels widths greater than 1px only guaranteed to work with "agg" method """ self._width = width vertices, triangles = self._generate_meshes(self.vectors, self._width) self._mesh_vertices = vertices
self.name = 'vectors'
conditional_block
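The `(N, 4) -> (2N, 2)` conversion performed by `_convert_coords_to_coordinates` in this record is easiest to see on a tiny array: each `(y, x, v, u)` row becomes the two segment endpoints `(y, x)` and `(y + v, x + u)`. A condensed NumPy sketch of the same arithmetic:

```
import numpy as np

vect = np.array([[0.0, 0.0, 1.0, 2.0],    # position (0, 0), projection (1, 2)
                 [5.0, 5.0, -1.0, 0.0]])  # position (5, 5), projection (-1, 0)

pos = np.empty((2 * len(vect), 2), dtype=np.float32)
pos[0::2] = vect[:, :2]                   # first endpoint: the position itself
pos[1::2] = vect[:, :2] + vect[:, 2:]     # second endpoint: position + projection
print(pos)                                # [[0. 0.] [1. 2.] [5. 5.] [4. 5.]]
```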
model.py
=Event) # Store underlying data model self._data_types = ('image', 'coords') self._data_type = None # Save the line style params self._width = width self._color = color self._colors = get_color_names() # averaging and length attributes self._averaging = averaging self._length = length # update flags self._need_display_update = False self._need_visual_update = False # assign vector data and establish default behavior self._raw_data = None self._original_data = vectors self._current_data = vectors self._vectors = self._convert_to_vector_type(vectors) vertices, triangles = self._generate_meshes(self._vectors, self.width) self._mesh_vertices = vertices self._mesh_triangles = triangles if name is None: self.name = 'vectors' else: self.name = name self._qt_properties = QtVectorsLayer(self) # ====================== Property getter and setters ===================== @property def _original_data(self) -> np.ndarray: return self._raw_data @_original_data.setter def _original_data(self, data: np.ndarray): """Must preserve data used at construction. Specifically for default averaging/length adjustments. averaging/length adjustments recalculate the underlying data Parameters ---------- data : np.ndarray """ if self._raw_data is None: self._raw_data = data @property def vectors(self) -> np.ndarray: return self._vectors @vectors.setter def vectors(self, vectors: np.ndarray): """Can accept two data types: 1) (N, 4) array with elements (y, x, v, u), where x-y are position (center) and u-v are x-y projections of the vector 2) (N, M, 2) array with elements (v, u) where u-v are x-y projections of the vector vector position is one per-pixel in the NxM array Parameters ---------- vectors : np.ndarray """ self._original_data = vectors self._current_data = vectors self._vectors = self._convert_to_vector_type(self._current_data) vertices, triangles = self._generate_meshes(self._vectors, self.width) self._mesh_vertices = vertices self._mesh_triangles = triangles self.viewer._child_layer_changed = True self.refresh() def _convert_to_vector_type(self, vectors): """Check on input data for proper shape and dtype Parameters ---------- vectors : np.ndarray """ if vectors.shape[-1] == 4 and vectors.ndim == 2: coord_list = self._convert_coords_to_coordinates(vectors) self._data_type = self._data_types[1] elif vectors.shape[-1] == 2 and vectors.ndim == 3: coord_list = self._convert_image_to_coordinates(vectors) self._data_type = self._data_types[0] else: raise TypeError( "Vector data of shape %s is not supported" % str(vectors.shape)) return coord_list def _convert_image_to_coordinates(self, vect) -> np.ndarray: """To convert an image-like array with elements (y-proj, x-proj) into a position list of coordinates Every pixel position (n, m) results in two output coordinates of (N,2) Parameters ---------- vect : np.ndarray of shape (N, M, 2) """ xdim = vect.shape[0] ydim = vect.shape[1] # stride is used during averaging and length adjustment stride_x, stride_y = self._averaging, self._averaging # create empty vector of necessary shape # every "pixel" has 2 coordinates pos = np.empty((2 * xdim * ydim, 2), dtype=np.float32) # create coordinate spacing for x-y # double the num of elements by doubling x sampling xspace = np.linspace(0, stride_x*xdim, 2 * xdim, endpoint=False) yspace = np.linspace(0, stride_y*ydim, ydim, endpoint=False) xv, yv = np.meshgrid(xspace, yspace) # assign coordinates (pos) to all pixels pos[:, 0] = xv.flatten() pos[:, 1] = yv.flatten() # pixel midpoints are the first x-values of positions midpt = 
np.zeros((xdim * ydim, 2), dtype=np.float32) midpt[:, 0] = pos[0::2, 0]+(stride_x-1)/2 midpt[:, 1] = pos[0::2, 1]+(stride_y-1)/2 # rotate coordinates about midpoint to represent angle and length pos[0::2, 0] = midpt[:, 0] - (stride_x / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 0] pos[0::2, 1] = midpt[:, 1] - (stride_y / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 1] pos[1::2, 0] = midpt[:, 0] + (stride_x / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 0] pos[1::2, 1] = midpt[:, 1] + (stride_y / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 1] return pos def _convert_coords_to_coordinates(self, vect) -> np.ndarray: """To convert a list of coordinates of shape (y-center, x-center, y-proj, x-proj) into a list of coordinates Input coordinate of (N,4) becomes two output coordinates of (N,2) Parameters ---------- vect : np.ndarray of shape (N, 4) """ # create empty vector of necessary shape # one coordinate for each endpoint of the vector pos = np.empty((2 * len(vect), 2), dtype=np.float32) # create pairs of points pos[0::2, 0] = vect[:, 0] pos[1::2, 0] = vect[:, 0] pos[0::2, 1] = vect[:, 1] pos[1::2, 1] = vect[:, 1] # adjust second of each pair according to x-y projection pos[1::2, 0] += vect[:, 2] pos[1::2, 1] += vect[:, 3] return pos @property def averaging(self) -> int: return self._averaging @averaging.setter def averaging(self, value: int): """Calculates an average vector over a kernel
self.events.averaging() self._update_avg() self.refresh() def _update_avg(self): """Method for calculating average Implemented ONLY for image-like vector data """ if self._data_type == 'coords': # default averaging is supported only for 'matrix' dataTypes return elif self._data_type == 'image': x, y = self._averaging, self._averaging if (x,y) == (1, 1): self.vectors = self._original_data # calling original data return tempdat = self._original_data range_x = tempdat.shape[0] range_y = tempdat.shape[1] x_offset = int((x - 1) / 2) y_offset = int((y - 1) / 2) kernel = np.ones(shape=(x, y)) / (x*y) output_mat = np.zeros_like(tempdat) output_mat_x = signal.convolve2d(tempdat[:, :, 0], kernel, mode='same', boundary='wrap') output_mat_y = signal.convolve2d(tempdat[:, :, 1], kernel, mode='same', boundary='wrap') output_mat[:, :, 0] = output_mat_x output_mat[:, :, 1] = output_mat_y self.vectors = (output_mat[x_offset:range_x-x_offset:x, y_offset:range_y-y_offset:y]) @property def width(self) -> Union[int, float]: return self._width @width.setter def width(self, width: Union[int, float]): """width of the line in pixels widths greater than 1px only guaranteed to work with "agg" method """ self._width = width vertices, triangles = self._generate_meshes(self.vectors, self._width) self._mesh_vertices = vertices
Parameters ---------- value : int that defines (int, int) kernel """ self._averaging = value
random_line_split
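The (N, 4) branch of `_convert_to_vector_type` reduces to a small array transform: duplicate each origin, then offset every second row by the projection. A minimal standalone sketch of that conversion, assuming only NumPy (the function name is illustrative):

```python
import numpy as np

def coords_to_endpoints(vect: np.ndarray) -> np.ndarray:
    """Expand (N, 4) rows of (y, x, y-proj, x-proj) into (2N, 2)
    segment endpoints, mirroring _convert_coords_to_coordinates."""
    pos = np.empty((2 * len(vect), 2), dtype=np.float32)
    pos[0::2] = vect[:, :2]   # first endpoint: the vector origin
    pos[1::2] = vect[:, :2]   # second endpoint starts at the origin...
    pos[1::2] += vect[:, 2:]  # ...and is shifted by the projection
    return pos

print(coords_to_endpoints(np.array([[0.0, 0.0, 3.0, 4.0]])))
# [[0. 0.]
#  [3. 4.]]
```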
model.py
=Event) # Store underlying data model self._data_types = ('image', 'coords') self._data_type = None # Save the line style params self._width = width self._color = color self._colors = get_color_names() # averaging and length attributes self._averaging = averaging self._length = length # update flags self._need_display_update = False self._need_visual_update = False # assign vector data and establish default behavior self._raw_data = None self._original_data = vectors self._current_data = vectors self._vectors = self._convert_to_vector_type(vectors) vertices, triangles = self._generate_meshes(self._vectors, self.width) self._mesh_vertices = vertices self._mesh_triangles = triangles if name is None: self.name = 'vectors' else: self.name = name self._qt_properties = QtVectorsLayer(self) # ====================== Property getter and setters ===================== @property def _original_data(self) -> np.ndarray: return self._raw_data @_original_data.setter def _original_data(self, data: np.ndarray): """Must preserve data used at construction. Specifically for default averaging/length adjustments. averaging/length adjustments recalculate the underlying data Parameters ---------- data : np.ndarray """ if self._raw_data is None: self._raw_data = data @property def vectors(self) -> np.ndarray: return self._vectors @vectors.setter def vectors(self, vectors: np.ndarray): """Can accept two data types: 1) (N, 4) array with elements (y, x, v, u), where x-y are position (center) and u-v are x-y projections of the vector 2) (N, M, 2) array with elements (v, u) where u-v are x-y projections of the vector vector position is one per-pixel in the NxM array Parameters ---------- vectors : np.ndarray """ self._original_data = vectors self._current_data = vectors self._vectors = self._convert_to_vector_type(self._current_data) vertices, triangles = self._generate_meshes(self._vectors, self.width) self._mesh_vertices = vertices self._mesh_triangles = triangles self.viewer._child_layer_changed = True self.refresh() def _convert_to_vector_type(self, vectors): """Check on input data for proper shape and dtype Parameters ---------- vectors : np.ndarray """ if vectors.shape[-1] == 4 and vectors.ndim == 2: coord_list = self._convert_coords_to_coordinates(vectors) self._data_type = self._data_types[1] elif vectors.shape[-1] == 2 and vectors.ndim == 3: coord_list = self._convert_image_to_coordinates(vectors) self._data_type = self._data_types[0] else: raise TypeError( "Vector data of shape %s is not supported" % str(vectors.shape)) return coord_list def _convert_image_to_coordinates(self, vect) -> np.ndarray: """To convert an image-like array with elements (y-proj, x-proj) into a position list of coordinates Every pixel position (n, m) results in two output coordinates of (N,2) Parameters ---------- vect : np.ndarray of shape (N, M, 2) """ xdim = vect.shape[0] ydim = vect.shape[1] # stride is used during averaging and length adjustment stride_x, stride_y = self._averaging, self._averaging # create empty vector of necessary shape # every "pixel" has 2 coordinates pos = np.empty((2 * xdim * ydim, 2), dtype=np.float32) # create coordinate spacing for x-y # double the num of elements by doubling x sampling xspace = np.linspace(0, stride_x*xdim, 2 * xdim, endpoint=False) yspace = np.linspace(0, stride_y*ydim, ydim, endpoint=False) xv, yv = np.meshgrid(xspace, yspace) # assign coordinates (pos) to all pixels pos[:, 0] = xv.flatten() pos[:, 1] = yv.flatten() # pixel midpoints are the first x-values of positions midpt = 
np.zeros((xdim * ydim, 2), dtype=np.float32) midpt[:, 0] = pos[0::2, 0]+(stride_x-1)/2 midpt[:, 1] = pos[0::2, 1]+(stride_y-1)/2 # rotate coordinates about midpoint to represent angle and length pos[0::2, 0] = midpt[:, 0] - (stride_x / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 0] pos[0::2, 1] = midpt[:, 1] - (stride_y / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 1] pos[1::2, 0] = midpt[:, 0] + (stride_x / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 0] pos[1::2, 1] = midpt[:, 1] + (stride_y / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 1] return pos def _convert_coords_to_coordinates(self, vect) -> np.ndarray: """To convert a list of coordinates of shape (y-center, x-center, y-proj, x-proj) into a list of coordinates Input coordinate of (N,4) becomes two output coordinates of (N,2) Parameters ---------- vect : np.ndarray of shape (N, 4) """ # create empty vector of necessary shape # one coordinate for each endpoint of the vector pos = np.empty((2 * len(vect), 2), dtype=np.float32) # create pairs of points pos[0::2, 0] = vect[:, 0] pos[1::2, 0] = vect[:, 0] pos[0::2, 1] = vect[:, 1] pos[1::2, 1] = vect[:, 1] # adjust second of each pair according to x-y projection pos[1::2, 0] += vect[:, 2] pos[1::2, 1] += vect[:, 3] return pos @property def averaging(self) -> int: return self._averaging @averaging.setter def averaging(self, value: int): """Calculates an average vector over a kernel Parameters ---------- value : int that defines (int, int) kernel """ self._averaging = value self.events.averaging() self._update_avg() self.refresh() def
(self): """Method for calculating average Implemented ONLY for image-like vector data """ if self._data_type == 'coords': # default averaging is supported only for 'matrix' dataTypes return elif self._data_type == 'image': x, y = self._averaging, self._averaging if (x,y) == (1, 1): self.vectors = self._original_data # calling original data return tempdat = self._original_data range_x = tempdat.shape[0] range_y = tempdat.shape[1] x_offset = int((x - 1) / 2) y_offset = int((y - 1) / 2) kernel = np.ones(shape=(x, y)) / (x*y) output_mat = np.zeros_like(tempdat) output_mat_x = signal.convolve2d(tempdat[:, :, 0], kernel, mode='same', boundary='wrap') output_mat_y = signal.convolve2d(tempdat[:, :, 1], kernel, mode='same', boundary='wrap') output_mat[:, :, 0] = output_mat_x output_mat[:, :, 1] = output_mat_y self.vectors = (output_mat[x_offset:range_x-x_offset:x, y_offset:range_y-y_offset:y]) @property def width(self) -> Union[int, float]: return self._width @width.setter def width(self, width: Union[int, float]): """width of the line in pixels widths greater than 1px only guaranteed to work with "agg" method """ self._width = width vertices, triangles = self._generate_meshes(self.vectors, self._width) self._mesh_vertices = vertices
_update_avg
identifier_name
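The identifier recovered for this row is `_update_avg`, which box-filters the per-pixel projections and then keeps one vector per kernel window. A minimal sketch of the same smoothing, assuming SciPy is available; the function and argument names are illustrative, and the wrap boundary matches the `convolve2d` calls in the source:

```python
import numpy as np
from scipy import signal

def average_vector_field(field: np.ndarray, k: int) -> np.ndarray:
    """Box-average an (N, M, 2) vector field over a k x k kernel,
    then subsample one vector per kernel window."""
    if k == 1:
        return field          # nothing to average
    kernel = np.ones((k, k)) / (k * k)
    out = np.zeros_like(field)
    for c in (0, 1):          # smooth y- and x-projections independently
        out[:, :, c] = signal.convolve2d(field[:, :, c], kernel,
                                         mode='same', boundary='wrap')
    off = (k - 1) // 2        # centre of the kernel window
    return out[off:field.shape[0] - off:k, off:field.shape[1] - off:k]

print(average_vector_field(np.random.rand(8, 8, 2), 2).shape)  # (4, 4, 2)
```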
model.py
averaging=Event) # Store underlying data model self._data_types = ('image', 'coords') self._data_type = None # Save the line style params self._width = width self._color = color self._colors = get_color_names() # averaging and length attributes self._averaging = averaging self._length = length # update flags self._need_display_update = False self._need_visual_update = False # assign vector data and establish default behavior self._raw_data = None self._original_data = vectors self._current_data = vectors self._vectors = self._convert_to_vector_type(vectors) vertices, triangles = self._generate_meshes(self._vectors, self.width) self._mesh_vertices = vertices self._mesh_triangles = triangles if name is None: self.name = 'vectors' else: self.name = name self._qt_properties = QtVectorsLayer(self) # ====================== Property getter and setters ===================== @property def _original_data(self) -> np.ndarray: return self._raw_data @_original_data.setter def _original_data(self, data: np.ndarray): """Must preserve data used at construction. Specifically for default averaging/length adjustments. averaging/length adjustments recalculate the underlying data Parameters ---------- data : np.ndarray """ if self._raw_data is None: self._raw_data = data @property def vectors(self) -> np.ndarray: return self._vectors @vectors.setter def vectors(self, vectors: np.ndarray): """Can accept two data types: 1) (N, 4) array with elements (y, x, v, u), where x-y are position (center) and u-v are x-y projections of the vector 2) (N, M, 2) array with elements (v, u) where u-v are x-y projections of the vector vector position is one per-pixel in the NxM array Parameters ---------- vectors : np.ndarray """ self._original_data = vectors self._current_data = vectors self._vectors = self._convert_to_vector_type(self._current_data) vertices, triangles = self._generate_meshes(self._vectors, self.width) self._mesh_vertices = vertices self._mesh_triangles = triangles self.viewer._child_layer_changed = True self.refresh() def _convert_to_vector_type(self, vectors): """Check on input data for proper shape and dtype Parameters ---------- vectors : np.ndarray """ if vectors.shape[-1] == 4 and vectors.ndim == 2: coord_list = self._convert_coords_to_coordinates(vectors) self._data_type = self._data_types[1] elif vectors.shape[-1] == 2 and vectors.ndim == 3: coord_list = self._convert_image_to_coordinates(vectors) self._data_type = self._data_types[0] else: raise TypeError( "Vector data of shape %s is not supported" % str(vectors.shape)) return coord_list def _convert_image_to_coordinates(self, vect) -> np.ndarray:
xspace = np.linspace(0, stride_x*xdim, 2 * xdim, endpoint=False) yspace = np.linspace(0, stride_y*ydim, ydim, endpoint=False) xv, yv = np.meshgrid(xspace, yspace) # assign coordinates (pos) to all pixels pos[:, 0] = xv.flatten() pos[:, 1] = yv.flatten() # pixel midpoints are the first x-values of positions midpt = np.zeros((xdim * ydim, 2), dtype=np.float32) midpt[:, 0] = pos[0::2, 0]+(stride_x-1)/2 midpt[:, 1] = pos[0::2, 1]+(stride_y-1)/2 # rotate coordinates about midpoint to represent angle and length pos[0::2, 0] = midpt[:, 0] - (stride_x / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 0] pos[0::2, 1] = midpt[:, 1] - (stride_y / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 1] pos[1::2, 0] = midpt[:, 0] + (stride_x / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 0] pos[1::2, 1] = midpt[:, 1] + (stride_y / 2) * (self._length/2) * \ vect.reshape((xdim*ydim, 2))[:, 1] return pos def _convert_coords_to_coordinates(self, vect) -> np.ndarray: """To convert a list of coordinates of shape (y-center, x-center, y-proj, x-proj) into a list of coordinates Input coordinate of (N,4) becomes two output coordinates of (N,2) Parameters ---------- vect : np.ndarray of shape (N, 4) """ # create empty vector of necessary shape # one coordinate for each endpoint of the vector pos = np.empty((2 * len(vect), 2), dtype=np.float32) # create pairs of points pos[0::2, 0] = vect[:, 0] pos[1::2, 0] = vect[:, 0] pos[0::2, 1] = vect[:, 1] pos[1::2, 1] = vect[:, 1] # adjust second of each pair according to x-y projection pos[1::2, 0] += vect[:, 2] pos[1::2, 1] += vect[:, 3] return pos @property def averaging(self) -> int: return self._averaging @averaging.setter def averaging(self, value: int): """Calculates an average vector over a kernel Parameters ---------- value : int that defines (int, int) kernel """ self._averaging = value self.events.averaging() self._update_avg() self.refresh() def _update_avg(self): """Method for calculating average Implemented ONLY for image-like vector data """ if self._data_type == 'coords': # default averaging is supported only for 'matrix' dataTypes return elif self._data_type == 'image': x, y = self._averaging, self._averaging if (x,y) == (1, 1): self.vectors = self._original_data # calling original data return tempdat = self._original_data range_x = tempdat.shape[0] range_y = tempdat.shape[1] x_offset = int((x - 1) / 2) y_offset = int((y - 1) / 2) kernel = np.ones(shape=(x, y)) / (x*y) output_mat = np.zeros_like(tempdat) output_mat_x = signal.convolve2d(tempdat[:, :, 0], kernel, mode='same', boundary='wrap') output_mat_y = signal.convolve2d(tempdat[:, :, 1], kernel, mode='same', boundary='wrap') output_mat[:, :, 0] = output_mat_x output_mat[:, :, 1] = output_mat_y self.vectors = (output_mat[x_offset:range_x-x_offset:x, y_offset:range_y-y_offset:y]) @property def width(self) -> Union[int, float]: return self._width @width.setter def width(self, width: Union[int, float]): """width of the line in pixels widths greater than 1px only guaranteed to work with "agg" method """ self._width = width vertices, triangles = self._generate_meshes(self.vectors, self._width) self._mesh_vertices = vertices
"""To convert an image-like array with elements (y-proj, x-proj) into a position list of coordinates Every pixel position (n, m) results in two output coordinates of (N,2) Parameters ---------- vect : np.ndarray of shape (N, M, 2) """ xdim = vect.shape[0] ydim = vect.shape[1] # stride is used during averaging and length adjustment stride_x, stride_y = self._averaging, self._averaging # create empty vector of necessary shape # every "pixel" has 2 coordinates pos = np.empty((2 * xdim * ydim, 2), dtype=np.float32) # create coordinate spacing for x-y # double the num of elements by doubling x sampling
identifier_body
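The image branch above amounts to per-pixel segment construction: a midpoint at the centre of each stride cell, shifted both ways by (stride/2)·(length/2) times the local projection. A condensed sketch of that geometry under the same equal-stride assumption used in the source (names are illustrative):

```python
import numpy as np

def image_to_segments(vect: np.ndarray, stride: int = 1,
                      length: float = 1.0) -> np.ndarray:
    """Turn an (N, M, 2) projection field into (2*N*M, 2) endpoint
    pairs, one segment centred on each pixel's stride cell."""
    n, m = vect.shape[:2]
    proj = vect.reshape(n * m, 2)
    # one midpoint per pixel, at the centre of its stride cell
    yy, xx = np.mgrid[0:n, 0:m].reshape(2, -1) * stride + (stride - 1) / 2
    mid = np.stack([yy, xx], axis=1)
    half = (stride / 2) * (length / 2) * proj
    pos = np.empty((2 * n * m, 2), dtype=np.float32)
    pos[0::2] = mid - half    # tail of each segment
    pos[1::2] = mid + half    # head of each segment
    return pos

print(image_to_segments(np.ones((2, 2, 2))).shape)  # (8, 2)
```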
draw_trans_pixel_cihea.py
('--block_fnam',default=BLOCK_FNAM,help='Block shape file (%default)') parser.add_option('--trans_fnam',default=TRANS_FNAM,help='Transplanting tiff file (%default)') parser.add_option('--mask_fnam',default=MASK_FNAM,help='Mask file (%default)') parser.add_option('--output_fnam',default=OUTPUT_FNAM,help='Output figure name (%default)') parser.add_option('--add_tmin',default=False,action='store_true',help='Add tmin in colorbar (%default)') parser.add_option('--add_tmax',default=False,action='store_true',help='Add tmax in colorbar (%default)') parser.add_option('--add_coords',default=False,action='store_true',help='Add geographical coordinates (%default)') parser.add_option('--coords_color',default=COORDS_COLOR,help='Color of geographical coordinates (%default)') parser.add_option('--early',default=False,action='store_true',help='Early estimation mode (%default)') parser.add_option('-b','--batch',default=False,action='store_true',help='Batch mode (%default)') parser.add_option('--debug',default=False,action='store_true',help='Debug mode (%default)') (opts,args) = parser.parse_args() if not opts.debug: warnings.simplefilter('ignore') if opts.batch: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import ListedColormap,LinearSegmentedColormap,to_rgba from matplotlib.dates import date2num,num2date from matplotlib.path import Path def
(longitude,latitude): utm_zone = (int(1+(longitude.mean()+180.0)/6.0)) is_northern = (1 if latitude.mean() > 0 else 0) utm_coordinate_system = osr.SpatialReference() utm_coordinate_system.SetWellKnownGeogCS('WGS84') # Set geographic coordinate system to handle lat/lon utm_coordinate_system.SetUTM(utm_zone,is_northern) wgs84_coordinate_system = utm_coordinate_system.CloneGeogCS() # Clone ONLY the geographic coordinate system wgs84_to_utm_geo_transform = osr.CoordinateTransformation(wgs84_coordinate_system,utm_coordinate_system) # create transform component xyz = np.array(wgs84_to_utm_geo_transform.TransformPoints(np.dstack((longitude,latitude)).reshape((-1,2)))).reshape(longitude.shape[0],longitude.shape[1],3) return xyz[:,:,0],xyz[:,:,1],xyz[:,:,2] # returns easting, northing, altitude if opts.add_coords: center_x = 107.268 center_y = -6.839 lon = np.arange(107+10/60,107+23/60,2.0/60.0) lat = np.arange(-6-56/60,-6.756,2.0/60.0) xg,yg = np.meshgrid(lon,lat) x,y,z = transform_wgs84_to_utm(xg,yg) ind_x = np.argmin(np.abs(lon-center_x)) ind_y = np.argmin(np.abs(lat-center_y)) center_x_utm = x[ind_y,:] center_y_utm = y[:,ind_x] x_labels = ['{:.0f}'.format(int(x))+'$^{\circ}$'+'{:.0f}'.format((x-int(x))*60.0)+'$^{\prime}$E' for x in lon] y_labels = ['{:.0f}'.format(int(y))+'$^{\circ}$'+'{:.0f}'.format((y-int(y))*60.0)+'$^{\prime}$S' for y in -lat] x_labels = ['{:d}'.format(int(x))+'$^{\circ}$'+'{:02d}'.format(int((x-int(x))*60.0+0.1))+'$^{\prime}$E' for x in lon] y_labels = ['{:d}'.format(int(y))+'$^{\circ}$'+'{:02d}'.format(int((y-int(y))*60.0+0.1))+'$^{\prime}$S' for y in -lat] color = cm.hsv(np.linspace(0.0,1.0,365)) colors = np.vstack((color,color,color,color,color,color)) mymap = LinearSegmentedColormap.from_list('my_colormap',colors,N=len(colors)*2) prj = ccrs.UTM(zone=48,southern_hemisphere=True) block_shp = list(shpreader.Reader(opts.block_fnam).geometries()) block_rec = list(shpreader.Reader(opts.block_fnam).records()) ds = gdal.Open(opts.mask_fnam) mask = ds.ReadAsArray() mask_shape = mask.shape ds = None ds = gdal.Open(opts.trans_fnam) data = ds.ReadAsArray() data_trans = ds.GetGeoTransform() data_shape = data[0].shape ds = None if data_shape != mask_shape: raise ValueError('Error, data_shape={}, mask_shape={}'.format(data_shape,mask_shape)) data[:,mask<0.5] = np.nan #data[0] -= 9.0 # offset corrected #data[0] += date2num(np.datetime64('0000-12-31')) # Matplotlib>3.3 xmin = data_trans[0] xstp = data_trans[1] xmax = xmin+xstp*data_shape[1] ymax = data_trans[3] ystp = data_trans[5] ymin = ymax+ystp*data_shape[0] sys.stderr.write('tmin: {}\n'.format(num2date(np.nanmin(data[0])).strftime('%Y%m%d'))) sys.stderr.write('tmax: {}\n'.format(num2date(np.nanmax(data[0])).strftime('%Y%m%d'))) sys.stderr.write('smin: {}\n'.format(np.nanmin(data[1]))) sys.stderr.write('smax: {}\n'.format(np.nanmax(data[1]))) if opts.tmin is not None: tmin = date2num(datetime.strptime(opts.tmin,'%Y%m%d')) if opts.tmax is not None: tmax = date2num(datetime.strptime(opts.tmax,'%Y%m%d')) if opts.smin is not None: smin = opts.smin if opts.smax is not None: smax = opts.smax tdif = tmax-tmin values = [] labels = [] ticks = [] ds = tdif/365 for y in range(num2date(tmin).year,num2date(tmax).year+1): if ds > 2.0: for m in range(1,13,3): d = datetime(y,m,1) values.append(date2num(d)) labels.append(d.strftime('%Y-%m')) for m in range(1,13,1): d = datetime(y,m,1) ticks.append(date2num(d)) elif ds > 1.0: for m in range(1,13,2): d = datetime(y,m,1) values.append(date2num(d)) labels.append(d.strftime('%Y-%m')) for m in 
range(1,13,1): d = datetime(y,m,1) ticks.append(date2num(d)) else: for m in range(1,13,1): for day in [1,15]: d = datetime(y,m,day) values.append(date2num(d)) labels.append(d.strftime('%m/%d')) for day in [5,10,20,25]: d = datetime(y,m,day) ticks.append(date2num(d)) dmin = num2date(tmin) dmax = num2date(tmax) if opts.add_tmin: if not tmin in values: if ds > 1.0: values.append(tmin) labels.append(dmin.strftime('%Y-%m')) else: values.append(tmin) labels.append(dmin.strftime('%m/%d')) if opts.add_tmax: if not tmax in values: if ds > 1.0: values.append(tmax) labels.append(dmax.strftime('%Y-%m')) else: values.append(tmax) labels.append(dmax.strftime('%m/%d')) torg = date2num(datetime(dmin.year,1,1)) twid = 365.0*2.0 newcolors = mymap(np.linspace((tmin-torg)/twid,(tmax-torg)/twid,mymap.N)) if opts.early: indx = int(mymap.N*0.995+0.5) newcolors[indx:,:] = to_rgba('maroon') mymap2 = ListedColormap(newcolors) if not opts.batch: plt.interactive(True) fig = plt.figure(1,facecolor='w',figsize=(8.3,5.8)) plt.subplots_adjust(top=0.97,bottom=0.01,left=0.026,right=0.963,wspace=0.085,
transform_wgs84_to_utm
identifier_name
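The identifier recovered here is `transform_wgs84_to_utm`. Reassembled as a standalone helper it uses the same osgeo.osr calls as the source, so the GDAL Python bindings are required; the axis-order comment is an assumption worth checking against your GDAL version:

```python
import numpy as np
from osgeo import osr

def wgs84_to_utm(lon: np.ndarray, lat: np.ndarray):
    """Project lon/lat grids (degrees, WGS84) to UTM, picking the
    zone from the mean longitude as the source function does."""
    zone = int(1 + (lon.mean() + 180.0) / 6.0)
    utm = osr.SpatialReference()
    utm.SetWellKnownGeogCS('WGS84')
    utm.SetUTM(zone, 1 if lat.mean() > 0 else 0)  # 1 = northern hemisphere
    wgs84 = utm.CloneGeogCS()
    # On GDAL >= 3 you may need the traditional lon/lat axis order:
    # wgs84.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
    tr = osr.CoordinateTransformation(wgs84, utm)
    xyz = np.array(tr.TransformPoints(np.dstack((lon, lat)).reshape(-1, 2)))
    xyz = xyz.reshape(lon.shape + (3,))
    return xyz[..., 0], xyz[..., 1], xyz[..., 2]  # easting, northing, altitude
```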
draw_trans_pixel_cihea.py
('--block_fnam',default=BLOCK_FNAM,help='Block shape file (%default)') parser.add_option('--trans_fnam',default=TRANS_FNAM,help='Transplanting tiff file (%default)') parser.add_option('--mask_fnam',default=MASK_FNAM,help='Mask file (%default)') parser.add_option('--output_fnam',default=OUTPUT_FNAM,help='Output figure name (%default)') parser.add_option('--add_tmin',default=False,action='store_true',help='Add tmin in colorbar (%default)') parser.add_option('--add_tmax',default=False,action='store_true',help='Add tmax in colorbar (%default)') parser.add_option('--add_coords',default=False,action='store_true',help='Add geographical coordinates (%default)') parser.add_option('--coords_color',default=COORDS_COLOR,help='Color of geographical coordinates (%default)') parser.add_option('--early',default=False,action='store_true',help='Early estimation mode (%default)') parser.add_option('-b','--batch',default=False,action='store_true',help='Batch mode (%default)') parser.add_option('--debug',default=False,action='store_true',help='Debug mode (%default)') (opts,args) = parser.parse_args() if not opts.debug: warnings.simplefilter('ignore') if opts.batch: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import ListedColormap,LinearSegmentedColormap,to_rgba from matplotlib.dates import date2num,num2date from matplotlib.path import Path def transform_wgs84_to_utm(longitude,latitude): utm_zone = (int(1+(longitude.mean()+180.0)/6.0)) is_northern = (1 if latitude.mean() > 0 else 0) utm_coordinate_system = osr.SpatialReference() utm_coordinate_system.SetWellKnownGeogCS('WGS84') # Set geographic coordinate system to handle lat/lon utm_coordinate_system.SetUTM(utm_zone,is_northern) wgs84_coordinate_system = utm_coordinate_system.CloneGeogCS() # Clone ONLY the geographic coordinate system wgs84_to_utm_geo_transform = osr.CoordinateTransformation(wgs84_coordinate_system,utm_coordinate_system) # create transform component xyz = np.array(wgs84_to_utm_geo_transform.TransformPoints(np.dstack((longitude,latitude)).reshape((-1,2)))).reshape(longitude.shape[0],longitude.shape[1],3) return xyz[:,:,0],xyz[:,:,1],xyz[:,:,2] # returns easting, northing, altitude if opts.add_coords: center_x = 107.268 center_y = -6.839 lon = np.arange(107+10/60,107+23/60,2.0/60.0) lat = np.arange(-6-56/60,-6.756,2.0/60.0) xg,yg = np.meshgrid(lon,lat) x,y,z = transform_wgs84_to_utm(xg,yg) ind_x = np.argmin(np.abs(lon-center_x)) ind_y = np.argmin(np.abs(lat-center_y)) center_x_utm = x[ind_y,:] center_y_utm = y[:,ind_x] x_labels = ['{:.0f}'.format(int(x))+'$^{\circ}$'+'{:.0f}'.format((x-int(x))*60.0)+'$^{\prime}$E' for x in lon] y_labels = ['{:.0f}'.format(int(y))+'$^{\circ}$'+'{:.0f}'.format((y-int(y))*60.0)+'$^{\prime}$S' for y in -lat] x_labels = ['{:d}'.format(int(x))+'$^{\circ}$'+'{:02d}'.format(int((x-int(x))*60.0+0.1))+'$^{\prime}$E' for x in lon] y_labels = ['{:d}'.format(int(y))+'$^{\circ}$'+'{:02d}'.format(int((y-int(y))*60.0+0.1))+'$^{\prime}$S' for y in -lat] color = cm.hsv(np.linspace(0.0,1.0,365)) colors = np.vstack((color,color,color,color,color,color)) mymap = LinearSegmentedColormap.from_list('my_colormap',colors,N=len(colors)*2) prj = ccrs.UTM(zone=48,southern_hemisphere=True) block_shp = list(shpreader.Reader(opts.block_fnam).geometries()) block_rec = list(shpreader.Reader(opts.block_fnam).records()) ds = gdal.Open(opts.mask_fnam) mask = ds.ReadAsArray() mask_shape = mask.shape ds = None ds = gdal.Open(opts.trans_fnam) data = ds.ReadAsArray() 
data_trans = ds.GetGeoTransform() data_shape = data[0].shape ds = None if data_shape != mask_shape: raise ValueError('Error, data_shape={}, mask_shape={}'.format(data_shape,mask_shape)) data[:,mask<0.5] = np.nan #data[0] -= 9.0 # offset corrected #data[0] += date2num(np.datetime64('0000-12-31')) # Matplotlib>3.3 xmin = data_trans[0] xstp = data_trans[1] xmax = xmin+xstp*data_shape[1] ymax = data_trans[3] ystp = data_trans[5] ymin = ymax+ystp*data_shape[0] sys.stderr.write('tmin: {}\n'.format(num2date(np.nanmin(data[0])).strftime('%Y%m%d'))) sys.stderr.write('tmax: {}\n'.format(num2date(np.nanmax(data[0])).strftime('%Y%m%d'))) sys.stderr.write('smin: {}\n'.format(np.nanmin(data[1]))) sys.stderr.write('smax: {}\n'.format(np.nanmax(data[1]))) if opts.tmin is not None: tmin = date2num(datetime.strptime(opts.tmin,'%Y%m%d')) if opts.tmax is not None: tmax = date2num(datetime.strptime(opts.tmax,'%Y%m%d')) if opts.smin is not None: smin = opts.smin if opts.smax is not None: smax = opts.smax tdif = tmax-tmin values = [] labels = [] ticks = [] ds = tdif/365 for y in range(num2date(tmin).year,num2date(tmax).year+1): if ds > 2.0: for m in range(1,13,3): d = datetime(y,m,1) values.append(date2num(d)) labels.append(d.strftime('%Y-%m')) for m in range(1,13,1): d = datetime(y,m,1) ticks.append(date2num(d)) elif ds > 1.0: for m in range(1,13,2): d = datetime(y,m,1) values.append(date2num(d)) labels.append(d.strftime('%Y-%m')) for m in range(1,13,1): d = datetime(y,m,1) ticks.append(date2num(d)) else: for m in range(1,13,1): for day in [1,15]: d = datetime(y,m,day) values.append(date2num(d)) labels.append(d.strftime('%m/%d')) for day in [5,10,20,25]: d = datetime(y,m,day) ticks.append(date2num(d)) dmin = num2date(tmin) dmax = num2date(tmax) if opts.add_tmin:
if opts.add_tmax: if not tmax in values: if ds > 1.0: values.append(tmax) labels.append(dmax.strftime('%Y-%m')) else: values.append(tmax) labels.append(dmax.strftime('%m/%d')) torg = date2num(datetime(dmin.year,1,1)) twid = 365.0*2.0 newcolors = mymap(np.linspace((tmin-torg)/twid,(tmax-torg)/twid,mymap.N)) if opts.early: indx = int(mymap.N*0.995+0.5) newcolors[indx:,:] = to_rgba('maroon') mymap2 = ListedColormap(newcolors) if not opts.batch: plt.interactive(True) fig = plt.figure(1,facecolor='w',figsize=(8.3,5.8)) plt.subplots_adjust(top=0.97,bottom=0.01,left=0.026,right=0.963,wspace=0.085,
if not tmin in values: if ds > 1.0: values.append(tmin) labels.append(dmin.strftime('%Y-%m')) else: values.append(tmin) labels.append(dmin.strftime('%m/%d'))
conditional_block
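The colormap built in this row is a plain HSV cycle tiled six times, so one full hue rotation spans one calendar year and the map stays periodic across years; the '--early' mode then paints the top of the window maroon. A small sketch of that construction (the tmin/tmax values here are illustrative day offsets, not taken from the data):

```python
import numpy as np
from matplotlib import cm
from matplotlib.colors import LinearSegmentedColormap, ListedColormap, to_rgba

year = cm.hsv(np.linspace(0.0, 1.0, 365))        # one hue cycle per year
base = LinearSegmentedColormap.from_list('trans_dates',
                                         np.vstack([year] * 6),
                                         N=365 * 6 * 2)

# Restrict to a [tmin, tmax] window of a two-year span and mark the
# top ~0.5 % as maroon, as the --early branch does.
tmin, tmax, twid = 30.0, 400.0, 365.0 * 2.0
colors = base(np.linspace(tmin / twid, tmax / twid, base.N))
colors[int(base.N * 0.995 + 0.5):] = to_rgba('maroon')
window = ListedColormap(colors)
```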
draw_trans_pixel_cihea.py
('--block_fnam',default=BLOCK_FNAM,help='Block shape file (%default)') parser.add_option('--trans_fnam',default=TRANS_FNAM,help='Transplanting tiff file (%default)') parser.add_option('--mask_fnam',default=MASK_FNAM,help='Mask file (%default)') parser.add_option('--output_fnam',default=OUTPUT_FNAM,help='Output figure name (%default)') parser.add_option('--add_tmin',default=False,action='store_true',help='Add tmin in colorbar (%default)') parser.add_option('--add_tmax',default=False,action='store_true',help='Add tmax in colorbar (%default)') parser.add_option('--add_coords',default=False,action='store_true',help='Add geographical coordinates (%default)') parser.add_option('--coords_color',default=COORDS_COLOR,help='Color of geographical coordinates (%default)') parser.add_option('--early',default=False,action='store_true',help='Early estimation mode (%default)') parser.add_option('-b','--batch',default=False,action='store_true',help='Batch mode (%default)') parser.add_option('--debug',default=False,action='store_true',help='Debug mode (%default)') (opts,args) = parser.parse_args()
if opts.batch: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import ListedColormap,LinearSegmentedColormap,to_rgba from matplotlib.dates import date2num,num2date from matplotlib.path import Path def transform_wgs84_to_utm(longitude,latitude): utm_zone = (int(1+(longitude.mean()+180.0)/6.0)) is_northern = (1 if latitude.mean() > 0 else 0) utm_coordinate_system = osr.SpatialReference() utm_coordinate_system.SetWellKnownGeogCS('WGS84') # Set geographic coordinate system to handle lat/lon utm_coordinate_system.SetUTM(utm_zone,is_northern) wgs84_coordinate_system = utm_coordinate_system.CloneGeogCS() # Clone ONLY the geographic coordinate system wgs84_to_utm_geo_transform = osr.CoordinateTransformation(wgs84_coordinate_system,utm_coordinate_system) # create transform component xyz = np.array(wgs84_to_utm_geo_transform.TransformPoints(np.dstack((longitude,latitude)).reshape((-1,2)))).reshape(longitude.shape[0],longitude.shape[1],3) return xyz[:,:,0],xyz[:,:,1],xyz[:,:,2] # returns easting, northing, altitude if opts.add_coords: center_x = 107.268 center_y = -6.839 lon = np.arange(107+10/60,107+23/60,2.0/60.0) lat = np.arange(-6-56/60,-6.756,2.0/60.0) xg,yg = np.meshgrid(lon,lat) x,y,z = transform_wgs84_to_utm(xg,yg) ind_x = np.argmin(np.abs(lon-center_x)) ind_y = np.argmin(np.abs(lat-center_y)) center_x_utm = x[ind_y,:] center_y_utm = y[:,ind_x] x_labels = ['{:.0f}'.format(int(x))+'$^{\circ}$'+'{:.0f}'.format((x-int(x))*60.0)+'$^{\prime}$E' for x in lon] y_labels = ['{:.0f}'.format(int(y))+'$^{\circ}$'+'{:.0f}'.format((y-int(y))*60.0)+'$^{\prime}$S' for y in -lat] x_labels = ['{:d}'.format(int(x))+'$^{\circ}$'+'{:02d}'.format(int((x-int(x))*60.0+0.1))+'$^{\prime}$E' for x in lon] y_labels = ['{:d}'.format(int(y))+'$^{\circ}$'+'{:02d}'.format(int((y-int(y))*60.0+0.1))+'$^{\prime}$S' for y in -lat] color = cm.hsv(np.linspace(0.0,1.0,365)) colors = np.vstack((color,color,color,color,color,color)) mymap = LinearSegmentedColormap.from_list('my_colormap',colors,N=len(colors)*2) prj = ccrs.UTM(zone=48,southern_hemisphere=True) block_shp = list(shpreader.Reader(opts.block_fnam).geometries()) block_rec = list(shpreader.Reader(opts.block_fnam).records()) ds = gdal.Open(opts.mask_fnam) mask = ds.ReadAsArray() mask_shape = mask.shape ds = None ds = gdal.Open(opts.trans_fnam) data = ds.ReadAsArray() data_trans = ds.GetGeoTransform() data_shape = data[0].shape ds = None if data_shape != mask_shape: raise ValueError('Error, data_shape={}, mask_shape={}'.format(data_shape,mask_shape)) data[:,mask<0.5] = np.nan #data[0] -= 9.0 # offset corrected #data[0] += date2num(np.datetime64('0000-12-31')) # Matplotlib>3.3 xmin = data_trans[0] xstp = data_trans[1] xmax = xmin+xstp*data_shape[1] ymax = data_trans[3] ystp = data_trans[5] ymin = ymax+ystp*data_shape[0] sys.stderr.write('tmin: {}\n'.format(num2date(np.nanmin(data[0])).strftime('%Y%m%d'))) sys.stderr.write('tmax: {}\n'.format(num2date(np.nanmax(data[0])).strftime('%Y%m%d'))) sys.stderr.write('smin: {}\n'.format(np.nanmin(data[1]))) sys.stderr.write('smax: {}\n'.format(np.nanmax(data[1]))) if opts.tmin is not None: tmin = date2num(datetime.strptime(opts.tmin,'%Y%m%d')) if opts.tmax is not None: tmax = date2num(datetime.strptime(opts.tmax,'%Y%m%d')) if opts.smin is not None: smin = opts.smin if opts.smax is not None: smax = opts.smax tdif = tmax-tmin values = [] labels = [] ticks = [] ds = tdif/365 for y in range(num2date(tmin).year,num2date(tmax).year+1): if ds > 2.0: for m in 
range(1,13,3): d = datetime(y,m,1) values.append(date2num(d)) labels.append(d.strftime('%Y-%m')) for m in range(1,13,1): d = datetime(y,m,1) ticks.append(date2num(d)) elif ds > 1.0: for m in range(1,13,2): d = datetime(y,m,1) values.append(date2num(d)) labels.append(d.strftime('%Y-%m')) for m in range(1,13,1): d = datetime(y,m,1) ticks.append(date2num(d)) else: for m in range(1,13,1): for day in [1,15]: d = datetime(y,m,day) values.append(date2num(d)) labels.append(d.strftime('%m/%d')) for day in [5,10,20,25]: d = datetime(y,m,day) ticks.append(date2num(d)) dmin = num2date(tmin) dmax = num2date(tmax) if opts.add_tmin: if not tmin in values: if ds > 1.0: values.append(tmin) labels.append(dmin.strftime('%Y-%m')) else: values.append(tmin) labels.append(dmin.strftime('%m/%d')) if opts.add_tmax: if not tmax in values: if ds > 1.0: values.append(tmax) labels.append(dmax.strftime('%Y-%m')) else: values.append(tmax) labels.append(dmax.strftime('%m/%d')) torg = date2num(datetime(dmin.year,1,1)) twid = 365.0*2.0 newcolors = mymap(np.linspace((tmin-torg)/twid,(tmax-torg)/twid,mymap.N)) if opts.early: indx = int(mymap.N*0.995+0.5) newcolors[indx:,:] = to_rgba('maroon') mymap2 = ListedColormap(newcolors) if not opts.batch: plt.interactive(True) fig = plt.figure(1,facecolor='w',figsize=(8.3,5.8)) plt.subplots_adjust(top=0.97,bottom=0.01,left=0.026,right=0.963,wspace=0.085,
if not opts.debug: warnings.simplefilter('ignore')
random_line_split
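The tick-generation loops pick coarser colorbar labels as the date span grows: quarterly month labels beyond two years, bimonthly beyond one year, otherwise the 1st and 15th of each month. The same branching condensed into one helper (an illustrative name; inputs are Matplotlib date numbers):

```python
from datetime import datetime
from matplotlib.dates import date2num, num2date

def colorbar_dates(tmin: float, tmax: float):
    """Return (values, labels) for colorbar date labels over [tmin, tmax]."""
    span_years = (tmax - tmin) / 365
    step = 3 if span_years > 2.0 else 2 if span_years > 1.0 else None
    values, labels = [], []
    for y in range(num2date(tmin).year, num2date(tmax).year + 1):
        if step:                       # long span: label whole months
            for m in range(1, 13, step):
                d = datetime(y, m, 1)
                values.append(date2num(d))
                labels.append(d.strftime('%Y-%m'))
        else:                          # short span: label the 1st and 15th
            for m in range(1, 13):
                for day in (1, 15):
                    d = datetime(y, m, day)
                    values.append(date2num(d))
                    labels.append(d.strftime('%m/%d'))
    return values, labels

vals, labs = colorbar_dates(date2num(datetime(2019, 1, 1)),
                            date2num(datetime(2020, 6, 1)))  # bimonthly labels
```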
draw_trans_pixel_cihea.py
('--block_fnam',default=BLOCK_FNAM,help='Block shape file (%default)') parser.add_option('--trans_fnam',default=TRANS_FNAM,help='Transplanting tiff file (%default)') parser.add_option('--mask_fnam',default=MASK_FNAM,help='Mask file (%default)') parser.add_option('--output_fnam',default=OUTPUT_FNAM,help='Output figure name (%default)') parser.add_option('--add_tmin',default=False,action='store_true',help='Add tmin in colorbar (%default)') parser.add_option('--add_tmax',default=False,action='store_true',help='Add tmax in colorbar (%default)') parser.add_option('--add_coords',default=False,action='store_true',help='Add geographical coordinates (%default)') parser.add_option('--coords_color',default=COORDS_COLOR,help='Color of geographical coordinates (%default)') parser.add_option('--early',default=False,action='store_true',help='Early estimation mode (%default)') parser.add_option('-b','--batch',default=False,action='store_true',help='Batch mode (%default)') parser.add_option('--debug',default=False,action='store_true',help='Debug mode (%default)') (opts,args) = parser.parse_args() if not opts.debug: warnings.simplefilter('ignore') if opts.batch: import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.cm as cm from matplotlib.colors import ListedColormap,LinearSegmentedColormap,to_rgba from matplotlib.dates import date2num,num2date from matplotlib.path import Path def transform_wgs84_to_utm(longitude,latitude):
if opts.add_coords: center_x = 107.268 center_y = -6.839 lon = np.arange(107+10/60,107+23/60,2.0/60.0) lat = np.arange(-6-56/60,-6.756,2.0/60.0) xg,yg = np.meshgrid(lon,lat) x,y,z = transform_wgs84_to_utm(xg,yg) ind_x = np.argmin(np.abs(lon-center_x)) ind_y = np.argmin(np.abs(lat-center_y)) center_x_utm = x[ind_y,:] center_y_utm = y[:,ind_x] x_labels = ['{:.0f}'.format(int(x))+'$^{\circ}$'+'{:.0f}'.format((x-int(x))*60.0)+'$^{\prime}$E' for x in lon] y_labels = ['{:.0f}'.format(int(y))+'$^{\circ}$'+'{:.0f}'.format((y-int(y))*60.0)+'$^{\prime}$S' for y in -lat] x_labels = ['{:d}'.format(int(x))+'$^{\circ}$'+'{:02d}'.format(int((x-int(x))*60.0+0.1))+'$^{\prime}$E' for x in lon] y_labels = ['{:d}'.format(int(y))+'$^{\circ}$'+'{:02d}'.format(int((y-int(y))*60.0+0.1))+'$^{\prime}$S' for y in -lat] color = cm.hsv(np.linspace(0.0,1.0,365)) colors = np.vstack((color,color,color,color,color,color)) mymap = LinearSegmentedColormap.from_list('my_colormap',colors,N=len(colors)*2) prj = ccrs.UTM(zone=48,southern_hemisphere=True) block_shp = list(shpreader.Reader(opts.block_fnam).geometries()) block_rec = list(shpreader.Reader(opts.block_fnam).records()) ds = gdal.Open(opts.mask_fnam) mask = ds.ReadAsArray() mask_shape = mask.shape ds = None ds = gdal.Open(opts.trans_fnam) data = ds.ReadAsArray() data_trans = ds.GetGeoTransform() data_shape = data[0].shape ds = None if data_shape != mask_shape: raise ValueError('Error, data_shape={}, mask_shape={}'.format(data_shape,mask_shape)) data[:,mask<0.5] = np.nan #data[0] -= 9.0 # offset corrected #data[0] += date2num(np.datetime64('0000-12-31')) # Matplotlib>3.3 xmin = data_trans[0] xstp = data_trans[1] xmax = xmin+xstp*data_shape[1] ymax = data_trans[3] ystp = data_trans[5] ymin = ymax+ystp*data_shape[0] sys.stderr.write('tmin: {}\n'.format(num2date(np.nanmin(data[0])).strftime('%Y%m%d'))) sys.stderr.write('tmax: {}\n'.format(num2date(np.nanmax(data[0])).strftime('%Y%m%d'))) sys.stderr.write('smin: {}\n'.format(np.nanmin(data[1]))) sys.stderr.write('smax: {}\n'.format(np.nanmax(data[1]))) if opts.tmin is not None: tmin = date2num(datetime.strptime(opts.tmin,'%Y%m%d')) if opts.tmax is not None: tmax = date2num(datetime.strptime(opts.tmax,'%Y%m%d')) if opts.smin is not None: smin = opts.smin if opts.smax is not None: smax = opts.smax tdif = tmax-tmin values = [] labels = [] ticks = [] ds = tdif/365 for y in range(num2date(tmin).year,num2date(tmax).year+1): if ds > 2.0: for m in range(1,13,3): d = datetime(y,m,1) values.append(date2num(d)) labels.append(d.strftime('%Y-%m')) for m in range(1,13,1): d = datetime(y,m,1) ticks.append(date2num(d)) elif ds > 1.0: for m in range(1,13,2): d = datetime(y,m,1) values.append(date2num(d)) labels.append(d.strftime('%Y-%m')) for m in range(1,13,1): d = datetime(y,m,1) ticks.append(date2num(d)) else: for m in range(1,13,1): for day in [1,15]: d = datetime(y,m,day) values.append(date2num(d)) labels.append(d.strftime('%m/%d')) for day in [5,10,20,25]: d = datetime(y,m,day) ticks.append(date2num(d)) dmin = num2date(tmin) dmax = num2date(tmax) if opts.add_tmin: if not tmin in values: if ds > 1.0: values.append(tmin) labels.append(dmin.strftime('%Y-%m')) else: values.append(tmin) labels.append(dmin.strftime('%m/%d')) if opts.add_tmax: if not tmax in values: if ds > 1.0: values.append(tmax) labels.append(dmax.strftime('%Y-%m')) else: values.append(tmax) labels.append(dmax.strftime('%m/%d')) torg = date2num(datetime(dmin.year,1,1)) twid = 365.0*2.0 newcolors = mymap(np.linspace((tmin-torg)/twid,(tmax-torg)/twid,mymap.N)) if 
opts.early: indx = int(mymap.N*0.995+0.5) newcolors[indx:,:] = to_rgba('maroon') mymap2 = ListedColormap(newcolors) if not opts.batch: plt.interactive(True) fig = plt.figure(1,facecolor='w',figsize=(8.3,5.8)) plt.subplots_adjust(top=0.97,bottom=0.01,left=0.026,right=0.963,wspace=0.085,hspace
utm_zone = (int(1+(longitude.mean()+180.0)/6.0)) is_northern = (1 if latitude.mean() > 0 else 0) utm_coordinate_system = osr.SpatialReference() utm_coordinate_system.SetWellKnownGeogCS('WGS84') # Set geographic coordinate system to handle lat/lon utm_coordinate_system.SetUTM(utm_zone,is_northern) wgs84_coordinate_system = utm_coordinate_system.CloneGeogCS() # Clone ONLY the geographic coordinate system wgs84_to_utm_geo_transform = osr.CoordinateTransformation(wgs84_coordinate_system,utm_coordinate_system) # create transform component xyz = np.array(wgs84_to_utm_geo_transform.TransformPoints(np.dstack((longitude,latitude)).reshape((-1,2)))).reshape(longitude.shape[0],longitude.shape[1],3) return xyz[:,:,0],xyz[:,:,1],xyz[:,:,2] # returns easting, northing, altitude
identifier_body
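The raster handling at the top of the script reduces to four steps: read the GeoTIFF and its mask, check that the shapes agree, blank non-mask pixels with NaN, and derive plot extents from the geotransform. A sketch with placeholder file names:

```python
import numpy as np
from osgeo import gdal

def read_masked(trans_fnam: str, mask_fnam: str):
    """Read the transplanting raster, blank pixels outside the mask,
    and return the data plus (xmin, xmax, ymin, ymax) plot extents."""
    ds = gdal.Open(mask_fnam)
    mask = ds.ReadAsArray()
    ds = None
    ds = gdal.Open(trans_fnam)
    data = ds.ReadAsArray().astype(float)   # float so NaN is representable
    gt = ds.GetGeoTransform()
    ds = None
    if data[0].shape != mask.shape:
        raise ValueError('raster/mask shape mismatch')
    data[:, mask < 0.5] = np.nan            # hide pixels outside the mask
    rows, cols = data[0].shape
    xmin, xmax = gt[0], gt[0] + gt[1] * cols  # gt[1]: pixel width
    ymax, ymin = gt[3], gt[3] + gt[5] * rows  # gt[5]: (negative) pixel height
    return data, (xmin, xmax, ymin, ymax)
```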
base.py
', 'required']: return MODE_REQUIRED elif mode in ['auto', 'automatic']: return MODE_AUTOMATIC else: if mode not in ['off', 'disable', 'disabled']: logger.warning("Watchdog mode {0} not recognized, disabling watchdog".format(mode)) return MODE_OFF def synchronized(func: Callable[..., Any]) -> Callable[..., Any]: def
(self: 'Watchdog', *args: Any, **kwargs: Any) -> Any: with self.lock: return func(self, *args, **kwargs) return wrapped class WatchdogConfig(object): """Helper to contain a snapshot of configuration""" def __init__(self, config: Config) -> None: watchdog_config = config.get("watchdog") or {'mode': 'automatic'} self.mode = parse_mode(watchdog_config.get('mode', 'automatic')) self.ttl = config['ttl'] self.loop_wait = config['loop_wait'] self.safety_margin = watchdog_config.get('safety_margin', 5) self.driver = watchdog_config.get('driver', 'default') self.driver_config = dict((k, v) for k, v in watchdog_config.items() if k not in ['mode', 'safety_margin', 'driver']) def __eq__(self, other: Any) -> bool: return isinstance(other, WatchdogConfig) and \ all(getattr(self, attr) == getattr(other, attr) for attr in ['mode', 'ttl', 'loop_wait', 'safety_margin', 'driver', 'driver_config']) def __ne__(self, other: Any) -> bool: return not self == other def get_impl(self) -> 'WatchdogBase': if self.driver == 'testing': # pragma: no cover from patroni.watchdog.linux import TestingWatchdogDevice return TestingWatchdogDevice.from_config(self.driver_config) elif platform.system() == 'Linux' and self.driver == 'default': from patroni.watchdog.linux import LinuxWatchdogDevice return LinuxWatchdogDevice.from_config(self.driver_config) else: return NullWatchdog() @property def timeout(self) -> int: if self.safety_margin == -1: return int(self.ttl // 2) else: return self.ttl - self.safety_margin @property def timing_slack(self) -> int: return self.timeout - self.loop_wait class Watchdog(object): """Facade to dynamically manage watchdog implementations and handle config changes. When activation fails underlying implementation will be switched to a Null implementation. To avoid log spam activation will only be retried when watchdog configuration is changed.""" def __init__(self, config: Config) -> None: self.config = WatchdogConfig(config) self.active_config: WatchdogConfig = self.config self.lock = RLock() self.active = False if self.config.mode == MODE_OFF: self.impl = NullWatchdog() else: self.impl = self.config.get_impl() if self.config.mode == MODE_REQUIRED and self.impl.is_null: logger.error("Configuration requires a watchdog, but watchdog is not supported on this platform.") sys.exit(1) @synchronized def reload_config(self, config: Config) -> None: self.config = WatchdogConfig(config) # Turning a watchdog off can always be done immediately if self.config.mode == MODE_OFF: if self.active: self._disable() self.active_config = self.config self.impl = NullWatchdog() # If watchdog is not active we can apply config immediately to show any warnings early. Otherwise we need to # delay until next time a keepalive is sent so timeout matches up with leader key update. if not self.active: if self.config.driver != self.active_config.driver or \ self.config.driver_config != self.active_config.driver_config: self.impl = self.config.get_impl() self.active_config = self.config @synchronized def activate(self) -> bool: """Activates the watchdog device with suitable timeouts. While watchdog is active keepalive needs to be called every time loop_wait expires. :returns False if a safe watchdog could not be configured, but is required. 
""" self.active = True return self._activate() def _activate(self) -> bool: self.active_config = self.config if self.config.timing_slack < 0: logger.warning('Watchdog not supported because leader TTL {0} is less than 2x loop_wait {1}' .format(self.config.ttl, self.config.loop_wait)) self.impl = NullWatchdog() try: self.impl.open() actual_timeout = self._set_timeout() except WatchdogError as e: logger.warning("Could not activate %s: %s", self.impl.describe(), e) self.impl = NullWatchdog() actual_timeout = self.impl.get_timeout() if self.impl.is_running and not self.impl.can_be_disabled: logger.warning("Watchdog implementation can't be disabled." " Watchdog will trigger after Patroni loses leader key.") if not self.impl.is_running or actual_timeout and actual_timeout > self.config.timeout: if self.config.mode == MODE_REQUIRED: if self.impl.is_null: logger.error("Configuration requires watchdog, but watchdog could not be configured.") else: logger.error("Configuration requires watchdog, but a safe watchdog timeout {0} could" " not be configured. Watchdog timeout is {1}.".format( self.config.timeout, actual_timeout)) return False else: if not self.impl.is_null: logger.warning("Watchdog timeout {0} seconds does not ensure safe termination within {1} seconds" .format(actual_timeout, self.config.timeout)) if self.is_running: logger.info("{0} activated with {1} second timeout, timing slack {2} seconds" .format(self.impl.describe(), actual_timeout, self.config.timing_slack)) else: if self.config.mode == MODE_REQUIRED: logger.error("Configuration requires watchdog, but watchdog could not be activated") return False return True def _set_timeout(self) -> Optional[int]: if self.impl.has_set_timeout(): self.impl.set_timeout(self.config.timeout) # Safety checks for watchdog implementations that don't support configurable timeouts actual_timeout = self.impl.get_timeout() if self.impl.is_running and actual_timeout < self.config.loop_wait: logger.error('loop_wait of {0} seconds is too long for watchdog {1} second timeout' .format(self.config.loop_wait, actual_timeout)) if self.impl.can_be_disabled: logger.info('Disabling watchdog due to unsafe timeout.') self.impl.close() self.impl = NullWatchdog() return None return actual_timeout @synchronized def disable(self) -> None: self._disable() self.active = False def _disable(self) -> None: try: if self.impl.is_running and not self.impl.can_be_disabled: # Give sysadmin some extra time to clean stuff up. self.impl.keepalive() logger.warning("Watchdog implementation can't be disabled. System will reboot after " "{0} seconds when watchdog times out.".format(self.impl.get_timeout())) self.impl.close() except WatchdogError as e: logger.error("Error while disabling watchdog: %s", e) @synchronized def keepalive(self) -> None: try: if self.active: self.impl.keepalive() # In case there are any pending configuration changes apply them now. 
if self.active and self.config != self.active_config: if self.config.mode != MODE_OFF and self.active_config.mode == MODE_OFF: self.impl = self.config.get_impl() self._activate() if self.config.driver != self.active_config.driver \ or self.config.driver_config != self.active_config.driver_config: self._disable() self.impl = self.config.get_impl() self._activate() if self.config.timeout != self.active_config.timeout: self.impl.set_timeout(self.config.timeout) if self.is_running: logger.info("{0} updated with {1} second timeout, timing slack {2} seconds" .format(self.impl.describe(), self.impl.get_timeout(), self.config.timing_slack)) self.active_config = self.config except WatchdogError as e: logger.error("Error while sending keepalive: %s", e) @property @synchronized def is_running(self) -> bool: return self.impl.is_running @property @synchronized def is_healthy(self) -> bool: if self.config.mode != MODE_REQUIRED: return True return self.config.timing_slack >= 0 and self.impl.is_healthy class WatchdogBase(abc.ABC): """A watchdog object, when opened, requires periodic calls to keepalive. When keepalive is not called within a timeout the system will be terminated.""" is_null = False @property def is_running(self) -> bool: """Returns True when watchdog is activated and capable of performing its task.""" return False @property def is_healthy(self) -> bool: """Returns False when calling open() is known to fail.""" return False @property def can_be_disabled(self) -> bool: """Returns True when watchdog will be disabled by calling close(). Some watchdog devices will keep running no matter what once activated. May raise Watchdog
wrapped
identifier_name
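The middle recovered for this row is just the inner function name `wrapped`; the decorator serializes every call through the instance's lock. A self-contained sketch of the pattern. Using `RLock` rather than `Lock` matters because synchronized members call each other (for example, `keepalive` reads the synchronized `is_running` property while already holding the lock):

```python
import functools
from threading import RLock
from typing import Any, Callable

def synchronized(func: Callable[..., Any]) -> Callable[..., Any]:
    """Serialize method calls through the instance's re-entrant lock."""
    @functools.wraps(func)
    def wrapped(self: Any, *args: Any, **kwargs: Any) -> Any:
        with self.lock:
            return func(self, *args, **kwargs)
    return wrapped

class Counter:
    """Toy class showing the convention: the instance supplies .lock."""
    def __init__(self) -> None:
        self.lock = RLock()
        self.value = 0

    @synchronized
    def bump(self) -> int:
        self.value += 1
        return self.value
```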
base.py
', 'required']: return MODE_REQUIRED elif mode in ['auto', 'automatic']: return MODE_AUTOMATIC else: if mode not in ['off', 'disable', 'disabled']: logger.warning("Watchdog mode {0} not recognized, disabling watchdog".format(mode)) return MODE_OFF def synchronized(func: Callable[..., Any]) -> Callable[..., Any]: def wrapped(self: 'Watchdog', *args: Any, **kwargs: Any) -> Any: with self.lock: return func(self, *args, **kwargs) return wrapped class WatchdogConfig(object): """Helper to contain a snapshot of configuration""" def __init__(self, config: Config) -> None: watchdog_config = config.get("watchdog") or {'mode': 'automatic'} self.mode = parse_mode(watchdog_config.get('mode', 'automatic')) self.ttl = config['ttl'] self.loop_wait = config['loop_wait'] self.safety_margin = watchdog_config.get('safety_margin', 5) self.driver = watchdog_config.get('driver', 'default') self.driver_config = dict((k, v) for k, v in watchdog_config.items() if k not in ['mode', 'safety_margin', 'driver']) def __eq__(self, other: Any) -> bool: return isinstance(other, WatchdogConfig) and \ all(getattr(self, attr) == getattr(other, attr) for attr in ['mode', 'ttl', 'loop_wait', 'safety_margin', 'driver', 'driver_config']) def __ne__(self, other: Any) -> bool: return not self == other def get_impl(self) -> 'WatchdogBase': if self.driver == 'testing': # pragma: no cover from patroni.watchdog.linux import TestingWatchdogDevice return TestingWatchdogDevice.from_config(self.driver_config) elif platform.system() == 'Linux' and self.driver == 'default': from patroni.watchdog.linux import LinuxWatchdogDevice return LinuxWatchdogDevice.from_config(self.driver_config) else: return NullWatchdog() @property def timeout(self) -> int: if self.safety_margin == -1: return int(self.ttl // 2) else: return self.ttl - self.safety_margin @property def timing_slack(self) -> int: return self.timeout - self.loop_wait class Watchdog(object): """Facade to dynamically manage watchdog implementations and handle config changes. When activation fails underlying implementation will be switched to a Null implementation. To avoid log spam activation will only be retried when watchdog configuration is changed.""" def __init__(self, config: Config) -> None: self.config = WatchdogConfig(config) self.active_config: WatchdogConfig = self.config self.lock = RLock() self.active = False if self.config.mode == MODE_OFF: self.impl = NullWatchdog() else: self.impl = self.config.get_impl() if self.config.mode == MODE_REQUIRED and self.impl.is_null: logger.error("Configuration requires a watchdog, but watchdog is not supported on this platform.") sys.exit(1) @synchronized def reload_config(self, config: Config) -> None: self.config = WatchdogConfig(config) # Turning a watchdog off can always be done immediately if self.config.mode == MODE_OFF: if self.active: self._disable() self.active_config = self.config self.impl = NullWatchdog() # If watchdog is not active we can apply config immediately to show any warnings early. Otherwise we need to # delay until next time a keepalive is sent so timeout matches up with leader key update. if not self.active: if self.config.driver != self.active_config.driver or \ self.config.driver_config != self.active_config.driver_config: self.impl = self.config.get_impl() self.active_config = self.config @synchronized def activate(self) -> bool: """Activates the watchdog device with suitable timeouts. While watchdog is active keepalive needs to be called every time loop_wait expires. 
:returns False if a safe watchdog could not be configured, but is required. """ self.active = True return self._activate() def _activate(self) -> bool: self.active_config = self.config if self.config.timing_slack < 0: logger.warning('Watchdog not supported because leader TTL {0} is less than 2x loop_wait {1}' .format(self.config.ttl, self.config.loop_wait)) self.impl = NullWatchdog() try: self.impl.open() actual_timeout = self._set_timeout() except WatchdogError as e: logger.warning("Could not activate %s: %s", self.impl.describe(), e) self.impl = NullWatchdog() actual_timeout = self.impl.get_timeout() if self.impl.is_running and not self.impl.can_be_disabled: logger.warning("Watchdog implementation can't be disabled." " Watchdog will trigger after Patroni loses leader key.") if not self.impl.is_running or actual_timeout and actual_timeout > self.config.timeout: if self.config.mode == MODE_REQUIRED: if self.impl.is_null: logger.error("Configuration requires watchdog, but watchdog could not be configured.") else: logger.error("Configuration requires watchdog, but a safe watchdog timeout {0} could" " not be configured. Watchdog timeout is {1}.".format( self.config.timeout, actual_timeout)) return False else: if not self.impl.is_null: logger.warning("Watchdog timeout {0} seconds does not ensure safe termination within {1} seconds" .format(actual_timeout, self.config.timeout)) if self.is_running: logger.info("{0} activated with {1} second timeout, timing slack {2} seconds" .format(self.impl.describe(), actual_timeout, self.config.timing_slack)) else: if self.config.mode == MODE_REQUIRED:
return True def _set_timeout(self) -> Optional[int]: if self.impl.has_set_timeout(): self.impl.set_timeout(self.config.timeout) # Safety checks for watchdog implementations that don't support configurable timeouts actual_timeout = self.impl.get_timeout() if self.impl.is_running and actual_timeout < self.config.loop_wait: logger.error('loop_wait of {0} seconds is too long for watchdog {1} second timeout' .format(self.config.loop_wait, actual_timeout)) if self.impl.can_be_disabled: logger.info('Disabling watchdog due to unsafe timeout.') self.impl.close() self.impl = NullWatchdog() return None return actual_timeout @synchronized def disable(self) -> None: self._disable() self.active = False def _disable(self) -> None: try: if self.impl.is_running and not self.impl.can_be_disabled: # Give sysadmin some extra time to clean stuff up. self.impl.keepalive() logger.warning("Watchdog implementation can't be disabled. System will reboot after " "{0} seconds when watchdog times out.".format(self.impl.get_timeout())) self.impl.close() except WatchdogError as e: logger.error("Error while disabling watchdog: %s", e) @synchronized def keepalive(self) -> None: try: if self.active: self.impl.keepalive() # In case there are any pending configuration changes apply them now. if self.active and self.config != self.active_config: if self.config.mode != MODE_OFF and self.active_config.mode == MODE_OFF: self.impl = self.config.get_impl() self._activate() if self.config.driver != self.active_config.driver \ or self.config.driver_config != self.active_config.driver_config: self._disable() self.impl = self.config.get_impl() self._activate() if self.config.timeout != self.active_config.timeout: self.impl.set_timeout(self.config.timeout) if self.is_running: logger.info("{0} updated with {1} second timeout, timing slack {2} seconds" .format(self.impl.describe(), self.impl.get_timeout(), self.config.timing_slack)) self.active_config = self.config except WatchdogError as e: logger.error("Error while sending keepalive: %s", e) @property @synchronized def is_running(self) -> bool: return self.impl.is_running @property @synchronized def is_healthy(self) -> bool: if self.config.mode != MODE_REQUIRED: return True return self.config.timing_slack >= 0 and self.impl.is_healthy class WatchdogBase(abc.ABC): """A watchdog object, when opened, requires periodic calls to keepalive. When keepalive is not called within a timeout the system will be terminated.""" is_null = False @property def is_running(self) -> bool: """Returns True when watchdog is activated and capable of performing its task.""" return False @property def is_healthy(self) -> bool: """Returns False when calling open() is known to fail.""" return False @property def can_be_disabled(self) -> bool: """Returns True when watchdog will be disabled by calling close(). Some watchdog devices will keep running no matter what once activated. May raise WatchdogError
logger.error("Configuration requires watchdog, but watchdog could not be activated") return False
conditional_block
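The safety reasoning in `_activate` rests on two small WatchdogConfig properties: the device must fire before the leader key expires (minus a safety margin), and whatever is left after one scheduler loop is the timing slack. The same arithmetic as a plain function (an illustrative name), with the edge cases spelled out:

```python
def watchdog_timeout(ttl: int, loop_wait: int, safety_margin: int = 5):
    """Reproduce WatchdogConfig.timeout and .timing_slack: a margin of
    -1 means 'use half the TTL'; negative slack triggers the
    'TTL less than 2x loop_wait' warning in _activate."""
    timeout = ttl // 2 if safety_margin == -1 else ttl - safety_margin
    return timeout, timeout - loop_wait

print(watchdog_timeout(30, 10))       # (25, 15): comfortable slack
print(watchdog_timeout(15, 10))       # (10, 0): borderline
print(watchdog_timeout(30, 10, -1))   # (15, 5): half-TTL fallback
```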
base.py
', 'required']: return MODE_REQUIRED elif mode in ['auto', 'automatic']: return MODE_AUTOMATIC else: if mode not in ['off', 'disable', 'disabled']: logger.warning("Watchdog mode {0} not recognized, disabling watchdog".format(mode)) return MODE_OFF def synchronized(func: Callable[..., Any]) -> Callable[..., Any]: def wrapped(self: 'Watchdog', *args: Any, **kwargs: Any) -> Any: with self.lock: return func(self, *args, **kwargs) return wrapped class WatchdogConfig(object): """Helper to contain a snapshot of configuration""" def __init__(self, config: Config) -> None: watchdog_config = config.get("watchdog") or {'mode': 'automatic'} self.mode = parse_mode(watchdog_config.get('mode', 'automatic')) self.ttl = config['ttl'] self.loop_wait = config['loop_wait'] self.safety_margin = watchdog_config.get('safety_margin', 5) self.driver = watchdog_config.get('driver', 'default') self.driver_config = dict((k, v) for k, v in watchdog_config.items() if k not in ['mode', 'safety_margin', 'driver']) def __eq__(self, other: Any) -> bool: return isinstance(other, WatchdogConfig) and \ all(getattr(self, attr) == getattr(other, attr) for attr in ['mode', 'ttl', 'loop_wait', 'safety_margin', 'driver', 'driver_config']) def __ne__(self, other: Any) -> bool: return not self == other def get_impl(self) -> 'WatchdogBase': if self.driver == 'testing': # pragma: no cover from patroni.watchdog.linux import TestingWatchdogDevice return TestingWatchdogDevice.from_config(self.driver_config) elif platform.system() == 'Linux' and self.driver == 'default': from patroni.watchdog.linux import LinuxWatchdogDevice return LinuxWatchdogDevice.from_config(self.driver_config) else: return NullWatchdog() @property def timeout(self) -> int: if self.safety_margin == -1: return int(self.ttl // 2) else: return self.ttl - self.safety_margin @property def timing_slack(self) -> int: return self.timeout - self.loop_wait class Watchdog(object): """Facade to dynamically manage watchdog implementations and handle config changes. When activation fails underlying implementation will be switched to a Null implementation. To avoid log spam activation will only be retried when watchdog configuration is changed.""" def __init__(self, config: Config) -> None: self.config = WatchdogConfig(config) self.active_config: WatchdogConfig = self.config self.lock = RLock() self.active = False if self.config.mode == MODE_OFF: self.impl = NullWatchdog() else: self.impl = self.config.get_impl() if self.config.mode == MODE_REQUIRED and self.impl.is_null: logger.error("Configuration requires a watchdog, but watchdog is not supported on this platform.") sys.exit(1) @synchronized def reload_config(self, config: Config) -> None: self.config = WatchdogConfig(config) # Turning a watchdog off can always be done immediately if self.config.mode == MODE_OFF: if self.active: self._disable() self.active_config = self.config self.impl = NullWatchdog() # If watchdog is not active we can apply config immediately to show any warnings early. Otherwise we need to # delay until next time a keepalive is sent so timeout matches up with leader key update. if not self.active: if self.config.driver != self.active_config.driver or \ self.config.driver_config != self.active_config.driver_config: self.impl = self.config.get_impl() self.active_config = self.config @synchronized def activate(self) -> bool: """Activates the watchdog device with suitable timeouts. While watchdog is active keepalive needs to be called every time loop_wait expires. 
:returns False if a safe watchdog could not be configured, but is required. """ self.active = True return self._activate() def _activate(self) -> bool: self.active_config = self.config if self.config.timing_slack < 0: logger.warning('Watchdog not supported because leader TTL {0} is less than 2x loop_wait {1}' .format(self.config.ttl, self.config.loop_wait)) self.impl = NullWatchdog() try: self.impl.open() actual_timeout = self._set_timeout() except WatchdogError as e: logger.warning("Could not activate %s: %s", self.impl.describe(), e) self.impl = NullWatchdog() actual_timeout = self.impl.get_timeout() if self.impl.is_running and not self.impl.can_be_disabled: logger.warning("Watchdog implementation can't be disabled." " Watchdog will trigger after Patroni loses leader key.") if not self.impl.is_running or actual_timeout and actual_timeout > self.config.timeout: if self.config.mode == MODE_REQUIRED: if self.impl.is_null: logger.error("Configuration requires watchdog, but watchdog could not be configured.") else: logger.error("Configuration requires watchdog, but a safe watchdog timeout {0} could" " not be configured. Watchdog timeout is {1}.".format( self.config.timeout, actual_timeout)) return False else: if not self.impl.is_null: logger.warning("Watchdog timeout {0} seconds does not ensure safe termination within {1} seconds" .format(actual_timeout, self.config.timeout)) if self.is_running: logger.info("{0} activated with {1} second timeout, timing slack {2} seconds" .format(self.impl.describe(), actual_timeout, self.config.timing_slack)) else: if self.config.mode == MODE_REQUIRED: logger.error("Configuration requires watchdog, but watchdog could not be activated") return False return True def _set_timeout(self) -> Optional[int]: if self.impl.has_set_timeout():
# Safety checks for watchdog implementations that don't support configurable timeouts actual_timeout = self.impl.get_timeout() if self.impl.is_running and actual_timeout < self.config.loop_wait: logger.error('loop_wait of {0} seconds is too long for watchdog {1} second timeout' .format(self.config.loop_wait, actual_timeout)) if self.impl.can_be_disabled: logger.info('Disabling watchdog due to unsafe timeout.') self.impl.close() self.impl = NullWatchdog() return None return actual_timeout @synchronized def disable(self) -> None: self._disable() self.active = False def _disable(self) -> None: try: if self.impl.is_running and not self.impl.can_be_disabled: # Give sysadmin some extra time to clean stuff up. self.impl.keepalive() logger.warning("Watchdog implementation can't be disabled. System will reboot after " "{0} seconds when watchdog times out.".format(self.impl.get_timeout())) self.impl.close() except WatchdogError as e: logger.error("Error while disabling watchdog: %s", e) @synchronized def keepalive(self) -> None: try: if self.active: self.impl.keepalive() # In case there are any pending configuration changes apply them now. if self.active and self.config != self.active_config: if self.config.mode != MODE_OFF and self.active_config.mode == MODE_OFF: self.impl = self.config.get_impl() self._activate() if self.config.driver != self.active_config.driver \ or self.config.driver_config != self.active_config.driver_config: self._disable() self.impl = self.config.get_impl() self._activate() if self.config.timeout != self.active_config.timeout: self.impl.set_timeout(self.config.timeout) if self.is_running: logger.info("{0} updated with {1} second timeout, timing slack {2} seconds" .format(self.impl.describe(), self.impl.get_timeout(), self.config.timing_slack)) self.active_config = self.config except WatchdogError as e: logger.error("Error while sending keepalive: %s", e) @property @synchronized def is_running(self) -> bool: return self.impl.is_running @property @synchronized def is_healthy(self) -> bool: if self.config.mode != MODE_REQUIRED: return True return self.config.timing_slack >= 0 and self.impl.is_healthy class WatchdogBase(abc.ABC): """A watchdog object when opened requires periodic calls to keepalive. When keepalive is not called within a timeout the system will be terminated.""" is_null = False @property def is_running(self) -> bool: """Returns True when watchdog is activated and capable of performing it's task.""" return False @property def is_healthy(self) -> bool: """Returns False when calling open() is known to fail.""" return False @property def can_be_disabled(self) -> bool: """Returns True when watchdog will be disabled by calling close(). Some watchdog devices will keep running no matter what once activated. May raise WatchdogError
self.impl.set_timeout(self.config.timeout)
random_line_split
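The timeout arithmetic in WatchdogConfig above is easy to misread, so here is a minimal Python sketch of the two properties; the sample numbers (ttl=30, loop_wait=10, safety_margin=5) are illustrative, not taken from any real configuration.

# Sketch of WatchdogConfig.timeout and WatchdogConfig.timing_slack above.
def watchdog_timeout(ttl: int, safety_margin: int) -> int:
    if safety_margin == -1:  # -1 means "use half the TTL"
        return ttl // 2
    return ttl - safety_margin

def timing_slack(ttl: int, loop_wait: int, safety_margin: int) -> int:
    return watchdog_timeout(ttl, safety_margin) - loop_wait

assert watchdog_timeout(30, 5) == 25    # explicit margin: 30 - 5
assert watchdog_timeout(30, -1) == 15   # default half-TTL rule: 30 // 2
assert timing_slack(30, 10, 5) == 15    # positive slack: watchdog is usable
assert timing_slack(16, 10, -1) == -2   # negative slack: activation is refused

A negative slack is exactly the condition _activate() warns about when the leader TTL is less than twice loop_wait.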
base.py
) -> None: watchdog_config = config.get("watchdog") or {'mode': 'automatic'} self.mode = parse_mode(watchdog_config.get('mode', 'automatic')) self.ttl = config['ttl'] self.loop_wait = config['loop_wait'] self.safety_margin = watchdog_config.get('safety_margin', 5) self.driver = watchdog_config.get('driver', 'default') self.driver_config = dict((k, v) for k, v in watchdog_config.items() if k not in ['mode', 'safety_margin', 'driver']) def __eq__(self, other: Any) -> bool: return isinstance(other, WatchdogConfig) and \ all(getattr(self, attr) == getattr(other, attr) for attr in ['mode', 'ttl', 'loop_wait', 'safety_margin', 'driver', 'driver_config']) def __ne__(self, other: Any) -> bool: return not self == other def get_impl(self) -> 'WatchdogBase': if self.driver == 'testing': # pragma: no cover from patroni.watchdog.linux import TestingWatchdogDevice return TestingWatchdogDevice.from_config(self.driver_config) elif platform.system() == 'Linux' and self.driver == 'default': from patroni.watchdog.linux import LinuxWatchdogDevice return LinuxWatchdogDevice.from_config(self.driver_config) else: return NullWatchdog() @property def timeout(self) -> int: if self.safety_margin == -1: return int(self.ttl // 2) else: return self.ttl - self.safety_margin @property def timing_slack(self) -> int: return self.timeout - self.loop_wait class Watchdog(object): """Facade to dynamically manage watchdog implementations and handle config changes. When activation fails underlying implementation will be switched to a Null implementation. To avoid log spam activation will only be retried when watchdog configuration is changed.""" def __init__(self, config: Config) -> None: self.config = WatchdogConfig(config) self.active_config: WatchdogConfig = self.config self.lock = RLock() self.active = False if self.config.mode == MODE_OFF: self.impl = NullWatchdog() else: self.impl = self.config.get_impl() if self.config.mode == MODE_REQUIRED and self.impl.is_null: logger.error("Configuration requires a watchdog, but watchdog is not supported on this platform.") sys.exit(1) @synchronized def reload_config(self, config: Config) -> None: self.config = WatchdogConfig(config) # Turning a watchdog off can always be done immediately if self.config.mode == MODE_OFF: if self.active: self._disable() self.active_config = self.config self.impl = NullWatchdog() # If watchdog is not active we can apply config immediately to show any warnings early. Otherwise we need to # delay until next time a keepalive is sent so timeout matches up with leader key update. if not self.active: if self.config.driver != self.active_config.driver or \ self.config.driver_config != self.active_config.driver_config: self.impl = self.config.get_impl() self.active_config = self.config @synchronized def activate(self) -> bool: """Activates the watchdog device with suitable timeouts. While watchdog is active keepalive needs to be called every time loop_wait expires. :returns False if a safe watchdog could not be configured, but is required. 
""" self.active = True return self._activate() def _activate(self) -> bool: self.active_config = self.config if self.config.timing_slack < 0: logger.warning('Watchdog not supported because leader TTL {0} is less than 2x loop_wait {1}' .format(self.config.ttl, self.config.loop_wait)) self.impl = NullWatchdog() try: self.impl.open() actual_timeout = self._set_timeout() except WatchdogError as e: logger.warning("Could not activate %s: %s", self.impl.describe(), e) self.impl = NullWatchdog() actual_timeout = self.impl.get_timeout() if self.impl.is_running and not self.impl.can_be_disabled: logger.warning("Watchdog implementation can't be disabled." " Watchdog will trigger after Patroni loses leader key.") if not self.impl.is_running or actual_timeout and actual_timeout > self.config.timeout: if self.config.mode == MODE_REQUIRED: if self.impl.is_null: logger.error("Configuration requires watchdog, but watchdog could not be configured.") else: logger.error("Configuration requires watchdog, but a safe watchdog timeout {0} could" " not be configured. Watchdog timeout is {1}.".format( self.config.timeout, actual_timeout)) return False else: if not self.impl.is_null: logger.warning("Watchdog timeout {0} seconds does not ensure safe termination within {1} seconds" .format(actual_timeout, self.config.timeout)) if self.is_running: logger.info("{0} activated with {1} second timeout, timing slack {2} seconds" .format(self.impl.describe(), actual_timeout, self.config.timing_slack)) else: if self.config.mode == MODE_REQUIRED: logger.error("Configuration requires watchdog, but watchdog could not be activated") return False return True def _set_timeout(self) -> Optional[int]: if self.impl.has_set_timeout(): self.impl.set_timeout(self.config.timeout) # Safety checks for watchdog implementations that don't support configurable timeouts actual_timeout = self.impl.get_timeout() if self.impl.is_running and actual_timeout < self.config.loop_wait: logger.error('loop_wait of {0} seconds is too long for watchdog {1} second timeout' .format(self.config.loop_wait, actual_timeout)) if self.impl.can_be_disabled: logger.info('Disabling watchdog due to unsafe timeout.') self.impl.close() self.impl = NullWatchdog() return None return actual_timeout @synchronized def disable(self) -> None: self._disable() self.active = False def _disable(self) -> None: try: if self.impl.is_running and not self.impl.can_be_disabled: # Give sysadmin some extra time to clean stuff up. self.impl.keepalive() logger.warning("Watchdog implementation can't be disabled. System will reboot after " "{0} seconds when watchdog times out.".format(self.impl.get_timeout())) self.impl.close() except WatchdogError as e: logger.error("Error while disabling watchdog: %s", e) @synchronized def keepalive(self) -> None: try: if self.active: self.impl.keepalive() # In case there are any pending configuration changes apply them now. 
if self.active and self.config != self.active_config: if self.config.mode != MODE_OFF and self.active_config.mode == MODE_OFF: self.impl = self.config.get_impl() self._activate() if self.config.driver != self.active_config.driver \ or self.config.driver_config != self.active_config.driver_config: self._disable() self.impl = self.config.get_impl() self._activate() if self.config.timeout != self.active_config.timeout: self.impl.set_timeout(self.config.timeout) if self.is_running: logger.info("{0} updated with {1} second timeout, timing slack {2} seconds" .format(self.impl.describe(), self.impl.get_timeout(), self.config.timing_slack)) self.active_config = self.config except WatchdogError as e: logger.error("Error while sending keepalive: %s", e) @property @synchronized def is_running(self) -> bool: return self.impl.is_running @property @synchronized def is_healthy(self) -> bool: if self.config.mode != MODE_REQUIRED: return True return self.config.timing_slack >= 0 and self.impl.is_healthy class WatchdogBase(abc.ABC): """A watchdog object when opened requires periodic calls to keepalive. When keepalive is not called within a timeout the system will be terminated.""" is_null = False @property def is_running(self) -> bool: """Returns True when watchdog is activated and capable of performing it's task.""" return False @property def is_healthy(self) -> bool: """Returns False when calling open() is known to fail.""" return False @property def can_be_disabled(self) -> bool: """Returns True when watchdog will be disabled by calling close(). Some watchdog devices will keep running no matter what once activated. May raise WatchdogError if called without calling open() first.""" return True @abc.abstractmethod def open(self) -> None: """Open watchdog device. When watchdog is opened keepalive must be called. Returns nothing on success or raises WatchdogError if the device could not be opened.""" @abc.abstractmethod def close(self) -> None: """Gracefully close watchdog device.""" @abc.abstractmethod def keepalive(self) -> None: """Resets the watchdog timer. Watchdog must be open when keepalive is called.""" @abc.abstractmethod def get_timeout(self) -> int:
"""Returns the current keepalive timeout in effect."""
identifier_body
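The synchronized decorator above only works because self.lock is an RLock: synchronized methods call other synchronized members (activate() ends up reading the synchronized is_running property), so the same thread must be able to re-acquire the lock. A minimal sketch; the Demo class is hypothetical.

from threading import RLock
from typing import Any, Callable

def synchronized(func: Callable[..., Any]) -> Callable[..., Any]:
    def wrapped(self: Any, *args: Any, **kwargs: Any) -> Any:
        with self.lock:
            return func(self, *args, **kwargs)
    return wrapped

class Demo:
    def __init__(self) -> None:
        self.lock = RLock()  # a plain Lock would deadlock in outer()

    @synchronized
    def outer(self) -> str:
        return self.inner()  # re-acquires self.lock on the same thread

    @synchronized
    def inner(self) -> str:
        return "ok"

assert Demo().outer() == "ok"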
db_viewer.go
:= listCourses(s) for _, c := range courses { lang, err := strconv.Atoi(string(c.Code[1])) if err != nil { log.Printf("Couldn't get language digit, %v", err) continue } var equiv string if lang < 5 && lang >= 0 { equiv = c.Id[:4] + strconv.Itoa(lang+4) + c.Id[5:] } else if lang >= 5 && lang < 10 { equiv = c.Id[:4] + strconv.Itoa(lang-4) + c.Id[5:] } else { log.Printf("Invalid lang digit=%d", lang) continue } _, ok, err := s.Get(COURSE_COLL + dskvs.CollKeySep + equiv) if err != nil { log.Printf("Error getting bilingual equiv, %v", err) continue } if !ok { log.Printf("Not bilingual, %v", c.Id) continue } for _, known := range c.Equivalence { if known == equiv { log.Printf("Already know that one, %v", known) continue } } c.Equivalence = append(c.Equivalence, equiv) log.Printf("Linking bilingual vs of %s: %s", c.Id, equiv) b, err := json.Marshal(c) if err != nil { log.Printf("Couldn't marshal c, %v", err) return } key := COURSE_COLL + dskvs.CollKeySep + c.Id if err = s.Put(key, b); err != nil { log.Printf("Error Putting c, %v", err) return } } reconcileTopicWithCourses(s) } func reconcileTopicWithCourses(s *dskvs.Store) { courses := listCourses(s) var t Topic for _, c := range courses { key := TOPIC_COLL + dskvs.CollKeySep + c.Topic out, ok, err := s.Get(key) if !ok || err != nil { log.Printf("Something went wrong, course=%s, ok=%v, err=%v", c, ok, err) continue } err = json.Unmarshal(out, &t) if err != nil { log.Printf("Couldn't unmarshal topic, %v", err) } for _, known := range t.Courses { if known == c.Id { log.Printf("Already known by this topic, %s", c.Id) continue } } t.Courses = append(t.Courses, c.Id) log.Printf("Linking %s to %s", t.Code, c.Id) in, err := json.Marshal(t) if err != nil { log.Printf("Couldn't marshal topic %v, %v", t, err) continue } err = s.Put(key, in) if err != nil { log.Printf("Couldn't Put, key=%s, err=%v, t=%v", key, err, t) } } } func readDegree(degreeRead chan Degree) { defer close(degreeRead) degreeList := readDegreeUrlList() tick := time.NewTicker(time.Millisecond * DEGREE_QUERY_DELAY) defer tick.Stop() log.Printf("Found %d URLs to degree pages", len(degreeList)) for _, degreeUrl := range degreeList { fmt.Printf("...") <-tick.C fmt.Printf(" tic! 
%s\n", degreeUrl) deg, err := readDegreePage(degreeUrl) if err != nil { log.Printf("Error reading degree page, %v", err) return } degreeRead <- deg } } func readDegreeUrlList() []string { t0 := time.Now() doc, err := goquery.NewDocument(DEGREE_URL) if err != nil { log.Printf("Error getting degree list %s: %v", DEGREE_URL[:10], err) return nil } log.Printf("readDegreeUrlList Reading <%s> done in %s\n", DEGREE_URL, time.Since(t0)) var degrees []string doc.Find("a[href]").Each(func(i int, s *goquery.Selection) { if rgxDegUrl.MatchString(s.Text()) { degrees = append(degrees, s.Text()) } }) return degrees } func readDegreePage(degreePage string) (Degree, error) { deg := Degree{Url: DEGREE_URL + degreePage, LastUpdated: time.Now()} t0 := time.Now() doc, err := goquery.NewDocument(deg.Url) if err != nil { log.Printf("Error getting degree doc %s, %v", degreePage, err) return deg, err } log.Printf("readDegreePage Reading <%s> done in %s\n", deg.Url, time.Since(t0)) deg.Name = doc.Find(S_D_NAME).First().Text() deg.Credit, err = strconv.Atoi(doc.Find(S_D_CREDIT).First().Text()) if err != nil { log.Printf("Couldn't get int our of credit field, %v", err) } doc.Find(S_D_MANDATORY).Each(func(i int, s *goquery.Selection) { deg.Mandatory = append(deg.Mandatory, s.Text()) }) doc.Find(S_D_EXTRA).Each(func(i int, s *goquery.Selection) { deg.Extra = append(deg.Extra, s.Text()) }) deg.Id = base64.StdEncoding.EncodeToString([]byte(deg.Name)) return deg, nil } func readTopicPage(s *dskvs.Store, topicChan chan Topic) { t0 := time.Now() doc, err := goquery.NewDocument(TOPIC_URL) if err != nil { log.Printf("Error getting topic doc %s, %v", TOPIC_URL, err) return } log.Printf("readTopicPage Reading <%s> done in %s\n", TOPIC_URL, time.Since(t0)) doc.Find(S_T_PAIR).Each(func(i int, s *goquery.Selection) { // Skip the first pair, they're header if i == 0 { return } t := Topic{LastUpdated: time.Now()} s.Find(S_T_VAL).Each(func(i int, s *goquery.Selection) { log.Printf("i=%d Topic = %v", i, s.Text()) switch i { case 0: t.Code = s.Children().Text() case 1: t.Description = s.Text() default: return } }) topicChan <- t }) close(topicChan) } func readCourse(s *dskvs.Store, courseRead chan Course) { tick := time.NewTicker(time.Millisecond * COURSE_QUERY_DELAY) defer tick.Stop() topics := listTopics(s) for i, topic := range topics { fmt.Printf("...") <-tick.C fmt.Printf(" tick! 
%d/%d topics\n", i, len(topics)) courses, err := readCourseFromTopicPage(topic.Code) if err != nil { log.Printf("Error reading topic code %s, %v", topic.Code, err) continue } for _, c := range courses { courseRead <- c } } close(courseRead) } func readCourseFromTopicPage(topicCode string) ([]Course, error) { target := COURSE_URL + topicCode + ".html" t0 := time.Now() doc, err := goquery.NewDocument(target) if err != nil { log.Printf("Error getting topic doc %s, %v", target, err) return nil, err } log.Printf("readCourseFromTopicPage Reading <%s> done in %s\n", target, time.Since(t0)) var courses []Course doc.Find(S_CRS_BOX).Each(func(i int, s *goquery.Selection) { var id string = s.Find(S_CRS_CODE).Text() var topic string = topicCode var code string = id[3:] var url string = target var level int var credit int var name string = s.Find(S_CRS_TITLE).Text() var descr string = s.Find(S_CRS_DESC).Text() var depend []string var equiv []string level, err = strconv.Atoi(string(id[3])) if err != nil { log.Printf("Error reading course level from id %s, %v", id, err) return } creditStr := rgxCrsCredit.FindString(s.Find(S_CRS_CREDIT).Text()) if len(creditStr) < 1 { log.Printf("No credit for id %d", id) return } else { credit, err = strconv.Atoi(string(creditStr[0])) if err != nil { log.Printf("Error reading course credit from id %s, %v", id, err) return } } depend = rgxCrsCode.FindAllString(s.Find(S_CRS_REQ).Text(), -1) c := Course{ Id: id, Topic: topic, Code: code, Url: url, Level: level,
random_line_split
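One subtlety in the duplicate checks above (both the Equivalence loop and the topic Courses loop): the continue only advances the inner range loop, so the append after the loop still runs and the guard never actually prevents a duplicate. A minimal sketch of the intended membership test, in Python since the fix is language neutral; link_equivalence and the course IDs are invented for illustration.

def link_equivalence(equivalences: list, equiv: str) -> None:
    # skip the append when the equivalence is already known
    if equiv in equivalences:
        print("Already know that one,", equiv)
        return
    equivalences.append(equiv)

eq = ["CSI3504"]
link_equivalence(eq, "CSI3504")  # logged, not appended a second time
link_equivalence(eq, "CSI3104")
assert eq == ["CSI3504", "CSI3104"]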
db_viewer.go
} defer func() { err := store.Close() if err != nil { log.Printf("Error closing dskvs: %v", err) } }() if *flagCourse { for _, c := range listCourses(store) { log.Printf("len(c.Id)=%d, c.Id=\"%s\"", len(c.Id), c.Id) fmt.Printf("%+v\n", c) } } if *valCourse != "" { key := COURSE_COLL + dskvs.CollKeySep + *valCourse val, ok, err := store.Get(key) if !ok { log.Printf("Not found : %v", key) } else if err != nil { log.Printf("Error %v", err) } else { log.Printf(string(val)) } } if *flagTopic { for _, t := range listTopics(store) { fmt.Printf("%+v\n", t) } } if *valTopic != "" { key := TOPIC_COLL + dskvs.CollKeySep + *valTopic val, ok, err := store.Get(key) if !ok { log.Printf("Not found : %v", key) } else if err != nil { log.Printf("Error %v", err) } else { log.Printf(string(val)) } } if *flagDegree { for _, d := range listDegrees(store) { fmt.Printf("%+v\n", d) } } if *valDegree != "" { key := DEGREE_COLL + dskvs.CollKeySep + *valDegree val, ok, err := store.Get(key) if !ok { log.Printf("Not found : %v", key) } else if err != nil { log.Printf("Error %v", err) } else { log.Printf(string(val)) } } if *flagDegreeBF { doDegreeBackfill(store) } if *flagTopicBF { doTopicBackfill(store) } if *flagCourseBF { doCourseBackfill(store) } } func listCourses(s *dskvs.Store) []Course { results, err := s.GetAll(COURSE_COLL) if err != nil { log.Printf("Couldn't query back saved courses, %v", err) return nil } var courses []Course for _, b := range results { c := Course{} if err := json.Unmarshal(b, &c); err != nil { log.Printf("Couldn't unmarshal courses from store, %v", err) continue } courses = append(courses, c) } return courses } func listTopics(s *dskvs.Store) []Topic { results, err := s.GetAll(TOPIC_COLL) if err != nil { log.Printf("Couldn't query back saved topics, %v", err) return nil } var topics []Topic for _, b := range results { d := Topic{} if err := json.Unmarshal(b, &d); err != nil { log.Printf("Couldn't unmarshal topics from store, %v", err) continue } topics = append(topics, d) } return topics } func listDegrees(s *dskvs.Store) []Degree { results, err := s.GetAll(DEGREE_COLL) if err != nil { log.Printf("Couldn't query back saved degrees, %v", err) return nil } var degrees []Degree for _, b := range results { d := Degree{} if err := json.Unmarshal(b, &d); err != nil { log.Printf("Couldn't unmarshal degrees from store, %v", err) continue } degrees = append(degrees, d) } return degrees } func doDegreeBackfill(s *dskvs.Store) { degreeChan := make(chan Degree) go readDegree(degreeChan) for degree := range degreeChan { b, err := json.Marshal(degree) if err != nil { log.Printf("Couldn't marshal degree, %v", err) return } key := DEGREE_COLL + dskvs.CollKeySep + degree.Id if err = s.Put(key, b); err != nil { log.Printf("Error Putting degree, %v", err) return } } } func doTopicBackfill(s *dskvs.Store) { topicChan := make(chan Topic) go readTopicPage(s, topicChan) for topic := range topicChan { b, err := json.Marshal(topic) if err != nil { log.Printf("Couldn't marshal topic, %v", err) return } key := TOPIC_COLL + dskvs.CollKeySep + topic.Code if err = s.Put(key, b); err != nil { log.Printf("Error Putting topic, %v", err) return } } } func doCourseBackfill(s *dskvs.Store) { courseChan := make(chan Course) go readCourse(s, courseChan) for course := range courseChan { b, err := json.Marshal(course) if err != nil { log.Printf("Couldn't marshal course, %v", err) return } key := COURSE_COLL + dskvs.CollKeySep + course.Id if err = s.Put(key, b); err != nil { log.Printf("Error Putting course, %v", err) return } 
} courses := listCourses(s) for _, c := range courses { lang, err := strconv.Atoi(string(c.Code[1])) if err != nil { log.Printf("Couldn't get language digit, %v", err) continue } var equiv string if lang < 5 && lang >= 0 { equiv = c.Id[:4] + strconv.Itoa(lang+4) + c.Id[5:] } else if lang >= 5 && lang < 10 { equiv = c.Id[:4] + strconv.Itoa(lang-4) + c.Id[5:] } else { log.Printf("Invalid lang digit=%d", lang) continue } _, ok, err := s.Get(COURSE_COLL + dskvs.CollKeySep + equiv) if err != nil { log.Printf("Error getting bilingual equiv, %v", err) continue } if !ok { log.Printf("Not bilingual, %v", c.Id) continue } for _, known := range c.Equivalence { if known == equiv { log.Printf("Already know that one, %v", known) continue } } c.Equivalence = append(c.Equivalence, equiv) log.Printf("Linking bilingual vs of %s: %s", c.Id, equiv) b, err := json.Marshal(c) if err != nil { log.Printf("Couldn't marshal c, %v", err) return } key := COURSE_COLL + dskvs.CollKeySep + c.Id if err = s.Put(key, b); err != nil { log.Printf("Error Putting c, %v", err) return } } reconcileTopicWithCourses(s) } func reconcileTopicWithCourses(s *dskvs.Store) { courses := listCourses(s) var t Topic for _, c := range courses { key := TOPIC_COLL + dskvs.CollKeySep + c.Topic out, ok, err := s.Get(key) if !ok || err != nil { log.Printf("Something went wrong, course=%s, ok=%v, err=%v", c, ok, err) continue } err = json.Unmarshal(out, &t) if err != nil { log.Printf("Couldn't unmarshal topic, %v", err) } for _, known := range t.Courses { if known == c.Id { log.Printf("Already known by this topic, %s", c.Id) continue } } t.Courses = append(t.Courses, c.Id) log.Printf("Linking %s to %s", t.Code, c.Id) in, err := json.Marshal(t) if err != nil { log.Printf("Couldn't marshal topic %v, %v", t, err) continue } err = s.Put(key, in) if err != nil { log.Printf("Couldn't Put, key=%s, err=%v, t=%v", key, err, t) } } } func readDegree(degreeRead chan Degree) { defer close(degreeRead) degreeList := readDegreeUrlList() tick := time.NewTicker(time.Millisecond * DEGREE_QUERY_DELAY) defer tick.Stop() log.Printf("Found %d URLs to degree pages", len(degreeList)) for _, degreeUrl := range degreeList { fmt.Printf("...") <-tick.C fmt.Printf(" tic! %s\n", degreeUrl) deg, err := readDegreePage(degreeUrl) if err != nil { log.Printf("Error reading degree page, %v", err) return } degreeRead <- deg } } func
readDegreeUrlList
identifier_name
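readDegree and readCourse above space their page fetches with a time.Ticker so the site is not hammered. A rough Python analogue of that throttle pattern, assuming a half-second interval and placeholder URLs:

import time

def ticker(interval: float):
    # yields once per interval, like receiving from a Go ticker's channel
    next_tick = time.monotonic() + interval
    while True:
        delay = next_tick - time.monotonic()
        if delay > 0:
            time.sleep(delay)
        next_tick += interval
        yield

tick = ticker(0.5)
for url in ["degree-1.html", "degree-2.html"]:  # placeholder URLs
    next(tick)  # the <-tick.C equivalent: block until the next tick
    print("fetching", url)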
db_viewer.go
json:"extra"` LastUpdated time.Time `json:"updated"` } func main() { flagCourse := flag.Bool( "courses", false, "print courses in the datastore", ) flagTopic := flag.Bool( "topics", false, "print topics in the datastore", ) flagDegree := flag.Bool( "degrees", false, "print degrees in the datastore", ) valCourse := flag.String( "course", "", "print value of that course", ) valTopic := flag.String( "topic", "", "print value of that topic", ) valDegree := flag.String( "degree", "", "print value of that degree", ) flagDegreeBF := flag.Bool( "backfill-degree", false, "backfill the degrees from the website", ) flagTopicBF := flag.Bool( "backfill-topic", false, "backfill the topics from the website", ) flagCourseBF := flag.Bool( "backfill-course", false, "backfill the courses from the website", ) flag.Parse() if !(*flagCourse || *flagTopic || *flagDegree || *flagCourseBF || *flagTopicBF || *flagDegreeBF || *valCourse != "" || *valTopic != "" || *valDegree != "") { log.Printf("%v", *flagTopic) flag.Usage() return } store, err := dskvs.Open("./db") if err != nil { log.Printf("Error opening dskvs: %v", err) return } defer func() { err := store.Close() if err != nil { log.Printf("Error closing dskvs: %v", err) } }() if *flagCourse { for _, c := range listCourses(store) { log.Printf("len(c.Id)=%d, c.Id=\"%s\"", len(c.Id), c.Id) fmt.Printf("%+v\n", c) } } if *valCourse != "" { key := COURSE_COLL + dskvs.CollKeySep + *valCourse val, ok, err := store.Get(key) if !ok { log.Printf("Not found : %v", key) } else if err != nil { log.Printf("Error %v", err) } else { log.Printf(string(val)) } } if *flagTopic { for _, t := range listTopics(store) { fmt.Printf("%+v\n", t) } } if *valTopic != "" { key := TOPIC_COLL + dskvs.CollKeySep + *valTopic val, ok, err := store.Get(key) if !ok { log.Printf("Not found : %v", key) } else if err != nil { log.Printf("Error %v", err) } else { log.Printf(string(val)) } } if *flagDegree { for _, d := range listDegrees(store) { fmt.Printf("%+v\n", d) } } if *valDegree != "" { key := DEGREE_COLL + dskvs.CollKeySep + *valDegree val, ok, err := store.Get(key) if !ok { log.Printf("Not found : %v", key) } else if err != nil { log.Printf("Error %v", err) } else { log.Printf(string(val)) } } if *flagDegreeBF { doDegreeBackfill(store) } if *flagTopicBF { doTopicBackfill(store) } if *flagCourseBF { doCourseBackfill(store) } } func listCourses(s *dskvs.Store) []Course { results, err := s.GetAll(COURSE_COLL) if err != nil { log.Printf("Couldn't query back saved courses, %v", err) return nil } var courses []Course for _, b := range results { c := Course{} if err := json.Unmarshal(b, &c); err != nil { log.Printf("Couldn't unmarshal courses from store, %v", err) continue } courses = append(courses, c) } return courses } func listTopics(s *dskvs.Store) []Topic { results, err := s.GetAll(TOPIC_COLL) if err != nil { log.Printf("Couldn't query back saved topics, %v", err) return nil } var topics []Topic for _, b := range results { d := Topic{} if err := json.Unmarshal(b, &d); err != nil { log.Printf("Couldn't unmarshal topics from store, %v", err) continue } topics = append(topics, d) } return topics } func listDegrees(s *dskvs.Store) []Degree { results, err := s.GetAll(DEGREE_COLL) if err != nil
var degrees []Degree for _, b := range results { d := Degree{} if err := json.Unmarshal(b, &d); err != nil { log.Printf("Couldn't unmarshal degrees from store, %v", err) continue } degrees = append(degrees, d) } return degrees } func doDegreeBackfill(s *dskvs.Store) { degreeChan := make(chan Degree) go readDegree(degreeChan) for degree := range degreeChan { b, err := json.Marshal(degree) if err != nil { log.Printf("Couldn't marshal degree, %v", err) return } key := DEGREE_COLL + dskvs.CollKeySep + degree.Id if err = s.Put(key, b); err != nil { log.Printf("Error Putting degree, %v", err) return } } } func doTopicBackfill(s *dskvs.Store) { topicChan := make(chan Topic) go readTopicPage(s, topicChan) for topic := range topicChan { b, err := json.Marshal(topic) if err != nil { log.Printf("Couldn't marshal topic, %v", err) return } key := TOPIC_COLL + dskvs.CollKeySep + topic.Code if err = s.Put(key, b); err != nil { log.Printf("Error Putting topic, %v", err) return } } } func doCourseBackfill(s *dskvs.Store) { courseChan := make(chan Course) go readCourse(s, courseChan) for course := range courseChan { b, err := json.Marshal(course) if err != nil { log.Printf("Couldn't marshal course, %v", err) return } key := COURSE_COLL + dskvs.CollKeySep + course.Id if err = s.Put(key, b); err != nil { log.Printf("Error Putting course, %v", err) return } } courses := listCourses(s) for _, c := range courses { lang, err := strconv.Atoi(string(c.Code[1])) if err != nil { log.Printf("Couldn't get language digit, %v", err) continue } var equiv string if lang < 5 && lang >= 0 { equiv = c.Id[:4] + strconv.Itoa(lang+4) + c.Id[5:] } else if lang >= 5 && lang < 10 { equiv = c.Id[:4] + strconv.Itoa(lang-4) + c.Id[5:] } else { log.Printf("Invalid lang digit=%d", lang) continue } _, ok, err := s.Get(COURSE_COLL + dskvs.CollKeySep + equiv) if err != nil { log.Printf("Error getting bilingual equiv, %v", err) continue } if !ok { log.Printf("Not bilingual, %v", c.Id) continue } for _, known := range c.Equivalence { if known == equiv { log.Printf("Already know that one, %v", known) continue } } c.Equivalence = append(c.Equivalence, equiv) log.Printf("Linking bilingual vs of %s: %s", c.Id, equiv) b, err := json.Marshal(c) if err != nil { log.Printf("Couldn't marshal c, %v", err) return } key := COURSE_COLL + dskvs.CollKeySep + c.Id if err = s.Put(key, b); err != nil { log.Printf("Error Putting c, %v", err) return } } reconcileTopicWithCourses(s) } func reconcileTopicWithCourses(s *dskvs.Store) { courses := listCourses(s) var t Topic for _, c := range courses { key := TOPIC_COLL + dskvs.CollKeySep + c.Topic out, ok, err := s.Get(key) if !
{ log.Printf("Couldn't query back saved degrees, %v", err) return nil }
conditional_block
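The language-digit flip in doCourseBackfill above derives a course's bilingual twin: the digit at index 4 of the ID (the second digit of the four-digit code) moves between the 0-4 and 5-9 bands by adding or subtracting 4. A small sketch; the course IDs are invented examples.

def bilingual_equiv(course_id: str) -> str:
    lang = int(course_id[4])  # second digit of the course code
    if 0 <= lang < 5:
        lang += 4
    elif lang < 10:
        lang -= 4
    return course_id[:4] + str(lang) + course_id[5:]

assert bilingual_equiv("CSI3104") == "CSI3504"
assert bilingual_equiv("CSI3504") == "CSI3104"  # the flip is its own inverse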
db_viewer.go
_, d := range listDegrees(store) { fmt.Printf("%+v\n", d) } } if *valDegree != "" { key := DEGREE_COLL + dskvs.CollKeySep + *valDegree val, ok, err := store.Get(key) if !ok { log.Printf("Not found : %v", key) } else if err != nil { log.Printf("Error %v", err) } else { log.Printf(string(val)) } } if *flagDegreeBF { doDegreeBackfill(store) } if *flagTopicBF { doTopicBackfill(store) } if *flagCourseBF { doCourseBackfill(store) } } func listCourses(s *dskvs.Store) []Course { results, err := s.GetAll(COURSE_COLL) if err != nil { log.Printf("Couldn't query back saved courses, %v", err) return nil } var courses []Course for _, b := range results { c := Course{} if err := json.Unmarshal(b, &c); err != nil { log.Printf("Couldn't unmarshal courses from store, %v", err) continue } courses = append(courses, c) } return courses } func listTopics(s *dskvs.Store) []Topic { results, err := s.GetAll(TOPIC_COLL) if err != nil { log.Printf("Couldn't query back saved topics, %v", err) return nil } var topics []Topic for _, b := range results { d := Topic{} if err := json.Unmarshal(b, &d); err != nil { log.Printf("Couldn't unmarshal topics from store, %v", err) continue } topics = append(topics, d) } return topics } func listDegrees(s *dskvs.Store) []Degree { results, err := s.GetAll(DEGREE_COLL) if err != nil { log.Printf("Couldn't query back saved degrees, %v", err) return nil } var degrees []Degree for _, b := range results { d := Degree{} if err := json.Unmarshal(b, &d); err != nil { log.Printf("Couldn't unmarshal degrees from store, %v", err) continue } degrees = append(degrees, d) } return degrees } func doDegreeBackfill(s *dskvs.Store) { degreeChan := make(chan Degree) go readDegree(degreeChan) for degree := range degreeChan { b, err := json.Marshal(degree) if err != nil { log.Printf("Couldn't marshal degree, %v", err) return } key := DEGREE_COLL + dskvs.CollKeySep + degree.Id if err = s.Put(key, b); err != nil { log.Printf("Error Putting degree, %v", err) return } } } func doTopicBackfill(s *dskvs.Store) { topicChan := make(chan Topic) go readTopicPage(s, topicChan) for topic := range topicChan { b, err := json.Marshal(topic) if err != nil { log.Printf("Couldn't marshal topic, %v", err) return } key := TOPIC_COLL + dskvs.CollKeySep + topic.Code if err = s.Put(key, b); err != nil { log.Printf("Error Putting topic, %v", err) return } } } func doCourseBackfill(s *dskvs.Store) { courseChan := make(chan Course) go readCourse(s, courseChan) for course := range courseChan { b, err := json.Marshal(course) if err != nil { log.Printf("Couldn't marshal course, %v", err) return } key := COURSE_COLL + dskvs.CollKeySep + course.Id if err = s.Put(key, b); err != nil { log.Printf("Error Putting course, %v", err) return } } courses := listCourses(s) for _, c := range courses { lang, err := strconv.Atoi(string(c.Code[1])) if err != nil { log.Printf("Couldn't get language digit, %v", err) continue } var equiv string if lang < 5 && lang >= 0 { equiv = c.Id[:4] + strconv.Itoa(lang+4) + c.Id[5:] } else if lang >= 5 && lang < 10 { equiv = c.Id[:4] + strconv.Itoa(lang-4) + c.Id[5:] } else { log.Printf("Invalid lang digit=%d", lang) continue } _, ok, err := s.Get(COURSE_COLL + dskvs.CollKeySep + equiv) if err != nil { log.Printf("Error getting bilingual equiv, %v", err) continue } if !ok { log.Printf("Not bilingual, %v", c.Id) continue } for _, known := range c.Equivalence { if known == equiv { log.Printf("Already know that one, %v", known) continue } } c.Equivalence = append(c.Equivalence, equiv) log.Printf("Linking bilingual 
vs of %s: %s", c.Id, equiv) b, err := json.Marshal(c) if err != nil { log.Printf("Couldn't marshal c, %v", err) return } key := COURSE_COLL + dskvs.CollKeySep + c.Id if err = s.Put(key, b); err != nil { log.Printf("Error Putting c, %v", err) return } } reconcileTopicWithCourses(s) } func reconcileTopicWithCourses(s *dskvs.Store) { courses := listCourses(s) var t Topic for _, c := range courses { key := TOPIC_COLL + dskvs.CollKeySep + c.Topic out, ok, err := s.Get(key) if !ok || err != nil { log.Printf("Something went wrong, course=%s, ok=%v, err=%v", c, ok, err) continue } err = json.Unmarshal(out, &t) if err != nil { log.Printf("Couldn't unmarshal topic, %v", err) } for _, known := range t.Courses { if known == c.Id { log.Printf("Already known by this topic, %s", c.Id) continue } } t.Courses = append(t.Courses, c.Id) log.Printf("Linking %s to %s", t.Code, c.Id) in, err := json.Marshal(t) if err != nil { log.Printf("Couldn't marshal topic %v, %v", t, err) continue } err = s.Put(key, in) if err != nil { log.Printf("Couldn't Put, key=%s, err=%v, t=%v", key, err, t) } } } func readDegree(degreeRead chan Degree) { defer close(degreeRead) degreeList := readDegreeUrlList() tick := time.NewTicker(time.Millisecond * DEGREE_QUERY_DELAY) defer tick.Stop() log.Printf("Found %d URLs to degree pages", len(degreeList)) for _, degreeUrl := range degreeList { fmt.Printf("...") <-tick.C fmt.Printf(" tic! %s\n", degreeUrl) deg, err := readDegreePage(degreeUrl) if err != nil { log.Printf("Error reading degree page, %v", err) return } degreeRead <- deg } } func readDegreeUrlList() []string { t0 := time.Now() doc, err := goquery.NewDocument(DEGREE_URL) if err != nil { log.Printf("Error getting degree list %s: %v", DEGREE_URL[:10], err) return nil } log.Printf("readDegreeUrlList Reading <%s> done in %s\n", DEGREE_URL, time.Since(t0)) var degrees []string doc.Find("a[href]").Each(func(i int, s *goquery.Selection) { if rgxDegUrl.MatchString(s.Text()) { degrees = append(degrees, s.Text()) } }) return degrees } func readDegreePage(degreePage string) (Degree, error)
{ deg := Degree{Url: DEGREE_URL + degreePage, LastUpdated: time.Now()} t0 := time.Now() doc, err := goquery.NewDocument(deg.Url) if err != nil { log.Printf("Error getting degree doc %s, %v", degreePage, err) return deg, err } log.Printf("readDegreePage Reading <%s> done in %s\n", deg.Url, time.Since(t0)) deg.Name = doc.Find(S_D_NAME).First().Text() deg.Credit, err = strconv.Atoi(doc.Find(S_D_CREDIT).First().Text()) if err != nil { log.Printf("Couldn't get int out of the credit field, %v", err) }
identifier_body
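The degree ID assigned at the end of readDegreePage above is simply the base64 of the degree name. The same transform in Python; the sample name is an assumption.

import base64

def degree_id(name: str) -> str:
    return base64.b64encode(name.encode("utf-8")).decode("ascii")

assert degree_id("Honours BSc") == "SG9ub3VycyBCU2M="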
ic4164.rs
The 4164 is 1-bit memory that is stored /// in a 256x256 matrix internally, but we don't have either u1 or u256 types (bools /// don't count; they actually take up much more than 1 bit of memory space). Instead we /// pack the bits into an array of 2048 u32s, which we then address through a function /// that resolves the row and column into an array index and an index to the bit inside /// the u32 value at that array index. memory: [u32; 2048], /// The latched row value taken from the pins when RAS transitions low. If no row has /// been latched (RAS hasn't yet gone low), this will be `None`. row: Option<u8>, /// The latched column value taken from the pins when CAS transitions low. If no column /// has been latched (CAS hasn't yet gone low), this will be `None`. col: Option<u8>, /// The latched data bit taken from the D pin. This is latched just before a write takes /// place and is done so that its value can replace the Q pin's value in RMW mode /// easily. If no data has been latched (either WE or CAS is not low), this will be /// `None`. data: Option<u8>, } impl Ic4164 { /// Creates a new 4164 64k x 1 dynamic RAM emulation and returns a shared, internally /// mutable reference to it. pub fn new() -> DeviceRef { // Address pins 0-7. let a0 = pin!(A0, "A0", Input); let a1 = pin!(A1, "A1", Input); let a2 = pin!(A2, "A2", Input); let a3 = pin!(A3, "A3", Input); let a4 = pin!(A4, "A4", Input); let a5 = pin!(A5, "A5", Input); let a6 = pin!(A6, "A6", Input); let a7 = pin!(A7, "A7", Input); // The data input pin. When the chip is in write or read-modify-write mode, the // value of this pin will be written to the appropriate bit in the memory array. let d = pin!(D, "D", Input); // The data output pin. This is active in read and read-modify-write mode, set to // the value of the bit at the address latched by RAS and CAS. In write mode, it is // hi-Z. let q = pin!(Q, "Q", Output); // The row address strobe. Setting this low latches the values of A0-A7, saving them // to be part of the address used to access the memory array. let ras = pin!(RAS, "RAS", Input); // The column address strobe. Setting this low latches A0-A7 into the second part of // the memory address. It also initiates read or write mode, depending on the value // of WE. let cas = pin!(CAS, "CAS", Input); // The write-enable pin. If this is high, the chip is in read mode; if it and CAS // are low, the chip is in either write or read-modify-write mode, depending on // which pin went low first. let we = pin!(WE, "WE", Input); // Power supply and no-contact pins. These are not emulated. let nc = pin!(NC, "NC", Unconnected); let vcc = pin!(VCC, "VCC", Unconnected); let vss = pin!(VSS, "VSS", Unconnected); let pins = pins![a0, a1, a2, a3, a4, a5, a6, a7, d, q, ras, cas, we, nc, vcc, vss]; let addr_pins = RefVec::with_vec( IntoIterator::into_iter(PA_ADDRESS) .map(|pa| clone_ref!(pins[pa])) .collect::<Vec<PinRef>>(), ); let device: DeviceRef = new_ref!(Ic4164 { pins, addr_pins, memory: [0; 2048], row: None, col: None, data: None, }); float!(q); attach_to!(device, ras, cas, we); device } /// Reads the row and col and calculates the specific bit in the memory array to which /// this row/col combination refers. The first element of the return value is the index /// of the 32-bit number in the memory array where that bit resides; the second element /// is the index of the bit within that 32-bit number. 
fn resolve(&self) -> (usize, usize) { // Unless there's a bug in this program, this method should never be called while // either `self.row` or `self.col` are `None`. So we actually *want* it to panic if // `unwrap()` fails. let row = self.row.unwrap() as usize; let col = self.col.unwrap() as usize; let row_index = row << 3; let col_index = (col & 0b1110_0000) >> 5; let bit_index = col & 0b0001_1111; (row_index | col_index, bit_index) } /// Retrieves a single bit from the memory array and sets the level of the Q pin to the /// value of that bit. fn read(&self) { let (index, bit) = self.resolve(); let value = (self.memory[index] & (1 << bit)) >> bit; set_level!(self.pins[Q], Some(value as f64)) } /// Writes the value of the D pin to a single bit in the memory array. If the Q pin is /// also connected, the value is also sent to it; this happens only in RMW mode and /// keeps the input and output data pins synched. (This guaranteed sync means that the /// C64 can connect these two pins with a PC board trace, but the C64 doesn't use RMW /// mode.) fn write(&mut self) { let (index, bit) = self.resolve(); if self.data.unwrap() == 1 { self.memory[index] |= 1 << bit; } else { self.memory[index] &= !(1 << bit); } if !floating!(self.pins[Q]) { set_level!(self.pins[Q], Some(self.data.unwrap() as f64)); } } } impl Device for Ic4164 { fn pins(&self) -> RefVec<Pin> { self.pins.clone() } fn registers(&self) -> Vec<u8> { vec![] } fn update(&mut self, event: &LevelChange) { match event { LevelChange(pin) if number!(pin) == RAS => { // Invoked when the RAS pin changes level. When it goes low, the current // states of the A0-A7 pins are latched. The address is released when the // RAS pin goes high. // // Since this is the only thing that RAS is used for, it can be left low for // multiple memory accesses if its bits of the address remain the same for // those accesses. This can speed up reads and writes within the same page // by reducing the amount of setup needed for those reads and writes. (This // does not happen in the C64.) if high!(pin) { self.row = None; } else { self.row = Some(pins_to_value(&self.addr_pins) as u8); } } LevelChange(pin) if number!(pin) == CAS => { // Invoked when the CAS pin changes level. // // When CAS goes low, the current states of the A0-A7 pins are latched in a // smiliar way to when RAS goes low. What else happens depends on whether // the WE pin is low. If it is, the chip goes into write mode and the value // on the D pin is saved to a memory location referred to by the latched row // and column values. If WE is not low, read mode is entered, and the value // in that memory location is put onto the Q pin. (Setting the WE pin low // after CAS goes low sets read-modify-write mode; the read that CAS // initiated is still valid.) // // When CAS goes high, the Q pin is disconnected and the latched column and // data (if there is one) values are cleared. if high!(pin)
{ float!(self.pins[Q]); self.col = None; self.data = None; }
conditional_block
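The resolve() math above packs the 256x256 bit matrix into 2048 32-bit words: the row selects a block of eight words, the top three column bits select the word within the block, and the low five bits select the bit within the word. Rendered in Python for a worked example:

def resolve(row: int, col: int) -> tuple:
    row_index = row << 3                  # eight 32-bit words per 256-bit row
    col_index = (col & 0b1110_0000) >> 5  # which word within the row
    bit_index = col & 0b0001_1111         # which bit within that word
    return row_index | col_index, bit_index

assert resolve(0, 0) == (0, 0)
assert resolve(1, 37) == (9, 5)         # row 1 -> word 8, col 37 -> word +1, bit 5
assert resolve(255, 255) == (2047, 31)  # the last bit of the 2048-word array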
ic4164.rs
= (col & 0b1110_0000) >> 5; let bit_index = col & 0b0001_1111; (row_index | col_index, bit_index) } /// Retrieves a single bit from the memory array and sets the level of the Q pin to the /// value of that bit. fn read(&self) { let (index, bit) = self.resolve(); let value = (self.memory[index] & (1 << bit)) >> bit; set_level!(self.pins[Q], Some(value as f64)) } /// Writes the value of the D pin to a single bit in the memory array. If the Q pin is /// also connected, the value is also sent to it; this happens only in RMW mode and /// keeps the input and output data pins synched. (This guaranteed sync means that the /// C64 can connect these two pins with a PC board trace, but the C64 doesn't use RMW /// mode.) fn write(&mut self) { let (index, bit) = self.resolve(); if self.data.unwrap() == 1 { self.memory[index] |= 1 << bit; } else { self.memory[index] &= !(1 << bit); } if !floating!(self.pins[Q]) { set_level!(self.pins[Q], Some(self.data.unwrap() as f64)); } } } impl Device for Ic4164 { fn pins(&self) -> RefVec<Pin> { self.pins.clone() } fn registers(&self) -> Vec<u8> { vec![] } fn update(&mut self, event: &LevelChange) { match event { LevelChange(pin) if number!(pin) == RAS => { // Invoked when the RAS pin changes level. When it goes low, the current // states of the A0-A7 pins are latched. The address is released when the // RAS pin goes high. // // Since this is the only thing that RAS is used for, it can be left low for // multiple memory accesses if its bits of the address remain the same for // those accesses. This can speed up reads and writes within the same page // by reducing the amount of setup needed for those reads and writes. (This // does not happen in the C64.) if high!(pin) { self.row = None; } else { self.row = Some(pins_to_value(&self.addr_pins) as u8); } } LevelChange(pin) if number!(pin) == CAS => { // Invoked when the CAS pin changes level. // // When CAS goes low, the current states of the A0-A7 pins are latched in a // similar way to when RAS goes low. What else happens depends on whether // the WE pin is low. If it is, the chip goes into write mode and the value // on the D pin is saved to a memory location referred to by the latched row // and column values. If WE is not low, read mode is entered, and the value // in that memory location is put onto the Q pin. (Setting the WE pin low // after CAS goes low sets read-modify-write mode; the read that CAS // initiated is still valid.) // // When CAS goes high, the Q pin is disconnected and the latched column and // data (if there is one) values are cleared. if high!(pin) { float!(self.pins[Q]); self.col = None; self.data = None; } else { self.col = Some(pins_to_value(&self.addr_pins) as u8); if high!(self.pins[WE]) { self.read(); } else { self.data = Some(if high!(self.pins[D]) { 1 } else { 0 }); self.write(); } } } LevelChange(pin) if number!(pin) == WE => { // Invoked when the WE pin changes level. // // When WE is high, read mode is enabled (though the actual read will not be // available until both RAS and CAS are set low, indicating that the address // of the read is valid). The internal latched input data value is cleared. // // When WE goes low, the write mode that is enabled depends on whether CAS // is already low. If it is, the chip must have been in read mode and now // moves into read-modify-write mode. The data value on the Q pin remains // valid, and the value on the D pin is latched and stored at the // appropriate memory location. // // If CAS is still high when WE goes low, the Q pin is disconnected.
Nothing // further happens until CAS goes low; at that point, the chip goes into // write mode (data is written to memory but nothing is available to be // read). if high!(pin) { self.data = None; } else { if high!(self.pins[CAS]) { float!(self.pins[Q]); } else { self.data = Some(if high!(self.pins[D]) { 1 } else { 0 }); self.write(); } } } _ => {} } } fn debug_fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{:?}, {:?}, {:?}", self.row, self.col, self.data) } } #[cfg(test)] mod test { use crate::{ components::trace::{Trace, TraceRef}, test_utils::{make_traces, value_to_traces}, }; use super::*; fn before_each() -> (DeviceRef, RefVec<Trace>, RefVec<Trace>) { let device = Ic4164::new(); let tr = make_traces(&device); set!(tr[WE]); set!(tr[RAS]); set!(tr[CAS]); let addr_tr = RefVec::with_vec( IntoIterator::into_iter(PA_ADDRESS) .map(|p| clone_ref!(tr[p])) .collect::<Vec<TraceRef>>(), ); (device, tr, addr_tr) } #[test] fn read_mode_enable_q() { let (_, tr, _) = before_each(); clear!(tr[RAS]); clear!(tr[CAS]); // data at 0x0000, which will be 0 initially assert!(low!(tr[Q]), "Q should have data during read"); set!(tr[CAS]); set!(tr[RAS]); assert!(floating!(tr[Q]), "Q should be disabled after read"); } #[test] fn write_mode_disable_q() { let (_, tr, _) = before_each(); clear!(tr[RAS]); clear!(tr[WE]); clear!(tr[CAS]); assert!(floating!(tr[Q]), "Q should be disabled during write"); set!(tr[CAS]); set!(tr[WE]); set!(tr[RAS]); assert!(floating!(tr[Q]), "Q should be disabled after write"); } #[test] fn rmw_mode_enable_q() { let (_, tr, _) = before_each(); clear!(tr[D]); clear!(tr[RAS]); clear!(tr[CAS]); clear!(tr[WE]); assert!(low!(tr[Q]), "Q should be enabled during RMW"); set!(tr[WE]); set!(tr[CAS]); set!(tr[RAS]); assert!(floating!(tr[Q]), "Q should be disabled after RMW"); } #[test] fn read_write_one_bit() { let (_, tr, _) = before_each(); // Write is happening at 0x0000, so we don't need to set addresses at all set!(tr[D]); clear!(tr[WE]); clear!(tr[RAS]); clear!(tr[CAS]); // 1 is written to address 0x0000 at this point set!(tr[CAS]); set!(tr[RAS]); set!(tr[WE]); clear!(tr[RAS]); clear!(tr[CAS]); let value = high!(tr[Q]); set!(tr[CAS]); set!(tr[RAS]); assert!(value, "Value 1 not written to address 0x0000"); } #[test] fn rmw_one_bit() { let (_, tr, _) = before_each(); // Write is happening at 0x0000, so we don't need to set addresses at all set!(tr[D]); clear!(tr[RAS]); clear!(tr[CAS]); // in read mode, Q should be 0 because no data has been written to 0x0000 yet assert!( low!(tr[Q]), "Value 0 not read from address 0x0000 in RMW mode" ); // Lower WE to go into RMW
clear!(tr[WE]);
random_line_split
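The companion write() step above sets or clears one bit inside a packed word. The same operation in Python; the 32-bit mask is the only addition over the Rust original, since Python integers are unbounded.

def write_bit(memory: list, index: int, bit: int, value: int) -> None:
    if value == 1:
        memory[index] |= 1 << bit
    else:
        memory[index] &= ~(1 << bit) & 0xFFFF_FFFF  # stay within 32 bits

mem = [0] * 2048
write_bit(mem, 9, 5, 1)
assert (mem[9] >> 5) & 1 == 1  # the value read() would put on Q
write_bit(mem, 9, 5, 0)
assert mem[9] == 0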
ic4164.rs
, vcc, vss]; let addr_pins = RefVec::with_vec( IntoIterator::into_iter(PA_ADDRESS) .map(|pa| clone_ref!(pins[pa])) .collect::<Vec<PinRef>>(), ); let device: DeviceRef = new_ref!(Ic4164 { pins, addr_pins, memory: [0; 2048], row: None, col: None, data: None, }); float!(q); attach_to!(device, ras, cas, we); device } /// Reads the row and col and calculates the specific bit in the memory array to which /// this row/col combination refers. The first element of the return value is the index /// of the 32-bit number in the memory array where that bit resides; the second element /// is the index of the bit within that 32-bit number. fn resolve(&self) -> (usize, usize) { // Unless there's a bug in this program, this method should never be called while // either `self.row` or `self.col` are `None`. So we actually *want* it to panic if // `unwrap()` fails. let row = self.row.unwrap() as usize; let col = self.col.unwrap() as usize; let row_index = row << 3; let col_index = (col & 0b1110_0000) >> 5; let bit_index = col & 0b0001_1111; (row_index | col_index, bit_index) } /// Retrieves a single bit from the memory array and sets the level of the Q pin to the /// value of that bit. fn read(&self) { let (index, bit) = self.resolve(); let value = (self.memory[index] & (1 << bit)) >> bit; set_level!(self.pins[Q], Some(value as f64)) } /// Writes the value of the D pin to a single bit in the memory array. If the Q pin is /// also connected, the value is also sent to it; this happens only in RMW mode and /// keeps the input and output data pins synched. (This guaranteed sync means that the /// C64 can connect these two pins with a PC board trace, but the C64 doesn't use RMW /// mode.) fn write(&mut self) { let (index, bit) = self.resolve(); if self.data.unwrap() == 1 { self.memory[index] |= 1 << bit; } else { self.memory[index] &= !(1 << bit); } if !floating!(self.pins[Q]) { set_level!(self.pins[Q], Some(self.data.unwrap() as f64)); } } } impl Device for Ic4164 { fn pins(&self) -> RefVec<Pin> { self.pins.clone() } fn registers(&self) -> Vec<u8> { vec![] } fn update(&mut self, event: &LevelChange) { match event { LevelChange(pin) if number!(pin) == RAS => { // Invoked when the RAS pin changes level. When it goes low, the current // states of the A0-A7 pins are latched. The address is released when the // RAS pin goes high. // // Since this is the only thing that RAS is used for, it can be left low for // multiple memory accesses if its bits of the address remain the same for // those accesses. This can speed up reads and writes within the same page // by reducing the amount of setup needed for those reads and writes. (This // does not happen in the C64.) if high!(pin) { self.row = None; } else { self.row = Some(pins_to_value(&self.addr_pins) as u8); } } LevelChange(pin) if number!(pin) == CAS => { // Invoked when the CAS pin changes level. // // When CAS goes low, the current states of the A0-A7 pins are latched in a // similar way to when RAS goes low. What else happens depends on whether // the WE pin is low. If it is, the chip goes into write mode and the value // on the D pin is saved to a memory location referred to by the latched row // and column values. If WE is not low, read mode is entered, and the value // in that memory location is put onto the Q pin. (Setting the WE pin low // after CAS goes low sets read-modify-write mode; the read that CAS // initiated is still valid.) // // When CAS goes high, the Q pin is disconnected and the latched column and // data (if there is one) values are cleared.
if high!(pin) { float!(self.pins[Q]); self.col = None; self.data = None; } else { self.col = Some(pins_to_value(&self.addr_pins) as u8); if high!(self.pins[WE]) { self.read(); } else { self.data = Some(if high!(self.pins[D]) { 1 } else { 0 }); self.write(); } } } LevelChange(pin) if number!(pin) == WE => { // Invoked when the WE pin changes level. // // When WE is high, read mode is enabled (though the actual read will not be // available until both RAS and CAS are set low, indicating that the address // of the read is valid). The internal latched input data value is cleared. // // When WE goes low, the write mode that is enabled depends on whether CAS // is already low. If it is, the chip must have been in read mode and now // moves into read-modify-write mode. The data value on the Q pin remains // valid, and the value on the D pin is latched and stored at the // appropriate memory location. // // If CAS is still high when WE goes low, the Q pin is disconnected. Nothing // further happens until CAS goes low; at that point, the chip goes into // write mode (data is written to memory but nothing is available to be // read). if high!(pin) { self.data = None; } else { if high!(self.pins[CAS]) { float!(self.pins[Q]); } else { self.data = Some(if high!(self.pins[D]) { 1 } else { 0 }); self.write(); } } } _ => {} } } fn debug_fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{:?}, {:?}, {:?}", self.row, self.col, self.data) } } #[cfg(test)] mod test { use crate::{ components::trace::{Trace, TraceRef}, test_utils::{make_traces, value_to_traces}, }; use super::*; fn before_each() -> (DeviceRef, RefVec<Trace>, RefVec<Trace>) { let device = Ic4164::new(); let tr = make_traces(&device); set!(tr[WE]); set!(tr[RAS]); set!(tr[CAS]); let addr_tr = RefVec::with_vec( IntoIterator::into_iter(PA_ADDRESS) .map(|p| clone_ref!(tr[p])) .collect::<Vec<TraceRef>>(), ); (device, tr, addr_tr) } #[test] fn read_mode_enable_q() { let (_, tr, _) = before_each(); clear!(tr[RAS]); clear!(tr[CAS]); // data at 0x0000, which will be 0 initially assert!(low!(tr[Q]), "Q should have data during read"); set!(tr[CAS]); set!(tr[RAS]); assert!(floating!(tr[Q]), "Q should be disabled after read"); } #[test] fn write_mode_disable_q() { let (_, tr, _) = before_each(); clear!(tr[RAS]); clear!(tr[WE]); clear!(tr[CAS]); assert!(floating!(tr[Q]), "Q should be disabled during write"); set!(tr[CAS]); set!(tr[WE]); set!(tr[RAS]); assert!(floating!(tr[Q]), "Q should be disabled after write"); } #[test] fn rmw_mode_enable_q() { let (_, tr, _) = before_each(); clear!(tr[D]); clear!(tr[RAS]); clear!(tr[CAS]); clear!(tr[WE]); assert!(low!(tr[Q]), "Q should be enabled during RMW"); set!(tr[WE]); set!(tr[CAS]); set!(tr[RAS]); assert!(floating!(tr[Q]), "Q should be disabled after RMW"); } #[test] fn
read_write_one_bit
identifier_name
ic4164.rs
bit_index) } /// Retrieves a single bit from the memory array and sets the level of the Q pin to the /// value of that bit. fn read(&self) { let (index, bit) = self.resolve(); let value = (self.memory[index] & (1 << bit)) >> bit; set_level!(self.pins[Q], Some(value as f64)) } /// Writes the value of the D pin to a single bit in the memory array. If the Q pin is /// also connected, the value is also sent to it; this happens only in RMW mode and /// keeps the input and output data pins synched. (This guaranteed sync means that the /// C64 can connect these two pins with a PC board trace, but the C64 doesn't use RMW /// mode.) fn write(&mut self) { let (index, bit) = self.resolve(); if self.data.unwrap() == 1 { self.memory[index] |= 1 << bit; } else { self.memory[index] &= !(1 << bit); } if !floating!(self.pins[Q]) { set_level!(self.pins[Q], Some(self.data.unwrap() as f64)); } } } impl Device for Ic4164 { fn pins(&self) -> RefVec<Pin> { self.pins.clone() } fn registers(&self) -> Vec<u8> { vec![] } fn update(&mut self, event: &LevelChange) { match event { LevelChange(pin) if number!(pin) == RAS => { // Invoked when the RAS pin changes level. When it goes low, the current // states of the A0-A7 pins are latched. The address is released when the // RAS pin goes high. // // Since this is the only thing that RAS is used for, it can be left low for // multiple memory accesses if its bits of the address remain the same for // those accesses. This can speed up reads and writes within the same page // by reducing the amount of setup needed for those reads and writes. (This // does not happen in the C64.) if high!(pin) { self.row = None; } else { self.row = Some(pins_to_value(&self.addr_pins) as u8); } } LevelChange(pin) if number!(pin) == CAS => { // Invoked when the CAS pin changes level. // // When CAS goes low, the current states of the A0-A7 pins are latched in a // similar way to when RAS goes low. What else happens depends on whether // the WE pin is low. If it is, the chip goes into write mode and the value // on the D pin is saved to a memory location referred to by the latched row // and column values. If WE is not low, read mode is entered, and the value // in that memory location is put onto the Q pin. (Setting the WE pin low // after CAS goes low sets read-modify-write mode; the read that CAS // initiated is still valid.) // // When CAS goes high, the Q pin is disconnected and the latched column and // data (if there is one) values are cleared. if high!(pin) { float!(self.pins[Q]); self.col = None; self.data = None; } else { self.col = Some(pins_to_value(&self.addr_pins) as u8); if high!(self.pins[WE]) { self.read(); } else { self.data = Some(if high!(self.pins[D]) { 1 } else { 0 }); self.write(); } } } LevelChange(pin) if number!(pin) == WE => { // Invoked when the WE pin changes level. // // When WE is high, read mode is enabled (though the actual read will not be // available until both RAS and CAS are set low, indicating that the address // of the read is valid). The internal latched input data value is cleared. // // When WE goes low, the write mode that is enabled depends on whether CAS // is already low. If it is, the chip must have been in read mode and now // moves into read-modify-write mode. The data value on the Q pin remains // valid, and the value on the D pin is latched and stored at the // appropriate memory location. // // If CAS is still high when WE goes low, the Q pin is disconnected.
Nothing // further happens until CAS goes low; at that point, the chip goes into // write mode (data is written to memory but nothing is available to be // read). if high!(pin) { self.data = None; } else { if high!(self.pins[CAS]) { float!(self.pins[Q]); } else { self.data = Some(if high!(self.pins[D]) { 1 } else { 0 }); self.write(); } } } _ => {} } } fn debug_fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{:?}, {:?}, {:?}", self.row, self.col, self.data) } } #[cfg(test)] mod test { use crate::{ components::trace::{Trace, TraceRef}, test_utils::{make_traces, value_to_traces}, }; use super::*; fn before_each() -> (DeviceRef, RefVec<Trace>, RefVec<Trace>) { let device = Ic4164::new(); let tr = make_traces(&device); set!(tr[WE]); set!(tr[RAS]); set!(tr[CAS]); let addr_tr = RefVec::with_vec( IntoIterator::into_iter(PA_ADDRESS) .map(|p| clone_ref!(tr[p])) .collect::<Vec<TraceRef>>(), ); (device, tr, addr_tr) } #[test] fn read_mode_enable_q() { let (_, tr, _) = before_each(); clear!(tr[RAS]); clear!(tr[CAS]); // data at 0x0000, which will be 0 initially assert!(low!(tr[Q]), "Q should have data during read"); set!(tr[CAS]); set!(tr[RAS]); assert!(floating!(tr[Q]), "Q should be disabled after read"); } #[test] fn write_mode_disable_q() { let (_, tr, _) = before_each(); clear!(tr[RAS]); clear!(tr[WE]); clear!(tr[CAS]); assert!(floating!(tr[Q]), "Q should be disabled during write"); set!(tr[CAS]); set!(tr[WE]); set!(tr[RAS]); assert!(floating!(tr[Q]), "Q should be disabled after write"); } #[test] fn rmw_mode_enable_q() { let (_, tr, _) = before_each(); clear!(tr[D]); clear!(tr[RAS]); clear!(tr[CAS]); clear!(tr[WE]); assert!(low!(tr[Q]), "Q should be enabled during RMW"); set!(tr[WE]); set!(tr[CAS]); set!(tr[RAS]); assert!(floating!(tr[Q]), "Q should be disabled after RMW"); } #[test] fn read_write_one_bit() { let (_, tr, _) = before_each(); // Write is happening at 0x0000, so we don't need to set addresses at all set!(tr[D]); clear!(tr[WE]); clear!(tr[RAS]); clear!(tr[CAS]); // 1 is written to address 0x0000 at this point set!(tr[CAS]); set!(tr[RAS]); set!(tr[WE]); clear!(tr[RAS]); clear!(tr[CAS]); let value = high!(tr[Q]); set!(tr[CAS]); set!(tr[RAS]); assert!(value, "Value 1 not written to address 0x0000"); } #[test] fn rmw_one_bit()
{ let (_, tr, _) = before_each(); // Write is happening at 0x0000, so we don't need to set addresses at all set!(tr[D]); clear!(tr[RAS]); clear!(tr[CAS]); // in read mode, Q should be 0 because no data has been written to 0x0000 yet assert!( low!(tr[Q]), "Value 0 not read from address 0x0000 in RMW mode" ); // Lower WE to go into RMW clear!(tr[WE]); // 1 is written to address 0x0000 at this point set!(tr[CAS]); set!(tr[RAS]); set!(tr[WE]); clear!(tr[RAS]);
identifier_body
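The resolve() method called by read() and write() above is not included in this excerpt, but its role is clear from the call sites: it maps the latched row and column bytes to a word index plus a bit position inside the memory array. A minimal Python sketch of that addressing, assuming the 65,536 one-bit cells are packed into 32-bit words (the word width is an assumption; only the index/bit usage is visible in the Rust code):

# Sketch of 4164-style bit addressing; MEMORY_WORDS assumes 32-bit words.
MEMORY_WORDS = 65536 // 32

def resolve(row, col):
    addr = (row << 8) | col          # 16-bit cell address from the two latched bytes
    return addr >> 5, addr & 0x1F    # (word index, bit within word)

def read_bit(memory, row, col):
    index, bit = resolve(row, col)
    return (memory[index] >> bit) & 1

def write_bit(memory, row, col, value):
    index, bit = resolve(row, col)
    if value:
        memory[index] |= 1 << bit
    else:
        memory[index] &= ~(1 << bit)

memory = [0] * MEMORY_WORDS
write_bit(memory, 0, 0, 1)
assert read_bit(memory, 0, 0) == 1   # mirrors the read_write_one_bit test above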
main_demo.py
def change_infor_money(self,name,money): self.infor_conf.set(name,'余额',str(money)) self.infor_conf.write(open("information.conf","w")) def close_window(self): self.video_btn = 3 self.show_camera()
#self.sources = 'rtsp://admin:[email protected]/Streaming/Channels/1' #self.source = 'shishi-nini.mp4' self.source = conf.get('image_config', 'capture_source') if self.source == '0': self.source = 0 else : self.source = str(self.source) print(self.source) self.cap = cv2.VideoCapture() self.cap.open(self.source) print(self.cap.isOpened()) if self.video_btn==0: # video_btn was set to 0 earlier; face recognition changes this value directly, which keeps recognition and the plain camera view separate while (self.cap.isOpened()): ret, self.image = self.cap.read() #print(ret,self.image) QApplication.processEvents() # tell Qt to process any pending events and hand control back to the caller, which keeps the UI from freezing show = cv2.resize(self.image, (900, 560)) show = cv2.cvtColor(show,cv2.COLOR_BGR2RGB) # display the original frame # an OpenCV image cannot be shown in a QLabel directly; it must be converted with QImage(uchar * data, int width, self.showImage = QImage(show.data, show.shape[1], show.shape[0], QImage.Format_RGB888) self.label_4.setPixmap(QPixmap.fromImage(self.showImage)) # the last frame would otherwise stay on the label, so clear it self.label_4.setPixmap(QPixmap("")) elif self.video_btn==1: # load the enrolled people from the photo folder res,activeFileInfo = ASFGetActiveFileInfo() if (res != 0): print("ASFGetActiveFileInfo fail: {}".format(res)) else: print("获取到版本信息") pass # set up the face recognition engine SET_SIZE = float(conf.get('image_config', 'set_size')) face_engine = ArcFace() # a class from the engine module face_Features = self.get_face_features("photo\\") print(face_Features) self.all_face_names = face_Features.keys() num_faces_features = len(face_Features) res = face_engine.ASFInitEngine(ASF_DETECT_MODE_VIDEO,ASF_OP_0_ONLY,16,10,5) definite_thres = 0.88 # similarity >= 0.88: accept immediately and stop scanning the other features threshold = 0.7 # otherwise take the highest similarity over all features; >= 0.7 counts as the same person faceid_dict = {} frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)) frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) self.moreFaceTime = time() while (self.cap.isOpened()): cap, frame = self.cap.read() QApplication.processEvents() # shrink the camera frame; a smaller image means less computation #small_frame = cv2.resize(frame, (0, 0), fx=SET_SIZE, fy=SET_SIZE) small_frame = cv2.resize(frame, (int(frame_width * SET_SIZE) // 4 * 4, int(frame_height * SET_SIZE))) res,detectedFaces = face_engine.ASFDetectFaces(small_frame) inter_var = 0 faceid_list = [] face_names = [] # detect faces if res != 0: print("ASFDetectFaces 初始化失败") break faceNum = detectedFaces.faceNum if faceNum >= 1 : if faceNum > 1 and (time() - self.moreFaceTime) >3 : # warn when more than one person is in view print('有多个人脸') # avoid constant nagging: if multiple faces persist, warn once every three seconds print(time() - self.moreFaceTime) self.moreFaceTime = time() QMessageBox.about(self, 'warning', '请保持画面中只有一位顾客') detece_faceNum = 1 else : detece_faceNum = 0 for face in range(detece_faceNum): peopleName = "Unknown" ra = detectedFaces.faceRect[face] faceID = detectedFaces.faceID[face] faceid_list.append(faceID) left = int(ra.left * (1 / SET_SIZE)) # scale the coordinates back up top = int(ra.top * (1 / SET_SIZE)) right = int(ra.right * (1 / SET_SIZE)) bottom = int(ra.bottom * (1 / SET_SIZE)) cv2.rectangle(frame,(left, top), (right, bottom), (60, 20, 220), 1) # extract the face feature if faceid_dict!=None and (faceID in faceid_dict.keys()) and (faceid_dict[faceID][1]%8 !=0): peopleName = faceid_dict[faceID][0] faceid_dict[faceID][1]+=1 face_names.append(peopleName) else : single_detected_face1 = ASF_SingleFaceInfo() single_detected_face1.faceRect = detectedFaces.faceRect[face] single_detected_face1.faceOrient = detectedFaces.faceOrient[face] res, single_feature = face_engine.ASFFaceFeatureExtract(small_frame, single_detected_face1) if res == 0 : for name in face_Features.keys(): res,value = face_engine.ASFFaceFeatureCompare(single_feature,face_Features[name])# compare face features if value >=definite_thres : # high similarity: no need to keep scanning 
inter_var = value peopleName = name break if value >= inter_var : # track the name with the highest similarity inter_var = value peopleName = name if inter_var <= threshold : # if even the best similarity misses the threshold, treat the face as unknown peopleName = 'Unknown' if self.deduct_money != 0 and peopleName!='Unknown': # check whether money should be deducted remain_money = self.name_money[peopleName] - self.deduct_money if remain_money < 0: QMessageBox.about(self, 'warning', '顾客'+peopleName+'余额不足 支付失败') else : self.name_money[peopleName] = remain_money self.change_infor_money(peopleName,remain_money) self.deduct_money = 0 list_face = [] list_face.append(peopleName) list_face.append(int(1)) faceid_dict[faceID] = list_face face_names.append(peopleName) print("与{}相似度是{}".format(peopleName,inter_var)) cv2img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # cv2 and PIL store the color channels in different orders pilimg = Image.fromarray(cv2img) draw = ImageDraw.Draw(pilimg) # draw on the image font = ImageFont.truetype("msyh.ttf", 27, encoding="utf-8") # arg 1: font file path, arg 2: font size draw.text((left+10 ,bottom ), peopleName, (220, 20, 60), font=font) # args: position, text, color, font remain_money_str = '余额:' + str(self.name_money[peopleName]) draw.text((left + 80, bottom),remain_money_str, (220, 20, 60), font=font) # convert the PIL image back to a cv2 image frame = cv2.cvtColor(np.array(pilimg), cv2.COLOR_RGB2BGR) for oldId in list(faceid_dict.keys()): if oldId not in faceid_list: del faceid_dict[oldId] #print(faceid_dict) # e.g. {0: ['张文豪', 1]} self.set_name=set(face_names) self.set_names=tuple(self.set_name) # deduplicate the names via a set, then convert to a tuple for the display and logging code below print(self.set_names) # set_names collects everyone detected by face recognition show_video = cv2.resize(frame,(900, 560)) show_video = cv2.cvtColor(show_video,cv2.COLOR_BGR2RGB) # display the original frame # an OpenCV image
infor_sql.update_infor() self.close() def show_camera(self): # show the camera feed and run face recognition
identifier_body
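The comparison loop in show_camera above implements a two-threshold policy: any similarity of at least definite_thres (0.88) is accepted immediately and the scan stops; otherwise the best score over all enrolled features must clear threshold (0.7) or the face is labeled Unknown. A condensed sketch of that policy; compare is a stand-in for face_engine.ASFFaceFeatureCompare and is assumed to return a (result, similarity) pair as in the excerpt:

def best_match(feature, enrolled, compare, definite=0.88, threshold=0.7):
    # enrolled: dict of name -> stored feature
    best_name, best_score = 'Unknown', 0.0
    for name, stored in enrolled.items():
        _, score = compare(feature, stored)
        if score >= definite:
            return name, score           # confident hit: stop scanning
        if score > best_score:
            best_name, best_score = name, score
    if best_score <= threshold:
        return 'Unknown', best_score     # best candidate is still too weak
    return best_name, best_score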
main_demo.py
cannot be shown in a QLabel directly; it must be converted with QImage(uchar * data, int width, self.showImage = QImage(show_video.data, show_video.shape[1], show_video.shape[0], QImage.Format_RGB888) self.label_4.setPixmap(QPixmap.fromImage(self.showImage)) elif self.video_btn == 3: if self.cap.isOpened(): self.cap.release() cv2.destroyAllWindows() def qingping(self): # clear the info display when nothing should be shown; otherwise the labels keep whatever the loop last wrote to them self.label_5.setPixmap(QPixmap("")) # photo 1 self.label_6.setText("") # info 1 self.label_7.setPixmap(QPixmap("")) self.label_8.setText("") self.label_9.setPixmap(QPixmap("")) self.label_10.setText("") def LoadImg(self,imagePath): """ Resize the input image so both width and height are multiples of 4, as the engine requires """ img = cv2.imdecode(np.fromfile(imagePath,dtype=np.uint8),-1) # reads images whose filenames contain non-ASCII characters sp = img.shape img = cv2.resize(img, (sp[1]//4*4, sp[0]//4*4)) return img def load_face_files(self,faceFiles): # given the image folder path, collect each image's path and name: [[path1,name1],[path2,name2]...] 
imgs = [] files = os.listdir(faceFiles) for file in files: if file.find('.jpg') != -1 : imgs_infor = [] file_path = faceFiles + '\\' +file imgs_infor.append(file_path) img_name = file.split('.')[0] imgs_infor.append(img_name) imgs.append(imgs_infor) return imgs def get_face_features(self,path): face_engine = ArcFace() # a class from the engine module res = face_engine.ASFInitEngine(ASF_DETECT_MODE_IMAGE,ASF_OP_0_ONLY,30,10,5) # engine features to enable; here: face detection and face comparison if (res != 0): print("ASFInitEngine fail") else: print("ASFInitEngine success") pass imgsFile = self.load_face_files(path) # get the image paths and names face_features = {} for imgfile in imgsFile : img = self.LoadImg(imgfile[0]) res,detectedFaces = face_engine.ASFDetectFaces(img) if res==MOK: single_detected_face = ASF_SingleFaceInfo() single_detected_face.faceRect = detectedFaces.faceRect[0] single_detected_face.faceOrient = detectedFaces.faceOrient[0] res ,face_feature= face_engine.ASFFaceFeatureExtract(img,single_detected_face) if (res!=MOK): print("ASFFaceFeatureExtract {} fail: {}".format(imgfile[0], res)) else: face_features[imgfile[1]] = face_feature # dict pairing each image name with its face feature return face_features class MineWindow4(QMainWindow,Ui_Dialog2): def __init__(self,parent=None): #super(MineWindow, self).__init__(None, Qt.FramelessWindowHint) # differs from the usual call: it enables a frameless window super(MineWindow4,self).__init__(parent) self.setupUi(self) self.pushButton_5.clicked.connect(self.close) self.pushButton_25.clicked.connect(self.close) self.pushButton_6.clicked.connect(self.showMinimized) self.pushButton_3.clicked.connect(self.search_infor) self.pushButton_24.clicked.connect(self.new_register) def get_conf(self): self.conf = ConfigParser() self.conf.read('information.conf', encoding='gbk') def close_clear(self): linetext=[self.lineEdit,self.lineEdit_13,self.lineEdit_14,self.lineEdit_15,self.lineEdit_16,self.lineEdit_17, self.lineEdit_18,self.lineEdit_20] i=0 for lineedit in linetext: #lineedit.setPlaceholderText(str(i)) if i<5 and i>=0 : lineedit.setPlaceholderText("请输入信息") if i>=5 and i <=7: lineedit.setPlaceholderText('***') i=i+1 #self.close() def search_infor(self): search_name=self.lineEdit.text() if search_name in self.conf.sections(): self.lineEdit_17.setPlaceholderText(self.conf.get(search_name,'余额')) self.lineEdit_18.setPlaceholderText(self.conf.get(search_name,'性别')) self.lineEdit_20.setPlaceholderText(self.conf.get(search_name,'更多信息')) else: QMessageBox.about(self,'warning','找不到'+search_name+'的信息') def new_register(self): button=0 # only write to the config file once every input is valid name=self.lineEdit_15.text() age=self.lineEdit_13.text() sex=self.lineEdit_14.text() more_infor=self.lineEdit_16.text() button2=0 search_name=self.lineEdit.text() age2=self.lineEdit_17.text() sex2=self.lineEdit_18.text() mor_infor2=self.lineEdit_20.text() if name not in self.conf.sections(): if name != '': self.conf.add_section(name) if age == '': age= 0 elif str.isdigit(age)!= True: button=1 QMessageBox.about(self,'warning','余额请输入正确的格式') self.conf.set(name,'余额',str(age)) if sex == '': sex='未知' elif sex!='男' and sex!='女': button=1 QMessageBox.about(self,'warning','性别请输入正确') sex='未知' self.conf.set(name,'性别',sex) if more_infor == '': more_infor='未知' self.conf.set(name,'更多信息',more_infor) if button==0: self.conf.write(open("information.conf","w")) QMessageBox.about(self,'news','请将以'+name+'.jpg为命名的照片放入'+getcwd()+'\\'+'photo路径下完成注册') elif button == 1: self.conf.remove_section(name) elif age != '' or sex != '' or more_infor != '': QMessageBox.about(self,'warning','注册信息必须要输入姓名') else: QMessageBox.about(self,'warning',name+'已经注册过了') if age2!=''and 
str.isdigit(age2)== True:
print('更改余额',search_name,age2) self.conf.set(search_name,'余额',age2) button2=1 if sex2!='' and (sex2=='男' or sex2=='女'):
random_line_split
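faceid_dict in the loop above is a small recognition cache: for a tracked faceID the cached name is reused, features are re-extracted only on every eighth frame for that ID (the counter-modulo-8 test), and IDs that leave the frame are evicted. A stripped-down sketch of that scheme, with recognize() standing in for the extract-and-compare step:

def update_tracks(cache, detections, recognize, recheck_every=8):
    # cache: faceID -> [name, hit_count]; detections: faceID -> face data
    for face_id, face in detections.items():
        entry = cache.get(face_id)
        if entry is not None and entry[1] % recheck_every != 0:
            entry[1] += 1                          # reuse the cached name
        else:
            cache[face_id] = [recognize(face), 1]  # (re)identify this track
    for face_id in list(cache):                    # evict tracks no longer on screen
        if face_id not in detections:
            del cache[face_id]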
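new_register and search_infor above treat information.conf as a tiny member database: one ConfigParser section per person, with the keys 余额 (balance), 性别 (sex), and 更多信息 (more info). A minimal sketch of that storage scheme; register_member is a hypothetical helper, not a function from the excerpt, and it also closes the file handle, which the original leaves open:

from configparser import ConfigParser

def register_member(conf_path, name, balance='0', sex='未知', more='未知'):
    # hypothetical helper mirroring the section-per-person layout
    conf = ConfigParser()
    conf.read(conf_path, encoding='gbk')
    if conf.has_section(name):
        return False                    # already registered
    conf.add_section(name)
    conf.set(name, '余额', balance)
    conf.set(name, '性别', sex)
    conf.set(name, '更多信息', more)
    with open(conf_path, 'w') as f:     # write and close the config file
        conf.write(f)
    return True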
field.py
# Read the string offset table offset = stringTableOffset offset += 2 # the first two bytes are supposed to indicate the number of strings, but this is totally unreliable firstOffset = struct.unpack_from("<H", data, offset)[0] numStrings = firstOffset / 2 - 1 # determine the number of strings by the first offset instead stringOffsets = [] for i in xrange(numStrings): stringOffsets.append(struct.unpack_from("<H", data, offset)[0]) offset += 2 # Read the strings (assumption: each string is 0xff-terminated; we # don't use the offsets to calculate string sizes because the # strings may overlap, and the offsets may not be in ascending # order) self.stringData = [] for o in stringOffsets: start = stringTableOffset + o end = data.find('\xff', start) self.stringData.append(data[start:end + 1]) # Read the extra blocks (assumptions: offsets are in ascending order # and there is no other data between or after the extra blocks, so # the size of each block is the difference between adjacent offsets) self.extras = [] for i in xrange(numExtra): start = extraOffsets[i] end = extraOffsets[i + 1] assert end >= start self.extras.append(data[start:end]) # Return the list of all strings as unicode objects. def getStrings(self, japanese = False): return [ff7text.decodeField(s, japanese) for s in self.stringData] # Replace the entire string list. def setStrings(self, stringList, japanese = False): self.stringData = [ff7text.encode(s, True, japanese) for s in stringList] # Return the list of extra data blocks. def getExtras(self): return self.extras # Replace an extra data block. def setExtra(self, index, data): self.extras[index] = data # Encode event section to binary data and return it. def getData(self): version = 0x0502 numActors = len(self.actorNames) numExtras = len(self.extras) numStrings = len(self.stringData) headerSize = 32 actorNamesSize = numActors * 8 extraOffsetsSize = numExtras * 4 scriptTablesSize = numActors * 32 * 2 scriptCodeSize = len(self.scriptCode) stringTableOffset = 32 + actorNamesSize + extraOffsetsSize + scriptTablesSize + scriptCodeSize # Create the string table stringOffsets = "" stringTable = "" offset = 2 + numStrings * 2 for string in self.stringData: stringOffsets += struct.pack("<H", offset) stringTable += string offset += len(string) assert numStrings <= 256 # string IDs in MES/ASK/MPNAM commands are one byte only stringTable = struct.pack("<H", numStrings & 0xff) + stringOffsets + stringTable # Align string table size so the extra blocks are 32-bit aligned align = stringTableOffset + len(stringTable) if align % 4: stringTable += '\0' * (4 - align % 4) stringTableSize = len(stringTable) # Write the header data = struct.pack("<HBBHHH6x8s8s", version, numActors, self.numModels, stringTableOffset, numExtras, self.scale, self.creator, self.mapName) # Write the actor names for name in self.actorNames: data += struct.pack("8s", name) # Write the extra block offset table offset = stringTableOffset + stringTableSize for extra in self.extras: data += struct.pack("<L", offset) offset += len(extra) # Write the actor script entry tables for scripts in self.actorScripts: for i in xrange(32): data += struct.pack("<H", scripts[i]) # Write the script code data += str(self.scriptCode) # Write the string table data += stringTable # Write the extra blocks for extra in self.extras: data += extra return data # Mnemonic and operand length for each script opcode opcodes = [ # 0x00..0x07 ("ret", 0), ("req", 2), ("reqsw", 2), ("reqew", 2), ("preq", 2), ("prqsw", 2), ("prqew", 2), ("retto", 1), # 
0x08..0x0f ("join", 1), ("split", 14), ("sptye", 5), ("gptye", 5), ("", -1), ("", -1), ("dskcg", 1), ("spcal", 0), # 0x10..0x17 ("skip", 1), ("lskip", 2), ("back", 1), ("lback", 2), ("if", 5), ("lif", 6), ("if2", 7), ("lif2", 8), # 0x18..0x1f ("if2", 7), ("lif2", 8), ("", -1), ("", -1), ("", -1), ("", -1), ("", -1), ("", -1), # 0x20..0x27 ("mgame", 10), ("tutor", 1), ("btmd2", 4), ("btrlt", 2), ("wait", 2), ("nfade", 8), ("blink", 1), ("bgmovie", 1), # 0x28..0x2f ("kawai", 0), ("kawiw", 0), ("pmova", 1), ("slip", 1), ("bgdph", 4), ("bgscr", 6), ("wcls!", 1), ("wsizw", 9), # 0x30..0x37 ("key!", 3), ("keyon", 3), ("keyof", 3), ("uc", 1), ("pdira", 1), ("ptura", 3), ("wspcl", 4), ("wnumb", 7), # 0x38..0x3f ("sttim", 5), ("gold+", 5), ("gold-", 5), ("chgld", 3), ("hmpmx", 0), ("hmpmx", 0), ("mhmmx", 0), ("hmpmx", 0), # 0x40..0x47 ("mes", 2), ("mpara", 4), ("mpra2", 5), ("mpnam", 1), ("", -1), ("mp+", 4), ("", -1), ("mp-", 4), # 0x48..0x4f ("ask", 6), ("menu", 3), ("menu", 1), ("btltb", 1), ("", -1), ("hp+", 4), ("", -1), ("hp-", 4), # 0x50..0x57 ("wsize", 9), ("wmove", 5), ("wmode", 3), ("wrest", 1), ("wclse", 1), ("wrow", 2), ("gwcol", 6), ("swcol", 6), # 0x58..0x5f ("stitm", 4), ("dlitm", 4), ("ckitm", 4), ("smtra", 6), ("dmtra", 7), ("cmtra", 9), ("shake", 7), ("wait", 0), # 0x60..0x67 ("mjump", 9), ("scrlo", 1), ("scrlc", 4), ("scrla", 5), ("scr2d", 5), ("scrcc",
if self.scriptCode[codeOffset] == Op.RET and self.scriptCode[codeOffset + 1] == Op.RET: entry = codeOffset + self.scriptBaseAddress + 2 if entry not in self.scriptEntryAddresses: self.actorScripts[i].append(entry) self.scriptEntryAddresses.add(entry) codeOffset += 2 else: codeOffset += instructionSize(self.scriptCode, codeOffset) if (codeOffset + self.scriptBaseAddress) in self.scriptEntryAddresses: break # stop at next script
conditional_block
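The opcode table above gives each instruction a fixed operand length, but the decoder defined further down (instructionSize) special-cases two opcodes: SPCAL reads a sub-opcode whose length comes from a second table, and KAWAI carries its own total size in its first operand byte. A toy walk-through of those three sizing rules, using a few opcodes and sizes lifted from the tables in this file:

# toy subset: ret (0x00) and skip (0x10) from the fixed table, spcnm (sub-op 0xfd) from specialOpcodes
def toy_size(code, off):
    op = code[off]
    if op == 0x0f:                           # SPCAL: sub-opcode selects the length
        return {0xfd: 2}[code[off + 1]] + 2
    if op == 0x28:                           # KAWAI: first operand byte is the total size
        return code[off + 1]
    return {0x00: 0, 0x10: 1}[op] + 1        # fixed operand lengths from the table

code = bytes([0x10, 0x02, 0x28, 0x03, 0xaa, 0x0f, 0xfd, 0x01, 0x02, 0x00])
off = 0
while off < len(code):
    off += toy_size(code, off)               # steps of 2, 3, 4, 1
assert off == len(code)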
field.py
), ("chmph", 3), # 0xf8..0xff ("pmvie", 1), ("movie", 0), ("mvief", 2), ("mvcam", 1), ("fmusc", 1), ("cmusc", 5), ("chmst", 2), ("gmovr", 0), ] # Mnemonic and operand length for SPCAL sub-opcodes specialOpcodes = { 0xf5: ("arrow", 1), 0xf6: ("pname", 4), 0xf7: ("gmspd", 2), 0xf8: ("smspd", 2), 0xf9: ("flmat", 0), 0xfa: ("flitm", 0), 0xfb: ("btlck", 1), 0xfc: ("mvlck", 1), 0xfd: ("spcnm", 2), 0xfe: ("rsglb", 0), 0xff: ("clitm", 0), } # Some selected opcodes (flow control and text/window-related) Op = _enum( RET = 0x00, RETTO = 0x07, SPCAL = 0x0f, SKIP = 0x10, LSKIP = 0x11, BACK = 0x12, LBACK = 0x13, IF = 0x14, LIF = 0x15, IF2 = 0x16, LIF2 = 0x17, IF2U = 0x18, LIF2U = 0x19, KAWAI = 0x28, WSIZW = 0x2f, KEYQ = 0x30, KEYON = 0x31, KEYOFF = 0x32, WSPCL = 0x36, MES = 0x40, MPNAM = 0x43, ASK = 0x48, WSIZE = 0x50, WREST = 0x53, PRTYQ = 0xcb, MEMBQ = 0xcc, GMOVR = 0xff, SPCNM = 0x0ffd, ) # # Terminology: # - An "address" is the offset of a script instruction relative to the start # of the event section of the field map. # - An "offset" refers to a relative location within the script code block, # and is used to refer to script code bytes within the bytearray which # holds the script code. # - The "base address" of the script code block is the address of the script # instruction with offset 0. # # For example, if the script code block starts at byte 0x1234 within the # event section, then the first instruction of the script is at address # 0x1234, offset 0. # # Basic block of the control flow graph class BasicBlock: def __init__(self): # List of offsets of the instructions which make up the block self.instructions = [] # Set of addresses of succeeding blocks (zero for exit blocks, # one for sequential control flow or unconditional jumps, two # or more for conditional branches) self.succ = set() # Find the size of the instruction at the given offset in a script code block. def instructionSize(code, offset): op = code[offset] size = opcodes[op][1] + 1 if op == Op.SPCAL: # First operand byte is sub-opcode subOp = code[offset + 1] size = specialOpcodes[subOp][1] + 2 elif op == Op.KAWAI: # Variable size given by first operand byte size = code[offset + 1] return size # If the instruction at the given offset is a jump or branch instruction, # return the jump target offset. Otherwise, return None. def targetOffset(code, offset): op = code[offset] if op == Op.SKIP: return offset + code[offset + 1] + 1 elif op == Op.LSKIP: return offset + (code[offset + 1] | (code[offset + 2] << 8)) + 1 elif op == Op.BACK: return offset - code[offset + 1] elif op == Op.LBACK: return offset - (code[offset + 1] | (code[offset + 2] << 8)) if op == Op.IF: return offset + code[offset + 5] + 5 elif op == Op.LIF: return offset + (code[offset + 5] | (code[offset + 6] << 8)) + 5 elif op in (Op.IF2, Op.IF2U): return offset + code[offset + 7] + 7 elif op in (Op.LIF2, Op.LIF2U): return offset + (code[offset + 7] | (code[offset + 8] << 8)) + 7 elif op in (Op.KEYQ, Op.KEYON, Op.KEYOFF): return offset + code[offset + 3] + 3 elif op in (Op.PRTYQ, Op.MEMBQ): return offset + code[offset + 2] + 2 else: return None # Return true if the instruction at the given offset halts the control flow. def isExit(code, offset): return code[offset] in (Op.RET, Op.RETTO, Op.GMOVR) # Return true if the instruction at the given offset is an unconditional jump. def isJump(code, offset): return code[offset] in (Op.SKIP, Op.LSKIP, Op.BACK, Op.LBACK) # Return true if the instruction at the given offset is a conditional branch. 
def isBranch(code, offset): return code[offset] in (Op.IF, Op.LIF, Op.IF2, Op.LIF2, Op.IF2U, Op.LIF2U, Op.KEYQ, Op.KEYON, Op.KEYOFF, Op.PRTYQ, Op.MEMBQ) # Build and return the control flow graph, a dictionary mapping addresses to # BasicBlock objects. def buildCFG(code, baseAddress, entryAddresses): # Find the addresses of the leaders, starting with the supplied set of # entry addresses leaders = set(entryAddresses) offset = 0 while offset < len(code): nextOffset = offset + instructionSize(code, offset) # Instructions following exit points are leaders if isExit(code, offset): leaders.add(nextOffset + baseAddress) else: target = targetOffset(code, offset) # Targets of jump and branches, and the instructions following # a jump or branch, are leaders if target is not None: leaders.add(target + baseAddress) leaders.add(nextOffset + baseAddress) offset = nextOffset # For each leader, assemble the corresponding basic block, building # the graph graph = {} for leader in leaders: addr = leader offset = addr - baseAddress # If the last instruction of the code is a jump, there will be # a bogus leader pointing after the end of the code, which we # need to skip if offset >= len(code): continue block = BasicBlock() while True: # Append one instruction size = instructionSize(code, offset) block.instructions.append(offset) addr += size offset += size # Stop when reaching another leader, or before going outside the # code section if (addr in leaders) or (offset >= len(code)): break # Examine the last instruction of the block to determine the # block's successors assert len(block.instructions) > 0 lastInstruction = block.instructions[-1] if isJump(code, lastInstruction): # one successor: the jump target block.succ = set([targetOffset(code, lastInstruction) + baseAddress]) elif isBranch(code, lastInstruction): # two successors: the branch target and the next instruction if offset >= len(code): raise IndexError, "Control flow reaches end of script code" block.succ = set([targetOffset(code, lastInstruction) + baseAddress, addr]) elif isExit(code, lastInstruction): # no successors block.succ = set() else: # one successor: the next instruction if offset >= len(code): raise IndexError, "Control flow reaches end of script code" block.succ = set([addr]) # Add the block to the graph graph[leader] = block return graph # Determine all possible paths through a control flow graph starting at a given # entry point, ignoring any cycles. # # This function returns a list of paths, each path being a list of instruction # addresses. def findPaths(graph, entryAddress, path = []): path = path + [entryAddress] succ = graph[entryAddress].succ if not succ: return [path] # exit reached
random_line_split
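targetOffset above encodes one subtlety worth a worked example: every jump distance is relative to the opcode's own offset plus a per-opcode bias equal to the operand's position, with the L-variants reading a little-endian 16-bit distance and BACK/LBACK subtracting instead of adding. So an IF at offset 0x10 whose branch byte is 0x0B branches to 0x10 + 0x0B + 5 = 0x20. A small sketch of the forward cases:

def toy_target(code, off):
    op = code[off]
    if op == 0x10:                                          # SKIP: 8-bit distance, bias 1
        return off + code[off + 1] + 1
    if op == 0x11:                                          # LSKIP: 16-bit LE distance, bias 1
        return off + (code[off + 1] | (code[off + 2] << 8)) + 1
    if op == 0x14:                                          # IF: branch byte is the 5th operand
        return off + code[off + 5] + 5
    return None

code = bytes([0] * 0x10) + bytes([0x14, 0, 0, 0, 0, 0x0b])
assert toy_target(code, 0x10) == 0x20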
field.py
(self, event): data = event.getData() # Align section size to multiple of four if len(data) % 4: data += '\0' * (4 - len(data) % 4) self.sections[Section.EVENT] = data # Write the map to a file object, truncating the file. def writeToFile(self, fileobj): mapData = "" # Create the pointer table pointer = self.basePointer for data in self.sections: mapData += struct.pack("<L", pointer) pointer += len(data) # Append the sections for data in self.sections: mapData += data # Compress the map data cmpData = lzss.compress(mapData) # Write to file fileobj.seek(0) fileobj.truncate() fileobj.write(struct.pack("<L", len(cmpData))) fileobj.write(cmpData) # Field map event section class EventSection: # Create an EventSection object from binary data. def __init__(self, data): # Parse the section header headerSize = 32 self.version, numActors, self.numModels, stringTableOffset, numExtra, self.scale, self.creator, self.mapName = struct.unpack_from("<HBBHHH6x8s8s", data) offset = headerSize self.creator = self.creator.rstrip('\0') self.mapName = self.mapName.rstrip('\0') # Read the actor names self.actorNames = [] for i in xrange(numActors): name = struct.unpack_from("8s", data, offset)[0] offset += 8 name = name.rstrip('\0') self.actorNames.append(name) # Read the extra block (music/tutorial) offset table extraOffsets = [] for i in xrange(numExtra): extraOffset = struct.unpack_from("<L", data, offset)[0] offset += 4 extraOffsets.append(extraOffset) extraOffsets.append(len(data)) # dummy offset to determine end of last extra block # Read the actor script entry tables (32 entries per actor) self.actorScripts = [] self.scriptEntryAddresses = set() for i in xrange(numActors): scripts = list(struct.unpack_from("<32H", data, offset)) offset += 64 self.actorScripts.append(scripts) self.scriptEntryAddresses |= set(scripts) # Read the script code (assumptions: the script data immediately # follows the actor script offset table, and the start of the string # table marks the end of the script data) self.scriptBaseAddress = offset self.scriptCode = bytearray(data[offset:stringTableOffset]) if (len(self.scriptCode) + self.scriptBaseAddress) in self.scriptEntryAddresses: self.scriptCode.append(Op.RET) # the SNW_W field has (unused) pointers after the end of the code # The default script of each actor continues after the first RET # instruction. In order to include the following code in control # flow analyses we add a 33rd element to each script entry table # which points to the instruction after the first RET of the # default script. 
for i in xrange(numActors): defaultScript = self.actorScripts[i][0] codeOffset = defaultScript - self.scriptBaseAddress while codeOffset < len(self.scriptCode): if self.scriptCode[codeOffset] == Op.RET: entry = codeOffset + self.scriptBaseAddress + 1 self.actorScripts[i].append(entry) self.scriptEntryAddresses.add(entry) break else: codeOffset += instructionSize(self.scriptCode, codeOffset) # Also look for double-RET instructions in regular scripts and # add pseudo entry points after them for i in xrange(numActors): for j in xrange(1, 32): codeOffset = self.actorScripts[i][j] - self.scriptBaseAddress while codeOffset < (len(self.scriptCode) - 2): if self.scriptCode[codeOffset] == Op.RET and self.scriptCode[codeOffset + 1] == Op.RET: entry = codeOffset + self.scriptBaseAddress + 2 if entry not in self.scriptEntryAddresses: self.actorScripts[i].append(entry) self.scriptEntryAddresses.add(entry) codeOffset += 2 else: codeOffset += instructionSize(self.scriptCode, codeOffset) if (codeOffset + self.scriptBaseAddress) in self.scriptEntryAddresses: break # stop at next script
setEventSection
identifier_name
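getData above rebuilds the string table in exactly the layout the reader expects: a two-byte count, then one 16-bit offset per string measured from the start of the table (so the first string begins at 2 + 2*n), then the 0xff-terminated string data itself. A minimal sketch of that packing:

import struct

def pack_string_table(strings):
    # strings: already-encoded byte strings, each ending in 0xff
    offsets, blob = b'', b''
    off = 2 + 2 * len(strings)           # data starts after the count and the offset table
    for s in strings:
        offsets += struct.pack('<H', off)
        blob += s
        off += len(s)
    return struct.pack('<H', len(strings) & 0xff) + offsets + blob

table = pack_string_table([b'HI\xff', b'BYE\xff'])
assert struct.unpack_from('<H', table, 2)[0] == 6   # first string at 2 + 2*2 = 6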
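writeToFile earlier in this file wraps everything into the on-disk field map container: a table of 32-bit section pointers (starting at basePointer and advanced by each section's length) followed by the section payloads, with the whole blob LZSS-compressed and its compressed length prepended as a u32. A sketch of that layout; the identity compress default stands in for lzss.compress:

import struct

def build_map(sections, base_pointer, compress=lambda d: d):
    table, pointer = b'', base_pointer
    for data in sections:
        table += struct.pack('<L', pointer)   # pointer to this section
        pointer += len(data)
    raw = table + b''.join(sections)
    packed = compress(raw)                    # the real code uses lzss.compress here
    return struct.pack('<L', len(packed)) + packed

blob = build_map([b'\x00' * 8, b'\x01' * 4], base_pointer=0x1c)
# the pointer table holds 0x1c and 0x24 (= 0x1c + 8)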
trainer.py
2]) for x in open('tmp', 'r').readlines()] os.remove('tmp') return np.argmax(memory_available) return 0 def get_device(model): """ Extract the device to run on from the model. :param model: The model to train. :return: String. The name of the device. """ if next(model.parameters()).is_cuda:
else: return 'cpu' def prepare_batch(batch, device=None, non_blocking=False): """ Move the batch to the provided device. :param batch: The batch to prepare. :param device: The device to move to (e.g. cpu or gpu). :param non_blocking: Bool. Whether it should be blocking or not. :return: The prepared batch. """ images, target = batch return [convert_tensor(image, device=device, non_blocking=non_blocking) for image in images], \ convert_tensor(target, device=device, non_blocking=non_blocking) def create_name(name, epochs, lr, lr_decay_step, dilation, batch_size): """ Create a name that includes all the given hyper-parameters. :param name: The name of the model. :param epochs: The amount of epochs to train. :param lr: The learning rate to use for training. :param lr_decay_step: The amount of steps before the learning rate gets reduced. :param dilation: The dilation. :param batch_size: The batch size. :return: The name. """ return '{}_ep-{}_lr-{}_de-{}_di-{}_bs-{}'.format(name, epochs, lr, lr_decay_step, sum(dilation), batch_size) class Trainer(Engine): def __init__(self, name, model, log_dir, lr, lr_decay_step, adam=False): """ Initialize to train the given model. :param name: The name of the model to be trained. :param model: The model to be trained. :param log_dir: String. The log directory of the tensorboard. :param lr: Float. The learning rate. :param lr_decay_step: Integer. The amount of steps the learning rate decays. :param adam: Bool. Whether to use adam optimizer or not. """ super(Trainer, self).__init__(self.update_model) self.model = model # tqdm ProgressBar(persist=True).attach(self) # Optimizer params = [p for p in model.parameters() if p.requires_grad] if adam: self.optimizer = torch.optim.Adam(params, lr=lr) else: self.optimizer = torch.optim.SGD(params, lr=lr, momentum=0.9) # Scheduler if lr_decay_step > 0: self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=lr_decay_step, gamma=0.1) self.add_event_handler(Events.EPOCH_COMPLETED, lambda e: e.scheduler.step()) else: self.scheduler = None # Terminate if nan values found self.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan()) # Tensorboard logging self.tb_logger = TensorboardLogger(log_dir=os.path.join(log_dir, name)) self.add_event_handler(Events.COMPLETED, lambda x: self.tb_logger.close()) self.tb_logger.attach(self, log_handler=OptimizerParamsHandler(self.optimizer), event_name=Events.EPOCH_COMPLETED) self.tb_logger.attach(self, log_handler=OutputHandler(tag='training', output_transform=lambda x: { 'rpn_box_loss': round(self.state.output['loss_rpn_box_reg'].item(), 4), 'rpn_cls_loss': round(self.state.output['loss_objectness'].item(), 4), 'roi_box_loss': round(self.state.output['loss_box_reg'].item(), 4), 'roi_cls_loss': round(self.state.output['loss_classifier'].item(), 4) }), event_name=Events.EPOCH_COMPLETED) # Run on GPU (cuda) if available if torch.cuda.is_available(): torch.cuda.set_device(int(get_free_gpu())) model.cuda(torch.cuda.current_device()) @staticmethod def update_model(engine, batch): """ Runs the model on the given data batch and does the backpropagation. :param engine: The Trainer engine. :param batch: The batch to train on. :return: The loss values. 
""" engine.model.train() engine.model.rpn.nms_thresh = 0.7 img, target = prepare_batch(batch, device=get_device(engine.model)) engine.optimizer.zero_grad() loss = engine.model(img, target) losses = sum(l for l in loss.values()) losses.backward() engine.optimizer.step() return loss class Evaluator(Engine): def __init__(self, model, tb_logger): """ Initialize to evaluate the given model. :param model: The model to be evaluated. :param tb_logger: The tensorboard to be logged to. """ super(Evaluator, self).__init__(self.predict_on_batch) self.model = model # FROC avg_fps = list(range(1, 26)) avg_fps.append(0.5) avg_fps.sort() tags = ['froc_{}fp'.format(fp) for fp in avg_fps] for avg_fp, tag in zip(avg_fps, tags): FROC([avg_fp], iou_threshold=0.5).attach(self, tag) # tqdm ProgressBar(persist=True).attach(self) # Tensorboard logging tb_logger.attach(self, log_handler=OutputHandler(tag='validation', metric_names=tags, global_step_transform=lambda engine, name: engine.state.epoch), event_name=Events.EPOCH_COMPLETED) @staticmethod def predict_on_batch(engine, batch): """ Runs the model on the given data batch. :param engine: The Evaluator engine. :param batch: The batch to evaluate on. :return: The predicted values and the target values. """ engine.model.eval() engine.model.rpn.nms_thresh = 0.3 with torch.no_grad(): imgs, target = prepare_batch(batch, device=get_device(engine.model)) y_pred = engine.model(imgs) return y_pred, target def run(self, data, max_epochs=None, epoch_length=None, seed=None): # BugFix: After first run, the max_epochs have to be incremented or set to this engines epoch count. if not (self.state is None): self.state.max_epochs += 1 # Run evaluation super(Evaluator, self).run(data, max_epochs, epoch_length, seed) def as_array(value): """ Checks whether or not the given value is a list. If not, the value is wrapped in a list. :param value: List or Other. The value to wrap in a list if it isn't already one. :return: The value as a lit. """ if not isinstance(value, list): return [value] return value def train(model_type, lr, lr_decay_step, epochs, dilation, validate, batch_size, log_dir, data_dir, csv_file, use_adam, checkpoint_dir, resume_checkpoint): """ Train the model with the given parameters. :param model_type: The type of the model to train. :param lr: Float or Array[Float]. The learning rate. :param lr_decay_step: Integer or Array[Integer]. The amount of steps the learning rate decays. :param epochs: Integer or Array[Integer]. The amount of epochs. :param dilation: Integer or Array[Integer]. See https://pytorch.org/docs/master/generated/torch.nn.Conv2d.html for further information. :param validate: Bool. Use a validation step after each epoch. :param batch_size: Integer or Array[Integer]. The batch size. :param log_dir: String. The log directory of the tensorboard. :param data_dir: String. The path to the data. :param csv_file: String. The csv file which describes the dataset. :param use_adam: Bool. Whether to use adam optimizer or not. :param checkpoint_dir: String. The path to the checkpoints directory. :param resume_checkpoint: String. If None start all over, otherwise start from given checkpoint name. 
""" # Datasets train_dataset = DeepLesionDataset(data_dir, csv_file, batch_size=batch_size, type=DatasetType.TRAIN) validation_dataset = DeepLesionDataset(data_dir, csv_file, batch_size=batch_size, type=DatasetType.VALIDATION) # Create combinations of hyper-parameters train_variations = itertools.product(*[ as_array(model_type), as_array(epochs), as_array(lr), as_array(lr_decay_step), as_array(dilation), as_array(batch_size) ]) # Train for all combinations for h_type, h_epochs, h_lr, h_lr_decay_step, h_dilation, h_batch_size in train_variations: name = FasterRCNNType.get_name(h_type) title = create_name(name, h_epochs, h_lr, h_lr_decay_step, h_dilation, h_batch_size) checkpoint_files = [d[:-len('_checkpoint_8.pth')] for d in os.listdir(checkpoint_dir) if d.endswith('
return 'cuda:{}'.format(torch.cuda.current_device())
conditional_block
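The update_model step above follows the standard torchvision detection training loop: the model returns a dict of named losses, which are summed before backpropagation. Below is a minimal, self-contained sketch of that pattern; the toy model and its loss names are illustrative stand-ins, not the real Faster R-CNN heads.

import torch

class ToyDetector(torch.nn.Module):
    """Illustrative stand-in for a detection model that returns a loss dict."""
    def __init__(self):
        super().__init__()
        self.head = torch.nn.Linear(4, 2)

    def forward(self, x, target):
        pred = self.head(x)
        # A dict of named losses, mirroring torchvision's detection models.
        return {
            'loss_classifier': torch.nn.functional.mse_loss(pred[:, 0], target[:, 0]),
            'loss_box_reg': torch.nn.functional.mse_loss(pred[:, 1], target[:, 1]),
        }

model = ToyDetector()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

x, target = torch.randn(8, 4), torch.randn(8, 2)
model.train()
optimizer.zero_grad()                   # same order of operations as update_model
loss = model(x, target)
losses = sum(l for l in loss.values())  # total loss, summed over the named parts
losses.backward()
optimizer.step()
print({k: round(v.item(), 4) for k, v in loss.items()})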
trainer.py
""" Scan the system for available GPUs' and return the one with the most memory available. NOTE: Only available for linux systems! :return: Integer. The index of the GPU. """ os.system('nvidia-smi -q -d Memory |grep -A4 GPU|grep Free >tmp') if os.path.exists('tmp'): memory_available = [int(x.split()[2]) for x in open('tmp', 'r').readlines()] os.remove('tmp') return np.argmax(memory_available) return 0
identifier_body
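The get_free_gpu body above shells out to nvidia-smi and round-trips through a 'tmp' file. As a sketch under stated assumptions, the same "pick the GPU with the most free memory" idea can be written with subprocess and nvidia-smi's CSV query interface (assumes Linux with nvidia-smi on PATH; the function name is hypothetical; it falls back to device 0, as the original does when no 'tmp' file appears).

import subprocess
import numpy as np

def get_free_gpu_csv():
    """Index of the GPU with the most free memory, or 0 if none can be queried."""
    try:
        out = subprocess.run(
            ['nvidia-smi', '--query-gpu=memory.free', '--format=csv,noheader,nounits'],
            capture_output=True, text=True, check=True).stdout
    except (FileNotFoundError, subprocess.CalledProcessError):
        return 0  # no usable GPU info; fall back to device 0 like the original
    free = [int(x) for x in out.split()]  # one MiB value per line, one line per GPU
    return int(np.argmax(free)) if free else 0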
vet.go
false, nil } // Upgrades reports if there are any upgrades for any direct and indirect dependencies. // It returns true if upgrades are needed. // Rule: gomodvet-002 func Upgrades(verbose bool) (bool, error) { mods, err := buildlist.ResolveUpgrades() if err != nil { return false, err } flagged := false for _, mod := range mods
return flagged, nil } // MultipleMajor reports if the current module has any dependencies with multiple major versions. // For example, if the current module is 'foo', it reports if there is a 'bar' and 'bar/v3' as dependencies of 'foo'. // It returns true if multiple major versions are found. // Note that this looks for Semantic Import Version '/vN' versions, not gopkg.in versions. (Probably reasonable to not flag gopkg.in?) // Could use SplitPathVersion from https://github.com/rogpeppe/go-internal/blob/master/module/module.go#L274 // Rule: gomodvet-003 func MultipleMajor(verbose bool) (bool, error) { // TODO: non-regexp parsing of '/vN'? re := regexp.MustCompile("/v[0-9]+$") // track our paths in { strippedPath: fullPath, ... } map. paths := make(map[string]string) mods, err := buildlist.Resolve() if err != nil { fmt.Println("gomodvet:", err) return false, err } flagged := false for _, mod := range mods { if verbose { fmt.Printf("gomodvet: multiplemajors: module %s: %+v\n", mod.Path, mod) } strippedPath := re.ReplaceAllString(mod.Path, "") if priorPath, ok := paths[strippedPath]; ok { fmt.Println("gomodvet-003: a module has multiple major versions in this build: ", priorPath, mod.Path) flagged = true } paths[strippedPath] = mod.Path } return flagged, nil } // ConflictingRequires reports if the current module or any dependencies have: // -- different v0 versions of a shared dependency. // -- a v0 version of a shared dependency plus a v1 version. // -- a vN+incompatible (N >= 2) version of a shared dependency plus a v0, v1, or other vN+incompatible. // It returns true if so. // Rule: gomodvet-004 func ConflictingRequires(verbose bool) (bool, error) { // obtain the set of requires by all modules in our build (via 'go mod graph'). // this takes into account replace directives. requires, err := modgraph.Requirements() if err != nil { return false, err } // track our paths and versions in { path: {version, version, ...}, ... } map. paths := make(map[string][]string) for _, require := range requires { f := strings.Split(require, "@") if len(f) != 2 { return false, fmt.Errorf("unexpected requirement: %s", require) } path, version := f[0], f[1] if !semver.IsValid(version) { return false, fmt.Errorf("invalid semver version: %s", require) } // Probably not needed, but might as well use the canonical semver version. That strips "+incompatible", // which we need to preserve. Thus, we check here for "+incompatible" and add it back if needed. if semver.Build(version) == "+incompatible" { paths[path] = append(paths[path], semver.Canonical(version)+"+incompatible") } else { paths[path] = append(paths[path], semver.Canonical(version)) } } // for each path, loop over its versions (in semantic order) and build up a list // of potential conflicts. flagged := false for path, versions := range paths { sort.Slice(versions, func(i, j int) bool { return -1 == semver.Compare(versions[i], versions[j]) }) if verbose { fmt.Printf("gomodvet: conflictingrequires: module %q has require versions: %v\n", path, versions) } priorVersion := "" var potentialIncompats []string for _, version := range versions { if version == priorVersion { continue } if isBeforeV1(version) { // all pre-v1 versions are potentially incompatible potentialIncompats = append(potentialIncompats, version) } else if isV1(version) && !isV1(priorVersion) { // the first v1 version seen is potentially incompatible with any v0, v2+incompatible, v3+incompatible, etc.
potentialIncompats = append(potentialIncompats, version) } else if isV2OrHigherIncompat(version) && semver.Major(version) != semver.Major(priorVersion) { // the first major version v2+incompatible, v3+incompatible, etc is potentially incompatible. // (If two v2+incompatible versions are seen, in theory they should be compatible with each other). potentialIncompats = append(potentialIncompats, version) } priorVersion = version } if len(potentialIncompats) > 1 { // multiple potentially incompatible versions, which means they can be incompatible with each other. fmt.Printf("gomodvet-004: module %q was required with potentially incompatible versions: %s\n", path, strings.Join(potentialIncompats, ", ")) flagged = true } } return flagged, nil } // ExcludedVersion reports if the current module or any dependencies are using a version excluded by a dependency. // It returns true if so. // Currently requires the main module's go.mod to be in a consistent state (e.g., after a 'go list' or 'go build'), such that // the main module does not have a go.mod file using something it excludes. // gomodvet enforces this requirement. // // ExcludedVersion also assumes versions in any 'go.mod' file in the build are using canonical version strings. // The 'go' tool also enforces this when run (with some rare possible exceptions like multiple valid tags for a single commit), // but a person could check in any given 'go.mod' file prior to letting the 'go' tool use canonical version strings. If // that were to happen, the current ExcludedVersion could have a false negative (that is, potentially miss flagging something). // Rule: gomodvet-005 func ExcludedVersion(verbose bool) (bool, error) { report := func(err error) error { return fmt.Errorf("excludedversion: %v", err) } // track our versions in { path: version } map. versions := make(map[string]string) mods, err := buildlist.Resolve() if err != nil { return false, report(err) } // build up our reference map for _, mod := range mods { if verbose { fmt.Printf("gomodvet: excludedversion: module %s: %+v\n", mod.Path, mod) } versions[mod.Path] = mod.Version } // do our check by parsing each 'go.mod' file being used, // and checking if we are using a path/version combination excluded // by a go.mod file of one of our dependencies flagged := false for _, mod := range mods { if mod.Main { // here we assume the main module's 'go.mod' is in a consistent state, // and not using something excluded in its own 'go.mod' file. The 'go' tool // enforces this on a 'go build', 'go mod tidy', etc. continue } file, err := modfile.Parse(mod.GoMod) if err != nil { return false, report(err) } for _, exclude := range file.Exclude { usingVersion, ok := versions[exclude.Path] if !ok { continue } if usingVersion == exclude.Version { fmt.Printf("gomodvet-005: a module is using a version excluded by another module. excluded version: %s %s\n", exclude.Path, exclude.Version) flagged = true } } } return flagged, nil } // Prerelease reports if the current module or any dependencies are using a prerelease semver version // (exclusive of pseudo-versions, which are also prerelease versions according to the semver spec but are reported separately). // It returns true if so. // Rule: gomodvet-006 func Prerelease(verbose bool) (bool, error) { mods,
{ if verbose { fmt.Printf("gomodvet: upgrades: module %s: %+v\n", mod.Path, mod) } if mod.Update != nil { fmt.Println("gomodvet-002: dependencies have available updates: ", mod.Path, mod.Update.Version) flagged = true } }
conditional_block
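Rule gomodvet-003 above strips a Semantic Import Versioning '/vN' suffix with a regexp and flags module paths that then collide. A rough re-expression of that logic in Python (matching the rest of this dump; the module paths in the example are made up):

import re

def multiple_major(paths):
    """Return (priorPath, path) pairs whose paths differ only in a '/vN' suffix."""
    re_major = re.compile(r'/v[0-9]+$')
    seen = {}      # strippedPath -> fullPath, as in the Go code above
    flagged = []
    for path in paths:
        stripped = re_major.sub('', path)
        if stripped in seen:
            flagged.append((seen[stripped], path))
        seen[stripped] = path
    return flagged

print(multiple_major(['example.com/bar', 'example.com/baz', 'example.com/bar/v3']))
# -> [('example.com/bar', 'example.com/bar/v3')]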
vet.go
%s", require) } // Probably not needed, but might as well use the canonical semver version. That strips "+incompatible", // which we need to preserve. Thus, we check here for "+incompatible" and add it back if needed. if semver.Build(version) == "+incompatible" { paths[path] = append(paths[path], semver.Canonical(version)+"+incompatible") } else { paths[path] = append(paths[path], semver.Canonical(version)) } } // for each path, loop over its versions (in semantic order) and build up a list // of potential conflicts. flagged := false for path, versions := range paths { sort.Slice(versions, func(i, j int) bool { return -1 == semver.Compare(versions[i], versions[j]) }) if verbose { fmt.Printf("gomodvet: conflictingrequires: module %q has require versions: %v\n", path, versions) } priorVersion := "" var potentialIncompats []string for _, version := range versions { if version == priorVersion { continue } if isBeforeV1(version) { // all pre-v1 versions are potentially incompatible potentialIncompats = append(potentialIncompats, version) } else if isV1(version) && !isV1(priorVersion) { // the first v1 version seen is potentially incompatible with any v0, v2+incompatible, v3+incompatible, etc. potentialIncompats = append(potentialIncompats, version) } else if isV2OrHigherIncompat(version) && semver.Major(version) != semver.Major(priorVersion) { // the first major version v2+incompatible, v3+incompatible, etc is potentially incompatible. // (If two v2+incompatible versions are seen, in theory they should be compatible with each other). potentialIncompats = append(potentialIncompats, version) } priorVersion = version } if len(potentialIncompats) > 1 { // mutiple potential incompatible versions, which means they can be incompatible with each other. fmt.Printf("gomodvet-004: module %q was required with potentially incompatible versions: %s\n", path, strings.Join(potentialIncompats, ", ")) flagged = true } } return flagged, nil } // ExcludedVersion reports if the current module or any dependencies are using a version excluded by a dependency. // It returns true if so. // Currently requires main module's go.mod being in a consistent state (e.g., after a 'go list' or 'go build'), such that // the main module does not have a go.mod file using something it excludes. // gomodvet enforces this requirement. // // ExcludedVersion also assumes versions in any 'go.mod' file in the build is using canonical version strings. // The 'go' tool also enforces this when run (with some rare possible exceptions like multiple valid tags for a single commit), // but a person could check in any given 'go.mod' file prior to letting the 'go' tool use canonical version strings. If // that were to happen, the current ExcludedVersion could have a false negative (that is, potentially miss flagging something). // Rule: gomodvet-005 func ExcludedVersion(verbose bool) (bool, error) { report := func(err error) error { return fmt.Errorf("excludedversion: %v", err) } // track our versions in { path: version } map. 
versions := make(map[string]string) mods, err := buildlist.Resolve() if err != nil { return false, report(err) } // build up our reference map for _, mod := range mods { if verbose { fmt.Printf("gomodvet: excludedversion: module %s: %+v\n", mod.Path, mod) } versions[mod.Path] = mod.Version } // do our check by parsing each 'go.mod' file being used, // and checking if we are using a path/version combination excluded // by a go.mod file of one of our dependencies flagged := false for _, mod := range mods { if mod.Main { // here we assume the main module's 'go.mod' is in a consistent state, // and not using something excluded in its own 'go.mod' file. The 'go' tool // enforces this on a 'go build', 'go mod tidy', etc. continue } file, err := modfile.Parse(mod.GoMod) if err != nil { return false, report(err) } for _, exclude := range file.Exclude { usingVersion, ok := versions[exclude.Path] if !ok { continue } if usingVersion == exclude.Version { fmt.Printf("gomodvet-005: a module is using a version excluded by another module. excluded version: %s %s\n", exclude.Path, exclude.Version) flagged = true } } } return flagged, nil } // Prerelease reports if the current module or any dependencies are using a prerelease semver version // (exclusive of pseudo-versions, which are also prerelease versions according to the semver spec but are reported separately). // It returns true if so. // Rule: gomodvet-006 func Prerelease(verbose bool) (bool, error) { mods, err := buildlist.Resolve() if err != nil { return false, fmt.Errorf("prerelease: %v", err) } flagged := false for _, mod := range mods { if verbose { fmt.Printf("gomodvet: prerelease: module %s: %+v\n", mod.Path, mod) } if isPrerelease(mod.Version) { fmt.Printf("gomodvet-006: a module is using a prerelease version: %s %s\n", mod.Path, mod.Version) flagged = true } } return flagged, nil } // PseudoVersion reports if the current module or any dependencies are using a pseudo-version. // It returns true if so. // Rule: gomodvet-007 func PseudoVersion(verbose bool) (bool, error) { mods, err := buildlist.Resolve() if err != nil { return false, fmt.Errorf("pseudoversion: %v", err) } flagged := false for _, mod := range mods { if verbose { fmt.Printf("gomodvet: pseudoversion: module %s: %+v\n", mod.Path, mod) } if isPseudoVersion(mod.Version) { fmt.Printf("gomodvet-007: a module is using a pseudo-version: %s %s\n", mod.Path, mod.Version) flagged = true } } return flagged, nil } // Replace reports if the current go.mod has 'replace' directives. // It returns true if so. // This parses the 'go.mod' for the main module, and hence can report // true if the main module's 'go.mod' has ineffective replace directives. // Part of the use case is that some people never want to check in a replace directive, // and this can be used to check that.
// Rule: gomodvet-008 func Replace(verbose bool) (bool, error) { mods, err := buildlist.Resolve() if err != nil { return false, fmt.Errorf("replace: %v", err) } flagged := false for _, mod := range mods { if !mod.Main { continue } if verbose { fmt.Printf("gomodvet: replacement: module %s: %+v\n", mod.Path, mod) } file, err := modfile.Parse(mod.GoMod) if err != nil { return false, fmt.Errorf("replace: %v", err) } if len(file.Replace) > 0 { fmt.Printf("gomodvet-008: the main module has 'replace' directives\n") flagged = true } } return flagged, nil } func isPseudoVersion(version string) bool { // regexp from cmd/go/internal/modfetch/pseudo.go re := regexp.MustCompile(`^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)\d{14}-[A-Za-z0-9]+(\+incompatible)?$`) return semver.IsValid(version) && re.MatchString(version) } func isPrerelease(version string) bool { return semver.IsValid(version) && !isPseudoVersion(version) && semver.Prerelease(version) != "" } // isBeforeV1 reports if a version is prior to v1.0.0, according to semver. // v0.9.0 and v1.0.0-alpha are examples of versions before v1.0.0. func
isBeforeV1
identifier_name
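The isPseudoVersion/isPrerelease helpers above hinge on the pseudo-version regexp borrowed from cmd/go/internal/modfetch/pseudo.go. A small Python sketch reusing that regexp (it deliberately skips the semver-validity check the Go code performs via semver.IsValid, and the version strings in the example are illustrative):

import re

# regexp from cmd/go/internal/modfetch/pseudo.go, as quoted in vet.go above
PSEUDO_RE = re.compile(
    r'^v[0-9]+\.(0\.0-|\d+\.\d+-([^+]*\.)?0\.)\d{14}-[A-Za-z0-9]+(\+incompatible)?$')

def is_pseudo_version(version):
    return bool(PSEUDO_RE.match(version))

def is_prerelease(version):
    # semver prerelease = a '-suffix' on the core version; pseudo-versions are
    # excluded here because the Go code reports them under a separate rule.
    base = version.split('+')[0]  # drop build metadata such as +incompatible
    return '-' in base and not is_pseudo_version(version)

print(is_pseudo_version('v0.0.0-20190717185122-a985d3407aa7'))  # True
print(is_prerelease('v1.2.3-beta.1'))                           # True
print(is_prerelease('v1.2.3'))                                  # False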
dldata.py
logger = logging.getLogger('animethemes-dl') FILENAME_BAD = set('#%&{}\\<>*?/$!\'":@+`|') FILENAME_BANNED = set('<>:"/\\|?*') FILENAME_ALLOWEDASCII = set(string.printable).difference(FILENAME_BANNED) # this regex is for getting metadata from a song name, might be straight up wrong FEATURED_RE = re.compile(r"""^ (.*?) # song name (?: \ \(?feat\.\ ( [\w\ ]+ # artist name (?:\([\w\ ]+\))? # artists second name )\)? | \(([\w\ ]+)\) # comment enclosed in "()" (?:\ (.+))? # after comment details )? $""",re.VERBOSE) def is_entry_wanted(entry: AnimeThemeEntry) -> bool: """ Determines whether all the tags in the entry are the same as in OPTIONS """ for k in ('spoiler','nsfw'): v = OPTIONS['filter'][k] if v is not None and entry[k] ^ v: return False return True def is_video_wanted(video: AnimeThemeVideo) -> bool: """ Determines whether all the tags in the video are the same as in OPTIONS """ for k in ('nc','subbed','lyrics','uncen'): v = OPTIONS['filter'][k] if v is not None and video[k] ^ v: return False if video['resolution'] < OPTIONS['filter']['resolution']: return False if OPTIONS['filter']['source'] is not None and video['source'] != OPTIONS['filter']['source']: return False if OPTIONS['filter']['overlap'] is not None and video['overlap'] not in OPTIONS['filter']['overlap']: # uses lists return False return True def get_amount_episodes(episodes: str) -> int: """ Takes in the animethemes syntax of episodes and returns its amount """ a = 0 for ep in episodes.split(', '): if '-' in ep: start,end = ep.split('-') a += int(end)-int(start) else: a += int(ep) return a def strip_illegal_chars(filename: str) -> str: """ Removes all illegal chars from a filename """ if OPTIONS['download']['ascii']: return ''.join(i for i in filename if i in FILENAME_ALLOWEDASCII) else: return ''.join(i for i in filename if i not in FILENAME_BANNED) def
(**kwargs) -> Dict[str,str]: """ Generates a formatter dict used for formatting filenames. Takes in kwargs of Dict[str,Any]. Does not keep lists, dicts and bools. Automatically filters out keys matching `.endswith('ated_at')` for animethemes-dl. Also adds `{video_filetype:webm,anime_filename:...}`. """ formatter = {} for t,d in kwargs.items(): for k,v in d.items(): if (not isinstance(v,(list,dict,bool)) and not k.endswith('ated_at') ): formatter[t+'_'+k] = v formatter['video_filetype'] = 'webm' formatter['anime_filename'] = formatter['video_filename'].split('-')[0] return formatter def generate_path( anime: AnimeThemeAnime, theme: AnimeThemeTheme, entry: AnimeThemeEntry, video: AnimeThemeVideo) -> ( Tuple[Optional[PathLike],Optional[PathLike]]): """ Generates a path with animethemes api returns. Returns `(videopath|None,audiopath|None)` """ formatter = get_formatter( anime=anime,theme=theme,entry=entry,video=video,song=theme['song']) filename = OPTIONS['download']['filename'] % formatter filename = strip_illegal_chars(filename) if OPTIONS['download']['video_folder']: video_path = realpath(join(OPTIONS['download']['video_folder'],filename)) else: video_path = None if OPTIONS['download']['audio_folder']: audio_path = realpath(join(OPTIONS['download']['audio_folder'],filename)) audio_path = splitext(audio_path)[0]+'.mp3' else: audio_path = None return video_path,audio_path def pick_best_entry(theme: AnimeThemeTheme) -> Optional[Tuple[AnimeThemeEntry,AnimeThemeVideo]]: """ Returns the best entry and video based on OPTIONS. Returns None if no entry/video is wanted """ # picking best entry entries = [] for entry in theme['entries']: if not is_entry_wanted(entry): continue # picking best video videos = [] for video in entry['videos']: if ((is_video_wanted(video) or video['id'] in OPTIONS['download']['force_videos']) and not (OPTIONS['filter']['smart'] and entry['spoiler'] and video['overlap']!='None') ): videos.append(video) # can't append empty videos if videos: # sort videos by overlap ('None' is best) videos.sort(key=lambda x: ('None','Transition','Over').index(x['overlap'])) entries.append((entry,videos[0])) # pick first (best) # there's a chance no entries will be found if entries: return entries[0] else: logger.debug(f"removed {theme['song']['title']}/{theme['slug']} ({theme['id']})") return None def parse_download_data(data: List[AnimeThemeAnime]) -> List[DownloadData]: """ Parses a list of animethemes api returns for anime. Returns download data.
""" out = [] songs = set() for anime in data: last_group = None for tracknumber,theme in enumerate(anime['themes']): # # remove unwanted tags in song title (feat and brackets) match = FEATURED_RE.match(theme['song']['title']) # .* always matches theme['song']['title'],featured,comments,version = match.groups() # filtering: # theme type if OPTIONS['filter']['type'] is not None and OPTIONS['filter']['type']!=theme['type']: continue # groups (for example dubs) if last_group is not None and theme['group']!=last_group: continue else: last_group = theme['group'] # video tags best = pick_best_entry(theme) if best is None: continue entry,video = best # copies if OPTIONS['filter']['no_copy']: if theme['song']['title'] in songs: continue else: songs.add(theme['song']['title']) # fix some problems video['link'] = video['link'].replace('https://v.staging.animethemes.moe','https://animethemes.moe/video') entry['version'] = entry['version'] if entry['version'] else 1 series = [series['name'] for series in anime['series']] # add to all the songs if OPTIONS['filter']['no_copy']: songs.add(theme['song']['title']) # get video path videopath,audiopath = generate_path(anime,theme,entry,video) out.append({ 'url': video['link'], 'video_path': videopath, 'audio_path': audiopath, 'metadata': { # anime 'series': series[0] if len(series)==1 else anime['name'], # mashups are it's own thing (ie isekai quarter) 'album': anime['name'], # discs should be numbered, 'year': anime['year'], 'track': f"{tracknumber+1}/{len(anime['themes'])}", # an ID3 "track/total" syntax 'coverarts': [i['link'] for i in anime['images']][::-1], # theme 'title': theme['song']['title'], 'artists': [artist['name'] for artist in theme['song']['artists']], 'themetype': theme['slug'], # entry 'version': entry['version'], 'notes': entry['notes'], # video 'resolution': video['resolution'], 'videoid': video['id'], 'filesize': video['size'], # const 'genre': [145], # anime 'encodedby': 'animethemes.moe', 'cgroup': 'anime theme', # content group # data pulled from filename 'file_featured':featured, 'file_comments':comments, 'file_version':version }, 'info': { 'malid':[r['external_id'] for r in anime['resources'] if r['site']=='MyAnimeList'][0] } }) return out def get_download_data(username: str, site: AnimeListSite, animelist_args={}) -> List[DownloadData]: """ Gets download data from themes.moe and myanimelist.net/anilist.co. Returns a list of mirrors, save_paths and id3 tags. Sorts using `animethemes_dl.OPTIONS['options']` To use anilist.co instead of myanimelist.net, use `anilist`. For additional args for
get_formatter
identifier_name
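As a quick illustration of the dict-flattening step that `get_formatter` (split in the row above) performs, here is a minimal standalone Python sketch; the sample anime/video dicts are invented stand-ins, not real animethemes API returns.

from typing import Any, Dict

def flatten(**kwargs) -> Dict[str, Any]:
    # mirrors the loop in get_formatter: prefix each key with its source
    # dict's name, dropping containers, bools and '*ated_at' timestamps
    formatter = {}
    for prefix, data in kwargs.items():
        for key, value in data.items():
            if not isinstance(value, (list, dict, bool)) and not key.endswith('ated_at'):
                formatter[prefix + '_' + key] = value
    return formatter

print(flatten(
    anime={'name': 'Monster', 'year': 2004, 'images': ['...'], 'created_at': '...'},
    video={'filename': 'Monster-OP1', 'lyrics': False},
))
# -> {'anime_name': 'Monster', 'anime_year': 2004, 'video_filename': 'Monster-OP1'}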
dldata.py
logger = logging.getLogger('animethemes-dl') FILENAME_BAD = set('#%&{}\\<>*?/$!\'":@+`|') FILENAME_BANNED = set('<>:"/\\|?*') FILENAME_ALLOWEDASCII = set(string.printable).difference(FILENAME_BANNED) # this regex is for getting metadata from a song name, might be straight up wrong FEATURED_RE = re.compile(r"""^ (.*?) # song name (?: \ \(?feat\.\ ( [\w\ ]+ # artist name (?:\([\w\ ]+\))? # artist's second name )\)? | \(([\w\ ]+)\) # comment enclosed in "()" (?:\ (.+))? # after comment details )? $""",re.VERBOSE) def is_entry_wanted(entry: AnimeThemeEntry) -> bool: """ Determines whether all the tags in the entry are the same as in OPTIONS """ for k in ('spoiler','nsfw'): v = OPTIONS['filter'][k] if v is not None and entry[k] ^ v: return False return True def is_video_wanted(video: AnimeThemeVideo) -> bool: """ Determines whether all the tags in the video are the same as in OPTIONS """ for k in ('nc','subbed','lyrics','uncen'): v = OPTIONS['filter'][k] if v is not None and video[k] ^ v: return False if video['resolution'] < OPTIONS['filter']['resolution']: return False if OPTIONS['filter']['source'] is not None and video['source'] != OPTIONS['filter']['source']: return False if OPTIONS['filter']['overlap'] is not None and video['overlap'] not in OPTIONS['filter']['overlap']: # uses lists return False return True def get_amount_episodes(episodes: str) -> int: """ Takes in the animethemes syntax of episodes and returns its amount """ a = 0 for ep in episodes.split(', '): if '-' in ep: start,end = ep.split('-') a += int(end)-int(start) else: a += int(ep) return a def strip_illegal_chars(filename: str) -> str: """ Removes all illegal chars from a filename """ if OPTIONS['download']['ascii']: return ''.join(i for i in filename if i in FILENAME_ALLOWEDASCII) else: return ''.join(i for i in filename if i not in FILENAME_BANNED) def get_formatter(**kwargs) -> Dict[str,str]: """ Generates a formatter dict used for formatting filenames. Takes in kwargs of Dict[str,Any]. Does not keep lists, dicts, or bools. Automatically filters out keys that `.endswith('ated_at')` for animethemes-dl. Also adds `{video_filetype:webm,anime_filename:...}`. """ formatter = {} for t,d in kwargs.items(): for k,v in d.items(): if (not isinstance(v,(list,dict,bool)) and not k.endswith('ated_at') ): formatter[t+'_'+k] = v formatter['video_filetype'] = 'webm' formatter['anime_filename'] = formatter['video_filename'].split('-')[0] return formatter def generate_path( anime: AnimeThemeAnime, theme: AnimeThemeTheme, entry: AnimeThemeEntry, video: AnimeThemeVideo) -> ( Tuple[Optional[PathLike],Optional[PathLike]]): """ Generates a path with animethemes api returns. Returns `(videopath|None,audiopath|None)` """ formatter = get_formatter( anime=anime,theme=theme,entry=entry,video=video,song=theme['song']) filename = OPTIONS['download']['filename'] % formatter filename = strip_illegal_chars(filename) if OPTIONS['download']['video_folder']: video_path = realpath(join(OPTIONS['download']['video_folder'],filename)) else: video_path = None if OPTIONS['download']['audio_folder']: audio_path = realpath(join(OPTIONS['download']['audio_folder'],filename)) audio_path = splitext(audio_path)[0]+'.mp3' else:
return video_path,audio_path def pick_best_entry(theme: AnimeThemeTheme) -> Optional[Tuple[AnimeThemeEntry,AnimeThemeVideo]]: """ Returns the best entry and video based on OPTIONS. Returns None if no entry/video is wanted """ # picking best entry entries = [] for entry in theme['entries']: if not is_entry_wanted(entry): continue # picking best video videos = [] for video in entry['videos']: if ((is_video_wanted(video) or video['id'] in OPTIONS['download']['force_videos']) and not (OPTIONS['filter']['smart'] and entry['spoiler'] and video['overlap']!='None') ): videos.append(video) # can't append empty videos if videos: # sort videos by giving points videos.sort(key=lambda x: ('None','Transition','Over').index(x['overlap'])) entries.append((entry,videos[0])) # pick first (best) # there's a chance no entries will be found if entries: return entries[0] else: logger.debug(f"removed {theme['song']['title']}/{theme['slug']} ({theme['id']})") return None def parse_download_data(data: List[AnimeThemeAnime]) -> List[DownloadData]: """ Parses a list of animethemes api returns for anime. Returns download data. """ out = [] songs = set() for anime in data: last_group = None for tracknumber,theme in enumerate(anime['themes']): # # remove unwanted tags in song title (feat and brackets) match = FEATURED_RE.match(theme['song']['title']) # .* always matches theme['song']['title'],featured,comments,version = match.groups() # filtering: # theme type if OPTIONS['filter']['type'] is not None and OPTIONS['filter']['type']!=theme['type']: continue # groups (for example dubs) if last_group is not None and theme['group']!=last_group: continue else: last_group = theme['group'] # video tags best = pick_best_entry(theme) if best is None: continue entry,video = best # copies if OPTIONS['filter']['no_copy']: if theme['song']['title'] in songs: continue else: songs.add(theme['song']['title']) # fix some problems video['link'] = video['link'].replace('https://v.staging.animethemes.moe','https://animethemes.moe/video') entry['version'] = entry['version'] if entry['version'] else 1 series = [series['name'] for series in anime['series']] # add to all the songs if OPTIONS['filter']['no_copy']: songs.add(theme['song']['title']) # get video path videopath,audiopath = generate_path(anime,theme,entry,video) out.append({ 'url': video['link'], 'video_path': videopath, 'audio_path': audiopath, 'metadata': { # anime 'series': series[0] if len(series)==1 else anime['name'], # mashups are it's own thing (ie isekai quarter) 'album': anime['name'], # discs should be numbered, 'year': anime['year'], 'track': f"{tracknumber+1}/{len(anime['themes'])}", # an ID3 "track/total" syntax 'coverarts': [i['link'] for i in anime['images']][::-1], # theme 'title': theme['song']['title'], 'artists': [artist['name'] for artist in theme['song']['artists']], 'themetype': theme['slug'], # entry 'version': entry['version'], 'notes': entry['notes'], # video 'resolution': video['resolution'], 'videoid': video['id'], 'filesize': video['size'], # const 'genre': [145], # anime 'encodedby': 'animethemes.moe', 'cgroup': 'anime theme', # content group # data pulled from filename 'file_featured':featured, 'file_comments':comments, 'file_version':version }, 'info': { 'malid':[r['external_id'] for r in anime['resources'] if r['site']=='MyAnimeList'][0] } }) return out def get_download_data(username: str, site: AnimeListSite, animelist_args={}) -> List[DownloadData]: """ Gets download data from themes.moe and myanimelist.net/anilist.co. 
Returns a list of mirrors, save_paths and id3 tags. Sorts using `animethemes_dl.OPTIONS['options']` To use anilist.co instead of myanimelist.net, use `anilist`. For additional args for
audio_path = None
conditional_block
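The row above splits `generate_path` on its audio-folder branch. The flow is: %-format a filename template from the formatter dict, strip banned characters, then derive the video and audio paths. A hedged sketch under invented OPTIONS values (the template and folder names are examples, not the project's defaults):

from os.path import join, realpath, splitext

FILENAME_BANNED = set('<>:"/\\|?*')  # copied from the module above

def strip_illegal_chars(filename: str) -> str:
    return ''.join(c for c in filename if c not in FILENAME_BANNED)

formatter = {'anime_name': 'Monster', 'theme_slug': 'OP1', 'video_filetype': 'webm'}
filename = strip_illegal_chars('%(anime_name)s %(theme_slug)s.%(video_filetype)s' % formatter)
video_path = realpath(join('video', filename))                        # video keeps .webm
audio_path = splitext(realpath(join('audio', filename)))[0] + '.mp3'  # audio swaps to .mp3
print(video_path, audio_path)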
dldata.py
logger = logging.getLogger('animethemes-dl') FILENAME_BAD = set('#%&{}\\<>*?/$!\'":@+`|') FILENAME_BANNED = set('<>:"/\\|?*') FILENAME_ALLOWEDASCII = set(string.printable).difference(FILENAME_BANNED) # this regex is for getting metadata from a song name, might be straight up wrong FEATURED_RE = re.compile(r"""^ (.*?) # song name (?: \ \(?feat\.\ ( [\w\ ]+ # artist name (?:\([\w\ ]+\))? # artist's second name )\)? | \(([\w\ ]+)\) # comment enclosed in "()" (?:\ (.+))? # after comment details )? $""",re.VERBOSE) def is_entry_wanted(entry: AnimeThemeEntry) -> bool: """ Determines whether all the tags in the entry are the same as in OPTIONS """ for k in ('spoiler','nsfw'): v = OPTIONS['filter'][k] if v is not None and entry[k] ^ v: return False return True def is_video_wanted(video: AnimeThemeVideo) -> bool: """ Determines whether all the tags in the video are the same as in OPTIONS """ for k in ('nc','subbed','lyrics','uncen'): v = OPTIONS['filter'][k] if v is not None and video[k] ^ v: return False if video['resolution'] < OPTIONS['filter']['resolution']: return False if OPTIONS['filter']['source'] is not None and video['source'] != OPTIONS['filter']['source']: return False if OPTIONS['filter']['overlap'] is not None and video['overlap'] not in OPTIONS['filter']['overlap']: # uses lists return False return True def get_amount_episodes(episodes: str) -> int: """ Takes in the animethemes syntax of episodes and returns its amount """ a = 0 for ep in episodes.split(', '): if '-' in ep: start,end = ep.split('-') a += int(end)-int(start) else: a += int(ep) return a def strip_illegal_chars(filename: str) -> str: """ Removes all illegal chars from a filename """ if OPTIONS['download']['ascii']: return ''.join(i for i in filename if i in FILENAME_ALLOWEDASCII) else: return ''.join(i for i in filename if i not in FILENAME_BANNED) def get_formatter(**kwargs) -> Dict[str,str]:
def generate_path( anime: AnimeThemeAnime, theme: AnimeThemeTheme, entry: AnimeThemeEntry, video: AnimeThemeVideo) -> ( Tuple[Optional[PathLike],Optional[PathLike]]): """ Generates a path with animethemes api returns. Returns `(videopath|None,audiopath|None)` """ formatter = get_formatter( anime=anime,theme=theme,entry=entry,video=video,song=theme['song']) filename = OPTIONS['download']['filename'] % formatter filename = strip_illegal_chars(filename) if OPTIONS['download']['video_folder']: video_path = realpath(join(OPTIONS['download']['video_folder'],filename)) else: video_path = None if OPTIONS['download']['audio_folder']: audio_path = realpath(join(OPTIONS['download']['audio_folder'],filename)) audio_path = splitext(audio_path)[0]+'.mp3' else: audio_path = None return video_path,audio_path def pick_best_entry(theme: AnimeThemeTheme) -> Optional[Tuple[AnimeThemeEntry,AnimeThemeVideo]]: """ Returns the best entry and video based on OPTIONS. Returns None if no entry/video is wanted """ # picking best entry entries = [] for entry in theme['entries']: if not is_entry_wanted(entry): continue # picking best video videos = [] for video in entry['videos']: if ((is_video_wanted(video) or video['id'] in OPTIONS['download']['force_videos']) and not (OPTIONS['filter']['smart'] and entry['spoiler'] and video['overlap']!='None') ): videos.append(video) # can't append empty videos if videos: # sort videos by giving points videos.sort(key=lambda x: ('None','Transition','Over').index(x['overlap'])) entries.append((entry,videos[0])) # pick first (best) # there's a chance no entries will be found if entries: return entries[0] else: logger.debug(f"removed {theme['song']['title']}/{theme['slug']} ({theme['id']})") return None def parse_download_data(data: List[AnimeThemeAnime]) -> List[DownloadData]: """ Parses a list of animethemes api returns for anime. Returns download data. 
""" out = [] songs = set() for anime in data: last_group = None for tracknumber,theme in enumerate(anime['themes']): # # remove unwanted tags in song title (feat and brackets) match = FEATURED_RE.match(theme['song']['title']) # .* always matches theme['song']['title'],featured,comments,version = match.groups() # filtering: # theme type if OPTIONS['filter']['type'] is not None and OPTIONS['filter']['type']!=theme['type']: continue # groups (for example dubs) if last_group is not None and theme['group']!=last_group: continue else: last_group = theme['group'] # video tags best = pick_best_entry(theme) if best is None: continue entry,video = best # copies if OPTIONS['filter']['no_copy']: if theme['song']['title'] in songs: continue else: songs.add(theme['song']['title']) # fix some problems video['link'] = video['link'].replace('https://v.staging.animethemes.moe','https://animethemes.moe/video') entry['version'] = entry['version'] if entry['version'] else 1 series = [series['name'] for series in anime['series']] # add to all the songs if OPTIONS['filter']['no_copy']: songs.add(theme['song']['title']) # get video path videopath,audiopath = generate_path(anime,theme,entry,video) out.append({ 'url': video['link'], 'video_path': videopath, 'audio_path': audiopath, 'metadata': { # anime 'series': series[0] if len(series)==1 else anime['name'], # mashups are it's own thing (ie isekai quarter) 'album': anime['name'], # discs should be numbered, 'year': anime['year'], 'track': f"{tracknumber+1}/{len(anime['themes'])}", # an ID3 "track/total" syntax 'coverarts': [i['link'] for i in anime['images']][::-1], # theme 'title': theme['song']['title'], 'artists': [artist['name'] for artist in theme['song']['artists']], 'themetype': theme['slug'], # entry 'version': entry['version'], 'notes': entry['notes'], # video 'resolution': video['resolution'], 'videoid': video['id'], 'filesize': video['size'], # const 'genre': [145], # anime 'encodedby': 'animethemes.moe', 'cgroup': 'anime theme', # content group # data pulled from filename 'file_featured':featured, 'file_comments':comments, 'file_version':version }, 'info': { 'malid':[r['external_id'] for r in anime['resources'] if r['site']=='MyAnimeList'][0] } }) return out def get_download_data(username: str, site: AnimeListSite, animelist_args={}) -> List[DownloadData]: """ Gets download data from themes.moe and myanimelist.net/anilist.co. Returns a list of mirrors, save_paths and id3 tags. Sorts using `animethemes_dl.OPTIONS['options']` To use anilist.co instead of myanimelist.net, use `anilist`. For additional args for
""" Generates a formatter dict used for formatting filenames. Takes in kwargs of Dict[str,Any]. Does not keep lists, dicts and bools. Automatically filters out` .endswith('ated_at')` for animethemes-dl. Also adds `{video_filetype:webm,anime_filename:...}`. """ formatter = {} for t,d in kwargs.items(): for k,v in d.items(): if (not isinstance(v,(list,dict,bool)) and not k.endswith('ated_at') ): formatter[t+'_'+k] = v formatter['video_filetype'] = 'webm' formatter['anime_filename'] = formatter['video_filename'].split('-')[0] return formatter
identifier_body
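Besides `get_formatter`, whose body the row above isolates, the module leans on `FEATURED_RE` to pull featured artists and comments out of song titles. The regex below is copied from the source (reflowed onto several lines); the sample titles are invented:

import re

FEATURED_RE = re.compile(r"""^
(.*?)                    # song name
(?:
    \ \(?feat\.\ (
        [\w\ ]+          # artist name
        (?:\([\w\ ]+\))? # artist's second name
    )\)?
    |
    \(([\w\ ]+)\)        # comment enclosed in "()"
    (?:\ (.+))?          # after comment details
)?
$""", re.VERBOSE)

for title in ('Northern Lights', 'Sign (feat. Eir Aoi)'):
    print(FEATURED_RE.match(title).groups())
# ('Northern Lights', None, None, None)
# ('Sign', 'Eir Aoi', None, None)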
dldata.py
logger = logging.getLogger('animethemes-dl') FILENAME_BAD = set('#%&{}\\<>*?/$!\'":@+`|') FILENAME_BANNED = set('<>:"/\\|?*') FILENAME_ALLOWEDASCII = set(string.printable).difference(FILENAME_BANNED) # this regex is for getting metadata from a song name, might be straight up wrong FEATURED_RE = re.compile(r"""^ (.*?) # song name (?: \ \(?feat\.\ ( [\w\ ]+ # artist name (?:\([\w\ ]+\))? # artist's second name )\)? | \(([\w\ ]+)\) # comment enclosed in "()" (?:\ (.+))? # after comment details )? $""",re.VERBOSE) def is_entry_wanted(entry: AnimeThemeEntry) -> bool: """ Determines whether all the tags in the entry are the same as in OPTIONS
""" for k in ('spoiler','nsfw'): v = OPTIONS['filter'][k] if v is not None and entry[k] ^ v: return False return True def is_video_wanted(video: AnimeThemeVideo) -> bool: """ Determines wheter all the tags in the entry are the same as in OPTIONS """ for k in ('nc','subbed','lyrics','uncen'): v = OPTIONS['filter'][k] if v is not None and video[k] ^ v: return False if video['resolution'] < OPTIONS['filter']['resolution']: return False if OPTIONS['filter']['source'] is not None and video['source'] != OPTIONS['filter']['source']: return False if OPTIONS['filter']['overlap'] is not None and video['overlap'] not in OPTIONS['filter']['overlap']: # uses lists return False return True def get_amount_episodes(episodes: str) -> int: """ Takes in the animethemes syntax of episodes and returns it's amoutn """ a = 0 for ep in episodes.split(', '): if '-' in ep: start,end = ep.split('-') a += int(end)-int(start) else: a += int(ep) return a def strip_illegal_chars(filename: str) -> str: """ Removes all illegal chars from a filename """ if OPTIONS['download']['ascii']: return ''.join(i for i in filename if i in FILENAME_ALLOWEDASCII) else: return ''.join(i for i in filename if i not in FILENAME_BANNED) def get_formatter(**kwargs) -> Dict[str,str]: """ Generates a formatter dict used for formatting filenames. Takes in kwargs of Dict[str,Any]. Does not keep lists, dicts and bools. Automatically filters out` .endswith('ated_at')` for animethemes-dl. Also adds `{video_filetype:webm,anime_filename:...}`. """ formatter = {} for t,d in kwargs.items(): for k,v in d.items(): if (not isinstance(v,(list,dict,bool)) and not k.endswith('ated_at') ): formatter[t+'_'+k] = v formatter['video_filetype'] = 'webm' formatter['anime_filename'] = formatter['video_filename'].split('-')[0] return formatter def generate_path( anime: AnimeThemeAnime, theme: AnimeThemeTheme, entry: AnimeThemeEntry, video: AnimeThemeVideo) -> ( Tuple[Optional[PathLike],Optional[PathLike]]): """ Generates a path with animethemes api returns. Returns `(videopath|None,audiopath|None)` """ formatter = get_formatter( anime=anime,theme=theme,entry=entry,video=video,song=theme['song']) filename = OPTIONS['download']['filename'] % formatter filename = strip_illegal_chars(filename) if OPTIONS['download']['video_folder']: video_path = realpath(join(OPTIONS['download']['video_folder'],filename)) else: video_path = None if OPTIONS['download']['audio_folder']: audio_path = realpath(join(OPTIONS['download']['audio_folder'],filename)) audio_path = splitext(audio_path)[0]+'.mp3' else: audio_path = None return video_path,audio_path def pick_best_entry(theme: AnimeThemeTheme) -> Optional[Tuple[AnimeThemeEntry,AnimeThemeVideo]]: """ Returns the best entry and video based on OPTIONS. 
Returns None if no entry/video is wanted """ # picking best entry entries = [] for entry in theme['entries']: if not is_entry_wanted(entry): continue # picking best video videos = [] for video in entry['videos']: if ((is_video_wanted(video) or video['id'] in OPTIONS['download']['force_videos']) and not (OPTIONS['filter']['smart'] and entry['spoiler'] and video['overlap']!='None') ): videos.append(video) # can't append empty videos if videos: # sort videos by giving points videos.sort(key=lambda x: ('None','Transition','Over').index(x['overlap'])) entries.append((entry,videos[0])) # pick first (best) # there's a chance no entries will be found if entries: return entries[0] else: logger.debug(f"removed {theme['song']['title']}/{theme['slug']} ({theme['id']})") return None def parse_download_data(data: List[AnimeThemeAnime]) -> List[DownloadData]: """ Parses a list of animethemes api returns for anime. Returns download data. """ out = [] songs = set() for anime in data: last_group = None for tracknumber,theme in enumerate(anime['themes']): # # remove unwanted tags in song title (feat and brackets) match = FEATURED_RE.match(theme['song']['title']) # .* always matches theme['song']['title'],featured,comments,version = match.groups() # filtering: # theme type if OPTIONS['filter']['type'] is not None and OPTIONS['filter']['type']!=theme['type']: continue # groups (for example dubs) if last_group is not None and theme['group']!=last_group: continue else: last_group = theme['group'] # video tags best = pick_best_entry(theme) if best is None: continue entry,video = best # copies if OPTIONS['filter']['no_copy']: if theme['song']['title'] in songs: continue else: songs.add(theme['song']['title']) # fix some problems video['link'] = video['link'].replace('https://v.staging.animethemes.moe','https://animethemes.moe/video') entry['version'] = entry['version'] if entry['version'] else 1 series = [series['name'] for series in anime['series']] # add to all the songs if OPTIONS['filter']['no_copy']: songs.add(theme['song']['title']) # get video path videopath,audiopath = generate_path(anime,theme,entry,video) out.append({ 'url': video['link'], 'video_path': videopath, 'audio_path': audiopath, 'metadata': { # anime 'series': series[0] if len(series)==1 else anime['name'], # mashups are it's own thing (ie isekai quarter) 'album': anime['name'], # discs should be numbered, 'year': anime['year'], 'track': f"{tracknumber+1}/{len(anime['themes'])}", # an ID3 "track/total" syntax 'coverarts': [i['link'] for i in anime['images']][::-1], # theme 'title': theme['song']['title'], 'artists': [artist['name'] for artist in theme['song']['artists']], 'themetype': theme['slug'], # entry 'version': entry['version'], 'notes': entry['notes'], # video 'resolution': video['resolution'], 'videoid': video['id'], 'filesize': video['size'], # const 'genre': [145], # anime 'encodedby': 'animethemes.moe', 'cgroup': 'anime theme', # content group # data pulled from filename 'file_featured':featured, 'file_comments':comments, 'file_version':version }, 'info': { 'malid':[r['external_id'] for r in anime['resources'] if r['site']=='MyAnimeList'][0] } }) return out def get_download_data(username: str, site: AnimeListSite, animelist_args={}) -> List[DownloadData]: """ Gets download data from themes.moe and myanimelist.net/anilist.co. Returns a list of mirrors, save_paths and id3 tags. Sorts using `animethemes_dl.OPTIONS['options']` To use anilist.co instead of myanimelist.net, use `anilist`. For additional args for my
random_line_split
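`pick_best_entry` in the rows above ranks candidate videos with a tuple-`index` sort key, so untouched videos sort before transitions and overlaps. A small self-contained demonstration (the video dicts are invented):

videos = [
    {'id': 3, 'overlap': 'Over'},
    {'id': 1, 'overlap': 'None'},
    {'id': 2, 'overlap': 'Transition'},
]
# 'None' -> 0, 'Transition' -> 1, 'Over' -> 2, so lower is better
videos.sort(key=lambda x: ('None', 'Transition', 'Over').index(x['overlap']))
print([v['id'] for v in videos])  # [1, 2, 3]; videos[0] is the pick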
securitygroup.go
type SecurityGroupPolicy struct { region *SRegion PolicyIndex int // Index number of the security group rule. Protocol string // Protocol; one of TCP, UDP, ICMP. Port string // Port ("all", discrete ports, or a range). ServiceTemplate ServiceTemplateSpecification // Service template ID or service template group ID; ServiceTemplate is mutually exclusive with Protocol+Port. CidrBlock string // CIDR block or IP (mutually exclusive with the other targets). SecurityGroupId string // ID of an already-bound security group used as the rule target (in place of a CIDR/IP). AddressTemplate AddressTemplateSpecification // IP address template ID or IP address template group ID. Action string // ACCEPT or DROP. PolicyDescription string // Description of the security group rule. direction string } type ServiceTemplateSpecification struct { ServiceId string // Service template ID, e.g. ppm-f5n1f8da. ServiceGroupId string // Service template group ID, e.g. ppmg-f5n1f8da. } type AddressTemplateSpecification struct { AddressId string // IP address template ID, e.g. ipm-2uw6ujo6. AddressGroupId string // IP address template group ID, e.g. ipmg-2uw6ujo6. } type SecurityGroupPolicySet struct { Version string Egress []SecurityGroupPolicy // Outbound rules. Ingress []SecurityGroupPolicy // Inbound rules. } type SSecurityGroup struct { multicloud.SSecurityGroup region *SRegion SecurityGroupId string // Security group instance ID, e.g. sg-ohuuioma. SecurityGroupName string // Security group name; arbitrary, but at most 60 characters. SecurityGroupDesc string // Security group remarks; at most 100 characters. ProjectId string // Project ID, 0 by default; can be looked up on the qcloud console's project management page. IsDefault bool // Whether this is the default security group; the default group cannot be deleted. CreatedTime time.Time // Creation time of the security group. SecurityGroupPolicySet SecurityGroupPolicySet } func (self *SRegion) GetSecurityGroups(vpcId string, name string, offset int, limit int) ([]SSecurityGroup, int, error) { if limit > 50 || limit <= 0 { limit = 50 } params := make(map[string]string) params["Limit"] = fmt.Sprintf("%d", limit) params["Offset"] = fmt.Sprintf("%d", offset) if len(name) > 0 { params["Filters.0.Name"] = "security-group-name" params["Filters.0.Values.0"] = name } body, err := self.vpcRequest("DescribeSecurityGroups", params) if err != nil { log.Errorf("GetSecurityGroups fail %s", err) return nil, 0, err } secgrps := make([]SSecurityGroup, 0) err = body.Unmarshal(&secgrps, "SecurityGroupSet") if err != nil { log.Errorf("Unmarshal security groups fail %s", err) return nil, 0, err } total, _ := body.Float("TotalCount") return secgrps, int(total), nil } func (self *SSecurityGroup) GetMetadata() *jsonutils.JSONDict { return nil } func (self *SSecurityGroup) GetVpcId() string { //Tencent Cloud security groups are not tied to a VPC; "normal" is used uniformly return "normal" } func (self *SSecurityGroup) GetId() string { return self.SecurityGroupId } func (self *SSecurityGroup) GetGlobalId() string { return self.SecurityGroupId } func (self *SSecurityGroup) GetDescription() string { return self.SecurityGroupDesc } func (self *SSecurityGroup) GetName() string { if len(self.SecurityGroupName) > 0 { return self.SecurityGroupName } return self.SecurityGroupId } func (self *SecurityGroupPolicy) String() string { rules := self.toRules() result := []string{} for _, rule := range rules { result = append(result, rule.String()) } return strings.Join(result, ";") } func (self *SecurityGroupPolicy) toRules() []cloudprovider.SecurityRule { result := []cloudprovider.SecurityRule{} rule := cloudprovider.SecurityRule{ ExternalId: fmt.Sprintf("%d", self.PolicyIndex), SecurityRule: secrules.SecurityRule{ Action: secrules.SecurityRuleAllow, Protocol: secrules.PROTO_ANY, Direction: secrules.TSecurityRuleDirection(self.direction), Priority: self.PolicyIndex, Ports: []int{}, PortStart: -1, PortEnd: -1, }, } if len(self.SecurityGroupId) != 0 { //ignore rules whose target is another security group return nil } if strings.ToLower(self.Action) == "drop" { rule.Action = secrules.SecurityRuleDeny } if utils.IsInStringArray(strings.ToLower(self.Protocol), []string{"tcp", "udp", "icmp"}) { rule.Protocol =
strings.ToLower(self.Protocol) } if strings.Index(self.Port, ",") > 0 { for _, _port := range strings.Split(self.Port, ",") { port, err := strconv.Atoi(_port) if err != nil { log.Errorf("parse secgroup port %s %s error %v", self.Port, _port, err) continue } rule.Ports = append(rule.Ports, port) } } else if strings.Index(self.Port, "-") > 0 { ports := strings.Split(self.Port, "-") if len(ports) == 2 { portStart, err := strconv.Atoi(ports[0]) if err != nil { return nil } portEnd, err := strconv.Atoi(ports[1]) if err != nil { return nil } rule.PortStart, rule.PortEnd = portStart, portEnd } } else if strings.ToLower(self.Port) != "all" { port, err := strconv.Atoi(self.Port) if err != nil { return nil } rule.PortStart, rule.PortEnd = port, port } if len(self.AddressTemplate.AddressGroupId) > 0 { addressGroup, total, err := self.region.AddressGroupList(self.AddressTemplate.AddressGroupId, "", 0, 1) if err != nil { log.Errorf("Get AddressList %s failed %v", self.AddressTemplate.AddressId, err) return nil } if total != 1 { return nil } for i := 0; i < len(addressGroup[0].AddressTemplateIdSet); i++ { rules, err := self.getAddressRules(rule, addressGroup[0].AddressTemplateIdSet[i]) if err != nil { return nil } result = append(result, rules...) } } else if len(self.AddressTemplate.AddressId) > 0 { rules, err := self.getAddressRules(rule, self.AddressTemplate.AddressId) if err != nil { return nil } result = append(result, rules...) } else if len(self.CidrBlock) > 0 { rule.ParseCIDR(self.CidrBlock) result = append(result, rule) } return result } func (self *SecurityGroupPolicy) getAddressRules(rule cloudprovider.SecurityRule, addressId string) ([]cloudprovider.SecurityRule, error) { result := []cloudprovider.SecurityRule{} address, total, err := self.region.AddressList(addressId, "", 0, 1) if err != nil { log.Errorf("Get AddressList %s failed %v", self.AddressTemplate.AddressId, err) return nil, err } if total != 1 { return nil, fmt.Errorf("failed to find address %s", addressId) } for _, ip := range address[0].AddressSet { rule.ParseCIDR(ip) result = append(result, rule) } return result, nil } func (self *SSecurityGroup) GetRules() ([]cloudprovider.SecurityRule, error) { secgroup, err := self.region.GetSecurityGroupDetails(self.SecurityGroupId) if err != nil { return nil, err } for i := 0; i < len(secgroup.SecurityGroupPolicySet.Egress); i++ { secgroup.SecurityGroupPolicySet.Egress[i].direction = "out" } for i := 0; i < len(secgroup.SecurityGroupPolicySet.Ingress); i++ { secgroup.SecurityGroupPolicySet.Ingress[i].direction = "in" } originRules := []SecurityGroupPolicy{} originRules = append(originRules, secgroup.SecurityGroupPolicySet.Egress...) originRules = append(originRules, secgroup.SecurityGroupPolicySet.Ingress...) for i := 0; i < len(originRules); i++ { originRules[i].region = self.region } rules := []cloudprovider.SecurityRule{} for _, rule := range originRules {
"yunion.io/x/onecloud/pkg/cloudprovider" "yunion.io/x/onecloud/pkg/multicloud" )
random_line_split
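`toRules` in the row above decodes the Tencent `Port` field, which may be "all", a single port, an "a-b" range, or a comma-separated list. A Python sketch of that branching; `parse_port` is an illustrative helper, not part of the Go source:

from typing import List, Tuple

def parse_port(port: str) -> Tuple[List[int], int, int]:
    # returns (ports, port_start, port_end); -1 mirrors the Go code's "unset"
    ports, start, end = [], -1, -1
    if ',' in port:
        ports = [int(p) for p in port.split(',')]
    elif '-' in port:
        lo, hi = port.split('-')
        start, end = int(lo), int(hi)
    elif port.lower() != 'all':
        start = end = int(port)
    return ports, start, end

print(parse_port('80'))         # ([], 80, 80)
print(parse_port('8000-8080'))  # ([], 8000, 8080)
print(parse_port('22,80,443'))  # ([22, 80, 443], -1, -1)
print(parse_port('ALL'))        # ([], -1, -1)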
securitygroup.go
offset int, limit int) ([]SSecurityGroup, int, error) { if limit > 50 || limit <= 0 { limit = 50 } params := make(map[string]string) params["Limit"] = fmt.Sprintf("%d", limit) params["Offset"] = fmt.Sprintf("%d", offset) if len(name) > 0 { params["Filters.0.Name"] = "security-group-name" params["Filters.0.Values.0"] = name } body, err := self.vpcRequest("DescribeSecurityGroups", params) if err != nil { log.Errorf("GetSecurityGroups fail %s", err) return nil, 0, err } secgrps := make([]SSecurityGroup, 0) err = body.Unmarshal(&secgrps, "SecurityGroupSet") if err != nil { log.Errorf("Unmarshal security groups fail %s", err) return nil, 0, err } total, _ := body.Float("TotalCount") return secgrps, int(total), nil } func (self *SSecurityGroup) GetMetadata() *jsonutils.JSONDict { return nil } func (self *SSecurityGroup) GetVpcId() string { //腾讯云安全组未与vpc关联,统一使用normal return "normal" } func (self *SSecurityGroup) GetId() string { return self.SecurityGroupId } func (self *SSecurityGroup) GetGlobalId() string { return self.SecurityGroupId } func (self *SSecurityGroup) GetDescription() string { return self.SecurityGroupDesc } func (self *SSecurityGroup) GetName() string { if len(self.SecurityGroupName) > 0 { return self.SecurityGroupName } return self.SecurityGroupId } func (self *SecurityGroupPolicy) String() string { rules := self.toRules() result := []string{} for _, rule := range rules { result = append(result, rule.String()) } return strings.Join(result, ";") } func (self *SecurityG
Rules() []cloudprovider.SecurityRule { result := []cloudprovider.SecurityRule{} rule := cloudprovider.SecurityRule{ ExternalId: fmt.Sprintf("%d", self.PolicyIndex), SecurityRule: secrules.SecurityRule{ Action: secrules.SecurityRuleAllow, Protocol: secrules.PROTO_ANY, Direction: secrules.TSecurityRuleDirection(self.direction), Priority: self.PolicyIndex, Ports: []int{}, PortStart: -1, PortEnd: -1, }, } if len(self.SecurityGroupId) != 0 { //安全组关联安全组的规则忽略 return nil } if strings.ToLower(self.Action) == "drop" { rule.Action = secrules.SecurityRuleDeny } if utils.IsInStringArray(strings.ToLower(self.Protocol), []string{"tcp", "udp", "icmp"}) { rule.Protocol = strings.ToLower(self.Protocol) } if strings.Index(self.Port, ",") > 0 { for _, _port := range strings.Split(self.Port, ",") { port, err := strconv.Atoi(_port) if err != nil { log.Errorf("parse secgroup port %s %s error %v", self.Port, _port, err) continue } rule.Ports = append(rule.Ports, port) } } else if strings.Index(self.Port, "-") > 0 { ports := strings.Split(self.Port, "-") if len(ports) == 2 { portStart, err := strconv.Atoi(ports[0]) if err != nil { return nil } portEnd, err := strconv.Atoi(ports[1]) if err != nil { return nil } rule.PortStart, rule.PortEnd = portStart, portEnd } } else if strings.ToLower(self.Port) != "all" { port, err := strconv.Atoi(self.Port) if err != nil { return nil } rule.PortStart, rule.PortEnd = port, port } if len(self.AddressTemplate.AddressGroupId) > 0 { addressGroup, total, err := self.region.AddressGroupList(self.AddressTemplate.AddressGroupId, "", 0, 1) if err != nil { log.Errorf("Get AddressList %s failed %v", self.AddressTemplate.AddressId, err) return nil } if total != 1 { return nil } for i := 0; i < len(addressGroup[0].AddressTemplateIdSet); i++ { rules, err := self.getAddressRules(rule, addressGroup[0].AddressTemplateIdSet[i]) if err != nil { return nil } result = append(result, rules...) } } else if len(self.AddressTemplate.AddressId) > 0 { rules, err := self.getAddressRules(rule, self.AddressTemplate.AddressId) if err != nil { return nil } result = append(result, rules...) } else if len(self.CidrBlock) > 0 { rule.ParseCIDR(self.CidrBlock) result = append(result, rule) } return result } func (self *SecurityGroupPolicy) getAddressRules(rule cloudprovider.SecurityRule, addressId string) ([]cloudprovider.SecurityRule, error) { result := []cloudprovider.SecurityRule{} address, total, err := self.region.AddressList(addressId, "", 0, 1) if err != nil { log.Errorf("Get AddressList %s failed %v", self.AddressTemplate.AddressId, err) return nil, err } if total != 1 { return nil, fmt.Errorf("failed to find address %s", addressId) } for _, ip := range address[0].AddressSet { rule.ParseCIDR(ip) result = append(result, rule) } return result, nil } func (self *SSecurityGroup) GetRules() ([]cloudprovider.SecurityRule, error) { secgroup, err := self.region.GetSecurityGroupDetails(self.SecurityGroupId) if err != nil { return nil, err } for i := 0; i < len(secgroup.SecurityGroupPolicySet.Egress); i++ { secgroup.SecurityGroupPolicySet.Egress[i].direction = "out" } for i := 0; i < len(secgroup.SecurityGroupPolicySet.Ingress); i++ { secgroup.SecurityGroupPolicySet.Ingress[i].direction = "in" } originRules := []SecurityGroupPolicy{} originRules = append(originRules, secgroup.SecurityGroupPolicySet.Egress...) originRules = append(originRules, secgroup.SecurityGroupPolicySet.Ingress...) 
for i := 0; i < len(originRules); i++ { originRules[i].region = self.region } rules := []cloudprovider.SecurityRule{} for _, rule := range originRules { subRules := rule.toRules() rules = append(rules, subRules...) } return rules, nil } func (self *SSecurityGroup) GetStatus() string { return "" } func (self *SSecurityGroup) IsEmulated() bool { return false } func (self *SSecurityGroup) Refresh() error { group, err := self.region.GetSecurityGroupDetails(self.SecurityGroupId) if err != nil { return err } return jsonutils.Update(self, group) } func (self *SSecurityGroup) deleteRules(rules []cloudprovider.SecurityRule, direction string) error { ids := []string{} for _, r := range rules { ids = append(ids, r.ExternalId) } if len(ids) > 0 { err := self.region.DeleteRules(self.SecurityGroupId, direction, ids) if err != nil { return errors.Wrapf(err, "deleteRules(%s)", ids) } } return nil } func (self *SSecurityGroup) SyncRules(common, inAdds, outAdds, inDels, outDels []cloudprovider.SecurityRule) error { rules := append(common, append(inAdds, outAdds...)...) return self.region.syncSecgroupRules(self.SecurityGroupId, rules) } func (self *SRegion) syncSecgroupRules(secgroupid string, rules []cloudprovider.SecurityRule) error { err := self.deleteAllRules(secgroupid) if err != nil { return errors.Wrap(err, "deleteAllRules") } egressIndex, ingressIndex := -1, -1 for _, rule := range rules { policyIndex := 0 switch rule.Direction { case secrules.DIR_IN: ingressIndex++ policyIndex = ingressIndex case secrules.DIR_OUT: egressIndex++ policyIndex = egressIndex default: return fmt.Errorf("Unknown rule direction %v for secgroup %s", rule, secgroupid) } //为什么不一次创建完成? //答: 因为如果只有入方向安全组规则,创建时会提示缺少出方向规则。 //为什么不分两次,一次创建入方向规则,一次创建出方向规则? //答: 因为这样就不能设置优先级了,一次性创建的出或入方向的优先级必须一样。 err := self.AddRule(secgroupid, policyIndex, rule) if err !=
roupPolicy) to
identifier_name
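`GetSecurityGroups` above (and `AddressList`/`AddressGroupList` in the neighbouring rows) flatten query filters into `Filters.N.Name`/`Filters.N.Values.0` parameters plus `Offset` and `Limit`. A Python sketch of the pattern; `build_params` is an invented helper and the filter values are examples:

def build_params(filters, offset=0, limit=20):
    params = {'Offset': str(offset), 'Limit': str(limit)}
    for i, (name, value) in enumerate(filters):
        params['Filters.%d.Name' % i] = name
        params['Filters.%d.Values.0' % i] = value
    return params

print(build_params([('security-group-name', 'web')], limit=50))
# {'Offset': '0', 'Limit': '50',
#  'Filters.0.Name': 'security-group-name', 'Filters.0.Values.0': 'web'}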
securitygroup.go
ip := range address[0].AddressSet { rule.ParseCIDR(ip) result = append(result, rule) } return result, nil } func (self *SSecurityGroup) GetRules() ([]cloudprovider.SecurityRule, error) { secgroup, err := self.region.GetSecurityGroupDetails(self.SecurityGroupId) if err != nil { return nil, err } for i := 0; i < len(secgroup.SecurityGroupPolicySet.Egress); i++ { secgroup.SecurityGroupPolicySet.Egress[i].direction = "out" } for i := 0; i < len(secgroup.SecurityGroupPolicySet.Ingress); i++ { secgroup.SecurityGroupPolicySet.Ingress[i].direction = "in" } originRules := []SecurityGroupPolicy{} originRules = append(originRules, secgroup.SecurityGroupPolicySet.Egress...) originRules = append(originRules, secgroup.SecurityGroupPolicySet.Ingress...) for i := 0; i < len(originRules); i++ { originRules[i].region = self.region } rules := []cloudprovider.SecurityRule{} for _, rule := range originRules { subRules := rule.toRules() rules = append(rules, subRules...) } return rules, nil } func (self *SSecurityGroup) GetStatus() string { return "" } func (self *SSecurityGroup) IsEmulated() bool { return false } func (self *SSecurityGroup) Refresh() error { group, err := self.region.GetSecurityGroupDetails(self.SecurityGroupId) if err != nil { return err } return jsonutils.Update(self, group) } func (self *SSecurityGroup) deleteRules(rules []cloudprovider.SecurityRule, direction string) error { ids := []string{} for _, r := range rules { ids = append(ids, r.ExternalId) } if len(ids) > 0 { err := self.region.DeleteRules(self.SecurityGroupId, direction, ids) if err != nil { return errors.Wrapf(err, "deleteRules(%s)", ids) } } return nil } func (self *SSecurityGroup) SyncRules(common, inAdds, outAdds, inDels, outDels []cloudprovider.SecurityRule) error { rules := append(common, append(inAdds, outAdds...)...) return self.region.syncSecgroupRules(self.SecurityGroupId, rules) } func (self *SRegion) syncSecgroupRules(secgroupid string, rules []cloudprovider.SecurityRule) error { err := self.deleteAllRules(secgroupid) if err != nil { return errors.Wrap(err, "deleteAllRules") } egressIndex, ingressIndex := -1, -1 for _, rule := range rules { policyIndex := 0 switch rule.Direction { case secrules.DIR_IN: ingressIndex++ policyIndex = ingressIndex case secrules.DIR_OUT: egressIndex++ policyIndex = egressIndex default: return fmt.Errorf("Unknown rule direction %v for secgroup %s", rule, secgroupid) } //为什么不一次创建完成? //答: 因为如果只有入方向安全组规则,创建时会提示缺少出方向规则。 //为什么不分两次,一次创建入方向规则,一次创建出方向规则? 
//答: 因为这样就不能设置优先级了,一次性创建的出或入方向的优先级必须一样。 err := self.AddRule(secgroupid, policyIndex, rule) if err != nil { return errors.Wrap(err, "AddRule") } } return nil } func (self *SRegion) deleteAllRules(secgroupid string) error { params := map[string]string{"SecurityGroupId": secgroupid, "SecurityGroupPolicySet.Version": "0"} _, err := self.vpcRequest("ModifySecurityGroupPolicies", params) return err } func (self *SRegion) DeleteRules(secgroupId, direction string, ids []string) error { if len(ids) == 0 { return nil } params := map[string]string{"SecurityGroupId": secgroupId} for idx, id := range ids { params[fmt.Sprintf("SecurityGroupPolicySet.%s.%d.PolicyIndex", direction, idx)] = id } _, err := self.vpcRequest("DeleteSecurityGroupPolicies", params) return err } func (self *SRegion) AddRule(secgroupId string, policyIndex int, rule cloudprovider.SecurityRule) error { params := map[string]string{} params["SecurityGroupId"] = secgroupId direction := "Egress" action := "accept" if rule.Action == secrules.SecurityRuleDeny { action = "drop" } protocol := "ALL" if rule.Protocol != secrules.PROTO_ANY { protocol = rule.Protocol } if rule.Direction == secrules.DIR_IN { direction = "Ingress" } params[fmt.Sprintf("SecurityGroupPolicySet.%s.0.PolicyIndex", direction)] = fmt.Sprintf("%d", policyIndex) params[fmt.Sprintf("SecurityGroupPolicySet.%s.0.Action", direction)] = action params[fmt.Sprintf("SecurityGroupPolicySet.%s.0.PolicyDescription", direction)] = rule.Description params[fmt.Sprintf("SecurityGroupPolicySet.%s.0.Protocol", direction)] = protocol params[fmt.Sprintf("SecurityGroupPolicySet.%s.0.CidrBlock", direction)] = rule.IPNet.String() if rule.Protocol == secrules.PROTO_TCP || rule.Protocol == secrules.PROTO_UDP { port := "ALL" if rule.PortEnd > 0 && rule.PortStart > 0 { if rule.PortStart == rule.PortEnd { port = fmt.Sprintf("%d", rule.PortStart) } else { port = fmt.Sprintf("%d-%d", rule.PortStart, rule.PortEnd) } } else if len(rule.Ports) > 0 { ports := []string{} for _, _port := range rule.Ports { ports = append(ports, fmt.Sprintf("%d", _port)) } port = strings.Join(ports, ",") } params[fmt.Sprintf("SecurityGroupPolicySet.%s.0.Port", direction)] = port } _, err := self.vpcRequest("CreateSecurityGroupPolicies", params) if err != nil { log.Errorf("Create SecurityGroup rule %s error: %v", rule, err) return err } return nil } func (self *SRegion) GetSecurityGroupDetails(secGroupId string) (*SSecurityGroup, error) { params := make(map[string]string) params["Region"] = self.Region params["SecurityGroupId"] = secGroupId body, err := self.vpcRequest("DescribeSecurityGroupPolicies", params) if err != nil { log.Errorf("DescribeSecurityGroupAttribute fail %s", err) return nil, err } secgrp := SSecurityGroup{SecurityGroupId: secGroupId, region: self} err = body.Unmarshal(&secgrp.SecurityGroupPolicySet, "SecurityGroupPolicySet") if err != nil { log.Errorf("Unmarshal security group details fail %s", err) return nil, err } return &secgrp, nil } func (self *SRegion) DeleteSecurityGroup(secGroupId string) error { params := make(map[string]string) params["Region"] = self.Region params["SecurityGroupId"] = secGroupId _, err := self.vpcRequest("DeleteSecurityGroup", params) return err } type AddressTemplate struct { AddressSet []string AddressTemplateId string AddressTemplateName string CreatedTime time.Time } func (self *SRegion) AddressList(addressId, addressName string, offset, limit int) ([]AddressTemplate, int, error) { params := map[string]string{} filter := 0 if len(addressId) > 0 { 
params[fmt.Sprintf("Filters.%d.Name", filter)] = "address-template-id" params[fmt.Sprintf("Filters.%d.Values.0", filter)] = addressId filter++ } if len(addressName) > 0 { params[fmt.Sprintf("Filters.%d.Name", filter)] = "address-template-name" params[fmt.Sprintf("Filters.%d.Values.0", filter)] = addressName filter++ } params["Offset"] = fmt.Sprintf("%d", offset) if limit == 0 { limit = 20 } params["Limit"] = fmt.Sprintf("%d", limit) body, err := self.vpcRequest("DescribeAddressTemplates", params) if err != nil { return nil, 0, err } addressTemplates := []AddressTemplate{} err = body.Unmarshal(&addressTemplates, "AddressTemplateSet") if err != nil { return nil, 0, err } total, _ := body.Float("TotalCount") return addressTemplates, int(total), nil } type AddressTemplateGroup struct { AddressTemplateIdSet []string AddressTemplateGroupName string AddressTemplateGroupId string CreatedTime time.Time } func (self *SRegion) AddressGroupList(groupId, groupName string, offset, limit int) ([]AddressTemplateGroup, int, error) { params := map[string]string{} filter := 0 if len(groupId) > 0 { params[fmt.Sprintf("Filters.%d.Name", filter)] = "a
ddress-template-group-id"
conditional_block
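`syncSecgroupRules` above assigns `PolicyIndex` from two independent zero-based counters, one per direction, because Tencent numbers ingress and egress rules separately. A Python sketch of that bookkeeping over an invented rules list:

rules = [{'dir': 'in'}, {'dir': 'out'}, {'dir': 'in'}, {'dir': 'in'}]
egress_index, ingress_index = -1, -1
for rule in rules:
    if rule['dir'] == 'in':
        ingress_index += 1
        rule['policy_index'] = ingress_index
    else:
        egress_index += 1
        rule['policy_index'] = egress_index
print([(r['dir'], r['policy_index']) for r in rules])
# [('in', 0), ('out', 0), ('in', 1), ('in', 2)]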
securitygroup.go
int, limit int) ([]SSecurityGroup, int, error) { if limit > 50 || limit <= 0 { limit = 50 } params := make(map[string]string) params["Limit"] = fmt.Sprintf("%d", limit) params["Offset"] = fmt.Sprintf("%d", offset) if len(name) > 0 { params["Filters.0.Name"] = "security-group-name" params["Filters.0.Values.0"] = name } body, err := self.vpcRequest("DescribeSecurityGroups", params) if err != nil { log.Errorf("GetSecurityGroups fail %s", err) return nil, 0, err } secgrps := make([]SSecurityGroup, 0) err = body.Unmarshal(&secgrps, "SecurityGroupSet") if err != nil { log.Errorf("Unmarshal security groups fail %s", err) return nil, 0, err } total, _ := body.Float("TotalCount") return secgrps, int(total), nil } func (self *SSecurityGroup) GetMetadata() *jsonutils.JSONDict { return nil } func (self *SSecurityGroup) GetVpcId() string { //腾讯云安全组未与vpc关联,统一使用normal return "normal" } func (self *SSecurityGroup) GetId() string { return self.SecurityGroupId } func (self *SSecurityGroup) GetGlobalId() string { return self.SecurityGroupId } func (self *SSecurityGroup) GetDescription() string { return self.SecurityGroupDesc } func (self *SSecurityGroup) GetName() string { if len(self.SecurityGroupName) > 0 { return self.SecurityGroupName } return self.SecurityGroupId } func (self *SecurityGroupPolicy) String() string { rules := self.toRules() result := []string{} for _, rule := range rules { result = append(result, rule.String()) } return strings.Join(result, ";") } func (self *SecurityGroupPolicy) toRules() []cloudprovider.SecurityRule { result := []cloudprovider.SecurityRule{} rule := cloudprovider.SecurityRule{ ExternalId: fmt.Sprintf("%d", self.PolicyIndex), SecurityRule: secrules.SecurityRule{ Action: secrules.SecurityRuleAllow, Protocol: secrules.PROTO_ANY, Direction: secrules.TSecurityRuleDirection(self.direction), Priority: self.PolicyIndex, Ports: []int{}, PortStart: -1, PortEnd: -1, }, } if len(self.SecurityGroupId) != 0 { //安全组关联安全组的规则忽略 return nil } if strings.ToLower(self.Action) == "drop" { rule.Action = secrules.SecurityRuleDeny } if utils.IsInStringArray(strings.ToLower(self.Protocol), []string{"tcp", "udp", "icmp"}) { rule.Protocol = strings.ToLower(self.Protocol) } if strings.Index(self.Port, ",") > 0 { for _, _port := range strings.Split(self.Port, ",") { port, err := strconv.Atoi(_port) if err != nil { log.Errorf("parse secgroup port %s %s error %v", self.Port, _port, err) continue } rule.Ports = append(rule.Ports, port) } } else if strings.Index(self.Port, "-") > 0 { ports := strings.Split(self.Port, "-") if len(ports) == 2 { portStart, err := strconv.Atoi(ports[0]) if err != nil { return nil } portEnd, err := strconv.Atoi(ports[1]) if err != nil { return nil } rule.PortStart, rule.PortEnd = portStart, portEnd } } else if strings.ToLower(self.Port) != "all" { port, err := strconv.Atoi(self.Port) if err != nil { return nil } rule.PortStart, rule.PortEnd = port, port } if len(self.AddressTemplate.AddressGroupId) > 0 { addressGroup, total, err := self.region.AddressGroupList(self.AddressTemplate.AddressGroupId, "", 0, 1) if err != nil { log.Errorf("Get AddressList %s failed %v", self.AddressTemplate.AddressId, err) return nil } if total != 1 { return nil } for i := 0; i < len(addressGroup[0].AddressTemplateIdSet); i++ { rules, err := self.getAddressRules(rule, addressGroup[0].AddressTemplateIdSet[i]) if err != nil { return nil } result = append(result, rules...) 
} } else if len(self.AddressTemplate.AddressId) > 0 { rules, err := self.getAddressRules(rule, self.AddressTemplate.AddressId) if err != nil { return nil } result = append(result, rules...) } else if len(self.CidrBlock) > 0 { rule.ParseCIDR(self.CidrBlock) result = append(result, rule) } return result } func (self *SecurityGroupPolicy) getAddressRules(rule cloudprovider.SecurityRule, addressId string) ([]cloudprovider.SecurityRule, error) { result := []cloudprovider.SecurityRule{} address, total, err := self.region.AddressList(addressId, "", 0, 1) if err != nil { log.Errorf("Get AddressList %s failed %v", self.AddressTemplate.AddressId, err) return nil, err } if total != 1 { return nil, fmt.Errorf("failed to find address %s", addressId) } for _, ip := range address[0].AddressSet { rule.ParseCIDR(ip) result = append(result, rule) } return result, nil } func (self *SSecurityGroup) GetRules() ([]cloudprovider.SecurityRule, error) { secgroup, err := self.region.GetSecurityGroupDetails(self.SecurityGroupId) if err != nil { return nil, err } for i := 0; i < len(secgroup.SecurityGroupPolicySet.Egress); i++ { secgroup.SecurityGroupPolicySet.Egress[i].direction = "out" } for i := 0; i < len(secgroup.SecurityGroupPolicySet.Ingress); i++ { secgroup.SecurityGroupPolicySet.Ingress[i].direction = "in" } originRules := []SecurityGroupPolicy{} originRules = append(originRules, secgroup.SecurityGroupPolicySet.Egress...) originRules = append(originRules, secgroup.SecurityGroupPolicySet.Ingress...) for i := 0; i < len(originRules); i++ { originRules[i].region = self.region } rules := []cloudprovider.SecurityRule{} for _, rule := range originRules { subRules := rule.toRules() rules = append(rules, subRules...) } return rules, nil } func (self *SSecurityGroup) GetStatus() string { return "" } func (self *SSecurityGroup) IsEmulated() bool { return false } func (self *SSecurityGroup) Refresh() error { group, err := self.region.GetSecurityGroupDetails(self.SecurityGroupId) if err != nil { return err } return jsonutils.Update(self, group) } func (self *SSecurityGroup) deleteRules(rules []cloudprovider.SecurityRule, direction string) error { ids := []string{} for _, r := range rules { ids = append(ids, r.ExternalId) } if len(ids) > 0 { err := self.region.DeleteRules(self.SecurityGroupId, direction, ids) if err != nil { return errors.Wrapf(err, "deleteRules(%s)", ids) } } return nil } func (self *SSecurityGroup) SyncRules(common, inAdds, outAdds, inDels, outDels []cloudprovider.SecurityRule) error { rules := append(common, append(inAdds, outAdds...)...) return self.region.syncSecg
ressIndex := -1, -1 for _, rule := range rules { policyIndex := 0 switch rule.Direction { case secrules.DIR_IN: ingressIndex++ policyIndex = ingressIndex case secrules.DIR_OUT: egressIndex++ policyIndex = egressIndex default: return fmt.Errorf("Unknown rule direction %v for secgroup %s", rule, secgroupid) } //Why not create everything in a single call? //A: Because if there are only ingress rules, creation fails complaining that egress rules are missing. //Why not split it into two calls, one for ingress rules and one for egress rules? //A: Because then the priorities could not be set; rules created in one batch for a given direction must all share the same priority. err := self.AddRule(secgroupid, policyIndex, rule) if err !=
roupRules(self.SecurityGroupId, rules) } func (self *SRegion) syncSecgroupRules(secgroupid string, rules []cloudprovider.SecurityRule) error { err := self.deleteAllRules(secgroupid) if err != nil { return errors.Wrap(err, "deleteAllRules") } egressIndex, ing
identifier_body
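`AddRule` above flattens a single rule into `SecurityGroupPolicySet.<Direction>.0.<Field>` request parameters. A Python sketch of the payload it assembles; `add_rule_params` and all argument values are illustrative, not part of the Go source:

def add_rule_params(secgroup_id, policy_index, direction, action, protocol, cidr, port=None):
    d = 'Ingress' if direction == 'in' else 'Egress'
    params = {
        'SecurityGroupId': secgroup_id,
        'SecurityGroupPolicySet.%s.0.PolicyIndex' % d: str(policy_index),
        'SecurityGroupPolicySet.%s.0.Action' % d: action,      # accept / drop
        'SecurityGroupPolicySet.%s.0.Protocol' % d: protocol,  # ALL / tcp / udp / icmp
        'SecurityGroupPolicySet.%s.0.CidrBlock' % d: cidr,
    }
    if port is not None:  # the Go code only sets Port for tcp/udp
        params['SecurityGroupPolicySet.%s.0.Port' % d] = port
    return params

print(add_rule_params('sg-ohuuioma', 0, 'in', 'accept', 'tcp', '10.0.0.0/8', '22,80'))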
ext-all-extend.js
= r - 1; newF = -1; } else if (f == "down") { var cm = this.grid.colModel, clen = cm.getColumnCount(), ds = this.grid.store, rlen = ds.getCount(); newR = r + 1; if (newR >= rlen) { newR = 0; newC = c + 1; } } var newCell = this.grid.walkCells( newR, newC, newF, this.grid.isEditor && this.grid.editing ? this.acceptsNav : this.isSelectable, // *** handle tabbing while editorgrid is in edit mode this ); if(newCell){ // *** reassign r & c variables to newly-selected cell's row and column r = newCell[0]; c = newCell[1]; var g = this.grid; if (g.getColumnModel().isCellEditable(c, r)) { window.setTimeout(function(){ g.startEditing(r, c); }, 100); } else { this.foucsNextCell(r, c, f); } } } }); /** * 使GridPanel单元格中文字可以复制 */ /* { if (!Ext.grid.GridView.prototype.templates) { Ext.grid.GridView.prototype.templates = {}; } Ext.grid.GridView.prototype.templates.cell = new Ext.Template( ' <td class="x-grid3-col x-grid3-cell x-grid3-td-{id} x-selectable {css}" style="{style}" tabIndex="0" {cellAttr}>' , ' <div class="x-grid3-cell-inner x-grid3-col-{id}" {attr}>{value} </div>' , ' </td>' ); } */ //Ext.override(Ext.form.DateField, { // // fullParseDate : function(value) { // var v = ""; // if (value != "") { // v = value.replaceAll("-", ""); // v = v.replaceAll("/", ""); // v = v.replaceAll("\\.", ""); // v = v.replaceAll(" ", ""); // v = v.replaceAll(":", ""); // for (var i = v.length + 1; i <= 14; i++) { // if (i == 6 || i == 8) { // v += "1"; // } else { // v += "0"; // } // } // } // // // var sMonth = v.substr(4, 2); // var sDay = v.substr(6, 2); // if (sMonth > "12") { // alert("日期录入非法!"); // return null; // } // if (sMonth == '01' || sMonth == '03' || sMonth == '05' || sMonth == '07' || sMonth == '08' || sMonth == '10' || sMonth == '12') { // if (sDay > "31") { // alert("日期录入非法!"); // return null; // } // } else if (sMonth == '04' || sMonth == '06' || sMonth == '09' || sMonth == '11') { // if (sDay > "30") { // alert("日期录入非法!"); // return null; // } // } else if (sMonth == '02') { // if (sDay > "28") { // alert("日期录入非法!"); // return null; // } // } // // return Date.parseDate(v, 'YmdHis'); // }, // // beforeBlur : function(){ // var d = this.fullParseDate(this.getRawValue()); // if (d) { // var value = d.format('Y-m-d H:i:s'); // this.value = value; // this.setValue(value); // var rawValue = d.format(this.format); // this.setRawValue(rawValue); // } else { // this.value = ""; // this.setValue(""); // this.setRawValue(""); // } // } //}); String.prototype.replaceAll = function(s1, s2) { return this.replace(new RegExp(s1,"gm"), s2); }; /** * @author yaolf * @date 2011-7-21 * @param {Object} key 搜索的关键字 * * 为树添加搜索方法,如果没有配置searchConfig,默认搜索的是树节点的Text属性 * searchConfig有attributes属性,值为数组,表示要在哪些属性中进行搜索,例如: * searchConfig: { * attributes: ['text', 'name'] * } */ Ext.override(Ext.tree.TreePanel, { search: function(key){ var se = this.searchEngine; if (se && se.key === key) { var fn = se.foundNodes; for (var i = 0; i < fn.length; i++) { if (se.activeNode === fn[i].id) { var nn = ((i == fn.length - 1) ? 
fn[0] : fn[++i]); se.activeNode = nn.id; break; } } this.searchEngine.activeNode = se.activeNode; } else { se = this.searchEngine = this.createSearchEngine(key); } var activeNode = this.getNodeById(se.activeNode); if (activeNode) { try { this.expandPath(activeNode.getPath("id"), "id", function(s, o){ if (s) { o.select(); } }); } catch(err) { } } }, searchMatchNodes: function(key){ var attrs = this.searchConfig.attributes || ['text']; function contains(n, k) { var regexp = new RegExp(k, "i"); for (var i = 0; i < attrs.length; i++) { var value = new String(n.attributes[attrs[i]]); if (value && value.search(regexp) != -1) { return true; } } return false; } var foundNodes = []; this.root.cascade(function(n){ if (contains(n, key)) { foundNodes.push(n); } }); return foundNodes; }, createSearchEngine: function(key){ var sc = this.searchConfig = this.searchConfig || {}, se = {}; se.searchConfig = sc; se.key = key; var fn = this.searchMatchNodes(key); se.foundNodes = fn; se.activeNode = fn.length > 0 ? fn[0].id : undefined; return se; } }); Ext.Ajax.on('requestexception', function(conn, response, options){ try { var error = Ext.decode(response.responseText); if (!error) return; alert('出错了: ' + error.msg); // var msg = ['噢,出错了!!!\t\n--------------------------------------------------------\n错误信息:', // error.msg, '\n 控制器:', error.controller, '\n 错误行:', error.line, '\n 动 作:', // error.action, '\n'].join(''); if (error.type == 'sessiontimeout') { window.location.href = window.location.protocol + '//' + window.location.host + contextPath; } } catch(err) { } }); /** * 弹出大文本框 */ Ext.haode.showTextAreaDlg = function(obj){ var a = Ext.get(obj).query("span[class=displaytext]"); if (a.length == 0) { return; } var txtObj = a[0]; var v = txtObj.innerHTML; window.showModalDialog("common/textarea_field/index.jsp", {value:v, readOnly:true}, "dialogWidth=500px;dialogHeight=270px"); }; /** * gridpanel 默认有斑马线和列分割线 */ Ext.override(Ext.grid.GridPanel, { columnLines: true, stripeRows: true }); /** * 分页条增加每页显示多少条 */ Ext.override(Ext.PagingToolbar, { initComponent : function() { // TODO read from cookie var defaultPageSize = [50, 100, this.pageSize > 100 ? 100 : this.pageSize], pageSizeStore = []; defaultPageSize.distinct(); for (var i = 0; i < defaultPageSize.length; i++) { var s = defaultPageSize[i]; pageSizeStore.push([s, s]); } pageSizeStore.sort(function(i,j){ return i[0] - j[0]; }); var pagingItems = [ this.first = new Ext.Toolbar.Button({ tooltip : this.firstText, overflowText : this.firstText, iconCls : 'x-tbar-page-first', disabled : true, handler : this.moveFirst, scope : this }), this.prev = new Ext.Toolbar.Button({ tooltip : this.prevText, overflowText : this.prevText, iconCls : 'x-tbar-page-prev', disabled : true,
random_line_split
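The `foucsNextCell` override above steps the grid selection right in 'access' mode and down with wrap-around in 'excel' mode. A Python sketch of just the stepping arithmetic (grid dimensions invented; the editability check and retry loop are omitted):

def next_cell(r, c, mode, row_count):
    if mode == 'right':   # 'access' keying: move along the row
        return r, c + 1
    if mode == 'down':    # 'excel' keying: move down, wrap to next column
        r += 1
        if r >= row_count:
            return 0, c + 1
        return r, c
    raise ValueError(mode)

print(next_cell(1, 0, 'down', row_count=3))  # (2, 0)
print(next_cell(2, 0, 'down', row_count=3))  # (0, 1) wrapped to next column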
ext-all-extend.js
Reader({ root: "rows", totalProperty: 'totalcount' }, [ {name: "menuId"}, {name: "name"} ]) }); ds.on("load", function(){ var totalcount = ds.getCount(); for (var i = 0; i < totalcount; i++) { var menuId = ds.getAt(i).data.menuId; var sArray = menuId.split("."); var funcId = sArray[1]; if (toolbar.items.get(funcId)) { toolbar.items.get(funcId).show(); } } }, this); ds.load(); }; /* * author lijz * * 重写Ext.data.JsonReader * 失去session时跳转页面 */ Ext.override(Ext.data.JsonReader, { read : function(response){ var json = response.responseText; if(json.indexOf("<html>") == 2){ // window.top.location = Ext.haode.contextPath + "/index.jsp"; return; } var o = eval("(" + json + ")"); if (!o) { throw {message: "JsonReader.read: Json object not found"}; } if (o.metaData) { delete this.ef; this.meta = o.metaData; this.recordType = Ext.data.Record.create(o.metaData.fields); this.onMetaChange(this.meta, this.recordType, o); } return this.readRecords(o); } }) // 改变表格单元格选择的样式 Ext.override(Ext.grid.GridView, { onCellSelect : function(row, col){ var cell = this.getCell(row, col); if(cell){ this.fly(cell).addClass("x-grid3-cell-selected"); } if(row){ this.addRowClass(row, "x-grid3-row-selected"); } }, onCellDeselect : function(row, col){ var cell = this.getCell(row, col); if(cell){ this.fly(cell).removeClass("x-grid3-cell-selected"); } if(row){ this.removeRowClass(row, "x-grid3-row-selected"); } }, handleHdDown : function(e, target) { if (Ext.fly(target).hasClass('x-grid3-hd-btn')) { e.stopEvent(); var colModel = this.cm, header = this.findHeaderCell(target), index = this.getCellIndex(header), sortable = colModel.isSortable(index), menu = this.hmenu, menuItems = menu.items, menuCls = this.headerMenuOpenCls; this.hdCtxIndex = index; Ext.fly(header).addClass(menuCls); menuItems.get('asc').setDisabled(!sortable); menuItems.get('desc').setDisabled(!sortable); menuItems.get('asc').setVisible(sortable); menuItems.get('desc').setVisible(sortable); menu.on('hide', function() { Ext.fly(header).removeClass(menuCls); }, this, {single:true}); menu.show(target, 'tl-bl?'); } } }); Ext.override(Ext.form.TextArea, { fireKey : function(e){ //if(e.isSpecialKey() && (this.enterIsSpecial || (e.hasModifier()))){ this.fireEvent("specialkey", this, e); //} } }); Ext.override(Ext.grid.ColumnModel, { //获得列是否可编辑 getEditable : function(col){ return this.config[col].editable; } }); //改变表格单元格选择的样式 Ext.override(Ext.grid.CellSelectionModel, { onEditorKey: function(field, e){ if(e.getKey() == e.TAB){ this.handleKeyDown(e); return; } else if (e.getKey() == e.ENTER) { //回车跳到下个单元格 var s = this.selection if (s) { var f = "right" if (this.grid.keyModel == 'access') { f = "right"; } else if (this.grid.keyModel == 'excel') { f = "down"; } var cell = s.cell; // currently selected cell var r = cell[0]; // current row var c = cell[1]; this.foucsNextCell(r, c, f); } } }, //跳转下个单元格 foucsNextCell : function(r, c, f){ var newR = r; var newC = c; var newF = 1; if (f == "right") { newC = c + 1; } else if (f == "left") { newC = c - 1; newF = -1; } else if (f == "up"){ newR = r - 1; newF = -1; } else if (f == "down") { var cm = this.grid.colModel, clen = cm.getColumnCount(), ds = this.grid.store, rlen = ds.getCount(); newR = r + 1; if (newR >= rlen) { newR = 0; newC = c + 1; } } var newCell = this.grid.walkCells( newR, newC, newF, this.grid.isEditor && this.grid.editing ? 
this.acceptsNav : this.isSelectable, // *** handle tabbing while editorgrid is in edit mode this ); if(newCell){ // *** reassign r & c variables to newly-selected cell's row and column r = newCell[0]; c = newCell[1]; var g = this.grid; if (g.getColumnModel().isCellEditable(c, r)) { window.setTimeout(function(){ g.startEditing(r, c); }, 100); } else { this.foucsNextCell(r, c, f); } } } }); /** * 使GridPanel单元格中文字可以复制 */ /* { if (!Ext.grid.GridView.prototype.templates) { Ext.grid.GridView.prototype.templates = {}; } Ext.grid.GridView.prototype.templates.cell = new Ext.Template( ' <td class="x-grid3-col x-grid3-cell x-grid3-td-{id} x-selectable {css}" style="{style}" tabIndex="0" {cellAttr}>' , ' <div class="x-grid3-cell-inner x-grid3-col-{id}" {attr}>{value} </div>' , ' </td>' ); } */ //Ext.override(Ext.form.DateField, { // // fullParseDate : function(value) { // var v = ""; // if (value != "") { // v = value.replaceAll("-", ""); // v = v.replaceAll("/", ""); // v = v.replaceAll("\\.", ""); // v = v.replaceAll(" ", ""); // v = v.replaceAll(":", ""); // for (var i = v.length + 1; i <= 14; i++) { // if (i == 6 || i == 8) { // v += "1"; // } else { // v += "0"; // } // } // } // // // var sMonth = v.substr(4, 2); // var sDay = v.substr(6, 2); // if (sMonth > "12") { // alert("日期录入非法!"); // return null; // } // if (sMonth == '01' || sMonth == '03' || sMonth == '05' || sMonth == '07' || sMonth == '08' || sMonth == '10' || sMonth == '12') { // if (sDay > "31") { // alert("日期录入非法!"); // return null; // } // } else if (sMonth == '04' || sMonth == '06' || sMonth == '09' || sMonth == '11') { // if (sDay > "30") { // alert("日期录入非法!"); // return null; // } // } else if (sMonth == '02') { // if (sDay > "28") { // alert("日期录入非法!"); // return null; // } // } // // return Date.parseDate(v, 'YmdHis'); // }, // // beforeBlur : function(){ // var d = this.fullParseDate(this.getRawValue()); // if (d) { // var value = d.format('Y-m-d H:i:s'); // this.value = value; // this.setValue(value); // var rawValue = d.format(this.format); // this.setRawValue(rawValue); // } else { // this.value = ""; // this.setValue(""); // this.setRawValue(""); // } // } //}); String.prototype.replaceAll = function(s1, s2) { return this.replace(new RegExp(s1,"gm"), s2); }; /**
on=getSubFuncs&menuId=" + menuId}), reader: new Ext.data.Json
conditional_block
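The `Ext.data.JsonReader.read` override in this row guards against an expired session by checking whether the server answered with an HTML login page instead of JSON (`json.indexOf("<html>") == 2`) before `eval`ing the body; the `requestexception` listener performs the matching redirect for `type == 'sessiontimeout'`. A hedged Rust sketch of the same classification step, using only the standard library; the exact two-character prefix the JS relies on is treated here as generic leading whitespace:

```rust
enum ReadOutcome<'a> {
    SessionTimeout,  // server sent the HTML login page: redirect
    Json(&'a str),   // hand the body to the JSON decoder
}

fn classify_response(body: &str) -> ReadOutcome<'_> {
    if body.trim_start().starts_with("<html>") {
        ReadOutcome::SessionTimeout
    } else {
        ReadOutcome::Json(body)
    }
}

fn main() {
    assert!(matches!(classify_response("  <html><body>login"), ReadOutcome::SessionTimeout));
    assert!(matches!(classify_response(r#"{"rows":[]}"#), ReadOutcome::Json(_)));
}
```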
ext-all-extend.js
menu.items, menuCls = this.headerMenuOpenCls; this.hdCtxIndex = index; Ext.fly(header).addClass(menuCls); menuItems.get('asc').setDisabled(!sortable); menuItems.get('desc').setDisabled(!sortable); menuItems.get('asc').setVisible(sortable); menuItems.get('desc').setVisible(sortable); menu.on('hide', function() { Ext.fly(header).removeClass(menuCls); }, this, {single:true}); menu.show(target, 'tl-bl?'); } } }); Ext.override(Ext.form.TextArea, { fireKey : function(e){ //if(e.isSpecialKey() && (this.enterIsSpecial || (e.hasModifier()))){ this.fireEvent("specialkey", this, e); //} } }); Ext.override(Ext.grid.ColumnModel, { //获得列是否可编辑 getEditable : function(col){ return this.config[col].editable; } }); //改变表格单元格选择的样式 Ext.override(Ext.grid.CellSelectionModel, { onEditorKey: function(field, e){ if(e.getKey() == e.TAB){ this.handleKeyDown(e); return; } else if (e.getKey() == e.ENTER) { //回车跳到下个单元格 var s = this.selection if (s) { var f = "right" if (this.grid.keyModel == 'access') { f = "right"; } else if (this.grid.keyModel == 'excel') { f = "down"; } var cell = s.cell; // currently selected cell var r = cell[0]; // current row var c = cell[1]; this.foucsNextCell(r, c, f); } } }, //跳转下个单元格 foucsNextCell : function(r, c, f){ var newR = r; var newC = c; var newF = 1; if (f == "right") { newC = c + 1; } else if (f == "left") { newC = c - 1; newF = -1; } else if (f == "up"){ newR = r - 1; newF = -1; } else if (f == "down") { var cm = this.grid.colModel, clen = cm.getColumnCount(), ds = this.grid.store, rlen = ds.getCount(); newR = r + 1; if (newR >= rlen) { newR = 0; newC = c + 1; } } var newCell = this.grid.walkCells( newR, newC, newF, this.grid.isEditor && this.grid.editing ? this.acceptsNav : this.isSelectable, // *** handle tabbing while editorgrid is in edit mode this ); if(newCell){ // *** reassign r & c variables to newly-selected cell's row and column r = newCell[0]; c = newCell[1]; var g = this.grid; if (g.getColumnModel().isCellEditable(c, r)) { window.setTimeout(function(){ g.startEditing(r, c); }, 100); } else { this.foucsNextCell(r, c, f); } } } }); /** * 使GridPanel单元格中文字可以复制 */ /* { if (!Ext.grid.GridView.prototype.templates) { Ext.grid.GridView.prototype.templates = {}; } Ext.grid.GridView.prototype.templates.cell = new Ext.Template( ' <td class="x-grid3-col x-grid3-cell x-grid3-td-{id} x-selectable {css}" style="{style}" tabIndex="0" {cellAttr}>' , ' <div class="x-grid3-cell-inner x-grid3-col-{id}" {attr}>{value} </div>' , ' </td>' ); } */ //Ext.override(Ext.form.DateField, { // // fullParseDate : function(value) { // var v = ""; // if (value != "") { // v = value.replaceAll("-", ""); // v = v.replaceAll("/", ""); // v = v.replaceAll("\\.", ""); // v = v.replaceAll(" ", ""); // v = v.replaceAll(":", ""); // for (var i = v.length + 1; i <= 14; i++) { // if (i == 6 || i == 8) { // v += "1"; // } else { // v += "0"; // } // } // } // // // var sMonth = v.substr(4, 2); // var sDay = v.substr(6, 2); // if (sMonth > "12") { // alert("日期录入非法!"); // return null; // } // if (sMonth == '01' || sMonth == '03' || sMonth == '05' || sMonth == '07' || sMonth == '08' || sMonth == '10' || sMonth == '12') { // if (sDay > "31") { // alert("日期录入非法!"); // return null; // } // } else if (sMonth == '04' || sMonth == '06' || sMonth == '09' || sMonth == '11') { // if (sDay > "30") { // alert("日期录入非法!"); // return null; // } // } else if (sMonth == '02') { // if (sDay > "28") { // alert("日期录入非法!"); // return null; // } // } // // return Date.parseDate(v, 'YmdHis'); // }, // // beforeBlur : function(){ // var d = 
this.fullParseDate(this.getRawValue()); // if (d) { // var value = d.format('Y-m-d H:i:s'); // this.value = value; // this.setValue(value); // var rawValue = d.format(this.format); // this.setRawValue(rawValue); // } else { // this.value = ""; // this.setValue(""); // this.setRawValue(""); // } // } //}); String.prototype.replaceAll = function(s1, s2) { return this.replace(new RegExp(s1,"gm"), s2); }; /** * @author yaolf * @date 2011-7-21 * @param {Object} key 搜索的关键字 * * 为树添加搜索方法,如果没有配置searchConfig,默认搜索的是树节点的Text属性 * searchConfig有attributes属性,值为数组,表示要在哪些属性中进行搜索,例如: * searchConfig: { * attributes: ['text', 'name'] * } */ Ext.override(Ext.tree.TreePanel, { search: function(key){ var se = this.searchEngine; if (se && se.key === key) { var fn = se.foundNodes; for (var i = 0; i < fn.length; i++) { if (se.activeNode === fn[i].id) { var nn = ((i == fn.length - 1) ? fn[0] : fn[++i]); se.activeNode = nn.id; break; } } this.searchEngine.activeNode = se.activeNode; } else { se = this.searchEngine = this.createSearchEngine(key); } var activeNode = this.getNodeById(se.activeNode); if (activeNode) { try { this.expandPath(activeNode.getPath("id"), "id", function(s, o){ if (s) { o.select(); } }); } catch(err) { } } }, searchMatchNodes: function(key){ var attrs = this.searchConfig.attributes || ['text']; function contains(n, k) { var regexp = new RegExp(k, "i"); for (var i = 0; i < attrs.length; i++) { var value = new String(n.attributes[attrs[i]]); if (value && value.search(regexp) != -1) { return true; } } return false; } var foundNodes = []; this.root.cascade(function(n){ if (contains(n, key)) { foundNodes.push(n); } }); return foundNodes; }, createSearchEngine: function(key){ var sc = this.searchConfig = this.searchConfig || {}, se = {}; se.searchConfig = sc; se.key = key; var fn = this.searchMatchNodes(key); se.fou
ndNodes = fn; se.activeNode = fn.length > 0 ? fn[0].id : undefined; return se; } }); Ext.Ajax.on('requestexception', function(conn, response, options){ try { var error = Ext.decode(response.responseText); if (!error)
identifier_body
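`onEditorKey`/`foucsNextCell` (repeated in this row) give the editor grid spreadsheet-style navigation: Enter moves right when `keyModel` is 'access' and down when it is 'excel', and stepping below the last row wraps to row 0 of the next column. A small Rust sketch of just that coordinate arithmetic; the real code additionally delegates bounds handling to `walkCells` and skips non-editable cells:

```rust
#[derive(Clone, Copy)]
enum KeyModel { Access, Excel } // 'access' -> move right, 'excel' -> move down

/// Cell focused after Enter in a `rows` x `cols` grid, wrapping past the
/// last row to row 0 of the next column, as the "down" branch above does.
fn next_cell(row: usize, col: usize, rows: usize, cols: usize, model: KeyModel) -> (usize, usize) {
    match model {
        KeyModel::Access => (row, (col + 1) % cols),
        KeyModel::Excel if row + 1 >= rows => (0, (col + 1) % cols),
        KeyModel::Excel => (row + 1, col),
    }
}

fn main() {
    assert_eq!(next_cell(2, 1, 3, 4, KeyModel::Excel), (0, 2));  // wrapped down
    assert_eq!(next_cell(2, 1, 3, 4, KeyModel::Access), (2, 2)); // moved right
}
```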
ext-all-extend.js
('x-grid3-hd-btn')) { e.stopEvent(); var colModel = this.cm, header = this.findHeaderCell(target), index = this.getCellIndex(header), sortable = colModel.isSortable(index), menu = this.hmenu, menuItems = menu.items, menuCls = this.headerMenuOpenCls; this.hdCtxIndex = index; Ext.fly(header).addClass(menuCls); menuItems.get('asc').setDisabled(!sortable); menuItems.get('desc').setDisabled(!sortable); menuItems.get('asc').setVisible(sortable); menuItems.get('desc').setVisible(sortable); menu.on('hide', function() { Ext.fly(header).removeClass(menuCls); }, this, {single:true}); menu.show(target, 'tl-bl?'); } } }); Ext.override(Ext.form.TextArea, { fireKey : function(e){ //if(e.isSpecialKey() && (this.enterIsSpecial || (e.hasModifier()))){ this.fireEvent("specialkey", this, e); //} } }); Ext.override(Ext.grid.ColumnModel, { //获得列是否可编辑 getEditable : function(col){ return this.config[col].editable; } }); //改变表格单元格选择的样式 Ext.override(Ext.grid.CellSelectionModel, { onEditorKey: function(field, e){ if(e.getKey() == e.TAB){ this.handleKeyDown(e); return; } else if (e.getKey() == e.ENTER) { //回车跳到下个单元格 var s = this.selection if (s) { var f = "right" if (this.grid.keyModel == 'access') { f = "right"; } else if (this.grid.keyModel == 'excel') { f = "down"; } var cell = s.cell; // currently selected cell var r = cell[0]; // current row var c = cell[1]; this.foucsNextCell(r, c, f); } } }, //跳转下个单元格 foucsNextCell : function(r, c, f){ var newR = r; var newC = c; var newF = 1; if (f == "right") { newC = c + 1; } else if (f == "left") { newC = c - 1; newF = -1; } else if (f == "up"){ newR = r - 1; newF = -1; } else if (f == "down") { var cm = this.grid.colModel, clen = cm.getColumnCount(), ds = this.grid.store, rlen = ds.getCount(); newR = r + 1; if (newR >= rlen) { newR = 0; newC = c + 1; } } var newCell = this.grid.walkCells( newR, newC, newF, this.grid.isEditor && this.grid.editing ? 
this.acceptsNav : this.isSelectable, // *** handle tabbing while editorgrid is in edit mode this ); if(newCell){ // *** reassign r & c variables to newly-selected cell's row and column r = newCell[0]; c = newCell[1]; var g = this.grid; if (g.getColumnModel().isCellEditable(c, r)) { window.setTimeout(function(){ g.startEditing(r, c); }, 100); } else { this.foucsNextCell(r, c, f); } } } }); /** * 使GridPanel单元格中文字可以复制 */ /* { if (!Ext.grid.GridView.prototype.templates) { Ext.grid.GridView.prototype.templates = {}; } Ext.grid.GridView.prototype.templates.cell = new Ext.Template( ' <td class="x-grid3-col x-grid3-cell x-grid3-td-{id} x-selectable {css}" style="{style}" tabIndex="0" {cellAttr}>' , ' <div class="x-grid3-cell-inner x-grid3-col-{id}" {attr}>{value} </div>' , ' </td>' ); } */ //Ext.override(Ext.form.DateField, { // // fullParseDate : function(value) { // var v = ""; // if (value != "") { // v = value.replaceAll("-", ""); // v = v.replaceAll("/", ""); // v = v.replaceAll("\\.", ""); // v = v.replaceAll(" ", ""); // v = v.replaceAll(":", ""); // for (var i = v.length + 1; i <= 14; i++) { // if (i == 6 || i == 8) { // v += "1"; // } else { // v += "0"; // } // } // } // // // var sMonth = v.substr(4, 2); // var sDay = v.substr(6, 2); // if (sMonth > "12") { // alert("日期录入非法!"); // return null; // } // if (sMonth == '01' || sMonth == '03' || sMonth == '05' || sMonth == '07' || sMonth == '08' || sMonth == '10' || sMonth == '12') { // if (sDay > "31") { // alert("日期录入非法!"); // return null; // } // } else if (sMonth == '04' || sMonth == '06' || sMonth == '09' || sMonth == '11') { // if (sDay > "30") { // alert("日期录入非法!"); // return null; // } // } else if (sMonth == '02') { // if (sDay > "28") { // alert("日期录入非法!"); // return null; // } // } // // return Date.parseDate(v, 'YmdHis'); // }, // // beforeBlur : function(){ // var d = this.fullParseDate(this.getRawValue()); // if (d) { // var value = d.format('Y-m-d H:i:s'); // this.value = value; // this.setValue(value); // var rawValue = d.format(this.format); // this.setRawValue(rawValue); // } else { // this.value = ""; // this.setValue(""); // this.setRawValue(""); // } // } //}); String.prototype.replaceAll = function(s1, s2) { return this.replace(new RegExp(s1,"gm"), s2); }; /** * @author yaolf * @date 2011-7-21 * @param {Object} key 搜索的关键字 * * 为树添加搜索方法,如果没有配置searchConfig,默认搜索的是树节点的Text属性 * searchConfig有attributes属性,值为数组,表示要在哪些属性中进行搜索,例如: * searchConfig: { * attributes: ['text', 'name'] * } */ Ext.override(Ext.tree.TreePanel, { search: function(key){ var se = this.searchEngine; if (se && se.key === key) { var fn = se.foundNodes; for (var i = 0; i < fn.length; i++) { if (se.activeNode === fn[i].id) { var nn = ((i == fn.length - 1) ? 
fn[0] : fn[++i]); se.activeNode = nn.id; break; } } this.searchEngine.activeNode = se.activeNode; } else { se = this.searchEngine = this.createSearchEngine(key); } var activeNode = this.getNodeById(se.activeNode); if (activeNode) { try { this.expandPath(activeNode.getPath("id"), "id", function(s, o){ if (s) { o.select(); } }); } catch(err) { } } }, searchMatchNodes: function(key){ var attrs = this.searchConfig.attributes || ['text']; function contains(n, k) { var regexp = new RegExp(k, "i"); for (var i = 0; i < attrs.length; i++) { var value = new String(n.attributes[attrs[i]]); if (value && value.search(regexp) != -1) { return true; } } return false; } var foundNodes = []; this.root.cascade(function(n){ if (contains(n, key)) { foundNodes.push(n); } }); return foundNodes; }, createSearchEngine: function(key){ var sc = this.searchConfig = this.searchConfig || {}, se = {}; se.searchConfig = sc; se.key = key; var fn = this.searchMatchNodes(
key);
identifier_name
corebuilder.rs
, minimum_separation: 100, read_distance: 8000, write_distance: 8000, separation: Separation::Random(100), warriors: Vec::new(), logger: None, } } } impl CoreBuilder { /// Creates a new instance of CoreBuilder with default parameters and no warriors. pub fn new() -> Self { CoreBuilder::default() } /// Sets the core's size. Core size is the number of instructions which make up the core /// during the battle. pub fn core_size(&mut self, core_size: usize) -> &mut Self { self.core_size = core_size; self } /// Sets the number of cycles that the match can last for before it is declared a tie. pub fn cycles_before_tie(&mut self, cycles_before_tie: usize) -> &mut Self { self.cycles_before_tie = cycles_before_tie; self } /// Sets the core's initial instruction. The initial instruction is that instruction which is preloaded /// into core prior to loading warriors. In addition to loading /// an instruction such as "DAT #0, #0" into all of core, the /// initial instruction could be set to `Random`, meaning core /// instructions are filled with randomly generated instructions. pub fn initial_instruction(&mut self, initial_instruction: InitialInstruction) -> &mut Self { self.initial_instruction = initial_instruction; self } /// The maximum number of instructions allowed per warrior. pub fn instruction_limit(&mut self, instruction_limit: usize) -> &mut Self { self.instruction_limit = instruction_limit; self } /// Each warrior can spawn multiple additional tasks. This variable sets the maximum /// number of tasks allowed per warrior. In other words, this is the size of each warrior's task queue. pub fn maximum_number_of_tasks(&mut self, maximum_number_of_tasks: usize) -> &mut Self { self.maximum_number_of_tasks = maximum_number_of_tasks; self } /// The minimum number of instructions from the first instruction /// of one warrior to the first instruction of the next warrior. pub fn minimum_separation(&mut self, minimum_separation: usize) -> &mut Self { self.minimum_separation = minimum_separation; // Need to put some limit on this related to number of warriors. self } /// This is the range available for warriors to read information /// from core. Attempts to read outside the limits of this range /// result in reading within the local readable range. The range /// is centered on the current instruction. Thus, a range of /// 500 limits reading to offsets of (-249 -> +250) from the /// currently executing instruction. The read limit can therefore /// be considered a mini-core within core. An attempt to read /// location PC+251 reads location PC-249 instead. An attempt to /// read location PC+500 reads location PC instead. /// /// Read distance must be a factor of core size, otherwise the /// above defined behaviour is not guaranteed. pub fn read_distance(&mut self, read_distance: usize) -> &mut Self { self.read_distance = read_distance; self } /// The number of instructions from the first instruction of one
self.separation = separation; self } /// This is the range available for warriors to write information /// to core. Attempts to write outside the limits of this range /// result in writing within the local writable range. The range /// is centered on the current instruction. Thus, a range of 500 /// limits writing to offsets of (-249 -> +250) from the /// currently executing instruction. The write limit can /// therefore be considered a mini-core within core. An attempt /// to write location PC+251 writes to location PC-249 instead. /// An attempt to write to location PC+500 writes to location PC /// instead. /// /// Write distance must be a factor of core size, otherwise the /// above defined behaviour is not guaranteed. pub fn write_distance(&mut self, write_distance: usize) -> &mut Self { self.write_distance = write_distance; self } pub fn load_warriors(&mut self, warriors: &[Warrior]) -> Result<&mut Self, CoreError> { for warrior in warriors { if warrior.len() > self.instruction_limit { return Err(CoreError::WarriorTooLong( warrior.len(), self.instruction_limit, warrior.metadata.name().unwrap_or("Unnamed").to_owned(), )); } if warrior.is_empty() { return Err(CoreError::EmptyWarrior( warrior.metadata.name().unwrap_or("Unnamed").to_owned(), )); }; } self.warriors = warriors.to_vec(); Ok(self) } /// Use a `Logger` to log the battle's output. pub fn log_with(&mut self, logger: Box<dyn Logger>) -> &mut Self { self.logger = Some(logger); self } /// Build the core, consuming the `CoreBuilder` and returning a [`Core`](../struct.Core.html) struct. pub fn build(&self) -> Result<Core, CoreError> { let CoreBuilder { initial_instruction, separation, warriors, maximum_number_of_tasks, core_size, instruction_limit, .. } = self; let mut core_instructions = vec![ CoreInstruction::from_instruction( initial_instruction.clone().extract(), *core_size ); *core_size ]; let separation = separation.clone(); let mut warrior_offsets: Vec<usize> = warriors.iter().map(|w| w.starts_at_line).collect(); match separation { Separation::Random(min_separation) => { let offsets = random_offsets(&warriors, min_separation, *instruction_limit, *core_size); for (i, (offset, warrior)) in offsets.iter().enumerate() { let mut ptr = *offset; warrior_offsets[i] = Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size); for instruction in &warrior.instructions { core_instructions[ptr] = CoreInstruction::from_instruction(instruction.clone(), *core_size); ptr = Core::fold(ptr + 1, *core_size, *core_size); } } } Separation::Fixed(separation) => { let mut ptr = 0_usize; for (i, warrior) in warriors.iter().enumerate() { warrior_offsets[i] = Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size); for instruction in &warrior.instructions { core_instructions[ptr] = CoreInstruction::from_instruction(instruction.clone(), *core_size); ptr = Core::fold(ptr + 1, *core_size, *core_size); } ptr = Core::fold(ptr + separation, *core_size, *core_size); } } }; let task_queues = warrior_offsets .iter() .zip(warriors) .map(|(&offset, warrior)| { let mut v = VecDeque::with_capacity(*maximum_number_of_tasks); let offset = Core::fold(offset, *core_size, *core_size); v.push_back(offset); (warrior, v) }) .collect(); Ok(Core { core: self, instructions: core_instructions, task_queues, current_queue: 0, cycle_count: 0, }) } } /// The separation between warriors at the start of a match. /// /// The number of instructions from the first instruction of one warrior to the first instruction of the next warrior. 
/// If a core's separation is `Random`, separations will be chosen randomly from the set of numbers larger than the core's minimum separation. #[derive(Debug, Clone)] pub enum Separation { Random(usize), Fixed(usize), } /// The value to which the core's memory addresses are initialised /// at the beginning of the match. /// /// The initial instruction is that instruction which is preloaded /// into core prior to loading warriors. If set to `Random`, core /// instructions are filled with randomly generated instructions. #[derive(Debug, Clone)] pub enum InitialInstruction { Random, Fixed(Instruction), } impl InitialInstruction { /// Extract the initial instruction if it's `Fixed`, or get a random `Instruction` if it's `Random`. pub fn extract(self) -> Instruction { match self { Self::Random => todo!(), Self::Fixed(instr) => instr, } } } fn random_offsets( warriors: &[Warrior], minimum_separation: usize, instruction_limit: usize, core_size: usize, ) -> Vec<(usize, &Warrior)> { let mut offsets: Vec<(usize, &Warrior)> = Vec::new(); for warrior in warriors {
/// warrior to the first instruction of the next warrior. /// Separation can be set to `Random`, meaning separations will be /// chosen randomly from those larger than the minimum separation. pub fn separation(&mut self, separation: Separation) -> &mut Self {
random_line_split
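The `CoreBuilder` row above is a conventional chained-setter builder: every setter returns `&mut Self`, `load_warriors` validates up front, and `build` assembles the `Core`. A hedged usage sketch, assuming this module's types are in scope; `parsed_warriors` stands in for the crate's warrior parser, which is not part of this file, and the numeric values are illustrative:

```rust
// Assumes CoreBuilder, Separation, Warrior and CoreError from this module.
fn run(parsed_warriors: &[Warrior]) -> Result<(), CoreError> {
    let mut builder = CoreBuilder::new();
    builder
        .core_size(8000)                    // illustrative; matches the 8000 defaults above
        .instruction_limit(100)
        .separation(Separation::Fixed(100))
        .load_warriors(parsed_warriors)?;   // validates length and non-emptiness
    let _core = builder.build()?;           // the Core borrows `builder`
    Ok(())
}
```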
corebuilder.rs
minimum_separation: 100, read_distance: 8000, write_distance: 8000, separation: Separation::Random(100), warriors: Vec::new(), logger: None, } } } impl CoreBuilder { /// Creates a new instance of CoreBuilder with default parameters and no warriors. pub fn new() -> Self { CoreBuilder::default() } /// Sets the core's size. Core size is the number of instructions which make up the core /// during the battle. pub fn core_size(&mut self, core_size: usize) -> &mut Self { self.core_size = core_size; self } /// Sets the number of cycles that the match can last for before it is declared a tie. pub fn cycles_before_tie(&mut self, cycles_before_tie: usize) -> &mut Self { self.cycles_before_tie = cycles_before_tie; self } /// Sets the core's initial instruction. The initial instruction is that instruction which is preloaded /// into core prior to loading warriors. In addition to loading /// an instruction such as "DAT #0, #0" into all of core, the /// initial instruction could be set to `Random`, meaning core /// instructions are filled with randomly generated instructions. pub fn initial_instruction(&mut self, initial_instruction: InitialInstruction) -> &mut Self { self.initial_instruction = initial_instruction; self } /// The maximum number of instructions allowed per warrior. pub fn instruction_limit(&mut self, instruction_limit: usize) -> &mut Self { self.instruction_limit = instruction_limit; self } /// Each warrior can spawn multiple additional tasks. This variable sets the maximum /// number of tasks allowed per warrior. In other words, this is the size of each warrior's task queue. pub fn maximum_number_of_tasks(&mut self, maximum_number_of_tasks: usize) -> &mut Self { self.maximum_number_of_tasks = maximum_number_of_tasks; self } /// The minimum number of instructions from the first instruction /// of one warrior to the first instruction of the next warrior. pub fn minimum_separation(&mut self, minimum_separation: usize) -> &mut Self { self.minimum_separation = minimum_separation; // Need to put some limit on this related to number of warriors. self } /// This is the range available for warriors to read information /// from core. Attempts to read outside the limits of this range /// result in reading within the local readable range. The range /// is centered on the current instruction. Thus, a range of /// 500 limits reading to offsets of (-249 -> +250) from the /// currently executing instruction. The read limit can therefore /// be considered a mini-core within core. An attempt to read /// location PC+251 reads location PC-249 instead. An attempt to /// read location PC+500 reads location PC instead. /// /// Read distance must be a factor of core size, otherwise the /// above defined behaviour is not guaranteed. pub fn read_distance(&mut self, read_distance: usize) -> &mut Self { self.read_distance = read_distance; self } /// The number of instructions from the first instruction of one /// warrior to the first instruction of the next warrior. /// Separation can be set to `Random`, meaning separations will be /// chosen randomly from those larger than the minimum separation. pub fn separation(&mut self, separation: Separation) -> &mut Self { self.separation = separation; self } /// This is the range available for warriors to write information /// to core. Attempts to write outside the limits of this range /// result in writing within the local writable range. The range /// is centered on the current instruction. 
Thus, a range of 500 /// limits writing to offsets of (-249 -> +250) from the /// currently executing instruction. The write limit can /// therefore be considered a mini-core within core. An attempt /// to write location PC+251 writes to location PC-249 instead. /// An attempt to write to location PC+500 writes to location PC /// instead. /// /// Write distance must be a factor of core size, otherwise the /// above defined behaviour is not guaranteed. pub fn write_distance(&mut self, write_distance: usize) -> &mut Self { self.write_distance = write_distance; self } pub fn
(&mut self, warriors: &[Warrior]) -> Result<&mut Self, CoreError> { for warrior in warriors { if warrior.len() > self.instruction_limit { return Err(CoreError::WarriorTooLong( warrior.len(), self.instruction_limit, warrior.metadata.name().unwrap_or("Unnamed").to_owned(), )); } if warrior.is_empty() { return Err(CoreError::EmptyWarrior( warrior.metadata.name().unwrap_or("Unnamed").to_owned(), )); }; } self.warriors = warriors.to_vec(); Ok(self) } /// Use a `Logger` to log the battle's output. pub fn log_with(&mut self, logger: Box<dyn Logger>) -> &mut Self { self.logger = Some(logger); self } /// Build the core, consuming the `CoreBuilder` and returning a [`Core`](../struct.Core.html) struct. pub fn build(&self) -> Result<Core, CoreError> { let CoreBuilder { initial_instruction, separation, warriors, maximum_number_of_tasks, core_size, instruction_limit, .. } = self; let mut core_instructions = vec![ CoreInstruction::from_instruction( initial_instruction.clone().extract(), *core_size ); *core_size ]; let separation = separation.clone(); let mut warrior_offsets: Vec<usize> = warriors.iter().map(|w| w.starts_at_line).collect(); match separation { Separation::Random(min_separation) => { let offsets = random_offsets(&warriors, min_separation, *instruction_limit, *core_size); for (i, (offset, warrior)) in offsets.iter().enumerate() { let mut ptr = *offset; warrior_offsets[i] = Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size); for instruction in &warrior.instructions { core_instructions[ptr] = CoreInstruction::from_instruction(instruction.clone(), *core_size); ptr = Core::fold(ptr + 1, *core_size, *core_size); } } } Separation::Fixed(separation) => { let mut ptr = 0_usize; for (i, warrior) in warriors.iter().enumerate() { warrior_offsets[i] = Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size); for instruction in &warrior.instructions { core_instructions[ptr] = CoreInstruction::from_instruction(instruction.clone(), *core_size); ptr = Core::fold(ptr + 1, *core_size, *core_size); } ptr = Core::fold(ptr + separation, *core_size, *core_size); } } }; let task_queues = warrior_offsets .iter() .zip(warriors) .map(|(&offset, warrior)| { let mut v = VecDeque::with_capacity(*maximum_number_of_tasks); let offset = Core::fold(offset, *core_size, *core_size); v.push_back(offset); (warrior, v) }) .collect(); Ok(Core { core: self, instructions: core_instructions, task_queues, current_queue: 0, cycle_count: 0, }) } } /// The separation between warriors at the start of a match. /// /// The number of instructions from the first instruction of one warrior to the first instruction of the next warrior. /// If a core's separation is `Random`, separations will be chosen randomly from the set of numbers larger than the core's minimum separation. #[derive(Debug, Clone)] pub enum Separation { Random(usize), Fixed(usize), } /// The value to which the core's memory addresses are initialised /// at the beginning of the match. /// /// The initial instruction is that instruction which is preloaded /// into core prior to loading warriors. If set to `Random`, core /// instructions are filled with randomly generated instructions. #[derive(Debug, Clone)] pub enum InitialInstruction { Random, Fixed(Instruction), } impl InitialInstruction { /// Extract the initial instruction if it's `Fixed`, or get a random `Instruction` if it's `Random`. 
pub fn extract(self) -> Instruction { match self { Self::Random => todo!(), Self::Fixed(instr) => instr, } } } fn random_offsets( warriors: &[Warrior], minimum_separation: usize, instruction_limit: usize, core_size: usize, ) -> Vec<(usize, &Warrior)> { let mut offsets: Vec<(usize, &Warrior)> = Vec::new(); for warrior in warriors
load_warriors
identifier_name
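The `read_distance`/`write_distance` doc comments describe a mini-core: offsets fold into a `limit`-sized window centred on the current instruction, so with a limit of 500, PC+251 reads PC-249 and PC+500 reads PC. The file defers to `Core::fold(ptr, limit, core_size)`, whose body is not in this excerpt; the sketch below is one folding rule consistent with the documented examples, not necessarily the crate's exact implementation:

```rust
/// Reduce the offset into the window, then wrap anything past the
/// window's midpoint onto the "negative" side of the current instruction.
fn fold(ptr: usize, limit: usize, core_size: usize) -> usize {
    let mut res = ptr % limit;
    if res > limit / 2 {
        res += core_size - limit;
    }
    res
}

fn main() {
    let (core_size, limit) = (8000, 500);
    assert_eq!(fold(251, limit, core_size), core_size - 249); // PC+251 -> PC-249
    assert_eq!(fold(500, limit, core_size), 0);               // PC+500 -> PC
}
```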
corebuilder.rs
minimum_separation: 100, read_distance: 8000, write_distance: 8000, separation: Separation::Random(100), warriors: Vec::new(), logger: None, } } } impl CoreBuilder { /// Creates a new instance of CoreBuilder with default parameters and no warriors. pub fn new() -> Self { CoreBuilder::default() } /// Sets the core's size. Core size is the number of instructions which make up the core /// during the battle. pub fn core_size(&mut self, core_size: usize) -> &mut Self { self.core_size = core_size; self } /// Sets the number of cycles that the match can last for before it is declared a tie. pub fn cycles_before_tie(&mut self, cycles_before_tie: usize) -> &mut Self { self.cycles_before_tie = cycles_before_tie; self } /// Sets the core's initial instruction. The initial instruction is that instruction which is preloaded /// into core prior to loading warriors. In addition to loading /// an instruction such as "DAT #0, #0" into all of core, the /// initial instruction could be set to `Random`, meaning core /// instructions are filled with randomly generated instructions. pub fn initial_instruction(&mut self, initial_instruction: InitialInstruction) -> &mut Self { self.initial_instruction = initial_instruction; self } /// The maximum number of instructions allowed per warrior. pub fn instruction_limit(&mut self, instruction_limit: usize) -> &mut Self
/// Each warrior can spawn multiple additional tasks. This variable sets the maximum /// number of tasks allowed per warrior. In other words, this is the size of each warrior's task queue. pub fn maximum_number_of_tasks(&mut self, maximum_number_of_tasks: usize) -> &mut Self { self.maximum_number_of_tasks = maximum_number_of_tasks; self } /// The minimum number of instructions from the first instruction /// of one warrior to the first instruction of the next warrior. pub fn minimum_separation(&mut self, minimum_separation: usize) -> &mut Self { self.minimum_separation = minimum_separation; // Need to put some limit on this related to number of warriors. self } /// This is the range available for warriors to read information /// from core. Attempts to read outside the limits of this range /// result in reading within the local readable range. The range /// is centered on the current instruction. Thus, a range of /// 500 limits reading to offsets of (-249 -> +250) from the /// currently executing instruction. The read limit can therefore /// be considered a mini-core within core. An attempt to read /// location PC+251 reads location PC-249 instead. An attempt to /// read location PC+500 reads location PC instead. /// /// Read distance must be a factor of core size, otherwise the /// above defined behaviour is not guaranteed. pub fn read_distance(&mut self, read_distance: usize) -> &mut Self { self.read_distance = read_distance; self } /// The number of instructions from the first instruction of one /// warrior to the first instruction of the next warrior. /// Separation can be set to `Random`, meaning separations will be /// chosen randomly from those larger than the minimum separation. pub fn separation(&mut self, separation: Separation) -> &mut Self { self.separation = separation; self } /// This is the range available for warriors to write information /// to core. Attempts to write outside the limits of this range /// result in writing within the local writable range. The range /// is centered on the current instruction. Thus, a range of 500 /// limits writing to offsets of (-249 -> +250) from the /// currently executing instruction. The write limit can /// therefore be considered a mini-core within core. An attempt /// to write location PC+251 writes to location PC-249 instead. /// An attempt to write to location PC+500 writes to location PC /// instead. /// /// Write distance must be a factor of core size, otherwise the /// above defined behaviour is not guaranteed. pub fn write_distance(&mut self, write_distance: usize) -> &mut Self { self.write_distance = write_distance; self } pub fn load_warriors(&mut self, warriors: &[Warrior]) -> Result<&mut Self, CoreError> { for warrior in warriors { if warrior.len() > self.instruction_limit { return Err(CoreError::WarriorTooLong( warrior.len(), self.instruction_limit, warrior.metadata.name().unwrap_or("Unnamed").to_owned(), )); } if warrior.is_empty() { return Err(CoreError::EmptyWarrior( warrior.metadata.name().unwrap_or("Unnamed").to_owned(), )); }; } self.warriors = warriors.to_vec(); Ok(self) } /// Use a `Logger` to log the battle's output. pub fn log_with(&mut self, logger: Box<dyn Logger>) -> &mut Self { self.logger = Some(logger); self } /// Build the core, consuming the `CoreBuilder` and returning a [`Core`](../struct.Core.html) struct. pub fn build(&self) -> Result<Core, CoreError> { let CoreBuilder { initial_instruction, separation, warriors, maximum_number_of_tasks, core_size, instruction_limit, .. 
} = self; let mut core_instructions = vec![ CoreInstruction::from_instruction( initial_instruction.clone().extract(), *core_size ); *core_size ]; let separation = separation.clone(); let mut warrior_offsets: Vec<usize> = warriors.iter().map(|w| w.starts_at_line).collect(); match separation { Separation::Random(min_separation) => { let offsets = random_offsets(&warriors, min_separation, *instruction_limit, *core_size); for (i, (offset, warrior)) in offsets.iter().enumerate() { let mut ptr = *offset; warrior_offsets[i] = Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size); for instruction in &warrior.instructions { core_instructions[ptr] = CoreInstruction::from_instruction(instruction.clone(), *core_size); ptr = Core::fold(ptr + 1, *core_size, *core_size); } } } Separation::Fixed(separation) => { let mut ptr = 0_usize; for (i, warrior) in warriors.iter().enumerate() { warrior_offsets[i] = Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size); for instruction in &warrior.instructions { core_instructions[ptr] = CoreInstruction::from_instruction(instruction.clone(), *core_size); ptr = Core::fold(ptr + 1, *core_size, *core_size); } ptr = Core::fold(ptr + separation, *core_size, *core_size); } } }; let task_queues = warrior_offsets .iter() .zip(warriors) .map(|(&offset, warrior)| { let mut v = VecDeque::with_capacity(*maximum_number_of_tasks); let offset = Core::fold(offset, *core_size, *core_size); v.push_back(offset); (warrior, v) }) .collect(); Ok(Core { core: self, instructions: core_instructions, task_queues, current_queue: 0, cycle_count: 0, }) } } /// The separation between warriors at the start of a match. /// /// The number of instructions from the first instruction of one warrior to the first instruction of the next warrior. /// If a core's separation is `Random`, separations will be chosen randomly from the set of numbers larger than the core's minimum separation. #[derive(Debug, Clone)] pub enum Separation { Random(usize), Fixed(usize), } /// The value to which the core's memory addresses are initialised /// at the beginning of the match. /// /// The initial instruction is that instruction which is preloaded /// into core prior to loading warriors. If set to `Random`, core /// instructions are filled with randomly generated instructions. #[derive(Debug, Clone)] pub enum InitialInstruction { Random, Fixed(Instruction), } impl InitialInstruction { /// Extract the initial instruction if it's `Fixed`, or get a random `Instruction` if it's `Random`. pub fn extract(self) -> Instruction { match self { Self::Random => todo!(), Self::Fixed(instr) => instr, } } } fn random_offsets( warriors: &[Warrior], minimum_separation: usize, instruction_limit: usize, core_size: usize, ) -> Vec<(usize, &Warrior)> { let mut offsets: Vec<(usize, &Warrior)> = Vec::new(); for warrior in
{ self.instruction_limit = instruction_limit; self }
identifier_body
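In `build`, the `Separation::Fixed` branch loads each warrior instruction-by-instruction from `ptr` and then advances `ptr` by the separation, all wrapped modulo the core size, so warrior k starts at the running sum of the previous lengths plus k separations. A sketch of that start-offset arithmetic; it ignores the per-warrior `starts_at_line` adjustment the real code also applies:

```rust
/// Start offsets produced by packing warriors with a fixed separation,
/// mirroring the Separation::Fixed branch of `build`.
fn fixed_offsets(lengths: &[usize], separation: usize, core_size: usize) -> Vec<usize> {
    let mut ptr = 0usize;
    let mut offsets = Vec::with_capacity(lengths.len());
    for &len in lengths {
        offsets.push(ptr);
        ptr = (ptr + len + separation) % core_size; // next warrior's load point
    }
    offsets
}

fn main() {
    // Two 10-instruction warriors, 100 cells apart, in an 8000-cell core.
    assert_eq!(fixed_offsets(&[10, 10], 100, 8000), vec![0, 110]);
}
```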
corebuilder.rs
minimum_separation: 100, read_distance: 8000, write_distance: 8000, separation: Separation::Random(100), warriors: Vec::new(), logger: None, } } } impl CoreBuilder { /// Creates a new instance of CoreBuilder with default parameters and no warriors. pub fn new() -> Self { CoreBuilder::default() } /// Sets the core's size. Core size is the number of instructions which make up the core /// during the battle. pub fn core_size(&mut self, core_size: usize) -> &mut Self { self.core_size = core_size; self } /// Sets the number of cycles that the match can last for before it is declared a tie. pub fn cycles_before_tie(&mut self, cycles_before_tie: usize) -> &mut Self { self.cycles_before_tie = cycles_before_tie; self } /// Sets the core's initial instruction. The initial instruction is that instruction which is preloaded /// into core prior to loading warriors. In addition to loading /// an instruction such as "DAT #0, #0" into all of core, the /// initial instruction could be set to `Random`, meaning core /// instructions are filled with randomly generated instructions. pub fn initial_instruction(&mut self, initial_instruction: InitialInstruction) -> &mut Self { self.initial_instruction = initial_instruction; self } /// The maximum number of instructions allowed per warrior. pub fn instruction_limit(&mut self, instruction_limit: usize) -> &mut Self { self.instruction_limit = instruction_limit; self } /// Each warrior can spawn multiple additional tasks. This variable sets the maximum /// number of tasks allowed per warrior. In other words, this is the size of each warrior's task queue. pub fn maximum_number_of_tasks(&mut self, maximum_number_of_tasks: usize) -> &mut Self { self.maximum_number_of_tasks = maximum_number_of_tasks; self } /// The minimum number of instructions from the first instruction /// of one warrior to the first instruction of the next warrior. pub fn minimum_separation(&mut self, minimum_separation: usize) -> &mut Self { self.minimum_separation = minimum_separation; // Need to put some limit on this related to number of warriors. self } /// This is the range available for warriors to read information /// from core. Attempts to read outside the limits of this range /// result in reading within the local readable range. The range /// is centered on the current instruction. Thus, a range of /// 500 limits reading to offsets of (-249 -> +250) from the /// currently executing instruction. The read limit can therefore /// be considered a mini-core within core. An attempt to read /// location PC+251 reads location PC-249 instead. An attempt to /// read location PC+500 reads location PC instead. /// /// Read distance must be a factor of core size, otherwise the /// above defined behaviour is not guaranteed. pub fn read_distance(&mut self, read_distance: usize) -> &mut Self { self.read_distance = read_distance; self } /// The number of instructions from the first instruction of one /// warrior to the first instruction of the next warrior. /// Separation can be set to `Random`, meaning separations will be /// chosen randomly from those larger than the minimum separation. pub fn separation(&mut self, separation: Separation) -> &mut Self { self.separation = separation; self } /// This is the range available for warriors to write information /// to core. Attempts to write outside the limits of this range /// result in writing within the local writable range. The range /// is centered on the current instruction. 
Thus, a range of 500 /// limits writing to offsets of (-249 -> +250) from the /// currently executing instruction. The write limit can /// therefore be considered a mini-core within core. An attempt /// to write location PC+251 writes to location PC-249 instead. /// An attempt to write to location PC+500 writes to location PC /// instead. /// /// Write distance must be a factor of core size, otherwise the /// above defined behaviour is not guaranteed. pub fn write_distance(&mut self, write_distance: usize) -> &mut Self { self.write_distance = write_distance; self } pub fn load_warriors(&mut self, warriors: &[Warrior]) -> Result<&mut Self, CoreError> { for warrior in warriors { if warrior.len() > self.instruction_limit
if warrior.is_empty() { return Err(CoreError::EmptyWarrior( warrior.metadata.name().unwrap_or("Unnamed").to_owned(), )); }; } self.warriors = warriors.to_vec(); Ok(self) } /// Use a `Logger` to log the battle's output. pub fn log_with(&mut self, logger: Box<dyn Logger>) -> &mut Self { self.logger = Some(logger); self } /// Build the core, consuming the `CoreBuilder` and returning a [`Core`](../struct.Core.html) struct. pub fn build(&self) -> Result<Core, CoreError> { let CoreBuilder { initial_instruction, separation, warriors, maximum_number_of_tasks, core_size, instruction_limit, .. } = self; let mut core_instructions = vec![ CoreInstruction::from_instruction( initial_instruction.clone().extract(), *core_size ); *core_size ]; let separation = separation.clone(); let mut warrior_offsets: Vec<usize> = warriors.iter().map(|w| w.starts_at_line).collect(); match separation { Separation::Random(min_separation) => { let offsets = random_offsets(&warriors, min_separation, *instruction_limit, *core_size); for (i, (offset, warrior)) in offsets.iter().enumerate() { let mut ptr = *offset; warrior_offsets[i] = Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size); for instruction in &warrior.instructions { core_instructions[ptr] = CoreInstruction::from_instruction(instruction.clone(), *core_size); ptr = Core::fold(ptr + 1, *core_size, *core_size); } } } Separation::Fixed(separation) => { let mut ptr = 0_usize; for (i, warrior) in warriors.iter().enumerate() { warrior_offsets[i] = Core::fold(warrior_offsets[i] + ptr, *core_size, *core_size); for instruction in &warrior.instructions { core_instructions[ptr] = CoreInstruction::from_instruction(instruction.clone(), *core_size); ptr = Core::fold(ptr + 1, *core_size, *core_size); } ptr = Core::fold(ptr + separation, *core_size, *core_size); } } }; let task_queues = warrior_offsets .iter() .zip(warriors) .map(|(&offset, warrior)| { let mut v = VecDeque::with_capacity(*maximum_number_of_tasks); let offset = Core::fold(offset, *core_size, *core_size); v.push_back(offset); (warrior, v) }) .collect(); Ok(Core { core: self, instructions: core_instructions, task_queues, current_queue: 0, cycle_count: 0, }) } } /// The separation between warriors at the start of a match. /// /// The number of instructions from the first instruction of one warrior to the first instruction of the next warrior. /// If a core's separation is `Random`, separations will be chosen randomly from the set of numbers larger than the core's minimum separation. #[derive(Debug, Clone)] pub enum Separation { Random(usize), Fixed(usize), } /// The value to which the core's memory addresses are initialised /// at the beginning of the match. /// /// The initial instruction is that instruction which is preloaded /// into core prior to loading warriors. If set to `Random`, core /// instructions are filled with randomly generated instructions. #[derive(Debug, Clone)] pub enum InitialInstruction { Random, Fixed(Instruction), } impl InitialInstruction { /// Extract the initial instruction if it's `Fixed`, or get a random `Instruction` if it's `Random`. pub fn extract(self) -> Instruction { match self { Self::Random => todo!(), Self::Fixed(instr) => instr, } } } fn random_offsets( warriors: &[Warrior], minimum_separation: usize, instruction_limit: usize, core_size: usize, ) -> Vec<(usize, &Warrior)> { let mut offsets: Vec<(usize, &Warrior)> = Vec::new(); for warrior in
{ return Err(CoreError::WarriorTooLong( warrior.len(), self.instruction_limit, warrior.metadata.name().unwrap_or("Unnamed").to_owned(), )); }
conditional_block
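`load_warriors` rejects a warrior that exceeds `instruction_limit` or is empty, reporting the warrior's name or "Unnamed". A sketch of consuming those two `CoreError` variants; the field order is inferred from the construction sites above, and any further variants are assumed rather than shown in this excerpt:

```rust
// Field order inferred from the construction sites:
// WarriorTooLong(actual_len, limit, name) and EmptyWarrior(name).
fn describe(err: &CoreError) -> String {
    match err {
        CoreError::WarriorTooLong(len, limit, name) => {
            format!("warrior `{}` has {} instructions, limit is {}", name, len, limit)
        }
        CoreError::EmptyWarrior(name) => {
            format!("warrior `{}` contains no instructions", name)
        }
        // Any variants not visible in this excerpt.
        _ => "other core error".to_string(),
    }
}
```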
exec_plan9.go
} for b := buf[:n]; len(b) > 0; { var s []byte s, b = gdirname(b) if s == nil { return nil, ErrBadStat } names = append(names, string(s)) } } return } // name of the directory containing names and control files for all open file descriptors var dupdev, _ = BytePtrFromString("#d") // forkAndExecInChild forks the process, calling dup onto 0..len(fd) // and finally invoking exec(argv0, argvv, envv) in the child. // If a dup or exec fails, it writes the error string to pipe. // (The pipe write end is close-on-exec so if exec succeeds, it will be closed.) // // In the child, this function must not acquire any locks, because // they might have been locked at the time of the fork. This means // no rescheduling, no malloc calls, and no new stack segments. // The calls to RawSyscall are okay because they are assembly // functions that do not grow the stack. //go:norace func forkAndExecInChild(argv0 *byte, argv []*byte, envv []envItem, dir *byte, attr *ProcAttr, pipe int, rflag int) (pid int, err error) { // Declare all variables at top in case any // declarations require heap allocation (e.g., errbuf). var ( r1 uintptr nextfd int i int clearenv int envfd int errbuf [ERRMAX]byte statbuf [STATMAX]byte dupdevfd int ) // Guard against side effects of shuffling fds below. // Make sure that nextfd is beyond any currently open files so // that we can't run the risk of overwriting any of them. fd := make([]int, len(attr.Files)) nextfd = len(attr.Files) for i, ufd := range attr.Files { if nextfd < int(ufd) { nextfd = int(ufd) } fd[i] = int(ufd) } nextfd++ if envv != nil { clearenv = RFCENVG } // About to call fork. // No more allocation or calls of non-assembly functions. r1, _, _ = RawSyscall(SYS_RFORK, uintptr(RFPROC|RFFDG|RFREND|clearenv|rflag), 0, 0) if r1 != 0 { if int32(r1) == -1 { return 0, NewError(errstr()) } // parent; return PID return int(r1), nil } // Fork succeeded, now in child. // Close fds we don't need. r1, _, _ = RawSyscall(SYS_OPEN, uintptr(unsafe.Pointer(dupdev)), uintptr(O_RDONLY), 0) dupdevfd = int(r1) if dupdevfd == -1 { goto childerror } dirloop: for { r1, _, _ = RawSyscall6(SYS_PREAD, uintptr(dupdevfd), uintptr(unsafe.Pointer(&statbuf[0])), uintptr(len(statbuf)), ^uintptr(0), ^uintptr(0), 0) n := int(r1) switch n { case -1: goto childerror case 0: break dirloop } for b := statbuf[:n]; len(b) > 0; { var s []byte s, b = gdirname(b) if s == nil { copy(errbuf[:], ErrBadStat.Error()) goto childerror1 } if s[len(s)-1] == 'l' { // control file for descriptor <N> is named <N>ctl continue } closeFdExcept(int(atoi(s)), pipe, dupdevfd, fd) } } RawSyscall(SYS_CLOSE, uintptr(dupdevfd), 0, 0) // Write new environment variables. if envv != nil { for i = 0; i < len(envv); i++ { r1, _, _ = RawSyscall(SYS_CREATE, uintptr(unsafe.Pointer(envv[i].name)), uintptr(O_WRONLY), uintptr(0666)) if int32(r1) == -1 { goto childerror } envfd = int(r1) r1, _, _ = RawSyscall6(SYS_PWRITE, uintptr(envfd), uintptr(unsafe.Pointer(envv[i].value)), uintptr(envv[i].nvalue), ^uintptr(0), ^uintptr(0), 0) if int32(r1) == -1 || int(r1) != envv[i].nvalue { goto childerror } r1, _, _ = RawSyscall(SYS_CLOSE, uintptr(envfd), 0, 0) if int32(r1) == -1 { goto childerror } } } // Chdir if dir != nil { r1, _, _ = RawSyscall(SYS_CHDIR, uintptr(unsafe.Pointer(dir)), 0, 0) if int32(r1) == -1 { goto childerror } } // Pass 1: look for fd[i] < i and move those up above len(fd) // so that pass 2 won't stomp on an fd it needs later. 
if pipe < nextfd { r1, _, _ = RawSyscall(SYS_DUP, uintptr(pipe), uintptr(nextfd), 0) if int32(r1) == -1 { goto childerror } pipe = nextfd nextfd++ } for i = 0; i < len(fd); i++ {
r1, _, _ = RawSyscall(SYS_DUP, uintptr(fd[i]), uintptr(nextfd), 0) if int32(r1) == -1 { goto childerror } fd[i] = nextfd nextfd++ } } // Pass 2: dup fd[i] down onto i. for i = 0; i < len(fd); i++ { if fd[i] == -1 { RawSyscall(SYS_CLOSE, uintptr(i), 0, 0) continue } if fd[i] == int(i) { continue } r1, _, _ = RawSyscall(SYS_DUP, uintptr(fd[i]), uintptr(i), 0) if int32(r1) == -1 { goto childerror } } // Pass 3: close fd[i] if it was moved in the previous pass. for i = 0; i < len(fd); i++ { if fd[i] >= 0 && fd[i] != int(i) { RawSyscall(SYS_CLOSE, uintptr(fd[i]), 0, 0) } } // Time to exec. r1, _, _ = RawSyscall(SYS_EXEC, uintptr(unsafe.Pointer(argv0)), uintptr(unsafe.Pointer(&argv[0])), 0) childerror: // send error string on pipe RawSyscall(SYS_ERRSTR, uintptr(unsafe.Pointer(&errbuf[0])), uintptr(len(errbuf)), 0) childerror1: errbuf[len(errbuf)-1] = 0 i = 0 for i < len(errbuf) && errbuf[i] != 0 { i++ } RawSyscall6(SYS_PWRITE, uintptr(pipe), uintptr(unsafe.Pointer(&errbuf[0])), uintptr(i), ^uintptr(0), ^uintptr(0), 0) for { RawSyscall(SYS_EXITS, 0, 0, 0) } } // close the numbered file descriptor, unless it is fd1, fd2, or a member of fds. //go:nosplit func closeFdExcept(n int, fd1 int, fd2 int, fds []int) { if n == fd1 || n == fd2 { return } for _, fd := range fds { if n == fd { return } } RawSyscall(SYS_CLOSE, uintptr(n), 0, 0) } func cexecPipe(p []int) error { e := Pipe(p) if e != nil { return e } fd, e := Open("#d/"+itoa.Itoa(p[1]), O_RDWR|O_CLOEXEC) if e != nil { Close(p[0]) Close(p[1]) return e } Close(p[1]) p[1] = fd return nil } type envItem struct { name *byte value *byte nvalue int } type ProcAttr struct { Dir string // Current working directory. Env []string // Environment. Files []uintptr // File descriptors. Sys *SysProcAttr } type SysProcAttr struct { Rfork int // additional flags to pass to rfork } var zeroProcAttr ProcAttr
if fd[i] >= 0 && fd[i] < int(i) { if nextfd == pipe { // don't stomp on pipe nextfd++ }
random_line_split
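The child-side descriptor shuffle in this row is a three-pass algorithm: pass 1 lifts any fd[i] < i above every wanted slot (stepping over the status pipe) so that pass 2 can dup each source down onto slot i without clobbering one a later slot still needs, and pass 3 closes the moved originals. A Rust sketch of the same renumbering over a plain map standing in for the kernel's fd table; there are no real dup/close calls here, and the real pass 2 also closes slot i outright when fd[i] is -1:

```rust
use std::collections::HashMap;

/// Three-pass renumbering over a simulated fd table. `nextfd` must already
/// be above every source fd, exactly as forkAndExecInChild arranges.
fn shuffle(fd: &mut [i64], table: &mut HashMap<i64, &'static str>, mut nextfd: i64, pipe: i64) {
    // Pass 1: lift fd[i] < i above all wanted slots so pass 2 cannot
    // overwrite a source that a later slot still needs.
    for i in 0..fd.len() {
        if fd[i] >= 0 && fd[i] < i as i64 {
            if nextfd == pipe {
                nextfd += 1; // don't stomp on the status pipe
            }
            let v = table[&fd[i]];
            table.insert(nextfd, v); // dup(fd[i], nextfd)
            fd[i] = nextfd;
            nextfd += 1;
        }
    }
    // Pass 2: dup fd[i] down onto slot i.
    for i in 0..fd.len() {
        if fd[i] >= 0 && fd[i] != i as i64 {
            let v = table[&fd[i]];
            table.insert(i as i64, v);
        }
    }
    // Pass 3: close the moved originals.
    for i in 0..fd.len() {
        if fd[i] >= 0 && fd[i] != i as i64 {
            table.remove(&fd[i]);
        }
    }
}

fn main() {
    let mut table: HashMap<i64, &'static str> =
        [(3, "stdin-src"), (0, "old-zero"), (4, "stdout-src")].into();
    let mut fd: Vec<i64> = vec![3, 0, 4]; // want slot 0 <- 3, slot 1 <- 0, slot 2 <- 4
    shuffle(&mut fd, &mut table, 5, 7);
    assert_eq!(table[&0], "stdin-src");
    assert_eq!(table[&1], "old-zero"); // fd 0 was lifted first, then dup'ed onto 1
    assert_eq!(table[&2], "stdout-src");
}
```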
exec_plan9.go
(ss []string) []*byte { bb := make([]*byte, len(ss)+1) for i := 0; i < len(ss); i++ { bb[i] = StringBytePtr(ss[i]) } bb[len(ss)] = nil return bb } // SlicePtrFromStrings converts a slice of strings to a slice of // pointers to NUL-terminated byte arrays. If any string contains // a NUL byte, it returns (nil, EINVAL). func SlicePtrFromStrings(ss []string) ([]*byte, error) { var err error bb := make([]*byte, len(ss)+1) for i := 0; i < len(ss); i++ { bb[i], err = BytePtrFromString(ss[i]) if err != nil { return nil, err } } bb[len(ss)] = nil return bb, nil } // readdirnames returns the names of files inside the directory represented by dirfd. func readdirnames(dirfd int) (names []string, err error) { names = make([]string, 0, 100) var buf [STATMAX]byte for { n, e := Read(dirfd, buf[:]) if e != nil { return nil, e } if n == 0 { break } for b := buf[:n]; len(b) > 0; { var s []byte s, b = gdirname(b) if s == nil { return nil, ErrBadStat } names = append(names, string(s)) } } return } // name of the directory containing names and control files for all open file descriptors var dupdev, _ = BytePtrFromString("#d") // forkAndExecInChild forks the process, calling dup onto 0..len(fd) // and finally invoking exec(argv0, argvv, envv) in the child. // If a dup or exec fails, it writes the error string to pipe. // (The pipe write end is close-on-exec so if exec succeeds, it will be closed.) // // In the child, this function must not acquire any locks, because // they might have been locked at the time of the fork. This means // no rescheduling, no malloc calls, and no new stack segments. // The calls to RawSyscall are okay because they are assembly // functions that do not grow the stack. //go:norace func forkAndExecInChild(argv0 *byte, argv []*byte, envv []envItem, dir *byte, attr *ProcAttr, pipe int, rflag int) (pid int, err error) { // Declare all variables at top in case any // declarations require heap allocation (e.g., errbuf). var ( r1 uintptr nextfd int i int clearenv int envfd int errbuf [ERRMAX]byte statbuf [STATMAX]byte dupdevfd int ) // Guard against side effects of shuffling fds below. // Make sure that nextfd is beyond any currently open files so // that we can't run the risk of overwriting any of them. fd := make([]int, len(attr.Files)) nextfd = len(attr.Files) for i, ufd := range attr.Files { if nextfd < int(ufd) { nextfd = int(ufd) } fd[i] = int(ufd) } nextfd++ if envv != nil { clearenv = RFCENVG } // About to call fork. // No more allocation or calls of non-assembly functions. r1, _, _ = RawSyscall(SYS_RFORK, uintptr(RFPROC|RFFDG|RFREND|clearenv|rflag), 0, 0) if r1 != 0 { if int32(r1) == -1 { return 0, NewError(errstr()) } // parent; return PID return int(r1), nil } // Fork succeeded, now in child. // Close fds we don't need. r1, _, _ = RawSyscall(SYS_OPEN, uintptr(unsafe.Pointer(dupdev)), uintptr(O_RDONLY), 0) dupdevfd = int(r1) if dupdevfd == -1 { goto childerror } dirloop: for { r1, _, _ = RawSyscall6(SYS_PREAD, uintptr(dupdevfd), uintptr(unsafe.Pointer(&statbuf[0])), uintptr(len(statbuf)), ^uintptr(0), ^uintptr(0), 0) n := int(r1) switch n { case -1: goto childerror case 0: break dirloop } for b := statbuf[:n]; len(b) > 0; { var s []byte s, b = gdirname(b) if s == nil { copy(errbuf[:], ErrBadStat.Error()) goto childerror1 } if s[len(s)-1] == 'l' { // control file for descriptor <N> is named <N>ctl continue } closeFdExcept(int(atoi(s)), pipe, dupdevfd, fd) } } RawSyscall(SYS_CLOSE, uintptr(dupdevfd), 0, 0) // Write new environment variables. 
if envv != nil { for i = 0; i < len(envv); i++ { r1, _, _ = RawSyscall(SYS_CREATE, uintptr(unsafe.Pointer(envv[i].name)), uintptr(O_WRONLY), uintptr(0666)) if int32(r1) == -1 { goto childerror } envfd = int(r1) r1, _, _ = RawSyscall6(SYS_PWRITE, uintptr(envfd), uintptr(unsafe.Pointer(envv[i].value)), uintptr(envv[i].nvalue), ^uintptr(0), ^uintptr(0), 0) if int32(r1) == -1 || int(r1) != envv[i].nvalue { goto childerror } r1, _, _ = RawSyscall(SYS_CLOSE, uintptr(envfd), 0, 0) if int32(r1) == -1 { goto childerror } } } // Chdir if dir != nil { r1, _, _ = RawSyscall(SYS_CHDIR, uintptr(unsafe.Pointer(dir)), 0, 0) if int32(r1) == -1 { goto childerror } } // Pass 1: look for fd[i] < i and move those up above len(fd) // so that pass 2 won't stomp on an fd it needs later. if pipe < nextfd { r1, _, _ = RawSyscall(SYS_DUP, uintptr(pipe), uintptr(nextfd), 0) if int32(r1) == -1 { goto childerror } pipe = nextfd nextfd++ } for i = 0; i < len(fd); i++ { if fd[i] >= 0 && fd[i] < int(i) { if nextfd == pipe { // don't stomp on pipe nextfd++ } r1, _, _ = RawSyscall(SYS_DUP, uintptr(fd[i]), uintptr(nextfd), 0) if int32(r1) == -1 { goto childerror } fd[i] = nextfd nextfd++ } } // Pass 2: dup fd[i] down onto i. for i = 0; i < len(fd); i++ { if fd[i] == -1 { RawSyscall(SYS_CLOSE, uintptr(i), 0, 0) continue } if fd[i] == int(i) { continue } r1, _, _ = RawSyscall(SYS_DUP, uintptr(fd[i]), uintptr(i), 0) if int32(r1) == -1 { goto childerror } } // Pass 3: close fd[i] if it was moved in the previous pass. for i = 0; i < len(fd); i++ { if fd[i] >= 0 && fd[i] != int(i) { RawSyscall(SYS_CLOSE, uintptr(fd[i]), 0, 0) } } // Time to exec. r1, _, _ = RawSyscall(SYS_EXEC, uintptr(unsafe.Pointer(argv0)), uintptr(unsafe.Pointer(&argv[0])), 0) childerror: // send error string on pipe RawSyscall(SYS_ERRSTR, uintptr(unsafe.Pointer(&errbuf[0])), uintptr(len(errbuf)), 0) childerror1: errbuf[len(errbuf)-1] = 0 i = 0 for i < len(errbuf) && errbuf[i] != 0 { i++ } RawSyscall6(SYS_PWRITE, uintptr(pipe), uintptr(unsafe.Pointer(&errbuf[0])), uintptr(i), ^uintptr(0), ^uintptr(0), 0) for { RawSyscall(SYS_EXITS,
StringSlicePtr
identifier_name
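`StringSlicePtr` and `SlicePtrFromStrings` both build the NUL-terminated pointer array that exec expects; the difference is that `SlicePtrFromStrings` fails with EINVAL on a string containing an interior NUL instead of silently corrupting the array. The closest Rust analogue, offered only as a cross-language sketch rather than the Go API, is `CString::new`, which performs the same interior-NUL check:

```rust
use std::ffi::{CString, NulError};

/// Convert argv-style strings to C strings, failing on an interior NUL
/// just as SlicePtrFromStrings fails with EINVAL.
fn to_cstrings(args: &[&str]) -> Result<Vec<CString>, NulError> {
    args.iter().map(|s| CString::new(*s)).collect()
}

fn main() {
    assert!(to_cstrings(&["/bin/ls", "-l"]).is_ok());
    assert!(to_cstrings(&["bad\0arg"]).is_err()); // interior NUL rejected
}
```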
exec_plan9.go
0666)) if int32(r1) == -1 { goto childerror } envfd = int(r1) r1, _, _ = RawSyscall6(SYS_PWRITE, uintptr(envfd), uintptr(unsafe.Pointer(envv[i].value)), uintptr(envv[i].nvalue), ^uintptr(0), ^uintptr(0), 0) if int32(r1) == -1 || int(r1) != envv[i].nvalue { goto childerror } r1, _, _ = RawSyscall(SYS_CLOSE, uintptr(envfd), 0, 0) if int32(r1) == -1 { goto childerror } } } // Chdir if dir != nil { r1, _, _ = RawSyscall(SYS_CHDIR, uintptr(unsafe.Pointer(dir)), 0, 0) if int32(r1) == -1 { goto childerror } } // Pass 1: look for fd[i] < i and move those up above len(fd) // so that pass 2 won't stomp on an fd it needs later. if pipe < nextfd { r1, _, _ = RawSyscall(SYS_DUP, uintptr(pipe), uintptr(nextfd), 0) if int32(r1) == -1 { goto childerror } pipe = nextfd nextfd++ } for i = 0; i < len(fd); i++ { if fd[i] >= 0 && fd[i] < int(i) { if nextfd == pipe { // don't stomp on pipe nextfd++ } r1, _, _ = RawSyscall(SYS_DUP, uintptr(fd[i]), uintptr(nextfd), 0) if int32(r1) == -1 { goto childerror } fd[i] = nextfd nextfd++ } } // Pass 2: dup fd[i] down onto i. for i = 0; i < len(fd); i++ { if fd[i] == -1 { RawSyscall(SYS_CLOSE, uintptr(i), 0, 0) continue } if fd[i] == int(i) { continue } r1, _, _ = RawSyscall(SYS_DUP, uintptr(fd[i]), uintptr(i), 0) if int32(r1) == -1 { goto childerror } } // Pass 3: close fd[i] if it was moved in the previous pass. for i = 0; i < len(fd); i++ { if fd[i] >= 0 && fd[i] != int(i) { RawSyscall(SYS_CLOSE, uintptr(fd[i]), 0, 0) } } // Time to exec. r1, _, _ = RawSyscall(SYS_EXEC, uintptr(unsafe.Pointer(argv0)), uintptr(unsafe.Pointer(&argv[0])), 0) childerror: // send error string on pipe RawSyscall(SYS_ERRSTR, uintptr(unsafe.Pointer(&errbuf[0])), uintptr(len(errbuf)), 0) childerror1: errbuf[len(errbuf)-1] = 0 i = 0 for i < len(errbuf) && errbuf[i] != 0 { i++ } RawSyscall6(SYS_PWRITE, uintptr(pipe), uintptr(unsafe.Pointer(&errbuf[0])), uintptr(i), ^uintptr(0), ^uintptr(0), 0) for { RawSyscall(SYS_EXITS, 0, 0, 0) } } // close the numbered file descriptor, unless it is fd1, fd2, or a member of fds. //go:nosplit func closeFdExcept(n int, fd1 int, fd2 int, fds []int) { if n == fd1 || n == fd2 { return } for _, fd := range fds { if n == fd { return } } RawSyscall(SYS_CLOSE, uintptr(n), 0, 0) } func cexecPipe(p []int) error { e := Pipe(p) if e != nil { return e } fd, e := Open("#d/"+itoa.Itoa(p[1]), O_RDWR|O_CLOEXEC) if e != nil { Close(p[0]) Close(p[1]) return e } Close(p[1]) p[1] = fd return nil } type envItem struct { name *byte value *byte nvalue int } type ProcAttr struct { Dir string // Current working directory. Env []string // Environment. Files []uintptr // File descriptors. Sys *SysProcAttr } type SysProcAttr struct { Rfork int // additional flags to pass to rfork } var zeroProcAttr ProcAttr var zeroSysProcAttr SysProcAttr func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) { var ( p [2]int n int errbuf [ERRMAX]byte wmsg Waitmsg ) if attr == nil { attr = &zeroProcAttr } sys := attr.Sys if sys == nil { sys = &zeroSysProcAttr } p[0] = -1 p[1] = -1 // Convert args to C form. 
argv0p, err := BytePtrFromString(argv0) if err != nil { return 0, err } argvp, err := SlicePtrFromStrings(argv) if err != nil { return 0, err } destDir := attr.Dir if destDir == "" { wdmu.Lock() destDir = wdStr wdmu.Unlock() } var dir *byte if destDir != "" { dir, err = BytePtrFromString(destDir) if err != nil { return 0, err } } var envvParsed []envItem if attr.Env != nil { envvParsed = make([]envItem, 0, len(attr.Env)) for _, v := range attr.Env { i := 0 for i < len(v) && v[i] != '=' { i++ } envname, err := BytePtrFromString("/env/" + v[:i]) if err != nil { return 0, err } envvalue := make([]byte, len(v)-i) copy(envvalue, v[i+1:]) envvParsed = append(envvParsed, envItem{envname, &envvalue[0], len(v) - i}) } } // Allocate child status pipe close on exec. e := cexecPipe(p[:]) if e != nil { return 0, e } // Kick off child. pid, err = forkAndExecInChild(argv0p, argvp, envvParsed, dir, attr, p[1], sys.Rfork) if err != nil { if p[0] >= 0 { Close(p[0]) Close(p[1]) } return 0, err } // Read child error status from pipe. Close(p[1]) n, err = Read(p[0], errbuf[:]) Close(p[0]) if err != nil || n != 0 { if n > 0 { err = NewError(string(errbuf[:n])) } else if err == nil { err = NewError("failed to read exec status") } // Child failed; wait for it to exit, to make sure // the zombies don't accumulate. for wmsg.Pid != pid { Await(&wmsg) } return 0, err } // Read got EOF, so pipe closed on exec, so exec succeeded. return pid, nil } type waitErr struct { Waitmsg err error } var procs struct { sync.Mutex waits map[int]chan *waitErr } // startProcess starts a new goroutine, tied to the OS // thread, which runs the process and subsequently waits // for it to finish, communicating the process stats back // to any goroutines that may have been waiting on it. // // Such a dedicated goroutine is needed because on // Plan 9, only the parent thread can wait for a child, // whereas goroutines tend to jump OS threads (e.g., // between starting a process and running Wait(), the // goroutine may have been rescheduled). func startProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, err error)
{ type forkRet struct { pid int err error } forkc := make(chan forkRet, 1) go func() { runtime.LockOSThread() var ret forkRet ret.pid, ret.err = forkExec(argv0, argv, attr) // If fork fails there is nothing to wait for. if ret.err != nil || ret.pid == 0 { forkc <- ret return } waitc := make(chan *waitErr, 1)
identifier_body
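Plan 9 has no environment block; forkExec above turns each "KEY=VALUE" entry into a file name under /env plus a payload to write into it. A rough sketch of that parsing step, using Go strings for clarity instead of the *byte fields of the real envItem:

```go
// Sketch only: envItem here mirrors the idea of the struct in the row above,
// not its exact layout.
package main

import (
	"fmt"
	"strings"
)

type envItem struct {
	name  string // e.g. /env/PATH
	value string
}

func parseEnv(env []string) []envItem {
	items := make([]envItem, 0, len(env))
	for _, v := range env {
		k, val, _ := strings.Cut(v, "=")
		items = append(items, envItem{name: "/env/" + k, value: val})
	}
	return items
}

func main() {
	fmt.Println(parseEnv([]string{"PATH=/bin", "USER=glenda"}))
}
```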
exec_plan9.go
0 && fd[i] < int(i) { if nextfd == pipe { // don't stomp on pipe nextfd++ } r1, _, _ = RawSyscall(SYS_DUP, uintptr(fd[i]), uintptr(nextfd), 0) if int32(r1) == -1 { goto childerror } fd[i] = nextfd nextfd++ } } // Pass 2: dup fd[i] down onto i. for i = 0; i < len(fd); i++ { if fd[i] == -1 { RawSyscall(SYS_CLOSE, uintptr(i), 0, 0) continue } if fd[i] == int(i) { continue } r1, _, _ = RawSyscall(SYS_DUP, uintptr(fd[i]), uintptr(i), 0) if int32(r1) == -1 { goto childerror } } // Pass 3: close fd[i] if it was moved in the previous pass. for i = 0; i < len(fd); i++ { if fd[i] >= 0 && fd[i] != int(i) { RawSyscall(SYS_CLOSE, uintptr(fd[i]), 0, 0) } } // Time to exec. r1, _, _ = RawSyscall(SYS_EXEC, uintptr(unsafe.Pointer(argv0)), uintptr(unsafe.Pointer(&argv[0])), 0) childerror: // send error string on pipe RawSyscall(SYS_ERRSTR, uintptr(unsafe.Pointer(&errbuf[0])), uintptr(len(errbuf)), 0) childerror1: errbuf[len(errbuf)-1] = 0 i = 0 for i < len(errbuf) && errbuf[i] != 0 { i++ } RawSyscall6(SYS_PWRITE, uintptr(pipe), uintptr(unsafe.Pointer(&errbuf[0])), uintptr(i), ^uintptr(0), ^uintptr(0), 0) for { RawSyscall(SYS_EXITS, 0, 0, 0) } } // close the numbered file descriptor, unless it is fd1, fd2, or a member of fds. //go:nosplit func closeFdExcept(n int, fd1 int, fd2 int, fds []int) { if n == fd1 || n == fd2 { return } for _, fd := range fds { if n == fd { return } } RawSyscall(SYS_CLOSE, uintptr(n), 0, 0) } func cexecPipe(p []int) error { e := Pipe(p) if e != nil { return e } fd, e := Open("#d/"+itoa.Itoa(p[1]), O_RDWR|O_CLOEXEC) if e != nil { Close(p[0]) Close(p[1]) return e } Close(p[1]) p[1] = fd return nil } type envItem struct { name *byte value *byte nvalue int } type ProcAttr struct { Dir string // Current working directory. Env []string // Environment. Files []uintptr // File descriptors. Sys *SysProcAttr } type SysProcAttr struct { Rfork int // additional flags to pass to rfork } var zeroProcAttr ProcAttr var zeroSysProcAttr SysProcAttr func forkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) { var ( p [2]int n int errbuf [ERRMAX]byte wmsg Waitmsg ) if attr == nil { attr = &zeroProcAttr } sys := attr.Sys if sys == nil { sys = &zeroSysProcAttr } p[0] = -1 p[1] = -1 // Convert args to C form. argv0p, err := BytePtrFromString(argv0) if err != nil { return 0, err } argvp, err := SlicePtrFromStrings(argv) if err != nil { return 0, err } destDir := attr.Dir if destDir == "" { wdmu.Lock() destDir = wdStr wdmu.Unlock() } var dir *byte if destDir != "" { dir, err = BytePtrFromString(destDir) if err != nil { return 0, err } } var envvParsed []envItem if attr.Env != nil { envvParsed = make([]envItem, 0, len(attr.Env)) for _, v := range attr.Env { i := 0 for i < len(v) && v[i] != '=' { i++ } envname, err := BytePtrFromString("/env/" + v[:i]) if err != nil { return 0, err } envvalue := make([]byte, len(v)-i) copy(envvalue, v[i+1:]) envvParsed = append(envvParsed, envItem{envname, &envvalue[0], len(v) - i}) } } // Allocate child status pipe close on exec. e := cexecPipe(p[:]) if e != nil { return 0, e } // Kick off child. pid, err = forkAndExecInChild(argv0p, argvp, envvParsed, dir, attr, p[1], sys.Rfork) if err != nil { if p[0] >= 0 { Close(p[0]) Close(p[1]) } return 0, err } // Read child error status from pipe. 
Close(p[1]) n, err = Read(p[0], errbuf[:]) Close(p[0]) if err != nil || n != 0 { if n > 0 { err = NewError(string(errbuf[:n])) } else if err == nil { err = NewError("failed to read exec status") } // Child failed; wait for it to exit, to make sure // the zombies don't accumulate. for wmsg.Pid != pid { Await(&wmsg) } return 0, err } // Read got EOF, so pipe closed on exec, so exec succeeded. return pid, nil } type waitErr struct { Waitmsg err error } var procs struct { sync.Mutex waits map[int]chan *waitErr } // startProcess starts a new goroutine, tied to the OS // thread, which runs the process and subsequently waits // for it to finish, communicating the process stats back // to any goroutines that may have been waiting on it. // // Such a dedicated goroutine is needed because on // Plan 9, only the parent thread can wait for a child, // whereas goroutines tend to jump OS threads (e.g., // between starting a process and running Wait(), the // goroutine may have been rescheduled). func startProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) { type forkRet struct { pid int err error } forkc := make(chan forkRet, 1) go func() { runtime.LockOSThread() var ret forkRet ret.pid, ret.err = forkExec(argv0, argv, attr) // If fork fails there is nothing to wait for. if ret.err != nil || ret.pid == 0 { forkc <- ret return } waitc := make(chan *waitErr, 1) // Mark that the process is running. procs.Lock() if procs.waits == nil { procs.waits = make(map[int]chan *waitErr) } procs.waits[ret.pid] = waitc procs.Unlock() forkc <- ret var w waitErr for w.err == nil && w.Pid != ret.pid { w.err = Await(&w.Waitmsg) } waitc <- &w close(waitc) }() ret := <-forkc return ret.pid, ret.err } // Combination of fork and exec, careful to be thread safe. func ForkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) { return startProcess(argv0, argv, attr) } // StartProcess wraps ForkExec for package os. func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error) { pid, err = startProcess(argv0, argv, attr) return pid, 0, err } // Ordinary exec. func Exec(argv0 string, argv []string, envv []string) (err error) { if envv != nil { r1, _, _ := RawSyscall(SYS_RFORK, RFCENVG, 0, 0) if int32(r1) == -1 { return NewError(errstr()) } for _, v := range envv { i := 0 for i < len(v) && v[i] != '='
{ i++ }
conditional_block
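The startProcess shown in these rows pins the forking goroutine to its OS thread because, on Plan 9, only the thread that forked a child may wait for it. A stripped-down sketch of that pattern, where fakeFork is a placeholder for forkExec and is an assumption for illustration:

```go
package main

import (
	"fmt"
	"runtime"
)

func fakeFork() (int, error) { return 42, nil } // placeholder for forkExec

func start() (int, error) {
	type ret struct {
		pid int
		err error
	}
	c := make(chan ret, 1)
	go func() {
		runtime.LockOSThread() // the thread that forks must also wait
		defer runtime.UnlockOSThread()
		pid, err := fakeFork()
		c <- ret{pid, err}
		// a real implementation keeps this goroutine alive to Await the child
	}()
	r := <-c
	return r.pid, r.err
}

func main() {
	pid, err := start()
	fmt.Println(pid, err)
}
```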
ha-state.go
if c.leaf == nil { log.Panicf("cert TLS called with nil leaf!") } return c.leaf } // GlobalHaState is the consensus state shared by all members of // a consensus cluster. type GlobalHaState struct { // LoadBalanced indicates that an external service is responsible for // routing traffic destined to VirtAddr to a cluster node. LoadBalanced bool // Enabled indicates whether either HA mode is operating on this cluster. // If just Enabled is set, the cluster is using the synchronous replication // protocol with manual failover. Enabled bool // ConsensusEnabled indicates that this cluster is operating on the Raft // based replication protocol with automatic failover. ConsensusEnabled bool // ConsensusJoin is the API URL of the current active node in a consensus // cluster. It may be unset if the cluster nodes cannot agree who should // be the active node, or if the cluster is operating using the sync replication // protocol. ConsensusJoin string // VirtAddr is the IP address that the cluster should appear to have from the // perspective of clients and third parties. VirtAddr string // ServerHostname is the DNS name for the DRP endpoint that managed systems should use. ServerHostname string // ActiveUri is the API URL of the cluster as built from the virtual addr. ActiveUri string // Token is an API authentication token that can be used to perform cluster operations. Token string // HaID is the ID of the cluster as a whole. HaID string // Valid indicates that this state is valid and has been consistency checked. Valid bool // Roots is a list of self-signed trust roots that consensus nodes will use // to verify communication. These roots are automatically created and rotated // on a regular basis. Roots []Cert } func (g *GlobalHaState) FillTls() error { for i := range g.Roots { if err := (&g.Roots[i]).setTls(); err != nil { return err } } return nil } // NodeHaState tracks the HA state for an individual node. type NodeHaState struct { // ConsensusID is the unique autogenerated ID for this node. // Once set, it must not be changed, or else other nodes in // a cluster will not recognize this node. ConsensusID uuid.UUID // VirtInterface is the network interface that the global VirtAddr // will be added on when this node is the active node, and removed from // when this node is no longer the active node. VirtInterface string // VirtInterfaceScript will be called whenever VirtAddr must be added // or removed from VirtInterface. If empty, a default set of scripts // will be used that are appropriate to the OS type dr-provision is running on. VirtInterfaceScript string // ConsensusAddr is the addr:port that other nodes should attempt to contact // this node when operating in consensus mode. All communication over these // ports will be secured using TLS 1.3 using per-node short-lived certs signed // by the certs in the global Roots field. ConsensusAddr string // ApiUrl is the URL that can be used to contact this node's API directly. ApiUrl string // Passive indicates that this node is not responsible for handling client connections // or writes via the API. Passive bool // Observer indicates that this node cannot become the active node. It is used when // a node should act as a live backup and a consensus tiebreaker. Observer bool } // CurrentHAState is the GlobalHaState and the NodeHaState for a particular node. 
type CurrentHAState struct { GlobalHaState NodeHaState } func makeCert(template *x509.Certificate, parentCert *tls.Certificate) (*tls.Certificate, error) { var err error var priv ed25519.PrivateKey var public ed25519.PublicKey public, priv, err = ed25519.GenerateKey(rand.Reader) if err != nil { return nil, err } var parent *x509.Certificate var parentPriv ed25519.PrivateKey if parentCert == nil { parent = template parentPriv = priv } else { parent = parentCert.Leaf parentPriv = parentCert.PrivateKey.(ed25519.PrivateKey) } var derBytes []byte derBytes, err = x509.CreateCertificate(rand.Reader, template, parent, public, parentPriv) if err != nil { return nil, err } finalCert, err := x509.ParseCertificate(derBytes) if err != nil { return nil, err } return &tls.Certificate{ Certificate: [][]byte{derBytes}, PrivateKey: priv, Leaf: finalCert, }, nil } // RotateRoot adds a new self-signed root certificate to the beginning of g.Roots, // and removes any expired certificates. func (g *GlobalHaState) RotateRoot(templateMaker func() (*x509.Certificate, error)) (err error) { // Generate an initial certificate root. var template *x509.Certificate template, err = templateMaker() if err != nil { return } var finalCert *tls.Certificate finalCert, err = makeCert(template, nil) if err != nil { return } res := Cert{Data: finalCert.Certificate, Key: finalCert.PrivateKey.(ed25519.PrivateKey), leaf: finalCert} if len(g.Roots) == 0 { g.Roots = []Cert{res} } else if g.Roots[len(g.Roots)-1].leaf.Leaf.NotAfter.After(time.Now()) { copy(g.Roots[1:], g.Roots) g.Roots[0] = res } else { g.Roots = append([]Cert{res}, g.Roots...) } return } // EndpointCert creates a short-lived per-node certificate that is signed by the most recent root certificate. func (c *CurrentHAState) EndpointCert(templateMaker func() (*x509.Certificate, error)) (*tls.Certificate, error) { tmpl, err := templateMaker() if err != nil { return nil, err } addr, _, err := net.SplitHostPort(c.ConsensusAddr) if err != nil { return nil, err } tmpl.IPAddresses = []net.IP{net.ParseIP(addr)} return makeCert(tmpl, c.Roots[0].TLS()) } // OurIp returns the IP address that should be set in an endpoint certificate for host validation. func (c *CurrentHAState) OurIp() (string, error) { if !c.Enabled { return "", errors.New("HA not enabled") } if c.ConsensusAddr != "" { return c.VirtAddr, nil } if c.LoadBalanced { return c.VirtAddr, nil } ip, _, err := net.ParseCIDR(c.VirtAddr) return ip.String(), err } // Validate validates CurrentHAState to make sure it is sane. func (cOpts *CurrentHAState) Validate() error { // Validate HA args. 
if !cOpts.Enabled { return nil } ourAddrs, err := net.InterfaceAddrs() if err != nil { return err } consensusAddr := "" consensusPort := "" if cOpts.ConsensusAddr != "" { consensusAddr, consensusPort, err = net.SplitHostPort(cOpts.ConsensusAddr) if err != nil { return err } cAddrOk := false if net.ParseIP(consensusAddr) == nil { return fmt.Errorf("Must specify an IP address for the consensus address") } for _, ourAddr := range ourAddrs { if ourAddr.(*net.IPNet).IP.String() == consensusAddr { cAddrOk = true break } } if !cAddrOk { return fmt.Errorf("Consensus address %s is not present on the system", consensusAddr) } portNo, _ := strconv.ParseInt(consensusPort, 10, 32) if portNo < 1 || portNo > 65535 { return fmt.Errorf("Consensus port %d is out of range", portNo) } } if cOpts.LoadBalanced { if cOpts.VirtAddr == "" { return fmt.Errorf("Error: HA must specify an address that external systems will see this system as") } if net.ParseIP(cOpts.VirtAddr) == nil { return fmt.Errorf("Error: Invalid HA address %s", cOpts.VirtAddr) } lbAddrOk := true for _, ourAddr := range ourAddrs { if ourAddr.String() == cOpts.VirtAddr { lbAddrOk = false break } } if !lbAddrOk { return fmt.Errorf("Virt address %s is present on the system, not permitted when
return nil } // TLS converts the Cert into a TLS compatible certificate. func (c *Cert) TLS() *tls.Certificate {
random_line_split
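makeCert above covers both the self-signed root case (parentCert == nil) and the child-certificate case. A self-contained sketch of the self-signed path using the standard library; the template fields below are minimal assumptions, not the project's real ones:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"math/big"
	"time"
)

// selfSigned issues a certificate signed by its own freshly generated key,
// mirroring the parentCert == nil branch of makeCert.
func selfSigned() (*tls.Certificate, error) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		return nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(24 * time.Hour),
		IsCA:                  true,
		BasicConstraintsValid: true,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, pub, priv)
	if err != nil {
		return nil, err
	}
	leaf, err := x509.ParseCertificate(der)
	if err != nil {
		return nil, err
	}
	return &tls.Certificate{Certificate: [][]byte{der}, PrivateKey: priv, Leaf: leaf}, nil
}

func main() {
	cert, err := selfSigned()
	if err != nil {
		panic(err)
	}
	fmt.Println(cert.Leaf.NotAfter)
}
```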
ha-state.go
Enabled is set, the cluster is using the synchronous replication // protocol with manual failover. Enabled bool // ConsensusEnabled indicates that this cluster is operating on the Raft // based replication protocol with automatic failover. ConsensusEnabled bool // ConsensusJoin is the API URL of the current active node in a consensus // cluster. It may be unset if the cluster nodes cannot agree who should // be the active node, or if the cluster is operating using the sync replication // protocol. ConsensusJoin string // VirtAddr is the IP address that the cluster should appear to have from the // perspective of clients and third parties. VirtAddr string // ServerHostname is the DNS name for the DRP endpoint that managed systems should use. ServerHostname string // ActiveUri is the API URL of the cluster as built from the virtual addr. ActiveUri string // Token is an API authentication token that can be used to perform cluster operations. Token string // HaID is the ID of the cluster as a whole. HaID string // Valid indicates that this state is valid and has been consistency checked. Valid bool // Roots is a list of self-signed trust roots that consensus nodes will use // to verify communication. These roots are automatically created and rotated // on a regular basis. Roots []Cert } func (g *GlobalHaState) FillTls() error { for i := range g.Roots { if err := (&g.Roots[i]).setTls(); err != nil { return err } } return nil } // NodeHaState tracks the HA state for an individual node. type NodeHaState struct { // ConsensusID is the unique autogenerated ID for this node. // Once set, it must not be changed, or else other nodes in // a cluster will not recognize this node. ConsensusID uuid.UUID // VirtInterface is the network interface that the global VirtAddr // will be added on when this node is the active node, and removed from // when this node is no longer the active node. VirtInterface string // VirtInterfaceScript will be called whenever VirtAddr must be added // or removed from VirtInterface. If empty, a default set of scripts // will be used that are appropriate to the OS type dr-provision is running on. VirtInterfaceScript string // ConsensusAddr is the addr:port that other nodes should attempt to contact // this node when operating in consensus mode. All communication over these // ports will be secured using TLS 1.3 using per-node short-lived certs signed // by the certs in the global Roots field. ConsensusAddr string // ApiUrl is the URL that can be used to contact this node's API directly. ApiUrl string // Passive indicates that this node is not responsible for handling client connections // or writes via the API. Passive bool // Observer indicates that this node cannot become the active node. It is used when // a node should act as a live backup and a consensus tiebreaker. Observer bool } // CurrentHAState is the GlobalHaState and the NodeHaState for a particular node. 
type CurrentHAState struct { GlobalHaState NodeHaState } func makeCert(template *x509.Certificate, parentCert *tls.Certificate) (*tls.Certificate, error) { var err error var priv ed25519.PrivateKey var public ed25519.PublicKey public, priv, err = ed25519.GenerateKey(rand.Reader) if err != nil { return nil, err } var parent *x509.Certificate var parentPriv ed25519.PrivateKey if parentCert == nil { parent = template parentPriv = priv } else { parent = parentCert.Leaf parentPriv = parentCert.PrivateKey.(ed25519.PrivateKey) } var derBytes []byte derBytes, err = x509.CreateCertificate(rand.Reader, template, parent, public, parentPriv) if err != nil { return nil, err } finalCert, err := x509.ParseCertificate(derBytes) if err != nil { return nil, err } return &tls.Certificate{ Certificate: [][]byte{derBytes}, PrivateKey: priv, Leaf: finalCert, }, nil } // RotateRoot adds a new self-signed root certificate to the beginning of g.Roots, // and removes any expired certificates. func (g *GlobalHaState) RotateRoot(templateMaker func() (*x509.Certificate, error)) (err error) { // Generate an initial certificate root. var template *x509.Certificate template, err = templateMaker() if err != nil { return } var finalCert *tls.Certificate finalCert, err = makeCert(template, nil) if err != nil { return } res := Cert{Data: finalCert.Certificate, Key: finalCert.PrivateKey.(ed25519.PrivateKey), leaf: finalCert} if len(g.Roots) == 0 { g.Roots = []Cert{res} } else if g.Roots[len(g.Roots)-1].leaf.Leaf.NotAfter.After(time.Now()) { copy(g.Roots[1:], g.Roots) g.Roots[0] = res } else { g.Roots = append([]Cert{res}, g.Roots...) } return } // EndpointCert creates a short-lived per-node certificate that is signed by the most recent root certificate. func (c *CurrentHAState) EndpointCert(templateMaker func() (*x509.Certificate, error)) (*tls.Certificate, error) { tmpl, err := templateMaker() if err != nil { return nil, err } addr, _, err := net.SplitHostPort(c.ConsensusAddr) if err != nil { return nil, err } tmpl.IPAddresses = []net.IP{net.ParseIP(addr)} return makeCert(tmpl, c.Roots[0].TLS()) } // OurIp returns the IP address that should be set in an endpoint certificate for host validation. func (c *CurrentHAState) OurIp() (string, error) { if !c.Enabled { return "", errors.New("HA not enabled") } if c.ConsensusAddr != "" { return c.VirtAddr, nil } if c.LoadBalanced { return c.VirtAddr, nil } ip, _, err := net.ParseCIDR(c.VirtAddr) return ip.String(), err } // Validate validates CurrentHAState to make sure it is sane. func (cOpts *CurrentHAState) Validate() error
return fmt.Errorf("Must specify an IP address for the consensus address") } for _, ourAddr := range ourAddrs { if ourAddr.(*net.IPNet).IP.String() == consensusAddr { cAddrOk = true break } } if !cAddrOk { return fmt.Errorf("Consensus address %s is not present on the system", consensusAddr) } portNo, _ := strconv.ParseInt(consensusPort, 10, 32) if portNo < 0 || portNo > 65536 { return fmt.Errorf("Consensus port %d is out of range", portNo) } } if cOpts.LoadBalanced { if cOpts.VirtAddr == "" { return fmt.Errorf("Error: HA must specify an address that eternal systems will see this system as") } if net.ParseIP(cOpts.VirtAddr) == nil { return fmt.Errorf("Error: Invalid HA address %s", cOpts.VirtAddr) } lbAddrOk := true for _, ourAddr := range ourAddrs { if ourAddr.String() == cOpts.VirtAddr { lbAddrOk = false break } } if !lbAddrOk { return fmt.Errorf("Virt address %s is present on the system, not permitted when load balanced", cOpts.VirtAddr) } } else { if cOpts.VirtAddr == "" { return fmt.Errorf("Error: HA must specify a VIP in CIDR format that DRP will move around") } // In HA mode with a VIP, force everything to talk to the VIP address. ip, cidr, err := net.ParseCIDR(cOpts.VirtAddr) if err != nil { return fmt.Errorf("Error: HA IP address %s not valid: %v", cOpts.VirtAddr, err) } if consensusAddr != "" &&
{ // Validate HA args. if !cOpts.Enabled { return nil } ourAddrs, err := net.InterfaceAddrs() if err != nil { return err } consensusAddr := "" consensusPort := "" if cOpts.ConsensusAddr != "" { consensusAddr, consensusPort, err = net.SplitHostPort(cOpts.ConsensusAddr) if err != nil { return err } cAddrOk := false if net.ParseIP(consensusAddr) == nil {
identifier_body
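The Validate body in this row leans on net.SplitHostPort plus net.ParseIP; here is a compact sketch of that consensus-address check (the helper name is illustrative):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// checkAddr requires a literal "ip:port" with the port in the valid range.
func checkAddr(hostport string) error {
	host, port, err := net.SplitHostPort(hostport)
	if err != nil {
		return err
	}
	if net.ParseIP(host) == nil {
		return fmt.Errorf("must specify an IP address, got %q", host)
	}
	p, err := strconv.Atoi(port)
	if err != nil || p < 1 || p > 65535 {
		return fmt.Errorf("port %q is out of range", port)
	}
	return nil
}

func main() {
	fmt.Println(checkAddr("10.0.0.2:8092"))  // <nil>
	fmt.Println(checkAddr("10.0.0.2:99999")) // out of range
}
```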
ha-state.go
Enabled is set, the cluster is using the synchronous replication // protocol with manual failover. Enabled bool // ConsensusEnabled indicates that this cluster is operating on the Raft // based replication protocol with automatic failover. ConsensusEnabled bool // ConsensusJoin is the API URL of the current active node in a consensus // cluster. It may be unset if the cluster nodes cannot agree who should // be the active node, or if the cluster is operating using the sync replication // protocol. ConsensusJoin string // VirtAddr is the IP address that the cluster should appear to have from the // perspective of clients and third parties. VirtAddr string // ServerHostname is the DNS name for the DRP endpoint that managed systems should use. ServerHostname string // ActiveUri is the API URL of the cluster as built from the virtual addr. ActiveUri string // Token is an API authentication token that can be used to perform cluster operations. Token string // HaID is the ID of the cluster as a whole. HaID string // Valid indicates that this state is valid and has been consistency checked. Valid bool // Roots is a list of self-signed trust roots that consensus nodes will use // to verify communication. These roots are automatically created and rotated // on a regular basis. Roots []Cert } func (g *GlobalHaState) FillTls() error { for i := range g.Roots { if err := (&g.Roots[i]).setTls(); err != nil { return err } } return nil } // NodeHaState tracks the HA state for an individual node. type NodeHaState struct { // ConsensusID is the unique autogenerated ID for this node. // Once set, it must not be changed, or else other nodes in // a cluster will not recognize this node. ConsensusID uuid.UUID // VirtInterface is the network interface that the global VirtAddr // will be added on when this node is the active node, and removed from // when this node is no longer the active node. VirtInterface string // VirtInterfaceScript will be called whenever VirtAddr must be added // or removed from VirtInterface. If empty, a default set of scripts // will be used that are appropriate to the OS type dr-provision is running on. VirtInterfaceScript string // ConsensusAddr is the addr:port that other nodes should attempt to contact // this node when operating in consensus mode. All communication over these // ports will be secured using TLS 1.3 using per-node short-lived certs signed // by the certs in the global Roots field. ConsensusAddr string // ApiUrl is the URL that can be used to contact this node's API directly. ApiUrl string // Passive indicates that this node is not responsible for handling client connections // or writes via the API. Passive bool // Observer indicates that this node cannot become the active node. It is used when // a node should act as a live backup and a consensus tiebreaker. Observer bool } // CurrentHAState is the GlobalHaState and the NodeHaState for a particular node. type CurrentHAState struct { GlobalHaState NodeHaState } func
(template *x509.Certificate, parentCert *tls.Certificate) (*tls.Certificate, error) { var err error var priv ed25519.PrivateKey var public ed25519.PublicKey public, priv, err = ed25519.GenerateKey(rand.Reader) if err != nil { return nil, err } var parent *x509.Certificate var parentPriv ed25519.PrivateKey if parentCert == nil { parent = template parentPriv = priv } else { parent = parentCert.Leaf parentPriv = parentCert.PrivateKey.(ed25519.PrivateKey) } var derBytes []byte derBytes, err = x509.CreateCertificate(rand.Reader, template, parent, public, parentPriv) if err != nil { return nil, err } finalCert, err := x509.ParseCertificate(derBytes) if err != nil { return nil, err } return &tls.Certificate{ Certificate: [][]byte{derBytes}, PrivateKey: priv, Leaf: finalCert, }, nil } // RotateRoot adds a new self-signed root certificate to the beginning of g.Roots, // and removes any expired certificates. func (g *GlobalHaState) RotateRoot(templateMaker func() (*x509.Certificate, error)) (err error) { // Generate an initial certificate root. var template *x509.Certificate template, err = templateMaker() if err != nil { return } var finalCert *tls.Certificate finalCert, err = makeCert(template, nil) if err != nil { return } res := Cert{Data: finalCert.Certificate, Key: finalCert.PrivateKey.(ed25519.PrivateKey), leaf: finalCert} if len(g.Roots) == 0 { g.Roots = []Cert{res} } else if g.Roots[len(g.Roots)-1].leaf.Leaf.NotAfter.After(time.Now()) { copy(g.Roots[1:], g.Roots) g.Roots[0] = res } else { g.Roots = append([]Cert{res}, g.Roots...) } return } // EndpointCert creates a short-lived per-node certificate that is signed by the most recent root certificate. func (c *CurrentHAState) EndpointCert(templateMaker func() (*x509.Certificate, error)) (*tls.Certificate, error) { tmpl, err := templateMaker() if err != nil { return nil, err } addr, _, err := net.SplitHostPort(c.ConsensusAddr) if err != nil { return nil, err } tmpl.IPAddresses = []net.IP{net.ParseIP(addr)} return makeCert(tmpl, c.Roots[0].TLS()) } // OurIp returns the IP address that should be set in an endpoint certificate for host validation. func (c *CurrentHAState) OurIp() (string, error) { if !c.Enabled { return "", errors.New("HA not enabled") } if c.ConsensusAddr != "" { return c.VirtAddr, nil } if c.LoadBalanced { return c.VirtAddr, nil } ip, _, err := net.ParseCIDR(c.VirtAddr) return ip.String(), err } // Validate validates CurrentHAState to make sure it is sane. func (cOpts *CurrentHAState) Validate() error { // Validate HA args. 
if !cOpts.Enabled { return nil } ourAddrs, err := net.InterfaceAddrs() if err != nil { return err } consensusAddr := "" consensusPort := "" if cOpts.ConsensusAddr != "" { consensusAddr, consensusPort, err = net.SplitHostPort(cOpts.ConsensusAddr) if err != nil { return err } cAddrOk := false if net.ParseIP(consensusAddr) == nil { return fmt.Errorf("Must specify an IP address for the consensus address") } for _, ourAddr := range ourAddrs { if ourAddr.(*net.IPNet).IP.String() == consensusAddr { cAddrOk = true break } } if !cAddrOk { return fmt.Errorf("Consensus address %s is not present on the system", consensusAddr) } portNo, _ := strconv.ParseInt(consensusPort, 10, 32) if portNo < 1 || portNo > 65535 { return fmt.Errorf("Consensus port %d is out of range", portNo) } } if cOpts.LoadBalanced { if cOpts.VirtAddr == "" { return fmt.Errorf("Error: HA must specify an address that external systems will see this system as") } if net.ParseIP(cOpts.VirtAddr) == nil { return fmt.Errorf("Error: Invalid HA address %s", cOpts.VirtAddr) } lbAddrOk := true for _, ourAddr := range ourAddrs { if ourAddr.String() == cOpts.VirtAddr { lbAddrOk = false break } } if !lbAddrOk { return fmt.Errorf("Virt address %s is present on the system, not permitted when load balanced", cOpts.VirtAddr) } } else { if cOpts.VirtAddr == "" { return fmt.Errorf("Error: HA must specify a VIP in CIDR format that DRP will move around") } // In HA mode with a VIP, force everything to talk to the VIP address. ip, cidr, err := net.ParseCIDR(cOpts.VirtAddr) if err != nil { return fmt.Errorf("Error: HA IP address %s not valid: %v", cOpts.VirtAddr, err) } if consensusAddr != "" &&
makeCert
identifier_name
ha-state.go
Enabled is set, the cluster is using the synchronous replication // protocol with manual failover. Enabled bool // ConsensusEnabled indicates that this cluster is operating on the Raft // based replication protocol with automatic failover. ConsensusEnabled bool // ConsensusJoin is the API URL of the current active node in a consensus // cluster. It may be unset if the cluster nodes cannot agree who should // be the active node, or if the cluster is operating using the sync replication // protocol. ConsensusJoin string // VirtAddr is the IP address that the cluster should appear to have from the // perspective of clients and third parties. VirtAddr string // ServerHostname is the DNS name for the DRP endpoint that managed systems should use. ServerHostname string // ActiveUri is the API URL of the cluster as built from the virtual addr. ActiveUri string // Token is an API authentication token that can be used to perform cluster operations. Token string // HaID is the ID of the cluster as a whole. HaID string // Valid indicates that this state is valid and has been consistency checked. Valid bool // Roots is a list of self-signed trust roots that consensus nodes will use // to verify communication. These roots are automatically created and rotated // on a regular basis. Roots []Cert } func (g *GlobalHaState) FillTls() error { for i := range g.Roots { if err := (&g.Roots[i]).setTls(); err != nil { return err } } return nil } // NodeHaState tracks the HA state for an individual node. type NodeHaState struct { // ConsensusID is the unique autogenerated ID for this node. // Once set, it must not be changed, or else other nodes in // a cluster will not recognize this node. ConsensusID uuid.UUID // VirtInterface is the network interface that the global VirtAddr // will be added on when this node is the active node, and removed from // when this node is no longer the active node. VirtInterface string // VirtInterfaceScript will be called whenever VirtAddr must be added // or removed from VirtInterface. If empty, a default set of scripts // will be used that are appropriate to the OS type dr-provision is running on. VirtInterfaceScript string // ConsensusAddr is the addr:port that other nodes should attempt to contact // this node when operating in consensus mode. All communication over these // ports will be secured using TLS 1.3 using per-node short-lived certs signed // by the certs in the global Roots field. ConsensusAddr string // ApiUrl is the URL that can be used to contact this node's API directly. ApiUrl string // Passive indicates that this node is not responsible for handling client connections // or writes via the API. Passive bool // Observer indicates that this node cannot become the active node. It is used when // a node should act as a live backup and a consensus tiebreaker. Observer bool } // CurrentHAState is the GlobalHaState and the NodeHaState for a particular node. 
type CurrentHAState struct { GlobalHaState NodeHaState } func makeCert(template *x509.Certificate, parentCert *tls.Certificate) (*tls.Certificate, error) { var err error var priv ed25519.PrivateKey var public ed25519.PublicKey public, priv, err = ed25519.GenerateKey(rand.Reader) if err != nil { return nil, err } var parent *x509.Certificate var parentPriv ed25519.PrivateKey if parentCert == nil { parent = template parentPriv = priv } else { parent = parentCert.Leaf parentPriv = parentCert.PrivateKey.(ed25519.PrivateKey) } var derBytes []byte derBytes, err = x509.CreateCertificate(rand.Reader, template, parent, public, parentPriv) if err != nil { return nil, err } finalCert, err := x509.ParseCertificate(derBytes) if err != nil { return nil, err } return &tls.Certificate{ Certificate: [][]byte{derBytes}, PrivateKey: priv, Leaf: finalCert, }, nil } // RotateRoot adds a new self-signed root certificate to the beginning of g.Roots, // and removes any expired certificates. func (g *GlobalHaState) RotateRoot(templateMaker func() (*x509.Certificate, error)) (err error) { // Generate an initial certificate root. var template *x509.Certificate template, err = templateMaker() if err != nil { return } var finalCert *tls.Certificate finalCert, err = makeCert(template, nil) if err != nil { return } res := Cert{Data: finalCert.Certificate, Key: finalCert.PrivateKey.(ed25519.PrivateKey), leaf: finalCert} if len(g.Roots) == 0 { g.Roots = []Cert{res} } else if g.Roots[len(g.Roots)-1].leaf.Leaf.NotAfter.After(time.Now()) { copy(g.Roots[1:], g.Roots) g.Roots[0] = res } else { g.Roots = append([]Cert{res}, g.Roots...) } return } // EndpointCert creates a short-lived per-node certificate that is signed by the most recent root certificate. func (c *CurrentHAState) EndpointCert(templateMaker func() (*x509.Certificate, error)) (*tls.Certificate, error) { tmpl, err := templateMaker() if err != nil { return nil, err } addr, _, err := net.SplitHostPort(c.ConsensusAddr) if err != nil { return nil, err } tmpl.IPAddresses = []net.IP{net.ParseIP(addr)} return makeCert(tmpl, c.Roots[0].TLS()) } // OurIp returns the IP address that should be set in an endpoint certificate for host validation. func (c *CurrentHAState) OurIp() (string, error) { if !c.Enabled { return "", errors.New("HA not enabled") } if c.ConsensusAddr != "" { return c.VirtAddr, nil } if c.LoadBalanced { return c.VirtAddr, nil } ip, _, err := net.ParseCIDR(c.VirtAddr) return ip.String(), err } // Validate validates CurrentHAState to make sure it is sane. func (cOpts *CurrentHAState) Validate() error { // Validate HA args. 
if !cOpts.Enabled { return nil } ourAddrs, err := net.InterfaceAddrs() if err != nil { return err } consensusAddr := "" consensusPort := "" if cOpts.ConsensusAddr != "" { consensusAddr, consensusPort, err = net.SplitHostPort(cOpts.ConsensusAddr) if err != nil { return err } cAddrOk := false if net.ParseIP(consensusAddr) == nil { return fmt.Errorf("Must specify an IP address for the consensus address") } for _, ourAddr := range ourAddrs { if ourAddr.(*net.IPNet).IP.String() == consensusAddr { cAddrOk = true break } } if !cAddrOk { return fmt.Errorf("Consensus address %s is not present on the system", consensusAddr) } portNo, _ := strconv.ParseInt(consensusPort, 10, 32) if portNo < 1 || portNo > 65535 { return fmt.Errorf("Consensus port %d is out of range", portNo) } } if cOpts.LoadBalanced { if cOpts.VirtAddr == "" { return fmt.Errorf("Error: HA must specify an address that external systems will see this system as") } if net.ParseIP(cOpts.VirtAddr) == nil { return fmt.Errorf("Error: Invalid HA address %s", cOpts.VirtAddr) } lbAddrOk := true for _, ourAddr := range ourAddrs { if ourAddr.String() == cOpts.VirtAddr
} if !lbAddrOk { return fmt.Errorf("Virt address %s is present on the system, not permitted when load balanced", cOpts.VirtAddr) } } else { if cOpts.VirtAddr == "" { return fmt.Errorf("Error: HA must specify a VIP in CIDR format that DRP will move around") } // In HA mode with a VIP, force everything to talk to the VIP address. ip, cidr, err := net.ParseCIDR(cOpts.VirtAddr) if err != nil { return fmt.Errorf("Error: HA IP address %s not valid: %v", cOpts.VirtAddr, err) } if consensusAddr != ""
{ lbAddrOk = false break }
conditional_block
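In the non-load-balanced branch above the VIP must be given in CIDR form; a quick sketch of how net.ParseCIDR yields both the address and its network:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	ip, cidr, err := net.ParseCIDR("192.168.1.10/24")
	if err != nil {
		panic(err)
	}
	fmt.Println(ip, cidr) // 192.168.1.10 192.168.1.0/24
}
```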
nbd.rs
( Code::Internal, "no such bdev exists".to_string(), )); } let nbd_disk = nbd_disk.unwrap(); if let Some(mount) = match_mount( Some(&nbd_disk.nbd_device), Some(&target_path), false, ) { if mount.source == nbd_disk.nbd_device && mount.dest == target_path { // the device is already mounted, we should return OK return ok((true, nbd_disk, target_path, uuid)); } else { // something is there already, return an error return err(Status::new( Code::AlreadyExists, "Some different BDEV on that path already".to_string(), )); } } ok((false, nbd_disk, target_path, uuid)) }) .and_then(move |mounted| { if !mounted.0 { Either::A( probed_format(&mounted.1.nbd_device, &filesystem.name) .then(move |format_result| { let mnt_result = if mount_fail || format_result.is_err() { if !mount_fail { Err(format_result.unwrap_err()) } else { debug!("Simulating mount failure"); Err("simulated".to_owned()) } } else { mount_fs( &mounted.1.nbd_device, &mounted.2, false, &filesystem.name, &mnt_opts, ) }; if let Err(reason) = mnt_result { Box::new(err(Status::new( Code::Internal, reason, ))) } else { info!( "staged {} on {}", &mounted.3, &mounted.2 ); Box::new(ok(Response::new( NodeStageVolumeResponse {}, ))) } }), ) } else { Either::B(Box::new(ok(Response::new( NodeStageVolumeResponse {}, )))) } }); Box::new(f) } pub fn create_blkdev( socket: String, msg: &CreateBlkdevRequest, ) -> Box<dyn Future<Item = Response<CreateBlkdevReply>, Error = Status> + Send> { trace!("{:?}", msg); debug!("Creating NBD device for {} ...", msg.uuid); let nbd_dev_info = NbdDevInfo::new(); let uuid = msg.uuid.clone(); // whatever instance we got assigned, it was in use, and is now removed // from the device list if nbd_dev_info.is_none() { return Box::new(err(Status::new( Code::Internal, String::from("EAGAIN"), ))); } let nbd_dev_info = nbd_dev_info.unwrap(); let f = get_nbd_instance(&socket, &uuid) // TODO: Avoid this step in future chain by returning eexist from // start-nbd-disk json-rpc method. .and_then(enclose! { (uuid) move |bdev| { if let Some(bdev) = bdev { return err(Status::new( Code::AlreadyExists, format!( "Bdev {} already published at {}", uuid, bdev.nbd_device ), )); } ok(()) }}) .map_err(|e| jsonrpc::error::Error::GenericError(e.to_string())) .and_then(enclose! { (uuid) move |_| { jsonrpc::call::<jsondata::StartNbdDiskArgs, String>( &socket, "start_nbd_disk", Some(jsondata::StartNbdDiskArgs { bdev_name: uuid, nbd_device: format!("{}", nbd_dev_info), }), ) }}) .and_then(move |nbd_device| { trace!("NBD device {} created", &nbd_device); device::await_size(&nbd_device).map_err(jsonrpc::error::Error::from) }) .and_then(move |size| { info!("Device {} reported size: {}", nbd_dev_info, size); let reply = CreateBlkdevReply { blk_dev: format!("{}", nbd_dev_info), }; ok(Response::new(reply)) }) .map_err(move |err| { error!( "Putting back nbd device {} due to error: {}", nbd_dev_info, err.to_string() ); nbd_dev_info.put_back(); err.into_status() }); Box::new(f) } pub fn destroy_blkdev( socket: String, msg: &DestroyBlkdevRequest, ) -> Box<dyn Future<Item = Response<Null>, Error = Status> + Send> { trace!("{:?}", msg); let uuid = msg.uuid.clone(); debug!("Deleting NBD device for {} ...", uuid); let f = get_nbd_instance(&socket, &uuid) // TODO: Avoid this step by returning enoent from stop-nbd-disk // json-rpc method. 
.and_then(move |nbd_disk| { if nbd_disk.is_none() { trace!("bdev {} not found", uuid); return err(Status::new( Code::Internal, format!("no such bdev {}", uuid), )); } let nbd_disk = nbd_disk.unwrap(); ok(nbd_disk) }) .and_then(move |nbd_disk| { trace!("Stopping NBD device {}", nbd_disk.nbd_device); jsonrpc::call::<jsondata::StopNbdDiskArgs, bool>( &socket, "stop_nbd_disk", Some(jsondata::StopNbdDiskArgs { nbd_device: nbd_disk.nbd_device.clone(), }), ) .map_err(|err| err.into_status()) .and_then(|done| { if done { info!( "Stopped NBD device {} with bdev {}", nbd_disk.nbd_device, nbd_disk.bdev_name ); NbdDevInfo::from(nbd_disk.nbd_device).put_back(); Box::new(ok(Response::new(Null {}))) } else { let msg = format!( "Failed to stop nbd device {} for {}", nbd_disk.nbd_device, nbd_disk.bdev_name ); error!("{}", msg); Box::new(err(Status::new(Code::Internal, msg))) } }) }); Box::new(f) } pub fn get_nbd_instance( sock: &str, bdev_name: &str, ) -> Box<dyn Future<Item = Option<jsondata::NbdDisk>, Error = Status> + Send> { let bdev_name = bdev_name.to_string(); let socket = sock.to_string(); let f = jsonrpc::call::<jsondata::GetBdevsArgs, Vec<jsondata::Bdev>>( &socket, "get_bdevs", Some(jsondata::GetBdevsArgs { name: bdev_name.clone(), }), ) .map_err(|e| { Status::new(Code::NotFound, format!("Failed to list bdevs: {}", e)) }) .and_then(move |bdev| { jsonrpc::call::<(), Vec<jsondata::NbdDisk>>( &socket, "get_nbd_disks", None, ) .map(move |nbd_disks| { nbd_disks .into_iter() .find(|ent| ent.bdev_name == bdev[0].name) }) .map_err(|err| { Status::new( Code::NotFound, format!("Failed to find nbd disk: {}", err), ) }) }); Box::new(f) } impl NbdDevInfo { /// This will return the next available nbd device pub fn new() -> Option<Self> { let instance = ARRAY.lock().unwrap().pop()?; trace!("Will use nbd slot {}", instance); NbdDevInfo::create(instance) } fn create(instance: u32) -> Option<Self> { let mut path = PathBuf::from(&format!("/sys/class/block/nbd{}", instance)); path.push("pid"); if path.exists() { trace!( "Dropping nbd instance: {} as it appears to be in use", instance ); return None; } path.pop(); let e = path .strip_prefix("/sys/class/block") .unwrap() .to_str() .unwrap() .split_at(3); let instance = e.1.parse().unwrap(); let dev_t: String = sysfs::parse_value(&path, "dev").unwrap(); let nums: Vec<u64> = dev_t.split(':').map(|x| x.parse().unwrap()).collect(); // Documentation/admin-guide/devices.txt if nums[0] != 43 { warn!("Invalid major number of nbd dev {}", path.display()); } let nbd = NbdDevInfo { instance, major: nums[0], minor: nums[1], }; assert_eq!(nbd.instance, instance); Some(nbd) } pub fn put_back(&self) { ARRAY.lock().unwrap().push(self.instance); trace!("instance {} added back to the free list", self.instance); } pub fn
num_devices
identifier_name
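NbdDevInfo::create in these rows treats an nbd slot as busy when /sys/class/block/nbdN/pid exists (the kernel creates that file once a client attaches). The same check expressed as a Go sketch:

```go
package main

import (
	"fmt"
	"os"
)

// nbdInUse reports whether the given nbd instance already has a client,
// mirroring the pid-file probe in NbdDevInfo::create above.
func nbdInUse(instance int) bool {
	_, err := os.Stat(fmt.Sprintf("/sys/class/block/nbd%d/pid", instance))
	return err == nil
}

func main() {
	fmt.Println(nbdInUse(0))
}
```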
nbd.rs
9, 10, 11, 12, 14, 15]); } #[derive(Clone, Copy)] pub struct NbdDevInfo { instance: u32, major: u64, minor: u64, } impl fmt::Display for NbdDevInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "/dev/nbd{}", self.instance) } } impl fmt::Debug for NbdDevInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "nbd{} ({}:{})", self.instance, self.major, self.minor) } } pub fn nbd_stage_volume( socket: String, msg: &NodeStageVolumeRequest, filesystem: Fs, mnt_opts: Vec<String>, ) -> Box< dyn Future<Item = Response<NodeStageVolumeResponse>, Error = Status> + Send, > { //let msg = request.into_inner(); let uuid = msg.volume_id.clone(); let target_path = msg.staging_target_path.to_string(); let mount_fail = msg.publish_context.contains_key("mount"); let f = get_nbd_instance(&socket.clone(), &uuid) .and_then(move |nbd_disk| { if nbd_disk.is_none() { // if we don't have an nbd device with a corresponding bdev, // it's an error, as it should exist error!("No device instance found for {}, likely a bug", &uuid); return err(Status::new( Code::Internal, "no such bdev exists".to_string(), )); } let nbd_disk = nbd_disk.unwrap(); if let Some(mount) = match_mount( Some(&nbd_disk.nbd_device), Some(&target_path), false, ) { if mount.source == nbd_disk.nbd_device && mount.dest == target_path { // the device is already mounted, we should return OK return ok((true, nbd_disk, target_path, uuid)); } else { // something is there already, return an error return err(Status::new( Code::AlreadyExists, "Some different BDEV on that path already".to_string(), )); } } ok((false, nbd_disk, target_path, uuid)) }) .and_then(move |mounted| { if !mounted.0 { Either::A( probed_format(&mounted.1.nbd_device, &filesystem.name) .then(move |format_result| { let mnt_result = if mount_fail || format_result.is_err() { if !mount_fail { Err(format_result.unwrap_err()) } else { debug!("Simulating mount failure"); Err("simulated".to_owned()) } } else { mount_fs( &mounted.1.nbd_device, &mounted.2, false, &filesystem.name, &mnt_opts, ) }; if let Err(reason) = mnt_result { Box::new(err(Status::new( Code::Internal, reason, ))) } else { info!( "staged {} on {}", &mounted.3, &mounted.2 ); Box::new(ok(Response::new( NodeStageVolumeResponse {}, ))) } }), ) } else { Either::B(Box::new(ok(Response::new( NodeStageVolumeResponse {}, )))) } }); Box::new(f) } pub fn create_blkdev( socket: String, msg: &CreateBlkdevRequest, ) -> Box<dyn Future<Item = Response<CreateBlkdevReply>, Error = Status> + Send> { trace!("{:?}", msg); debug!("Creating NBD device for {} ...", msg.uuid); let nbd_dev_info = NbdDevInfo::new(); let uuid = msg.uuid.clone(); // whatever instance we got assigned, it was in use, and is now removed // from the device list if nbd_dev_info.is_none() { return Box::new(err(Status::new( Code::Internal, String::from("EAGAIN"), ))); } let nbd_dev_info = nbd_dev_info.unwrap(); let f = get_nbd_instance(&socket, &uuid) // TODO: Avoid this step in future chain by returning eexist from // start-nbd-disk json-rpc method. .and_then(enclose! { (uuid) move |bdev| { if let Some(bdev) = bdev { return err(Status::new( Code::AlreadyExists, format!( "Bdev {} already published at {}", uuid, bdev.nbd_device ), )); } ok(()) }}) .map_err(|e| jsonrpc::error::Error::GenericError(e.to_string())) .and_then(enclose! 
{ (uuid) move |_| { jsonrpc::call::<jsondata::StartNbdDiskArgs, String>( &socket, "start_nbd_disk", Some(jsondata::StartNbdDiskArgs { bdev_name: uuid, nbd_device: format!("{}", nbd_dev_info), }), ) }}) .and_then(move |nbd_device| { trace!("NBD device {} created", &nbd_device); device::await_size(&nbd_device).map_err(jsonrpc::error::Error::from) }) .and_then(move |size| { info!("Device {} reported size: {}", nbd_dev_info, size); let reply = CreateBlkdevReply { blk_dev: format!("{}", nbd_dev_info), }; ok(Response::new(reply)) }) .map_err(move |err| { error!( "Putting back nbd device {} due to error: {}", nbd_dev_info, err.to_string() ); nbd_dev_info.put_back(); err.into_status() }); Box::new(f) } pub fn destroy_blkdev( socket: String, msg: &DestroyBlkdevRequest, ) -> Box<dyn Future<Item = Response<Null>, Error = Status> + Send> { trace!("{:?}", msg); let uuid = msg.uuid.clone(); debug!("Deleting NBD device for {} ...", uuid); let f = get_nbd_instance(&socket, &uuid) // TODO: Avoid this step by returning enoent from stop-nbd-disk // json-rpc method. .and_then(move |nbd_disk| { if nbd_disk.is_none() { trace!("bdev {} not found", uuid); return err(Status::new( Code::Internal, format!("no such bdev {}", uuid), )); } let nbd_disk = nbd_disk.unwrap(); ok(nbd_disk) }) .and_then(move |nbd_disk| { trace!("Stopping NBD device {}", nbd_disk.nbd_device); jsonrpc::call::<jsondata::StopNbdDiskArgs, bool>( &socket, "stop_nbd_disk", Some(jsondata::StopNbdDiskArgs { nbd_device: nbd_disk.nbd_device.clone(), }), ) .map_err(|err| err.into_status()) .and_then(|done| { if done
else { let msg = format!( "Failed to stop nbd device {} for {}", nbd_disk.nbd_device, nbd_disk.bdev_name ); error!("{}", msg); Box::new(err(Status::new(Code::Internal, msg))) } }) }); Box::new(f) } pub fn get_nbd_instance( sock: &str, bdev_name: &str, ) -> Box<dyn Future<Item = Option<jsondata::NbdDisk>, Error = Status> + Send> { let bdev_name = bdev_name.to_string(); let socket = sock.to_string(); let f = jsonrpc::call::<jsondata::GetBdevsArgs, Vec<jsondata::Bdev>>( &socket, "get_bdevs", Some(jsondata::GetBdevsArgs { name: bdev_name.clone(), }), ) .map_err(|e| { Status::new(Code::NotFound, format!("Failed to list bdevs: {}", e)) }) .and_then(move |bdev| { jsonrpc::call::<(), Vec<jsondata::NbdDisk>>( &socket, "get_nbd_disks", None, ) .map(move |nbd_disks| { nbd_disks .into_iter() .find(|ent| ent.bdev_name == bdev[0].name) }) .map_err(|err| { Status::new( Code::NotFound, format!("Failed to find nbd disk: {}", err), ) }) }); Box::new(f) } impl NbdDevInfo { /// This will return the next available nbd device
{ info!( "Stopped NBD device {} with bdev {}", nbd_disk.nbd_device, nbd_disk.bdev_name ); NbdDevInfo::from(nbd_disk.nbd_device).put_back(); Box::new(ok(Response::new(Null {}))) }
conditional_block
nbd.rs
9, 10, 11, 12, 14, 15]); } #[derive(Clone, Copy)] pub struct NbdDevInfo { instance: u32, major: u64, minor: u64, } impl fmt::Display for NbdDevInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
} impl fmt::Debug for NbdDevInfo { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "nbd{} ({}:{})", self.instance, self.major, self.minor) } } pub fn nbd_stage_volume( socket: String, msg: &NodeStageVolumeRequest, filesystem: Fs, mnt_opts: Vec<String>, ) -> Box< dyn Future<Item = Response<NodeStageVolumeResponse>, Error = Status> + Send, > { //let msg = request.into_inner(); let uuid = msg.volume_id.clone(); let target_path = msg.staging_target_path.to_string(); let mount_fail = msg.publish_context.contains_key("mount"); let f = get_nbd_instance(&socket.clone(), &uuid) .and_then(move |nbd_disk| { if nbd_disk.is_none() { // if we don't have an nbd device with a corresponding bdev, // it's an error, as it should exist error!("No device instance found for {}, likely a bug", &uuid); return err(Status::new( Code::Internal, "no such bdev exists".to_string(), )); } let nbd_disk = nbd_disk.unwrap(); if let Some(mount) = match_mount( Some(&nbd_disk.nbd_device), Some(&target_path), false, ) { if mount.source == nbd_disk.nbd_device && mount.dest == target_path { // the device is already mounted, we should return OK return ok((true, nbd_disk, target_path, uuid)); } else { // something is there already, return an error return err(Status::new( Code::AlreadyExists, "Some different BDEV on that path already".to_string(), )); } } ok((false, nbd_disk, target_path, uuid)) }) .and_then(move |mounted| { if !mounted.0 { Either::A( probed_format(&mounted.1.nbd_device, &filesystem.name) .then(move |format_result| { let mnt_result = if mount_fail || format_result.is_err() { if !mount_fail { Err(format_result.unwrap_err()) } else { debug!("Simulating mount failure"); Err("simulated".to_owned()) } } else { mount_fs( &mounted.1.nbd_device, &mounted.2, false, &filesystem.name, &mnt_opts, ) }; if let Err(reason) = mnt_result { Box::new(err(Status::new( Code::Internal, reason, ))) } else { info!( "staged {} on {}", &mounted.3, &mounted.2 ); Box::new(ok(Response::new( NodeStageVolumeResponse {}, ))) } }), ) } else { Either::B(Box::new(ok(Response::new( NodeStageVolumeResponse {}, )))) } }); Box::new(f) } pub fn create_blkdev( socket: String, msg: &CreateBlkdevRequest, ) -> Box<dyn Future<Item = Response<CreateBlkdevReply>, Error = Status> + Send> { trace!("{:?}", msg); debug!("Creating NBD device for {} ...", msg.uuid); let nbd_dev_info = NbdDevInfo::new(); let uuid = msg.uuid.clone(); // whatever instance we got assigned, it was in use, and is now removed // from the device list if nbd_dev_info.is_none() { return Box::new(err(Status::new( Code::Internal, String::from("EAGAIN"), ))); } let nbd_dev_info = nbd_dev_info.unwrap(); let f = get_nbd_instance(&socket, &uuid) // TODO: Avoid this step in future chain by returning eexist from // start-nbd-disk json-rpc method. .and_then(enclose! { (uuid) move |bdev| { if let Some(bdev) = bdev { return err(Status::new( Code::AlreadyExists, format!( "Bdev {} already published at {}", uuid, bdev.nbd_device ), )); } ok(()) }}) .map_err(|e| jsonrpc::error::Error::GenericError(e.to_string())) .and_then(enclose! 
{ (uuid) move |_| { jsonrpc::call::<jsondata::StartNbdDiskArgs, String>( &socket, "start_nbd_disk", Some(jsondata::StartNbdDiskArgs { bdev_name: uuid, nbd_device: format!("{}", nbd_dev_info), }), ) }}) .and_then(move |nbd_device| { trace!("NBD device {} created", &nbd_device); device::await_size(&nbd_device).map_err(jsonrpc::error::Error::from) }) .and_then(move |size| { info!("Device {} reported size: {}", nbd_dev_info, size); let reply = CreateBlkdevReply { blk_dev: format!("{}", nbd_dev_info), }; ok(Response::new(reply)) }) .map_err(move |err| { error!( "Putting back nbd device {} due to error: {}", nbd_dev_info, err.to_string() ); nbd_dev_info.put_back(); err.into_status() }); Box::new(f) } pub fn destroy_blkdev( socket: String, msg: &DestroyBlkdevRequest, ) -> Box<dyn Future<Item = Response<Null>, Error = Status> + Send> { trace!("{:?}", msg); let uuid = msg.uuid.clone(); debug!("Deleting NBD device for {} ...", uuid); let f = get_nbd_instance(&socket, &uuid) // TODO: Avoid this step by returning enoent from stop-nbd-disk // json-rpc method. .and_then(move |nbd_disk| { if nbd_disk.is_none() { trace!("bdev {} not found", uuid); return err(Status::new( Code::Internal, format!("no such bdev {}", uuid), )); } let nbd_disk = nbd_disk.unwrap(); ok(nbd_disk) }) .and_then(move |nbd_disk| { trace!("Stopping NBD device {}", nbd_disk.nbd_device); jsonrpc::call::<jsondata::StopNbdDiskArgs, bool>( &socket, "stop_nbd_disk", Some(jsondata::StopNbdDiskArgs { nbd_device: nbd_disk.nbd_device.clone(), }), ) .map_err(|err| err.into_status()) .and_then(|done| { if done { info!( "Stopped NBD device {} with bdev {}", nbd_disk.nbd_device, nbd_disk.bdev_name ); NbdDevInfo::from(nbd_disk.nbd_device).put_back(); Box::new(ok(Response::new(Null {}))) } else { let msg = format!( "Failed to stop nbd device {} for {}", nbd_disk.nbd_device, nbd_disk.bdev_name ); error!("{}", msg); Box::new(err(Status::new(Code::Internal, msg))) } }) }); Box::new(f) } pub fn get_nbd_instance( sock: &str, bdev_name: &str, ) -> Box<dyn Future<Item = Option<jsondata::NbdDisk>, Error = Status> + Send> { let bdev_name = bdev_name.to_string(); let socket = sock.to_string(); let f = jsonrpc::call::<jsondata::GetBdevsArgs, Vec<jsondata::Bdev>>( &socket, "get_bdevs", Some(jsondata::GetBdevsArgs { name: bdev_name.clone(), }), ) .map_err(|e| { Status::new(Code::NotFound, format!("Failed to list bdevs: {}", e)) }) .and_then(move |bdev| { jsonrpc::call::<(), Vec<jsondata::NbdDisk>>( &socket, "get_nbd_disks", None, ) .map(move |nbd_disks| { nbd_disks .into_iter() .find(|ent| ent.bdev_name == bdev[0].name) }) .map_err(|err| { Status::new( Code::NotFound, format!("Failed to find nbd disk: {}", err), ) }) }); Box::new(f) } impl NbdDevInfo { /// This will return the next available nbd
{ write!(f, "/dev/nbd{}", self.instance) }
identifier_body
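NbdDevInfo::new and put_back above hand nbd slot numbers out of, and back into, a mutex-guarded free list. A Go sketch of that pattern, with assumed type and method names:

```go
package main

import (
	"fmt"
	"sync"
)

// freeList is a mutex-guarded stack of instance numbers, standing in for
// the lazy_static ARRAY used by NbdDevInfo.
type freeList struct {
	mu    sync.Mutex
	slots []uint32
}

// pop hands out the next free slot, if any.
func (f *freeList) pop() (uint32, bool) {
	f.mu.Lock()
	defer f.mu.Unlock()
	if len(f.slots) == 0 {
		return 0, false
	}
	n := f.slots[len(f.slots)-1]
	f.slots = f.slots[:len(f.slots)-1]
	return n, true
}

// putBack returns a slot to the free list, as put_back does above.
func (f *freeList) putBack(n uint32) {
	f.mu.Lock()
	defer f.mu.Unlock()
	f.slots = append(f.slots, n)
}

func main() {
	fl := &freeList{slots: []uint32{0, 1, 2}}
	n, _ := fl.pop()
	fmt.Println(n) // 2
	fl.putBack(n)
}
```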
nbd.rs
} let nbd_disk = nbd_disk.unwrap(); if let Some(mount) = match_mount( Some(&nbd_disk.nbd_device), Some(&target_path), false, ) { if mount.source == nbd_disk.nbd_device && mount.dest == target_path { // the device is already mounted, we should return OK return ok((true, nbd_disk, target_path, uuid)); } else { // something is there already, return an error return err(Status::new( Code::AlreadyExists, "Some different BDEV on that path already".to_string(), )); } } ok((false, nbd_disk, target_path, uuid)) }) .and_then(move |mounted| { if !mounted.0 { Either::A( probed_format(&mounted.1.nbd_device, &filesystem.name) .then(move |format_result| { let mnt_result = if mount_fail || format_result.is_err() { if !mount_fail { Err(format_result.unwrap_err()) } else { debug!("Simulating mount failure"); Err("simulated".to_owned()) } } else { mount_fs( &mounted.1.nbd_device, &mounted.2, false, &filesystem.name, &mnt_opts, ) }; if let Err(reason) = mnt_result { Box::new(err(Status::new( Code::Internal, reason, ))) } else { info!( "staged {} on {}", &mounted.3, &mounted.2 ); Box::new(ok(Response::new( NodeStageVolumeResponse {}, ))) } }), ) } else { Either::B(Box::new(ok(Response::new( NodeStageVolumeResponse {}, )))) } }); Box::new(f) } pub fn create_blkdev( socket: String, msg: &CreateBlkdevRequest, ) -> Box<dyn Future<Item = Response<CreateBlkdevReply>, Error = Status> + Send> { trace!("{:?}", msg); debug!("Creating NBD device for {} ...", msg.uuid); let nbd_dev_info = NbdDevInfo::new(); let uuid = msg.uuid.clone(); // whatever instance we got assigned, it was in use, and is now removed // from the device list if nbd_dev_info.is_none() { return Box::new(err(Status::new( Code::Internal, String::from("EAGAIN"), ))); } let nbd_dev_info = nbd_dev_info.unwrap(); let f = get_nbd_instance(&socket, &uuid) // TODO: Avoid this step in future chain by returning eexist from // start-nbd-disk json-rpc method. .and_then(enclose! { (uuid) move |bdev| { if let Some(bdev) = bdev { return err(Status::new( Code::AlreadyExists, format!( "Bdev {} already published at {}", uuid, bdev.nbd_device ), )); } ok(()) }}) .map_err(|e| jsonrpc::error::Error::GenericError(e.to_string())) .and_then(enclose! { (uuid) move |_| { jsonrpc::call::<jsondata::StartNbdDiskArgs, String>( &socket, "start_nbd_disk", Some(jsondata::StartNbdDiskArgs { bdev_name: uuid, nbd_device: format!("{}", nbd_dev_info), }), ) }}) .and_then(move |nbd_device| { trace!("NBD device {} created", &nbd_device); device::await_size(&nbd_device).map_err(jsonrpc::error::Error::from) }) .and_then(move |size| { info!("Device {} reported size: {}", nbd_dev_info, size); let reply = CreateBlkdevReply { blk_dev: format!("{}", nbd_dev_info), }; ok(Response::new(reply)) }) .map_err(move |err| { error!( "Putting back nbd device {} due to error: {}", nbd_dev_info, err.to_string() ); nbd_dev_info.put_back(); err.into_status() }); Box::new(f) } pub fn destroy_blkdev( socket: String, msg: &DestroyBlkdevRequest, ) -> Box<dyn Future<Item = Response<Null>, Error = Status> + Send> { trace!("{:?}", msg); let uuid = msg.uuid.clone(); debug!("Deleting NBD device for {} ...", uuid); let f = get_nbd_instance(&socket, &uuid) // TODO: Avoid this step by returning enoent from stop-nbd-disk // json-rpc method. 
.and_then(move |nbd_disk| { if nbd_disk.is_none() { trace!("bdev {} not found", uuid); return err(Status::new( Code::Internal, format!("no such bdev {}", uuid), )); } let nbd_disk = nbd_disk.unwrap(); ok(nbd_disk) }) .and_then(move |nbd_disk| { trace!("Stopping NBD device {}", nbd_disk.nbd_device); jsonrpc::call::<jsondata::StopNbdDiskArgs, bool>( &socket, "stop_nbd_disk", Some(jsondata::StopNbdDiskArgs { nbd_device: nbd_disk.nbd_device.clone(), }), ) .map_err(|err| err.into_status()) .and_then(|done| { if done { info!( "Stopped NBD device {} with bdev {}", nbd_disk.nbd_device, nbd_disk.bdev_name ); NbdDevInfo::from(nbd_disk.nbd_device).put_back(); Box::new(ok(Response::new(Null {}))) } else { let msg = format!( "Failed to stop nbd device {} for {}", nbd_disk.nbd_device, nbd_disk.bdev_name ); error!("{}", msg); Box::new(err(Status::new(Code::Internal, msg))) } }) }); Box::new(f) } pub fn get_nbd_instance( sock: &str, bdev_name: &str, ) -> Box<dyn Future<Item = Option<jsondata::NbdDisk>, Error = Status> + Send> { let bdev_name = bdev_name.to_string(); let socket = sock.to_string(); let f = jsonrpc::call::<jsondata::GetBdevsArgs, Vec<jsondata::Bdev>>( &socket, "get_bdevs", Some(jsondata::GetBdevsArgs { name: bdev_name.clone(), }), ) .map_err(|e| { Status::new(Code::NotFound, format!("Failed to list bdevs: {}", e)) }) .and_then(move |bdev| { jsonrpc::call::<(), Vec<jsondata::NbdDisk>>( &socket, "get_nbd_disks", None, ) .map(move |nbd_disks| { nbd_disks .into_iter() .find(|ent| ent.bdev_name == bdev[0].name) }) .map_err(|err| { Status::new( Code::NotFound, format!("Failed to find nbd disk: {}", err), ) }) }); Box::new(f) } impl NbdDevInfo { /// This will return the next available nbd device pub fn new() -> Option<Self> { let instance = ARRAY.lock().unwrap().pop()?; trace!("Will use nbd slot {}", instance); NbdDevInfo::create(instance) } fn create(instance: u32) -> Option<Self> { let mut path = PathBuf::from(&format!("/sys/class/block/nbd{}", instance)); path.push("pid"); if path.exists() { trace!( "Dropping nbd instance: {} as it appears to be in use", instance ); return None; } path.pop(); let e = path .strip_prefix("/sys/class/block") .unwrap() .to_str() .unwrap() .split_at(3); let instance = e.1.parse().unwrap(); let dev_t: String = sysfs::parse_value(&path, "dev").unwrap(); let nums: Vec<u64> = dev_t.split(':').map(|x| x.parse().unwrap()).collect(); // Documentation/admin-guide/devices.txt if nums[0] != 43 { warn!("Invalid major number of nbd dev {}", path.display()); } let nbd = NbdDevInfo { instance, major: nums[0], minor: nums[1], }; assert_eq!(nbd.instance, instance); Some(nbd) } pub fn put_back(&self) { ARRAY.lock().unwrap().push(self.instance); trace!("instance {} added back to the free list", self.instance); } pub fn num_devices() -> usize { glob("/sys/class/block/nbd*").unwrap().count() }
random_line_split
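The nbd.rs record above allocates NBD slots by popping an instance number from a mutex-guarded free list (ARRAY.lock().unwrap().pop() in NbdDevInfo::new) and returns failed or released slots with put_back. A minimal Python sketch of the same free-list allocator pattern, assuming a fixed slot count (SlotAllocator and its names are illustrative, not part of the source):

import threading

class SlotAllocator:
    # Hands out integer slots from a free list, guarded by a lock like the
    # static ARRAY in the Rust above.
    def __init__(self, num_slots: int):
        self._lock = threading.Lock()
        self._free = list(range(num_slots - 1, -1, -1))  # pop() yields slot 0 first

    def acquire(self):
        with self._lock:
            return self._free.pop() if self._free else None  # None ~ the EAGAIN case

    def put_back(self, slot: int):
        with self._lock:
            self._free.append(slot)  # "instance {} added back to the free list"

alloc = SlotAllocator(16)
slot = alloc.acquire()
print(f"/dev/nbd{slot}")  # same shape as NbdDevInfo's Display output
alloc.put_back(slot)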
sidebar.js
[0]); Sidebar.shadowRoot.appendChild($sidebar[0]); }; Sidebar.initShadowDOM = function () { // Note: shadowDOM doesn't support adding <script> tag through innerHTML, but only through appendChild. // Moreover, unlike with stylesheets, shadowDOM doesn't act as a sandbox for Javascript code (use iframe for this). const injectDiv = document.createElement('div'); injectDiv.setAttribute("id", "injector"); const shadowRoot = injectDiv.attachShadow({ mode: 'open' }); shadowRoot.innerHTML = `\ <link rel="stylesheet" type="text/css" href="${Sidebar.getResource("libs/bootstrap-4.1.3/bootstrap.min.css")}"></link>\ <link rel="stylesheet" type="text/css" href="${Sidebar.getResource("libs/tippy-6.2.3/tippy.min.css")}"></link>\ <link rel="stylesheet" type="text/css" href="${Sidebar.getResource("libs/font-awesome-5.13.0/css/all.min.css")}"></link>\ <link rel="stylesheet" type="text/css" href="${Sidebar.getResource("css/sidebar.css")}"></link>\ `; document.body.appendChild(injectDiv); return shadowRoot; } Sidebar.getSidebarTogglerHTML = function () { var sidebarToggler = `<a id="sidebarToggler" title="toggle sidebar">\ <img id="sidebarTogglerImage" src="${Sidebar.getResource("images/svg/icon48.svg")}"></img>\ </a>`; return sidebarToggler; }; Sidebar.getSidebarHTML = function () { var buttons = [ { id: "copier", label: 'Copy to clipboard', classes: 'fas fa-copy' }, { id: "saver", label: 'Save', classes: 'fas fa-save' }, { id: "flipper", label: 'Change sidebar position', classes: 'fas fa-compass' }, ] var sidebar = `<div id="sidebar" class="collapsed">\ <div id="buttons" class="text-${Sidebar.config.direction == "ltr" ? "right" : "left"}">\ <div class="btn-group">\ ${buttons.map(b => `<a id="${b.id}" class="${b.classes}" title="${b.label}"></a>`).join('')} </div>\ </div>\ <div id="cards">\ </div> </div>`; return sidebar; }; Sidebar.getSidebarRowHTML = function (id) { var sidebarRow = `<div id=${id} class="row sidebar-row">\ <div class="col-sm-12 my-2">\ <div class="card card-body">\ <span class="sidebar-row-content"></span>\ <div class="text-${Sidebar.config.direction == "ltr" ? "right" : "left"}">\ <div class="btn-group">\ <i id="delete" class="fas fa-trash" title="delete"></i>\ </div>\ </div>\ </div>\ </div>\ </div>`; return sidebarRow; }; Sidebar.getResource = function (path) { if (Sidebar.isChromeExtension) { path = chrome.extension.getURL(path); } return path; } /* ------------------------------------- */ /* Initializing sidebar's elements */ /* ------------------------------------- */ Sidebar.init = function () { Sidebar.initToggler(); Sidebar.initButtons(); Sidebar.initCards(); Sidebar.setPosition(); } /* ------------------------------------- */ /* Sidebar toggler initialization */ /* ------------------------------------- */ Sidebar.initToggler = function () { Sidebar.shadowRoot.querySelector("#sidebarToggler").addEventListener("click", Sidebar.toggleSidebar); }; // 'toggle' toggles both the sidebar and the sidebar's toggle itself Sidebar.toggle = function () { Sidebar.toggleSidebarToggler(); Sidebar.hide(); } // 'toggleSidebarToggler' toggles the sidebar's toggle itself Sidebar.toggleSidebarToggler = function () { var $toggler = $(Sidebar.shadowRoot.querySelector("#sidebarToggler")); if (!$toggler.is(':visible')) { $toggler.show(); } else { $toggler.hide(); } } // 'toggleSidebar' toggles the sidebar Sidebar.toggleSidebar = function () { if (false) { } else if (Sidebar.show())
else if (Sidebar.hide()) { } } Sidebar.show = function ($sidebar) { var $sidebar = $(Sidebar.shadowRoot.querySelector('#sidebar')); var isShown = false; if ($sidebar.hasClass("collapsed")) { $sidebar.removeClass("collapsed"); isShown = true; } return isShown; } Sidebar.hide = function ($sidebar) { var $sidebar = $(Sidebar.shadowRoot.querySelector('#sidebar')); var isHidden = false; if (!$sidebar.hasClass("collapsed")) { $sidebar.addClass("collapsed"); isHidden = true; } return isHidden; } /* ------------------------------------- */ /* Sidebar buttons initialization */ /* ------------------------------------- */ Sidebar.initButtons = function () { $buttons = $(Sidebar.shadowRoot.querySelector('#buttons')); $buttons.find("#saver").on("click", function (e) { Sidebar.save(Sidebar.showTooltip(e.target)); }); $buttons.find("#copier").on("click", function (e) { Sidebar.copyToClipboard(Sidebar.showTooltip(e.target)); }); $buttons.find("#flipper").on("click", Sidebar.setPosition); }; Sidebar.save = function (callback) { if (Sidebar.isChromeExtension) { Storage.save(Storage.storageType.chrome, window.location.toString(), Sidebar.config.cssClass, callback); } }; Sidebar.copyToClipboard = function (callback) { var contents = $(Sidebar.shadowRoot.querySelector("#sidebar")) .find(".sidebar-row-content") .toArray() .map(s => $(s).text()) .join(" ") .replace(/\s\s+/g, " "); var $temp = $("<input>"); $("body").append($temp); $temp.val(contents).select(); document.execCommand("copy"); $temp.remove(); callback(); }; Sidebar.setPosition = function () { var $sidebar = Sidebar.shadowRoot.querySelector("#sidebar"); var $sidebarToggle = Sidebar.shadowRoot.querySelector("#sidebarToggler"); var currentDirection = Sidebar.config.direction; //window.getComputedStyle($sidebar).getPropertyValue('direction'); var fromTo = currentDirection === 'ltr' ? { currentPosition: 'left', toPosition: 'right', direction: 'rtl' } : { currentPosition: 'right', toPosition: 'left', direction: 'ltr' }; // 1.a set sidebar's direction property Sidebar.config.direction = fromTo.direction; // 1.a. flip sidebar's direction $sidebar.style.direction = fromTo.direction; // 1.b. flip sidebar's position $sidebar.style[fromTo.currentPosition] = 'unset', $sidebar.style[fromTo.toPosition] = '0px'; // 2.a. flip sidebarToggler position $sidebarToggle.style[fromTo.currentPosition] = 'unset', $sidebarToggle.style[fromTo.toPosition] = '5px', $sidebarToggle.style.float = fromTo.toPosition; // 2.b. flip sidebarToggler image orientation $sidebarToggle.style.transform = fromTo.direction === 'ltr' ? 'scaleX(-1)' : 'scaleX(1)'; // 3. flip Bootstrap's 'text-' element direction var elementsToFlip = Sidebar.shadowRoot.querySelectorAll('*[class^="text"]'); if (elementsToFlip) { elementsToFlip.forEach(element => { // note: buttons are opposite to $sidebar direction element.classList.replace( ['text-', fromTo.toPosition].join(''), ['text-', fromTo.currentPosition].join('') ); }) } }; Sidebar.showTooltip = function (element) { var tip = tippy(element, { content: 'done!', animation: 'fade', arrow: true, trigger: 'manual', position: 'bottom', zIndex: 2147483646, // the content is always in English // (note: this property was added by me to tippy.min.js. 
) direction: 'ltr' }) return function () { tip.show() setTimeout(function () { tip.hide(); tip.destroy(); }, 2000); } } /* ------------------------------------- */ /* Sidebar cards initialization */ /* ------------------------------------- */ Sidebar.initCards = function ($sidebar) { $cards = $(Sidebar.shadowRoot.querySelector('#cards')); $cards.sortable(); $cards.disableSelection(); }; /* --------------------------- */ /* Sidebar's rows functions */ /* --------------------------- */ Sidebar.addRow = function (highlightedText, id) { var $sidebar = Sidebar.shadowRoot.querySelector("#cards"); var $sidebarRow = $(Sidebar.getSidebarRowHTML(id)); Sidebar.initSidebarRow($sidebarRow, highlightedText); $sidebar.appendChild($sidebarRow[0]); }; Sidebar.initSidebarRow = function ($sidebarRow, highlightedText) { $sidebarRow.find(".sidebar-row-content").text(highlightedText); $sidebarRow.on('click', Sidebar.scrollIntoView); $sidebarRow.find('#delete').on('click', Sidebar.deleteRow); }; Sidebar.scrollIntoView = function (event) { var id = $(event.target).closest('.sidebar-row').prop('id'); var options = { behavior: "smooth", block: "center", inline: "nearest" };
{ }
conditional_block
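In this sidebar.js record the fim_type conditional_block marks a middle that is the body of a branch; the held-out block is literally the empty { } after else if (Sidebar.show()), since show() and hide() do their work through side effects and return values. A small sketch of bucketing rows of a dump like this by fim_type (treating the four labels seen in this section as the full label set is an assumption):

from collections import Counter

FIM_TYPES = {"identifier_body", "identifier_name", "conditional_block", "random_line_split"}

def count_types(rows):
    # Tally fim_type labels and flag anything outside the expected set.
    counts = Counter(r["fim_type"] for r in rows)
    unknown = set(counts) - FIM_TYPES
    if unknown:
        raise ValueError(f"unexpected fim_type values: {unknown}")
    return counts

rows = [
    {"fim_type": "identifier_body"},
    {"fim_type": "conditional_block"},
    {"fim_type": "conditional_block"},
]
print(count_types(rows))  # Counter({'conditional_block': 2, 'identifier_body': 1})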
sidebar.js
; return sidebarToggler; }; Sidebar.getSidebarHTML = function () { var buttons = [ { id: "copier", label: 'Copy to clipboard', classes: 'fas fa-copy' }, { id: "saver", label: 'Save', classes: 'fas fa-save' }, { id: "flipper", label: 'Change sidebar position', classes: 'fas fa-compass' }, ] var sidebar = `<div id="sidebar" class="collapsed">\ <div id="buttons" class="text-${Sidebar.config.direction == "ltr" ? "right" : "left"}">\ <div class="btn-group">\ ${buttons.map(b => `<a id="${b.id}" class="${b.classes}" title="${b.label}"></a>`).join('')} </div>\ </div>\ <div id="cards">\ </div> </div>`; return sidebar; }; Sidebar.getSidebarRowHTML = function (id) { var sidebarRow = `<div id=${id} class="row sidebar-row">\ <div class="col-sm-12 my-2">\ <div class="card card-body">\ <span class="sidebar-row-content"></span>\ <div class="text-${Sidebar.config.direction == "ltr" ? "right" : "left"}">\ <div class="btn-group">\ <i id="delete" class="fas fa-trash" title="delete"></i>\ </div>\ </div>\ </div>\ </div>\ </div>`; return sidebarRow; }; Sidebar.getResource = function (path) { if (Sidebar.isChromeExtension) { path = chrome.extension.getURL(path); } return path; } /* ------------------------------------- */ /* Initializing sidebar's elements */ /* ------------------------------------- */ Sidebar.init = function () { Sidebar.initToggler(); Sidebar.initButtons(); Sidebar.initCards(); Sidebar.setPosition(); } /* ------------------------------------- */ /* Sidebar toggler initialization */ /* ------------------------------------- */ Sidebar.initToggler = function () { Sidebar.shadowRoot.querySelector("#sidebarToggler").addEventListener("click", Sidebar.toggleSidebar); }; // 'toggle' toggles both the sidebar and the sidebar's toggle itself Sidebar.toggle = function () { Sidebar.toggleSidebarToggler(); Sidebar.hide(); } // 'toggleSidebarToggler' toggles the sidebar's toggle itself Sidebar.toggleSidebarToggler = function () { var $toggler = $(Sidebar.shadowRoot.querySelector("#sidebarToggler")); if (!$toggler.is(':visible')) { $toggler.show(); } else { $toggler.hide(); } } // 'toggleSidebar' toggles the sidebar Sidebar.toggleSidebar = function () { if (false) { } else if (Sidebar.show()) { } else if (Sidebar.hide()) { } } Sidebar.show = function ($sidebar) { var $sidebar = $(Sidebar.shadowRoot.querySelector('#sidebar')); var isShown = false; if ($sidebar.hasClass("collapsed")) { $sidebar.removeClass("collapsed"); isShown = true; } return isShown; } Sidebar.hide = function ($sidebar) { var $sidebar = $(Sidebar.shadowRoot.querySelector('#sidebar')); var isHidden = false; if (!$sidebar.hasClass("collapsed")) { $sidebar.addClass("collapsed"); isHidden = true; } return isHidden; } /* ------------------------------------- */ /* Sidebar buttons initialization */ /* ------------------------------------- */ Sidebar.initButtons = function () { $buttons = $(Sidebar.shadowRoot.querySelector('#buttons')); $buttons.find("#saver").on("click", function (e) { Sidebar.save(Sidebar.showTooltip(e.target)); }); $buttons.find("#copier").on("click", function (e) { Sidebar.copyToClipboard(Sidebar.showTooltip(e.target)); }); $buttons.find("#flipper").on("click", Sidebar.setPosition); }; Sidebar.save = function (callback) { if (Sidebar.isChromeExtension) { Storage.save(Storage.storageType.chrome, window.location.toString(), Sidebar.config.cssClass, callback); } }; Sidebar.copyToClipboard = function (callback) { var contents = $(Sidebar.shadowRoot.querySelector("#sidebar")) .find(".sidebar-row-content") .toArray() .map(s => 
$(s).text()) .join(" ") .replace(/\s\s+/g, " "); var $temp = $("<input>"); $("body").append($temp); $temp.val(contents).select(); document.execCommand("copy"); $temp.remove(); callback(); }; Sidebar.setPosition = function () { var $sidebar = Sidebar.shadowRoot.querySelector("#sidebar"); var $sidebarToggle = Sidebar.shadowRoot.querySelector("#sidebarToggler"); var currentDirection = Sidebar.config.direction; //window.getComputedStyle($sidebar).getPropertyValue('direction'); var fromTo = currentDirection === 'ltr' ? { currentPosition: 'left', toPosition: 'right', direction: 'rtl' } : { currentPosition: 'right', toPosition: 'left', direction: 'ltr' }; // 1.a set sidebar's direction property Sidebar.config.direction = fromTo.direction; // 1.a. flip sidebar's direction $sidebar.style.direction = fromTo.direction; // 1.b. flip sidebar's position $sidebar.style[fromTo.currentPosition] = 'unset', $sidebar.style[fromTo.toPosition] = '0px'; // 2.a. flip sidebarToggler position $sidebarToggle.style[fromTo.currentPosition] = 'unset', $sidebarToggle.style[fromTo.toPosition] = '5px', $sidebarToggle.style.float = fromTo.toPosition; // 2.b. flip sidebarToggler image orientation $sidebarToggle.style.transform = fromTo.direction === 'ltr' ? 'scaleX(-1)' : 'scaleX(1)'; // 3. flip Bootstrap's 'text-' element direction var elementsToFlip = Sidebar.shadowRoot.querySelectorAll('*[class^="text"]'); if (elementsToFlip) { elementsToFlip.forEach(element => { // note: buttons are opposite to $sidebar direction element.classList.replace( ['text-', fromTo.toPosition].join(''), ['text-', fromTo.currentPosition].join('') ); }) } }; Sidebar.showTooltip = function (element) { var tip = tippy(element, { content: 'done!', animation: 'fade', arrow: true, trigger: 'manual', position: 'bottom', zIndex: 2147483646, // the content is always in English // (note: this property was added by me to tippy.min.js. ) direction: 'ltr' }) return function () { tip.show() setTimeout(function () { tip.hide(); tip.destroy(); }, 2000); } } /* ------------------------------------- */ /* Sidebar cards initialization */ /* ------------------------------------- */ Sidebar.initCards = function ($sidebar) { $cards = $(Sidebar.shadowRoot.querySelector('#cards')); $cards.sortable(); $cards.disableSelection(); }; /* --------------------------- */ /* Sidebar's rows functions */ /* --------------------------- */ Sidebar.addRow = function (highlightedText, id) { var $sidebar = Sidebar.shadowRoot.querySelector("#cards"); var $sidebarRow = $(Sidebar.getSidebarRowHTML(id)); Sidebar.initSidebarRow($sidebarRow, highlightedText); $sidebar.appendChild($sidebarRow[0]); }; Sidebar.initSidebarRow = function ($sidebarRow, highlightedText) { $sidebarRow.find(".sidebar-row-content").text(highlightedText); $sidebarRow.on('click', Sidebar.scrollIntoView); $sidebarRow.find('#delete').on('click', Sidebar.deleteRow); }; Sidebar.scrollIntoView = function (event) { var id = $(event.target).closest('.sidebar-row').prop('id'); var options = { behavior: "smooth", block: "center", inline: "nearest" }; $(`[id^='${id}']`)[0].scrollIntoView(options); }; Sidebar.deleteRow = function (event) { var id = $(event.target).closest('.sidebar-row').prop('id'); // 1. remove card var element = Sidebar.shadowRoot.querySelector(['#', id].join('')); element.parentNode.removeChild(element); // 2. 
remove highlights Highlighter.removeHighlightsById(id); event.stopPropagation(); }; /* ---------------------------- */ /* Highlighter initialization */ /* ---------------------------- */ Sidebar.initHighlighter = function () { $(document.body).mouseup(function (e) { // note: if you're debugging, make sure you don't press the debugger's '>' button ('resume script execution') // that appears on top of the viewport, as this will count as another 'mouseup' event and will cause strange results. var isCtrlPressed = e.ctrlKey; if (!Sidebar.config.useCtrlKey || isCtrlPressed) { var selection = Highlighter.getSelection(); if (!Sidebar.isInside(selection) && selection.toString() != "") { var rangesData = Highlighter.highlight({ selection: selection }, Sidebar.config.cssClass); // rangesData.ranges.length would be 0 e.g. if a selection was made inside an <input> if (rangesData.ranges.length) { Sidebar.addRow(rangesData.ranges[0].text(), rangesData.id); } } } }); }; Sidebar.isInside = function (selection) { var isInside = false;
random_line_split
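Sidebar.copyToClipboard in the record above gathers the text of every .sidebar-row-content node, joins the pieces with spaces, and collapses whitespace runs with replace(/\s\s+/g, " ") before copying. The same normalization step in Python, for reference (join_rows is an illustrative name; re.sub is the direct analogue of the regex replace):

import re

def join_rows(texts):
    # Join highlight texts, then collapse any run of 2+ whitespace characters,
    # mirroring the /\s\s+/g replace in the sidebar code.
    return re.sub(r"\s\s+", " ", " ".join(texts))

print(join_rows(["first  highlight", "second   highlight"]))
# -> 'first highlight second highlight'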
basic_statements.py
StatementNoArgs, ParsedStatementDef, ParsedStatementPrint from basic_parsing import ParsedStatementGo, ParsedStatementDim from basic_parsing import ParsedStatementInput, ParsedStatementNext from basic_lexer import get_lexer from basic_types import NUMBERS, LETTERS from basic_expressions import Expression from basic_utils import smart_split def stmt_rem(executor, stmt): """ Does nothing. :return: """ return None def stmt_print(executor, stmt:ParsedStatementPrint): """ Prints output. :param executor: The program execution environment. Contains variables in its SymbolTable :param stmt: This print statement, contains parameters to the PRINT command. :return: None """ for i, arg in enumerate(stmt._outputs): if arg[0] == '"': # quoted string output = arg[1:-1] #output.replace(" ", "*") # TODO delete this line. executor.do_print(output, end='') else: # Expression v = eval_expression(executor._symbols, arg) #v = executor.get_symbol(arg) if type(v) == float: executor.do_print(F" {v:g} ", end='') # I'm trying to figure out BASIC's rules for spacing. # NO spaces is wrong (see initial print out) # Spaces around everything is wrong. # Spaces around numbers but not strings seems to work, so far. else: executor.do_print(F"{v}", end='') if not stmt._no_cr: executor.do_print("") return None def stmt_goto(executor, stmt: ParsedStatementGo): destination = stmt.destination executor.goto_line(int(destination)) return None def stmt_gosub(executor, stmt: ParsedStatementGo): destination = stmt.destination assert_syntax(str.isdigit(destination), F"Gosub target is not an int ") executor.gosub(int(destination)) return None def stmt_error(executor, stmt:ParsedStatement): raise Exception("THIS EXCEPTION IS EXPECTED. It is for testing.") def stmt_for(executor, stmt: ParsedStatementFor): var = stmt._index_clause start = stmt._start_clause start = eval_expression(executor, start) is_valid_identifier(var) executor.put_symbol(var, start, SymbolType.VARIABLE, None) executor.do_for(var, start, stmt._to_clause, stmt._step_clause, executor.get_next_stmt()) def stmt_next(executor, stmt:ParsedStatementNext): index = stmt.loop_var var, to_clause, step_clause, loop_top = executor.do_next_peek(index) value = executor.get_symbol(var) to_value = eval_expression(executor._symbols, to_clause) step_value = eval_expression(executor._symbols, step_clause) value = value + step_value executor.put_symbol(var, value, SymbolType.VARIABLE, None) if value <= to_value: executor._goto_location(loop_top) else: executor.do_next_pop(var) def is_string_variable(variable:str): return variable.endswith("$") def assign_variable(executor, variable, value): """ Variable assignment can include assigning array elements. :param variable: :param value: :return: """ variable = variable.replace(" ", "") # TODO Should move parsing of this to ParsedStatementLet. # TODO Need to handle N-dimensional array element assignment. 
i = variable.find("(") if i != -1: # Array reference j = variable.find(")", i+1) if j == -1: raise BasicSyntaxError(F"Missing ) in array assignment to {variable}") if i+1 == j: raise BasicSyntaxError(F"Missing array subscript in assignment to {variable}") subscripts = variable[i+1:j].split(",") variable = variable[:i] is_valid_identifier(variable) subscripts = [int(eval_expression(executor._symbols, subscript)) - 1 for subscript in subscripts] executor.put_symbol_element(variable, value, subscripts) else: is_valid_identifier(variable) executor.put_symbol(variable, value, symbol_type=SymbolType.VARIABLE, arg=None) def eval_expression(symbols, value): lexer = get_lexer() tokens = lexer.lex(value) e = Expression() result = e.eval(tokens, symbols=symbols) return result def stmt_let(executor, stmt:ParsedStatementLet): result = stmt._expression.eval(stmt._tokens, symbols=executor._symbols) assign_variable(executor, stmt._variable, result) def stmt_clear(executor, stmt): # Clear statement removes all variables. executor.init_symbols() def init_array(dimensions:list): if len(dimensions) == 1: return [0] * dimensions[0] one = [] for x in range(dimensions[0]): one.append(init_array(dimensions[1:])) return one def stmt_dim(executor, stmt:ParsedStatementDim): """ Declares an array. Initializes it to zeros. TODO Handle more than two dimensions. :param executor: :param stmt: :return: """ for name, value in stmt._dimensions: initializer = init_array(value) executor.put_symbol(name, initializer, SymbolType.ARRAY, arg=None) # Not right, but for now. def stmt_if(executor, stmt): """ An if statement works by skipping to the next line, if the THEN clause is false, otherwise it continues to execute the clauses after the THEN. :param executor: :param stmt: :return: None """ e = Expression() result = e.eval(stmt._tokens, symbols=executor._symbols) if not result: executor.goto_next_line() def stmt_input(executor, stmt): for var in stmt._input_vars: is_valid_identifier(var) prompt = stmt._prompt # Not sure if this can be an expression. None are used in my examples, but why not? if prompt: # TODO If we add semicolon as an op that behaves like comma, multi-element prompts should work. prompt = eval_expression(executor._symbols, prompt) while True:
executor.put_symbol(var, value, SymbolType.VARIABLE, None) else: break # Break the while, if we did NOT get an invalid number (break from for) def stmt_on(executor, stmt): var = stmt._expression op = stmt._op result = eval_expression(executor._symbols, var) assert_syntax(type(result) == int or type(result) == float, "Expression not numeric in ON GOTO/GOSUB") result = int(result) - 1 # Basic is 1-based. # According to this: https://hwiegman.home.xs4all.nl/gw-man/ONGOSUB.html # on gosub does NOT generate an error if the value is out of range, # It just goes on to the next line. #assert_syntax(result < len(stmt._target_lines), "No target for value of {result} in ON GOTO/GOSUB") if result >= len(stmt._target_lines): # No line matching the index, just go on. return if op == "GOTO": executor.goto_line(stmt._target_lines[result]) elif op == "GOSUB": executor.gosub(stmt._target_lines[result]) else: assert_syntax(False, "Bad format for ON statement.") def stmt_end(executor, stmt): print("Ending program") executor._run = RunStatus.END_CMD def stmt_def(executor, stmt:ParsedStatementDef): """ Define a user-defined function. 470 DEF FND(D)=SQR((K(I,1)-S1)^2+(K(I,2)-S2)^2) :param executor: :param stmt: :return: """ executor.put_symbol(stmt._variable, stmt._tokens, SymbolType.FUNCTION, stmt._function_arg) def stmt_return(executor, stmt): executor.do_return() def stmt_width(executor, stmt): """ The WIDTH statement is only for compatibility with some versions of BASIC. It sets the width of the screen. Ignored. :param executor: :param stmt: :return: """ pass class KB: def __init__(self, exec, parser_class=ParsedStatement): self._parser = parser_class self._exec = exec def get_parser_class(self): return self._parser def get_exec(self): return self._exec class Keywords(Enum): CLEAR = KB(stmt_clear, ParsedStatement) # Some uses of clear take arguments, which we ignore. DEF = KB(stmt_def, Parsed
executor.do_print(prompt, end='') result = executor.do_input() if result is None: print("Bad response from trekbot") result = result.split(",") if len(result) != len(stmt._input_vars): print(F"Mismatched number of inputs. Expected {len(stmt._input_vars)} got {len(result)}. Try Again.") continue for value, var in zip(result, stmt._input_vars): ok = False if not is_string_variable(var): try: value = float(value) except Exception as e: print("Invalid number. Try again.") break else: if UPPERCASE_INPUT: value = value.upper()
conditional_block
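The held-out middle in this record (the conditional_block just above the fim_type tag) is the body of stmt_input's retry loop: print the prompt, read a reply, split it on commas, re-prompt when the count does not match, and convert each value to float unless the variable is a string ($-suffixed). A self-contained sketch of that loop with the executor abstracted away (read_inputs is hypothetical; input/print stand in for do_input/do_print, and the upper-casing assumes UPPERCASE_INPUT is on):

def read_inputs(var_names, prompt="? "):
    # Retry until exactly one parsable value arrives per variable,
    # following the stmt_input logic in the record above.
    while True:
        parts = input(prompt).split(",")
        if len(parts) != len(var_names):
            print(f"Mismatched number of inputs. Expected {len(var_names)} got {len(parts)}. Try Again.")
            continue
        values = []
        for raw, name in zip(parts, var_names):
            if name.endswith("$"):           # string variable
                values.append(raw.upper())   # UPPERCASE_INPUT behavior assumed
            else:
                try:
                    values.append(float(raw))
                except ValueError:
                    print("Invalid number. Try again.")
                    break
        else:
            return dict(zip(var_names, values))  # no break: every value parsed

# Typing '3,hello' at the prompt for read_inputs(["A", "B$"]) yields
# {'A': 3.0, 'B$': 'HELLO'}.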
basic_statements.py
StatementNoArgs, ParsedStatementDef, ParsedStatementPrint from basic_parsing import ParsedStatementGo, ParsedStatementDim from basic_parsing import ParsedStatementInput, ParsedStatementNext from basic_lexer import get_lexer from basic_types import NUMBERS, LETTERS from basic_expressions import Expression from basic_utils import smart_split def stmt_rem(executor, stmt): """ Does nothing. :return: """ return None def stmt_print(executor, stmt:ParsedStatementPrint): """ Prints output. :param executor: The program execution environment. Contains variables in its SymbolTable :param stmt: This print statement, contains parameters to the PRINT command. :return: None """ for i, arg in enumerate(stmt._outputs): if arg[0] == '"': # quoted string output = arg[1:-1] #output.replace(" ", "*") # TODO delete this line. executor.do_print(output, end='') else: # Expression v = eval_expression(executor._symbols, arg) #v = executor.get_symbol(arg) if type(v) == float: executor.do_print(F" {v:g} ", end='') # I'm trying to figure out BASIC's rules for spacing. # NO spaces is wrong (see initial print out) # Spaces around everything is wrong. # Spaces around numbers but not strings seems to work, so far. else: executor.do_print(F"{v}", end='') if not stmt._no_cr: executor.do_print("") return None def stmt_goto(executor, stmt: ParsedStatementGo): destination = stmt.destination executor.goto_line(int(destination)) return None def stmt_gosub(executor, stmt: ParsedStatementGo): destination = stmt.destination assert_syntax(str.isdigit(destination), F"Gosub target is not an int ") executor.gosub(int(destination)) return None def stmt_error(executor, stmt:ParsedStatement): raise Exception("THIS EXCEPTION IS EXPECTED. It is for testing.") def stmt_for(executor, stmt: ParsedStatementFor): var = stmt._index_clause start = stmt._start_clause start = eval_expression(executor, start) is_valid_identifier(var) executor.put_symbol(var, start, SymbolType.VARIABLE, None) executor.do_for(var, start, stmt._to_clause, stmt._step_clause, executor.get_next_stmt()) def stmt_next(executor, stmt:ParsedStatementNext): index = stmt.loop_var var, to_clause, step_clause, loop_top = executor.do_next_peek(index) value = executor.get_symbol(var) to_value = eval_expression(executor._symbols, to_clause) step_value = eval_expression(executor._symbols, step_clause) value = value + step_value executor.put_symbol(var, value, SymbolType.VARIABLE, None) if value <= to_value: executor._goto_location(loop_top) else: executor.do_next_pop(var) def is_string_variable(variable:str): return variable.endswith("$") def assign_variable(executor, variable, value): """ Variable assignment can include assigning array elements. :param variable: :param value: :return: """ variable = variable.replace(" ", "") # TODO Should move parsing of this to ParsedStatementLet. # TODO Need to handle N-dimensional array element assignment. 
i = variable.find("(") if i != -1: # Array reference j = variable.find(")", i+1) if j == -1: raise BasicSyntaxError(F"Missing ) in in array assignment to {variable}") if i+1 == j: raise BasicSyntaxError(F"Missing array subscript in assignment to {variable}") subscripts = variable[i+1:j].split(",") variable = variable[:i] is_valid_identifier(variable) subscripts = [int(eval_expression(executor._symbols, subscript)) - 1 for subscript in subscripts] executor.put_symbol_element(variable, value, subscripts) else: is_valid_identifier(variable) executor.put_symbol(variable, value, symbol_type=SymbolType.VARIABLE, arg=None) def eval_expression(symbols, value): lexer = get_lexer() tokens = lexer.lex(value) e = Expression() result = e.eval(tokens, symbols=symbols) return result def stmt_let(executor, stmt:ParsedStatementLet): result = stmt._expression.eval(stmt._tokens, symbols=executor._symbols) assign_variable(executor, stmt._variable, result) def stmt_clear(executor, stmt): # Clear statement removes all variables. executor.init_symbols() def init_array(dimensions:list): if len(dimensions) == 1: return [0] * dimensions[0] one = [] for x in range(dimensions[0]): one.append(init_array(dimensions[1:])) return one def stmt_dim(executor, stmt:ParsedStatementDim): """ Declares an array. Initializes it to zeros. TODO Handle more than two dimensions. :param executor: :param stmt: :return: """ for name, value in stmt._dimensions: initializer = init_array(value) executor.put_symbol(name, initializer, SymbolType.ARRAY, arg=None) # Not right, but for now. def stmt_if(executor, stmt): """ An if statement works by skipping to the next line, if the THEN clause is false, otherwise it continues to execute the clauses after the THEN. :param executor: :param stmt: :return: None """ e = Expression() result = e.eval(stmt._tokens, symbols=executor._symbols) if not result: executor.goto_next_line() def stmt_input(executor, stmt): for var in stmt._input_vars: is_valid_identifier(var) prompt = stmt._prompt # Not sure if this can be an expression. None are used in my examples, but why not? if prompt: # TODO If we add semicolon an an op that behaves like comma, multi-element prompts should work. prompt = eval_expression(executor._symbols, prompt) while True: executor.do_print(prompt, end='') result = executor.do_input() if result is None: print("Bad response from trekbot") result = result.split(",") if len(result) != len(stmt._input_vars): print(F"Mismatched number of inputs. Expected {len(stmt._input_vars)} got {len(result)}. Try Again.") continue for value, var in zip(result, stmt._input_vars): ok = False if not is_string_variable(var): try: value = float(value) except Exception as e: print("Invalid number. Try again.") break else: if UPPERCASE_INPUT: value = value.upper() executor.put_symbol(var, value, SymbolType.VARIABLE, None) else: break # Break the while, if we did NOT get an invalid number (break from for) def stmt_on(executor, stmt): var = stmt._expression op = stmt._op result = eval_expression(executor._symbols, var) assert_syntax(type(result) == int or type(result) == float, "Expression not numeric in ON GOTO/GOSUB") result = int(result) - 1 # Basic is 1-based. # According to this: https://hwiegman.home.xs4all.nl/gw-man/ONGOSUB.html # on gosub does NOT generate an error in the value is out of range, # It just goes on to the next line. #assert_syntax(result < len(stmt._target_lines), "No target for value of {result} in ON GOTO/GOSUB") if result >= len(stmt._target_lines): # No line matching the index, just go on. 
return if op == "GOTO": executor.goto_line(stmt._target_lines[result]) elif op == "GOSUB": executor.gosub(stmt._target_lines[result]) else: assert_syntax(False, "Bad format for ON statement.") def stmt_end(executor, stmt): print("Ending program") executor._run = RunStatus.END_CMD def stmt_def(executor, stmt:ParsedStatementDef): """ Define a user-defined function. 470 DEF FND(D)=SQR((K(I,1)-S1)^2+(K(I,2)-S2)^2) :param executor: :param stmt: :return: """ executor.put_symbol(stmt._variable, stmt._tokens, SymbolType.FUNCTION, stmt._function_arg) def stmt_return(executor, stmt): executor.do_return() def stmt_width(executor, stmt): """ The WIDTH statement is only for compatibility with some versions of BASIC. It sets the width of the screen. Ignored. :param executor: :param stmt: :return: """ pass class KB: def __init__(self, exec, parser_class=ParsedStatement): self._parser = parser_class self._exec = exec def get_parser_class(self):
def get_exec(self): return self._exec class Keywords(Enum): CLEAR = KB(stmt_clear, ParsedStatement) # Some uses of clear take arguments, which we ignore. DEF = KB(stmt_def, ParsedStatement
return self._parser
identifier_body
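The identifier_body held out here is just return self._parser: KB is a small binding object that pairs each BASIC keyword with its executor function and parser class, and the Keywords enum is the dispatch table built from it. A toy version of the pattern (exec_end and exec_rem are stand-ins for the stmt_* handlers above, and the parser class is omitted):

from enum import Enum

def exec_end(executor, stmt):
    print("Ending program")

def exec_rem(executor, stmt):
    pass  # REM does nothing

class KB:
    # Binds a keyword to its executor and parser class, as in the record above.
    def __init__(self, exec_fn, parser_class=None):
        self._parser = parser_class
        self._exec = exec_fn

    def get_parser_class(self):
        return self._parser  # the one-line body held out in this sample

    def get_exec(self):
        return self._exec

class Keywords(Enum):
    END = KB(exec_end)
    REM = KB(exec_rem)

Keywords.END.value.get_exec()(None, None)  # dispatch: prints "Ending program"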
basic_statements.py
from basic_parsing import ParsedStatement, ParsedStatementIf, ParsedStatementFor, ParsedStatementOnGoto from basic_parsing import ParsedStatementLet, ParsedStatementNoArgs, ParsedStatementDef, ParsedStatementPrint from basic_parsing import ParsedStatementGo, ParsedStatementDim from basic_parsing import ParsedStatementInput, ParsedStatementNext from basic_lexer import get_lexer from basic_types import NUMBERS, LETTERS from basic_expressions import Expression from basic_utils import smart_split def stmt_rem(executor, stmt): """ Does nothing. :return: """ return None def stmt_print(executor, stmt:ParsedStatementPrint): """ Prints output. :param executor: The program execution environment. Contains variables in its SymbolTable :param stmt: This print statement, contains parameters to the PRINT command. :return: None """ for i, arg in enumerate(stmt._outputs): if arg[0] == '"': # quoted string output = arg[1:-1] #output.replace(" ", "*") # TODO delete this line. executor.do_print(output, end='') else: # Expression v = eval_expression(executor._symbols, arg) #v = executor.get_symbol(arg) if type(v) == float: executor.do_print(F" {v:g} ", end='') # I'm trying to figure out BASIC's rules for spacing. # NO spaces is wrong (see initial print out) # Spaces around everything is wrong. # Spaces around numbers but not strings seems to work, so far. else: executor.do_print(F"{v}", end='') if not stmt._no_cr: executor.do_print("") return None def stmt_goto(executor, stmt: ParsedStatementGo): destination = stmt.destination executor.goto_line(int(destination)) return None def stmt_gosub(executor, stmt: ParsedStatementGo): destination = stmt.destination assert_syntax(str.isdigit(destination), F"Gosub target is not an int ") executor.gosub(int(destination)) return None def stmt_error(executor, stmt:ParsedStatement): raise Exception("THIS EXCEPTION IS EXPECTED. It is for testing.") def stmt_for(executor, stmt: ParsedStatementFor): var = stmt._index_clause start = stmt._start_clause start = eval_expression(executor, start) is_valid_identifier(var) executor.put_symbol(var, start, SymbolType.VARIABLE, None) executor.do_for(var, start, stmt._to_clause, stmt._step_clause, executor.get_next_stmt()) def stmt_next(executor, stmt:ParsedStatementNext): index = stmt.loop_var var, to_clause, step_clause, loop_top = executor.do_next_peek(index) value = executor.get_symbol(var) to_value = eval_expression(executor._symbols, to_clause) step_value = eval_expression(executor._symbols, step_clause) value = value + step_value executor.put_symbol(var, value, SymbolType.VARIABLE, None) if value <= to_value: executor._goto_location(loop_top) else: executor.do_next_pop(var) def is_string_variable(variable:str): return variable.endswith("$") def assign_variable(executor, variable, value): """ Variable assignment can include assigning array elements. :param variable: :param value: :return: """ variable = variable.replace(" ", "") # TODO Should move parsing of this to ParsedStatementLet. # TODO Need to handle N-dimensional array element assignment. 
i = variable.find("(") if i != -1: # Array reference j = variable.find(")", i+1) if j == -1: raise BasicSyntaxError(F"Missing ) in in array assignment to {variable}") if i+1 == j: raise BasicSyntaxError(F"Missing array subscript in assignment to {variable}") subscripts = variable[i+1:j].split(",") variable = variable[:i] is_valid_identifier(variable) subscripts = [int(eval_expression(executor._symbols, subscript)) - 1 for subscript in subscripts] executor.put_symbol_element(variable, value, subscripts) else: is_valid_identifier(variable) executor.put_symbol(variable, value, symbol_type=SymbolType.VARIABLE, arg=None) def eval_expression(symbols, value): lexer = get_lexer() tokens = lexer.lex(value) e = Expression() result = e.eval(tokens, symbols=symbols) return result def stmt_let(executor, stmt:ParsedStatementLet): result = stmt._expression.eval(stmt._tokens, symbols=executor._symbols) assign_variable(executor, stmt._variable, result) def stmt_clear(executor, stmt): # Clear statement removes all variables. executor.init_symbols() def init_array(dimensions:list): if len(dimensions) == 1: return [0] * dimensions[0] one = [] for x in range(dimensions[0]): one.append(init_array(dimensions[1:])) return one def stmt_dim(executor, stmt:ParsedStatementDim): """ Declares an array. Initializes it to zeros. TODO Handle more than two dimensions. :param executor: :param stmt: :return: """ for name, value in stmt._dimensions: initializer = init_array(value) executor.put_symbol(name, initializer, SymbolType.ARRAY, arg=None) # Not right, but for now. def stmt_if(executor, stmt): """ An if statement works by skipping to the next line, if the THEN clause is false, otherwise it continues to execute the clauses after the THEN. :param executor: :param stmt: :return: None """ e = Expression() result = e.eval(stmt._tokens, symbols=executor._symbols) if not result: executor.goto_next_line() def stmt_input(executor, stmt): for var in stmt._input_vars: is_valid_identifier(var) prompt = stmt._prompt # Not sure if this can be an expression. None are used in my examples, but why not? if prompt: # TODO If we add semicolon an an op that behaves like comma, multi-element prompts should work. prompt = eval_expression(executor._symbols, prompt) while True: executor.do_print(prompt, end='') result = executor.do_input() if result is None: print("Bad response from trekbot") result = result.split(",") if len(result) != len(stmt._input_vars): print(F"Mismatched number of inputs. Expected {len(stmt._input_vars)} got {len(result)}. Try Again.") continue for value, var in zip(result, stmt._input_vars): ok = False if not is_string_variable(var): try: value = float(value) except Exception as e: print("Invalid number. Try again.") break else: if UPPERCASE_INPUT: value = value.upper() executor.put_symbol(var, value, SymbolType.VARIABLE, None) else: break # Break the while, if we did NOT get an invalid number (break from for) def stmt_on(executor, stmt): var = stmt._expression op = stmt._op result = eval_expression(executor._symbols, var) assert_syntax(type(result) == int or type(result) == float, "Expression not numeric in ON GOTO/GOSUB") result = int(result) - 1 # Basic is 1-based. # According to this: https://hwiegman.home.xs4all.nl/gw-man/ONGOSUB.html # on gosub does NOT generate an error in the value is out of range, # It just goes on to the next line. #assert_syntax(result < len(stmt._target_lines), "No target for value of {result} in ON GOTO/GOSUB") if result >= len(stmt._target_lines): # No line matching the index, just go on. 
return if op == "GOTO": executor.goto_line(stmt._target_lines[result]) elif op == "GOSUB": executor.gosub(stmt._target_lines[result]) else: assert_syntax(False, "Bad format for ON statement.") def stmt_end(executor, stmt): print("Ending program") executor._run = RunStatus.END_CMD def stmt_def(executor, stmt:ParsedStatementDef): """ Define a user-defined function. 470 DEF FND(D)=SQR((K(I,1)-S1)^2+(K(I,2)-S2)^2) :param executor: :param stmt: :return: """ executor.put_symbol(stmt._variable, stmt._tokens, SymbolType.FUNCTION, stmt._function_arg) def stmt_return(executor, stmt): executor.do_return() def stmt_width(executor, stmt): """ The WIDTH statement is only for compatibility with some versions of BASIC. It sets the width of the screen. Ignored. :param executor: :param stmt: :return: """ pass class KB: def __init__(self, exec, parser_class=ParsedStatement): self._
from basic_dialect import UPPERCASE_INPUT from basic_types import BasicSyntaxError, assert_syntax, is_valid_identifier from basic_types import SymbolType, RunStatus
random_line_split
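stmt_on in this record implements ON ... GOTO/GOSUB: evaluate the expression, treat it as a 1-based index, and, per the GW-BASIC behavior the comment links to, silently fall through to the next line when the index has no matching target. The selection logic on its own (on_goto_target is an illustrative name; the explicit negative-index guard is an addition, since a negative Python index would otherwise wrap around):

def on_goto_target(value, target_lines):
    # Return the line number to jump to, or None to fall through.
    index = int(value) - 1               # BASIC is 1-based
    if index < 0 or index >= len(target_lines):
        return None                      # out of range: just go on
    return target_lines[index]

assert on_goto_target(2, [100, 200, 300]) == 200
assert on_goto_target(7, [100, 200, 300]) is None   # falls through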
basic_statements.py
StatementNoArgs, ParsedStatementDef, ParsedStatementPrint from basic_parsing import ParsedStatementGo, ParsedStatementDim from basic_parsing import ParsedStatementInput, ParsedStatementNext from basic_lexer import get_lexer from basic_types import NUMBERS, LETTERS from basic_expressions import Expression from basic_utils import smart_split def stmt_rem(executor, stmt): """ Does nothing. :return: """ return None def stmt_print(executor, stmt:ParsedStatementPrint): """ Prints output. :param executor: The program execution environment. Contains variables in its SymbolTable :param stmt: This print statement, contains parameters to the PRINT command. :return: None """ for i, arg in enumerate(stmt._outputs): if arg[0] == '"': # quoted string output = arg[1:-1] #output.replace(" ", "*") # TODO delete this line. executor.do_print(output, end='') else: # Expression v = eval_expression(executor._symbols, arg) #v = executor.get_symbol(arg) if type(v) == float: executor.do_print(F" {v:g} ", end='') # I'm trying to figure out BASIC's rules for spacing. # NO spaces is wrong (see initial print out) # Spaces around everything is wrong. # Spaces around numbers but not strings seems to work, so far. else: executor.do_print(F"{v}", end='') if not stmt._no_cr: executor.do_print("") return None def stmt_goto(executor, stmt: ParsedStatementGo): destination = stmt.destination executor.goto_line(int(destination)) return None def stmt_gosub(executor, stmt: ParsedStatementGo): destination = stmt.destination assert_syntax(str.isdigit(destination), F"Gosub target is not an int ") executor.gosub(int(destination)) return None def stmt_error(executor, stmt:ParsedStatement): raise Exception("THIS EXCEPTION IS EXPECTED. It is for testing.") def stmt_for(executor, stmt: ParsedStatementFor): var = stmt._index_clause start = stmt._start_clause start = eval_expression(executor, start) is_valid_identifier(var) executor.put_symbol(var, start, SymbolType.VARIABLE, None) executor.do_for(var, start, stmt._to_clause, stmt._step_clause, executor.get_next_stmt()) def stmt_next(executor, stmt:ParsedStatementNext): index = stmt.loop_var var, to_clause, step_clause, loop_top = executor.do_next_peek(index) value = executor.get_symbol(var) to_value = eval_expression(executor._symbols, to_clause) step_value = eval_expression(executor._symbols, step_clause) value = value + step_value executor.put_symbol(var, value, SymbolType.VARIABLE, None) if value <= to_value: executor._goto_location(loop_top) else: executor.do_next_pop(var) def is_string_variable(variable:str): return variable.endswith("$") def assign_variable(executor, variable, value): """ Variable assignment can include assigning array elements. :param variable: :param value: :return: """ variable = variable.replace(" ", "") # TODO Should move parsing of this to ParsedStatementLet. # TODO Need to handle N-dimensional array element assignment. 
i = variable.find("(") if i != -1: # Array reference j = variable.find(")", i+1) if j == -1: raise BasicSyntaxError(F"Missing ) in in array assignment to {variable}") if i+1 == j: raise BasicSyntaxError(F"Missing array subscript in assignment to {variable}") subscripts = variable[i+1:j].split(",") variable = variable[:i] is_valid_identifier(variable) subscripts = [int(eval_expression(executor._symbols, subscript)) - 1 for subscript in subscripts] executor.put_symbol_element(variable, value, subscripts) else: is_valid_identifier(variable) executor.put_symbol(variable, value, symbol_type=SymbolType.VARIABLE, arg=None) def eval_expression(symbols, value): lexer = get_lexer() tokens = lexer.lex(value) e = Expression() result = e.eval(tokens, symbols=symbols) return result def stmt_let(executor, stmt:ParsedStatementLet): result = stmt._expression.eval(stmt._tokens, symbols=executor._symbols) assign_variable(executor, stmt._variable, result) def stmt_clear(executor, stmt): # Clear statement removes all variables. executor.init_symbols() def init_array(dimensions:list): if len(dimensions) == 1: return [0] * dimensions[0] one = [] for x in range(dimensions[0]): one.append(init_array(dimensions[1:])) return one def stmt_dim(executor, stmt:ParsedStatementDim): """ Declares an array. Initializes it to zeros. TODO Handle more than two dimensions. :param executor: :param stmt: :return: """ for name, value in stmt._dimensions: initializer = init_array(value) executor.put_symbol(name, initializer, SymbolType.ARRAY, arg=None) # Not right, but for now. def stmt_if(executor, stmt): """ An if statement works by skipping to the next line, if the THEN clause is false, otherwise it continues to execute the clauses after the THEN. :param executor: :param stmt: :return: None """ e = Expression() result = e.eval(stmt._tokens, symbols=executor._symbols) if not result: executor.goto_next_line() def stmt_input(executor, stmt): for var in stmt._input_vars: is_valid_identifier(var) prompt = stmt._prompt # Not sure if this can be an expression. None are used in my examples, but why not? if prompt: # TODO If we add semicolon an an op that behaves like comma, multi-element prompts should work. prompt = eval_expression(executor._symbols, prompt) while True: executor.do_print(prompt, end='') result = executor.do_input() if result is None: print("Bad response from trekbot") result = result.split(",") if len(result) != len(stmt._input_vars): print(F"Mismatched number of inputs. Expected {len(stmt._input_vars)} got {len(result)}. Try Again.") continue for value, var in zip(result, stmt._input_vars): ok = False if not is_string_variable(var): try: value = float(value) except Exception as e: print("Invalid number. Try again.") break else: if UPPERCASE_INPUT: value = value.upper() executor.put_symbol(var, value, SymbolType.VARIABLE, None) else: break # Break the while, if we did NOT get an invalid number (break from for) def stmt_on(executor, stmt): var = stmt._expression op = stmt._op result = eval_expression(executor._symbols, var) assert_syntax(type(result) == int or type(result) == float, "Expression not numeric in ON GOTO/GOSUB") result = int(result) - 1 # Basic is 1-based. # According to this: https://hwiegman.home.xs4all.nl/gw-man/ONGOSUB.html # on gosub does NOT generate an error in the value is out of range, # It just goes on to the next line. #assert_syntax(result < len(stmt._target_lines), "No target for value of {result} in ON GOTO/GOSUB") if result >= len(stmt._target_lines): # No line matching the index, just go on. 
return if op == "GOTO": executor.goto_line(stmt._target_lines[result]) elif op == "GOSUB": executor.gosub(stmt._target_lines[result]) else: assert_syntax(False, "Bad format for ON statement.") def stmt_end(executor, stmt): print("Ending program") executor._run = RunStatus.END_CMD def stmt_def(executor, stmt:ParsedStatementDef): """ Define a user-defined function. 470 DEF FND(D)=SQR((K(I,1)-S1)^2+(K(I,2)-S2)^2) :param executor: :param stmt: :return: """ executor.put_symbol(stmt._variable, stmt._tokens, SymbolType.FUNCTION, stmt._function_arg) def
(executor, stmt): executor.do_return() def stmt_width(executor, stmt): """ The WIDTH statement is only for compatibility with some versions of BASIC. It sets the width of the screen. Ignored. :param executor: :param stmt: :return: """ pass class KB: def __init__(self, exec, parser_class=ParsedStatement): self._parser = parser_class self._exec = exec def get_parser_class(self): return self._parser def get_exec(self): return self._exec class Keywords(Enum): CLEAR = KB(stmt_clear, ParsedStatement) # Some uses of clear take arguments, which we ignore. DEF = KB(stmt_def, Parsed
stmt_return
identifier_name
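This last basic_statements.py record holds out an identifier_name: the middle is the bare function name stmt_return. The same record also shows how DIM builds its storage: init_array recursively produces zero-filled nested lists, one level per dimension. Isolated for clarity (the comprehension is a cosmetic restructuring of the loop in the source):

def init_array(dimensions):
    # Zero-filled nested lists, one nesting level per dimension, as in stmt_dim.
    if len(dimensions) == 1:
        return [0] * dimensions[0]
    return [init_array(dimensions[1:]) for _ in range(dimensions[0])]

grid = init_array([2, 3])
assert grid == [[0, 0, 0], [0, 0, 0]]
grid[0][1] = 7  # element assignment after the 1-based subscript fixup
print(grid)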
ViewCreater3d.ts
, ViewQueryParams, IModelReadRpcInterface, ViewDefinition3dProps, CategorySelectorProps, ModelSelectorProps, ViewDefinitionProps, DisplayStyle3dProps, CameraProps, IModelError, } from "@bentley/imodeljs-common"; import { Environment, IModelConnection, SpatialViewState, ViewState, StandardViewId, loggerCategory, } from "@bentley/imodeljs-frontend"; import { Range3d } from "@bentley/geometry-core"; /**@beta Interface providing options for 3D view creation. */ export interface ViewCreator3dOptions { /** Turn camera on when generating view */ cameraOn?: boolean; /** Turn skybox on when generating view */ skyboxOn?: boolean; /** Standard view id for the view state */ standardViewId?: StandardViewId; /** Merge in Props from seed view (default spatial view) in iModel */ useSeedView?: boolean; /** vpAspect aspect ratio of vp to create fit view. */ vpAspect?: number; } /**@beta API for creating a 3D default view for an iModel. */ export class ViewCreator3d { /** * Constructs ViewCreator with iModelConnection. * @param _imodel IModelConnection to query for categories and/or models */ constructor(private _imodel: IModelConnection) {} /** * Creates a default view based on the given model ids. Uses all models ON if no modelIds passed * @param options for view creation * @param modelIds [optional] Model Ids to use in the view * @throws [[IModelError]] if no physical models are found. */ public async createDefaultView( options?: ViewCreator3dOptions, modelIds?: string[] ): Promise<ViewState> { const models = modelIds ? modelIds : await this._getAllModels(); if (models === undefined || models.length === 0) throw new IModelError( IModelStatus.BadModel, "ViewCreator3d.createDefaultView: no physical models found in iModel", Logger.logError, loggerCategory, () => ({ models }) ); const props = await this._createViewStateProps(models, options); const viewState = SpatialViewState.createFromProps( props, this._imodel ) as SpatialViewState; await viewState.load(); const hasBackgroundMapProps = viewState.displayStyle.toJSON().jsonProperties && viewState.displayStyle.toJSON().jsonProperties.styles.backgroundMap; if (viewState.viewFlags.backgroundMap && !hasBackgroundMapProps) { viewState.getDisplayStyle3d().changeBackgroundMapProps({ providerName: "BingProvider", providerData: { mapType: BackgroundMapType.Hybrid, }, }); } if (options?.standardViewId) viewState.setStandardRotation(options.standardViewId); const range = viewState.computeFitRange(); viewState.lookAtVolume(range, options?.vpAspect); return viewState; } /** * Generates a view state props object for creating a view. 
Merges display styles with a seed view if the NavigatorApp.flags.useSeedView is ON * @param models Models to put in view props * @param options view creation options like camera On and skybox On */ private async _createViewStateProps( models: Id64String[], options?: ViewCreator3dOptions ): Promise<ViewStateProps> { // Use dictionary model in all props const dictionaryId = IModel.dictionaryId; const categories: Id64Array = await this._getAllCategories(); // model extents const modelProps = await this._imodel.models.queryModelRanges(models); const modelExtents = Range3d.fromJSON(modelProps[0]); let originX = modelExtents.low.x; let originY = modelExtents.low.y; let originZ = modelExtents.low.z; let deltaX = modelExtents.xLength(); let deltaY = modelExtents.yLength(); let deltaZ = modelExtents.zLength(); // if vp aspect given, update model extents to fit view if (options?.vpAspect) { const modelAspect = deltaY / deltaX; if (modelAspect > options.vpAspect) { const xFix = deltaY / options.vpAspect; originX = originX - xFix / 2; deltaX = deltaX + xFix; } else if (modelAspect < options.vpAspect) { const yFix = deltaX * options.vpAspect; originY = originY - yFix / 2; deltaY = deltaY + yFix; } } const categorySelectorProps: CategorySelectorProps = { categories, code: Code.createEmpty(), model: dictionaryId, classFullName: "BisCore:CategorySelector", }; const modelSelectorProps: ModelSelectorProps = { models, code: Code.createEmpty(), model: dictionaryId, classFullName: "BisCore:ModelSelector", }; const cameraData = new Camera(); const cameraOn = options?.cameraOn ? options.cameraOn : false; let c = Code.createEmpty(); c.value = "MyView2020"; const viewDefinitionProps: ViewDefinition3dProps = { categorySelectorId: "", displayStyleId: "", code: c, model: dictionaryId, origin: { x: originX, y: originY, z: originZ }, extents: { x: deltaX, y: deltaY, z: deltaZ }, classFullName: "BisCore:SpatialViewDefinition", cameraOn, camera: { lens: cameraData.lens.toJSON(), focusDist: cameraData.focusDist, eye: cameraData.eye.toJSON(), }, }; const displayStyleProps: DisplayStyle3dProps = { code: Code.createEmpty(), model: dictionaryId, classFullName: "BisCore:DisplayStyle", jsonProperties: { styles: { viewflags: { renderMode: RenderMode.SmoothShade, noSourceLights: false, noCameraLights: false, noSolarLight: false, noConstruct: true, noTransp: false, visEdges: false,
}, environment: options !== undefined && options.skyboxOn !== undefined && options.skyboxOn ? new Environment({ sky: { display: true } }).toJSON() : undefined, }, }, }; const viewStateProps: ViewStateProps = { displayStyleProps, categorySelectorProps, modelSelectorProps, viewDefinitionProps, }; // merge seed view props if needed return options?.useSeedView ? this._mergeSeedView(viewStateProps) : viewStateProps; } /** * Merges a seed view in the iModel with the passed view state props. It will be a no-op if there are no default 3D views in the iModel * @param viewStateProps Input view props to be merged */ private async _mergeSeedView( viewStateProps: ViewStateProps ): Promise<ViewStateProps> { const viewId = await this._getDefaultViewId(); // Handle iModels without any default view id if (viewId === undefined) return viewStateProps; const seedViewState = (await this._imodel.views.load( viewId )) as SpatialViewState; const seedViewStateProps = { categorySelectorProps: seedViewState.categorySelector.toJSON(), modelSelectorProps: seedViewState.modelSelector.toJSON(), viewDefinitionProps: seedViewState.toJSON(), displayStyleProps: seedViewState.displayStyle.toJSON(), }; const mergedDisplayProps = seedViewStateProps.displayStyleProps; if (mergedDisplayProps.jsonProperties !== undefined) { mergedDisplayProps.jsonProperties.styles = { ...mergedDisplayProps.jsonProperties.styles, ...viewStateProps.displayStyleProps.jsonProperties!.styles, }; } return { ...seedViewStateProps, ...viewStateProps, displayStyleProps: mergedDisplayProps, }; } /** * Get ID of default view. */ private async _getDefaultViewId(): Promise<Id64String | undefined> { const viewId = await this._imodel.views.queryDefaultViewId(); const params: ViewQueryParams = {}; params.from = SpatialViewState.classFullName; params.where = "ECInstanceId=" + viewId; // Check validity of default view const viewProps = await IModelReadRpcInterface.getClient().queryElementProps( this._imodel.getRpcProps(), params ); if (viewProps.length === 0) { // Return the first view we can find const viewList = await this._imodel.views.getViewList({ wantPrivate: false, }); if (viewList.length === 0) return undefined; const spatialViewList = viewList.filter( (value: IModelConnection.ViewSpec) => value.class.indexOf("Spatial") !== -1 ); if (spatialViewList.length === 0) return undefined; return spatialViewList[0].id; } return viewId; } /** * Get all categories containing elements */ private async _getAllCategories(): Promise<
backgroundMap: this._imodel.isGeoLocated,
random_line_split
ViewCreater3d.ts
, ViewQueryParams, IModelReadRpcInterface, ViewDefinition3dProps, CategorySelectorProps, ModelSelectorProps, ViewDefinitionProps, DisplayStyle3dProps, CameraProps, IModelError, } from "@bentley/imodeljs-common"; import { Environment, IModelConnection, SpatialViewState, ViewState, StandardViewId, loggerCategory, } from "@bentley/imodeljs-frontend"; import { Range3d } from "@bentley/geometry-core"; /**@beta Interface providing options for 3D view creation. */ export interface ViewCreator3dOptions { /** Turn camera on when generating view */ cameraOn?: boolean; /** Turn skybox on when generating view */ skyboxOn?: boolean; /** Standard view id for the view state */ standardViewId?: StandardViewId; /** Marge in Props from seed view (default spatial view) in iModel */ useSeedView?: boolean; /** vpAspect aspect ratio of vp to create fit view. */ vpAspect?: number; } /**@beta API for creating a 3D default view for an iModel. */ export class ViewCreator3d { /** * Constructs ViewCreator with iModelConnection. * @param _imodel IModelConnection to query for categories and/or models */ constructor(private _imodel: IModelConnection) {} /** * Creates a default view based on the given model ids. Uses all models ON if no modelIds passed * @param options for view creation * @param modelIds [optional] Model Ids to use in the view * @throws [[IModelError]] if no physical models are found. */ public async
( options?: ViewCreator3dOptions, modelIds?: string[] ): Promise<ViewState> { const models = modelIds ? modelIds : await this._getAllModels(); if (models === undefined || models.length === 0) throw new IModelError( IModelStatus.BadModel, "ViewCreator3d.createDefaultView: no physical models found in iModel", Logger.logError, loggerCategory, () => ({ models }) ); const props = await this._createViewStateProps(models, options); const viewState = SpatialViewState.createFromProps( props, this._imodel ) as SpatialViewState; await viewState.load(); const hasBackgroundMapProps = viewState.displayStyle.toJSON().jsonProperties && viewState.displayStyle.toJSON().jsonProperties.styles.backgroundMap; if (viewState.viewFlags.backgroundMap && !hasBackgroundMapProps) { viewState.getDisplayStyle3d().changeBackgroundMapProps({ providerName: "BingProvider", providerData: { mapType: BackgroundMapType.Hybrid, }, }); } if (options?.standardViewId) viewState.setStandardRotation(options.standardViewId); const range = viewState.computeFitRange(); viewState.lookAtVolume(range, options?.vpAspect); return viewState; } /** * Generates a view state props object for creating a view. Merges display styles with a seed view if the NavigatorApp.flags.useSeedView is ON * @param models Models to put in view props * @param options view creation options like camera On and skybox On */ private async _createViewStateProps( models: Id64String[], options?: ViewCreator3dOptions ): Promise<ViewStateProps> { // Use dictionary model in all props const dictionaryId = IModel.dictionaryId; const categories: Id64Array = await this._getAllCategories(); // model extents const modelProps = await this._imodel.models.queryModelRanges(models); const modelExtents = Range3d.fromJSON(modelProps[0]); let originX = modelExtents.low.x; let originY = modelExtents.low.y; let originZ = modelExtents.low.z; let deltaX = modelExtents.xLength(); let deltaY = modelExtents.yLength(); let deltaZ = modelExtents.zLength(); // if vp aspect given, update model extents to fit view if (options?.vpAspect) { const modelAspect = deltaY / deltaX; if (modelAspect > options.vpAspect) { const xFix = deltaY / options.vpAspect; originX = originX - xFix / 2; deltaX = deltaX + xFix; } else if (modelAspect < options.vpAspect) { const yFix = deltaX * options.vpAspect; originY = originY - yFix / 2; deltaY = deltaY + yFix; } } const categorySelectorProps: CategorySelectorProps = { categories, code: Code.createEmpty(), model: dictionaryId, classFullName: "BisCore:CategorySelector", }; const modelSelectorProps: ModelSelectorProps = { models, code: Code.createEmpty(), model: dictionaryId, classFullName: "BisCore:ModelSelector", }; const cameraData = new Camera(); const cameraOn = options?.cameraOn ? 
options.cameraOn : false; let c = Code.createEmpty(); c.value = "MyView2020"; const viewDefinitionProps: ViewDefinition3dProps = { categorySelectorId: "", displayStyleId: "", code: c, model: dictionaryId, origin: { x: originX, y: originY, z: originZ }, extents: { x: deltaX, y: deltaY, z: deltaZ }, classFullName: "BisCore:SpatialViewDefinition", cameraOn, camera: { lens: cameraData.lens.toJSON(), focusDist: cameraData.focusDist, eye: cameraData.eye.toJSON(), }, }; const displayStyleProps: DisplayStyle3dProps = { code: Code.createEmpty(), model: dictionaryId, classFullName: "BisCore:DisplayStyle", jsonProperties: { styles: { viewflags: { renderMode: RenderMode.SmoothShade, noSourceLights: false, noCameraLights: false, noSolarLight: false, noConstruct: true, noTransp: false, visEdges: false, backgroundMap: this._imodel.isGeoLocated, }, environment: options !== undefined && options.skyboxOn !== undefined && options.skyboxOn ? new Environment({ sky: { display: true } }).toJSON() : undefined, }, }, }; const viewStateProps: ViewStateProps = { displayStyleProps, categorySelectorProps, modelSelectorProps, viewDefinitionProps, }; // merge seed view props if needed return options?.useSeedView ? this._mergeSeedView(viewStateProps) : viewStateProps; } /** * Merges a seed view in the iModel with the passed view state props. It will be a no-op if there are no default 3D views in the iModel * @param viewStateProps Input view props to be merged */ private async _mergeSeedView( viewStateProps: ViewStateProps ): Promise<ViewStateProps> { const viewId = await this._getDefaultViewId(); // Handle iModels without any default view id if (viewId === undefined) return viewStateProps; const seedViewState = (await this._imodel.views.load( viewId )) as SpatialViewState; const seedViewStateProps = { categorySelectorProps: seedViewState.categorySelector.toJSON(), modelSelectorProps: seedViewState.modelSelector.toJSON(), viewDefinitionProps: seedViewState.toJSON(), displayStyleProps: seedViewState.displayStyle.toJSON(), }; const mergedDisplayProps = seedViewStateProps.displayStyleProps; if (mergedDisplayProps.jsonProperties !== undefined) { mergedDisplayProps.jsonProperties.styles = { ...mergedDisplayProps.jsonProperties.styles, ...viewStateProps.displayStyleProps.jsonProperties!.styles, }; } return { ...seedViewStateProps, ...viewStateProps, displayStyleProps: mergedDisplayProps, }; } /** * Get ID of default view. */ private async _getDefaultViewId(): Promise<Id64String | undefined> { const viewId = await this._imodel.views.queryDefaultViewId(); const params: ViewQueryParams = {}; params.from = SpatialViewState.classFullName; params.where = "ECInstanceId=" + viewId; // Check validity of default view const viewProps = await IModelReadRpcInterface.getClient().queryElementProps( this._imodel.getRpcProps(), params ); if (viewProps.length === 0) { // Return the first view we can find const viewList = await this._imodel.views.getViewList({ wantPrivate: false, }); if (viewList.length === 0) return undefined; const spatialViewList = viewList.filter( (value: IModelConnection.ViewSpec) => value.class.indexOf("Spatial") !== -1 ); if (spatialViewList.length === 0) return undefined; return spatialViewList[0].id; } return viewId; } /** * Get all categories containing elements */ private async _getAllCategories(): Promise<Id
createDefaultView
identifier_name
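The vpAspect branch in _createViewStateProps above grows the model extents so their Y/X ratio matches the viewport before fitting; note it adds the full quotient (deltaY / vpAspect) rather than the difference to the target extent, so it pads more than strictly needed. A small Python sketch of the exact centered fit, assuming vpAspect means the target deltaY/deltaX ratio:

# Sketch: grow extents so delta_y/delta_x == vp_aspect, keeping the box centered.
def fit_extents(origin_x, origin_y, delta_x, delta_y, vp_aspect):
    model_aspect = delta_y / delta_x
    if model_aspect > vp_aspect:            # model too tall -> widen X
        target_dx = delta_y / vp_aspect
        origin_x -= (target_dx - delta_x) / 2
        delta_x = target_dx
    elif model_aspect < vp_aspect:          # model too wide -> grow Y
        target_dy = delta_x * vp_aspect
        origin_y -= (target_dy - delta_y) / 2
        delta_y = target_dy
    return origin_x, origin_y, delta_x, delta_y

print(fit_extents(0.0, 0.0, 10.0, 2.0, 1.0))  # (0.0, -4.0, 10.0, 10.0)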
expenv.py
.gridsz[X], self.gridsz[Y], NUM_LAYERS)) put(st, start_box, agentLayer, True) put(st, flavor_loc, goalLayer, True) put_all(st, mobile_locs, mobileLayer, True) put_all(st, block_locs, immobileLayer, True) self.start_states.append( { 'flavor signal': flav_id, \ 'state': st, '_whichgoal':flav_id, \ '_startpos':start_box, 'goal loc':flavor_loc }) # rnd_state = self.start_states[np.random.choice(range(24))] self.curr_sorted_states = self.start_states.copy() def dist(state_): x,y = state_['goal loc'], state_['_startpos'] return abs(x[0]-y[0])+abs(x[1]-y[1]) self.curr_sorted_states.sort(key=dist) rnd_state = np.random.choice(self.start_states) if debug: print('flag 93747') print_state(rnd_state, 'condensed') def _view_state_copy(self, st): sret = {} for key in ('_startpos','flavor signal','_whichgoal'): sret[key] = st[key] sret['state'] = np.copy(st['state']) return sret def get_random_starting_state(self): ''' Public method: get a random state struct with fields: 'state', '_startpos', 'flavor signal', '_whichgoal', 'goal loc'. The three later fields are helper attributes for, say, curricula or presentation. ''' #st = self.start_states[np.random.choice(range(24))] return self._view_state_copy(np.random.choice(self.start_states)) def get_weighted_starting_state(self, envir, pct): pct=float(pct) # print(envir,pct, 0.5*pct,1-pct) if envir=='r-u': raise Exception() assert (envir=='r-u-ru') ps = [0.5*pct, 0.5*pct, 1-pct] return self._view_state_copy(np.random.choice(\ self.curr_sorted_states, p=ps)) # for s in self.start_states: # print([s[x] for x in ['goal loc','_startpas'] ]) # for s in self.curr_sorted_states: # print([s[x] for x in ['goal loc','_startpos'] ]) def get_starting_state(self, curriculum_name, epoch, envir=None): # interface wrapper method for submethods curr = curriculum_name cspl = curriculum_name.split(':') if curr==None: return self.get_random_starting_state()['state'] elif len(curr)>4 and curr[:4]=='FLAT' and len(cspl)==2: return self.get_weighted_starting_state(envir, float(cspl[1]))['state'] elif len(curr)>6 and curr[:6]=='STEP-1' and len(cspl)==4: if epoch >= int(cspl[3]): return self.get_weighted_starting_state(envir, cspl[2])['state'] else: return self.get_weighted_starting_state(envir, cspl[1])['state'] elif len(curr)>8 and curr[:8]=='LINEAR-1' and len(cspl)==4: param = min(1.0, max(0.0, epoch/float(cspl[3]))) pct = param*float(cspl[2])+(1-param)*float(cspl[1]) return self.get_weighted_starting_state(envir, pct)['state'] else: raise exception(curr, cspl, epoch, envir) return curriculum_name, 'error expenv line ~200' # Very hacky: assert(self.experiment_name=='r-u-ru') l1 = len(TEMPLATE_R_U) l2 = len(TEMPLATE_RU) if curriculum_name=='STEP': ps = [0.5, 0.5, 0] if False else False return self._view_state_copy(np.random.choice(self.start_states), p=ps)['state'] def get_all_starting_states(self): ''' Public method: get a random state struct with fields: 'state', '_startpos', 'flavor signal', '_whichgoal', 'goal loc'. The three later fields are helper attributes for, say, curricula or presentation. ''' return [self._view_state_copy(st) for st in self.start_states] def get_agent_loc(self,state): '''Public method: query the location of the agent. 
(<0,0> is NW corner.)''' return self._get_loc(state,targ='agent') def get_goal_loc(self,s): return self._get_loc(s,targ='goal') # def get_allo_loc(self,s): return self._get_loc(s,targ='map') # center def _get_loc(self, state_matrix, targ): if targ=='agent': return map_nparr_to_tup(np.where(state_matrix[:,:,agentLayer]==1)) if targ=='goal': return map_nparr_to_tup(np.where(state_matrix[:,:,goalLayer]==1)) if targ=='map': return multvec(self.gridsz, 2, '//') # center def _out_of_bounds(self, pos): return (pos[X] < 0 or pos[X] >= self.gridsz[X] or \ pos[Y] < 0 or pos[Y] >= self.gridsz[Y]) def _is_valid_move(self, st, move): aloc = self.get_agent_loc(st) newaloc = addvec(aloc, move) if self._out_of_bounds(newaloc): return False if at(st, newaloc, immobileLayer): return False if at(st, newaloc, mobileLayer): st2 = np.copy(st) put(st2, newaloc, agentLayer, True) put(st2, aloc, agentLayer, False) return self._is_valid_move(st2, move) return True def _move_ent_from_to(self, mat, loc, nextloc, lyr): m2 = np.copy(mat) if not at(m2,loc,lyr): raise Exception() #print ("Adjusting",lyr,loc,nextloc) put(m2,loc,lyr, False) put(m2,nextloc,lyr, True) return m2 def _adjust_blocks(self, mat, loc, dir_vec, debug=True): nloc = addvec(loc, dir_vec) if self._out_of_bounds(nloc): return mat, False arr = [what(mat, loc), what(mat, nloc)] ploc=nloc while True: nloc = addvec(ploc, dir_vec) #print('>>',dir_vec) if self._out_of_bounds(nloc): return mat, False if not arr[-1][mobileLayer]: return mat, not arr[-1][immobileLayer] nmat = self._move_ent_from_to(mat, ploc, nloc, mobileLayer) if len(arr)>2: put(nmat, ploc, mobileLayer, True) arr.append(what(mat, nloc)) ploc=nloc mat=nmat raise Exception() def _move_agent(self, state_mat, dir_vec, ret_valid_move): aloc = self.get_agent_loc(state_mat) newL = addvec(aloc, dir_vec) state_mat2, success = self._adjust_blocks(state_mat, aloc, dir_vec) state_mat2 = self._move_ent_from_to(state_mat2, aloc, newL, agentLayer) if self.centr == 'egocentric': shft, axis = { 0:(1,1), 1:(-1,1), 2:(-1,0), 3:(1,0) }[dir_vec] state_mat2=np.roll(state_mat2, shift=shft, axis=axis) elif not self.centr == 'allocentric': raise Exception(self.centr) isValid = self._is_valid_move(state_mat, dir_vec) if isValid: if ret_valid_move==True: return state_mat2, isValid else: return state_mat2 if ret_valid_move==True: return state_mat, isValid return state_mat def _rot_agent(self, state_mat, nrots, ret_valid_move): aloc = self.get_agent_loc(state_mat) assert(self.experiment_name == 'r-u-ru') assert(nrots in [1,2,3]) state_mat = np.rot90(state_mat, k=nrots, axes=(0,1)) # state_mat2=np.roll(state_mat2, shift=shft, axis=axis) if self.centr == 'egocentric':
centr_pos = (3,3)
dx, dy = aloc[0]-centr_pos[0], aloc[1]-centr_pos[1]
state_mat = np.roll(state_mat, shift=dx, axis=0)
state_mat = np.roll(state_mat, shift=dy, axis=1)
conditional_block
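The conditional block above is the egocentric recentering: after a rotation, np.roll shifts the whole layered grid so the agent sits at a fixed cell. A self-contained numpy sketch of the same trick on a single layer (grid size and agent position are illustrative, and the sign convention in the source may differ):

import numpy as np

grid = np.zeros((7, 7), dtype=bool)
grid[1, 5] = True                          # agent at (x=1, y=5)

ax, ay = map(int, np.argwhere(grid)[0])    # locate the agent
center = (3, 3)
grid = np.roll(grid, shift=center[0] - ax, axis=0)
grid = np.roll(grid, shift=center[1] - ay, axis=1)

assert grid[3, 3]                          # agent now pinned to the center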
expenv.py
): for p in pos_list: put(mat, p, lyr, v) #------#------#------#------#------#------#------#------#------#------#------#-- #*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#* #*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#* # # Experiment API class: # #*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#* #------#------#------#------#------#------#------#------#------#------#------#-- # Experiment class: class ExpAPI(environment_handler3): def __init__(self, experiment_name, centr, card_or_rot='card', debug=False): ''' Initializer for Experiment class. Please provide: - experiment_name keyword, which indirectly signals the starting states. Currently takes 'tse2007'. - centr, the keyword for the reference frame. Currently takes 'allocentric' or 'egocentric' but in future will facilitate rotational heading frames. - optional debug-mode boolean flag ''' environment_handler3.__init__(self, gridsize = \ { 'tse2007': (11,11), 'r-u': (7,7), 'ru': (7,7), \ 'r-u-ru': (7,7) }[experiment_name], \ action_mode = centr, card_or_rot=card_or_rot ) self.centr = centr self.card_or_rot = card_or_rot self.state_gen = state_generator(self.gridsz) self.start_states = [] self._set_starting_states({\ 'tse2007':TEMPLATE_TSE, \ 'r-u-ru': TEMPLATE_R_U_RU, \ 'r-u': TEMPLATE_R_U, \ }[experiment_name], debug) self.experiment_name = experiment_name def _find_all(self, a_str, char): # [internal] scan from a template string (eg, TEMPLATE_TSE) s = a_str.replace(' ','') startX, startY = 0,0 for c in s: if c==char: yield((startX, startY)) elif c=='r': startY += 1 startX = 0 if c in 'a!xm.': startX += 1 # Set this experiment's possible starting states using complete template str def _set_starting_states(self, state_template, debug=False): oind = state_template.index('o') if state_template.index('e') > oind: raise Exception() num_start_locs = state_template.count('a') num_goal_locs = state_template.count('!') if not state_template.find('*') > oind: raise Exception() start_locs = list(self._find_all(state_template, 'a')) goal_locs = list(self._find_all(state_template, '!')); block_locs = list(self._find_all(state_template, 'x')); if 'D' in state_template: mobile_locs = list(self._find_all(state_template, '!')); self.valid_states = np.array( [AL, GL, AL|GL, IL, ML, ML|GL] ).T else: try: mobile_locs = list(self._find_all(state_template, 'm')); except: mobile_locs = [] self.valid_states = np.array( [AL, GL, AL|GL, IL, ML] ).T # self.valid_states = np.append(self.valid_states, np.expand_dims(\ # np.array([0,0,0,0], dtype=bool)), axis=0) rx = [0,1,self.gridsz[X]-2, self.gridsz[X]-1] ry = [0,1,self.gridsz[Y]-2, self.gridsz[Y]-1] ''' flavor == goal here. 
''' for start_box in start_locs: for flav_id, flavor_loc in enumerate(goal_locs): st = np.zeros( (self.gridsz[X], self.gridsz[Y], NUM_LAYERS)) put(st, start_box, agentLayer, True) put(st, flavor_loc, goalLayer, True) put_all(st, mobile_locs, mobileLayer, True) put_all(st, block_locs, immobileLayer, True) self.start_states.append( { 'flavor signal': flav_id, \ 'state': st, '_whichgoal':flav_id, \ '_startpos':start_box, 'goal loc':flavor_loc }) # rnd_state = self.start_states[np.random.choice(range(24))] self.curr_sorted_states = self.start_states.copy() def dist(state_): x,y = state_['goal loc'], state_['_startpos'] return abs(x[0]-y[0])+abs(x[1]-y[1]) self.curr_sorted_states.sort(key=dist) rnd_state = np.random.choice(self.start_states) if debug: print('flag 93747') print_state(rnd_state, 'condensed') def _view_state_copy(self, st): sret = {} for key in ('_startpos','flavor signal','_whichgoal'): sret[key] = st[key] sret['state'] = np.copy(st['state']) return sret def get_random_starting_state(self): ''' Public method: get a random state struct with fields: 'state', '_startpos', 'flavor signal', '_whichgoal', 'goal loc'. The three later fields are helper attributes for, say, curricula or presentation. ''' #st = self.start_states[np.random.choice(range(24))] return self._view_state_copy(np.random.choice(self.start_states)) def get_weighted_starting_state(self, envir, pct): pct=float(pct) # print(envir,pct, 0.5*pct,1-pct) if envir=='r-u': raise Exception() assert (envir=='r-u-ru') ps = [0.5*pct, 0.5*pct, 1-pct] return self._view_state_copy(np.random.choice(\ self.curr_sorted_states, p=ps)) # for s in self.start_states: # print([s[x] for x in ['goal loc','_startpas'] ]) # for s in self.curr_sorted_states: # print([s[x] for x in ['goal loc','_startpos'] ]) def get_starting_state(self, curriculum_name, epoch, envir=None): # interface wrapper method for submethods curr = curriculum_name cspl = curriculum_name.split(':') if curr==None: return self.get_random_starting_state()['state'] elif len(curr)>4 and curr[:4]=='FLAT' and len(cspl)==2: return self.get_weighted_starting_state(envir, float(cspl[1]))['state'] elif len(curr)>6 and curr[:6]=='STEP-1' and len(cspl)==4: if epoch >= int(cspl[3]): return self.get_weighted_starting_state(envir, cspl[2])['state'] else: return self.get_weighted_starting_state(envir, cspl[1])['state'] elif len(curr)>8 and curr[:8]=='LINEAR-1' and len(cspl)==4: param = min(1.0, max(0.0, epoch/float(cspl[3]))) pct = param*float(cspl[2])+(1-param)*float(cspl[1]) return self.get_weighted_starting_state(envir, pct)['state'] else: raise exception(curr, cspl, epoch, envir) return curriculum_name, 'error expenv line ~200' # Very hacky: assert(self.experiment_name=='r-u-ru') l1 = len(TEMPLATE_R_U) l2 = len(TEMPLATE_RU) if curriculum_name=='STEP': ps = [0.5, 0.5, 0] if False else False return self._view_state_copy(np.random.choice(self.start_states), p=ps)['state'] def get_all_starting_states(self): ''' Public method: get a random state struct with fields: 'state', '_startpos', 'flavor signal', '_whichgoal', 'goal loc'. The three later fields are helper attributes for, say, curricula or presentation. ''' return [self._view_state_copy(st) for st in self.start_states] def get_agent_loc(self,state):
'''Public method: query the location of the agent. (<0,0> is NW corner.)'''
return self._get_loc(state,targ='agent')
identifier_body
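The _find_all generator in the rows above scans a whitespace-stripped template string in which 'r' ends a row and 'a'/'!'/'x'/'m'/'.' advance the column. A standalone sketch of the same scan on a made-up template (the real TEMPLATE_* strings are not reproduced here):

# 'a' = start, '!' = goal, 'x' = block, 'm' = mobile, '.' = empty, 'r' = end of row.
def find_all(template, char):
    template = template.replace(" ", "")
    x = y = 0
    for c in template:
        if c == char:
            yield (x, y)
        elif c == "r":
            y += 1
            x = 0
        if c in "a!xm.":
            x += 1

demo = "a . ! r . x . r ! . a r"
print(list(find_all(demo, "!")))  # [(2, 0), (0, 2)]
print(list(find_all(demo, "a")))  # [(0, 0), (2, 2)]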
expenv.py
# except: # return tuple([i+m for i in Iterable]) def addvec(Iterable, m, optn=None): try: m[0] except: if m>80: m = DVECS[m] else: m = DVECS[INDICES_TO_CARDINAL_ACTIONS[m]] return tuple([i+m for i,m in zip(Iterable,m)]) def multvec(Iterable, m, optn=None): if optn=='//': return tuple([i//m for i in Iterable]) if optn=='/': return tuple([i/m for i in Iterable]) if optn==int: return tuple([int(i*m) for i in Iterable]) return tuple([i*m for i in Iterable]) def at(mat, pos, lyr): return mat[pos[X], pos[Y], lyr] def empty(mat, pos): return np.any(mat[pos[X], pos[Y], :]) def what(mat, pos): return np.array([at(mat, pos, lyr) for lyr in OLAYERS]) def put(mat, pos, lyr, v): mat[pos[X], pos[Y], lyr] = v def put_all(mat, pos_list, lyr, v): for p in pos_list: put(mat, p, lyr, v) #------#------#------#------#------#------#------#------#------#------#------#-- #*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#* #*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#* # # Experiment API class: # #*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#* #------#------#------#------#------#------#------#------#------#------#------#-- # Experiment class: class ExpAPI(environment_handler3): def __init__(self, experiment_name, centr, card_or_rot='card', debug=False): ''' Initializer for Experiment class. Please provide: - experiment_name keyword, which indirectly signals the starting states. Currently takes 'tse2007'. - centr, the keyword for the reference frame. Currently takes 'allocentric' or 'egocentric' but in future will facilitate rotational heading frames. - optional debug-mode boolean flag ''' environment_handler3.__init__(self, gridsize = \ { 'tse2007': (11,11), 'r-u': (7,7), 'ru': (7,7), \ 'r-u-ru': (7,7) }[experiment_name], \ action_mode = centr, card_or_rot=card_or_rot ) self.centr = centr self.card_or_rot = card_or_rot self.state_gen = state_generator(self.gridsz) self.start_states = [] self._set_starting_states({\ 'tse2007':TEMPLATE_TSE, \ 'r-u-ru': TEMPLATE_R_U_RU, \ 'r-u': TEMPLATE_R_U, \ }[experiment_name], debug) self.experiment_name = experiment_name def _find_all(self, a_str, char): # [internal] scan from a template string (eg, TEMPLATE_TSE) s = a_str.replace(' ','') startX, startY = 0,0 for c in s: if c==char: yield((startX, startY)) elif c=='r': startY += 1 startX = 0 if c in 'a!xm.': startX += 1 # Set this experiment's possible starting states using complete template str def _set_starting_states(self, state_template, debug=False): oind = state_template.index('o') if state_template.index('e') > oind: raise Exception() num_start_locs = state_template.count('a') num_goal_locs = state_template.count('!') if not state_template.find('*') > oind: raise Exception() start_locs = list(self._find_all(state_template, 'a')) goal_locs = list(self._find_all(state_template, '!')); block_locs = list(self._find_all(state_template, 'x')); if 'D' in state_template: mobile_locs = list(self._find_all(state_template, '!')); self.valid_states = np.array( [AL, GL, AL|GL, IL, ML, ML|GL] ).T else: try: mobile_locs = list(self._find_all(state_template, 'm')); except: mobile_locs = [] self.valid_states = np.array( [AL, GL, AL|GL, IL, ML] ).T # self.valid_states = np.append(self.valid_states, np.expand_dims(\ # np.array([0,0,0,0], dtype=bool)), axis=0) rx = [0,1,self.gridsz[X]-2, self.gridsz[X]-1] ry = [0,1,self.gridsz[Y]-2, self.gridsz[Y]-1] ''' flavor == goal here. 
''' for start_box in start_locs: for flav_id, flavor_loc in enumerate(goal_locs): st = np.zeros( (self.gridsz[X], self.gridsz[Y], NUM_LAYERS)) put(st, start_box, agentLayer, True) put(st, flavor_loc, goalLayer, True) put_all(st, mobile_locs, mobileLayer, True) put_all(st, block_locs, immobileLayer, True) self.start_states.append( { 'flavor signal': flav_id, \ 'state': st, '_whichgoal':flav_id, \ '_startpos':start_box, 'goal loc':flavor_loc }) # rnd_state = self.start_states[np.random.choice(range(24))] self.curr_sorted_states = self.start_states.copy() def dist(state_): x,y = state_['goal loc'], state_['_startpos'] return abs(x[0]-y[0])+abs(x[1]-y[1]) self.curr_sorted_states.sort(key=dist) rnd_state = np.random.choice(self.start_states) if debug: print('flag 93747') print_state(rnd_state, 'condensed') def _view_state_copy(self, st): sret = {} for key in ('_startpos','flavor signal','_whichgoal'): sret[key] = st[key] sret['state'] = np.copy(st['state']) return sret def get_random_starting_state(self): ''' Public method: get a random state struct with fields: 'state', '_startpos', 'flavor signal', '_whichgoal', 'goal loc'. The three later fields are helper attributes for, say, curricula or presentation. ''' #st = self.start_states[np.random.choice(range(24))] return self._view_state_copy(np.random.choice(self.start_states)) def get_weighted_starting_state(self, envir, pct): pct=float(pct) # print(envir,pct, 0.5*pct,1-pct) if envir=='r-u': raise Exception() assert (envir=='r-u-ru') ps = [0.5*pct, 0.5*pct, 1-pct] return self._view_state_copy(np.random.choice(\ self.curr_sorted_states, p=ps)) # for s in self.start_states: # print([s[x] for x in ['goal loc','_startpas'] ]) # for s in self.curr_sorted_states: # print([s[x] for x in ['goal loc','_startpos'] ]) def get_starting_state(self, curriculum_name, epoch, envir=None): # interface wrapper method for submethods curr = curriculum_name cspl = curriculum_name.split(':') if curr==None: return self.get_random_starting_state()['state'] elif len(curr)>4 and curr[:4]=='FLAT' and len(cspl)==2: return self.get_weighted_starting_state(envir, float(cspl[1]))['state'] elif len(curr)>6 and curr[:6]=='STEP-1' and len(cspl)==4: if epoch >= int(cspl[3]): return self.get_weighted_starting_state(envir, cspl[2])['state'] else: return self.get_weighted_starting_state(envir, cspl[1])['state'] elif len(curr)>8 and curr[:8]=='LINEAR-1' and len(cspl)==4: param = min(1.
#def addvec(Iterable, m, optn=None):
#    try:
#        return tuple([i+m for i,m in zip(Iterable,m)])
random_line_split
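The addvec/multvec tuple helpers restored above, together with the at/put accessors, are the grid vocabulary this environment uses: positions are (x, y) tuples and the state is a boolean (x, y, layer) array. A compact sketch of those helpers in isolation (the layer count here is illustrative):

import numpy as np

def addvec(pos, move):
    return tuple(p + m for p, m in zip(pos, move))

def multvec(pos, m, optn=None):
    if optn == "//":
        return tuple(p // m for p in pos)
    return tuple(p * m for p in pos)

AGENT_LAYER = 0
grid = np.zeros((7, 7, 4), dtype=bool)     # (x, y, layer) state tensor
grid[2, 3, AGENT_LAYER] = True

print(addvec((2, 3), (0, 1)))              # (2, 4): one step in +y
print(multvec((7, 7), 2, "//"))            # (3, 3): the grid center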
expenv.py
return tuple([int(i*m) for i in Iterable]) return tuple([i*m for i in Iterable]) def at(mat, pos, lyr): return mat[pos[X], pos[Y], lyr] def empty(mat, pos): return np.any(mat[pos[X], pos[Y], :]) def what(mat, pos): return np.array([at(mat, pos, lyr) for lyr in OLAYERS]) def put(mat, pos, lyr, v): mat[pos[X], pos[Y], lyr] = v def put_all(mat, pos_list, lyr, v): for p in pos_list: put(mat, p, lyr, v) #------#------#------#------#------#------#------#------#------#------#------#-- #*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#* #*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#* # # Experiment API class: # #*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#*$#* #------#------#------#------#------#------#------#------#------#------#------#-- # Experiment class: class ExpAPI(environment_handler3): def __init__(self, experiment_name, centr, card_or_rot='card', debug=False): ''' Initializer for Experiment class. Please provide: - experiment_name keyword, which indirectly signals the starting states. Currently takes 'tse2007'. - centr, the keyword for the reference frame. Currently takes 'allocentric' or 'egocentric' but in future will facilitate rotational heading frames. - optional debug-mode boolean flag ''' environment_handler3.__init__(self, gridsize = \ { 'tse2007': (11,11), 'r-u': (7,7), 'ru': (7,7), \ 'r-u-ru': (7,7) }[experiment_name], \ action_mode = centr, card_or_rot=card_or_rot ) self.centr = centr self.card_or_rot = card_or_rot self.state_gen = state_generator(self.gridsz) self.start_states = [] self._set_starting_states({\ 'tse2007':TEMPLATE_TSE, \ 'r-u-ru': TEMPLATE_R_U_RU, \ 'r-u': TEMPLATE_R_U, \ }[experiment_name], debug) self.experiment_name = experiment_name def _find_all(self, a_str, char): # [internal] scan from a template string (eg, TEMPLATE_TSE) s = a_str.replace(' ','') startX, startY = 0,0 for c in s: if c==char: yield((startX, startY)) elif c=='r': startY += 1 startX = 0 if c in 'a!xm.': startX += 1 # Set this experiment's possible starting states using complete template str def _set_starting_states(self, state_template, debug=False): oind = state_template.index('o') if state_template.index('e') > oind: raise Exception() num_start_locs = state_template.count('a') num_goal_locs = state_template.count('!') if not state_template.find('*') > oind: raise Exception() start_locs = list(self._find_all(state_template, 'a')) goal_locs = list(self._find_all(state_template, '!')); block_locs = list(self._find_all(state_template, 'x')); if 'D' in state_template: mobile_locs = list(self._find_all(state_template, '!')); self.valid_states = np.array( [AL, GL, AL|GL, IL, ML, ML|GL] ).T else: try: mobile_locs = list(self._find_all(state_template, 'm')); except: mobile_locs = [] self.valid_states = np.array( [AL, GL, AL|GL, IL, ML] ).T # self.valid_states = np.append(self.valid_states, np.expand_dims(\ # np.array([0,0,0,0], dtype=bool)), axis=0) rx = [0,1,self.gridsz[X]-2, self.gridsz[X]-1] ry = [0,1,self.gridsz[Y]-2, self.gridsz[Y]-1] ''' flavor == goal here. 
''' for start_box in start_locs: for flav_id, flavor_loc in enumerate(goal_locs): st = np.zeros( (self.gridsz[X], self.gridsz[Y], NUM_LAYERS)) put(st, start_box, agentLayer, True) put(st, flavor_loc, goalLayer, True) put_all(st, mobile_locs, mobileLayer, True) put_all(st, block_locs, immobileLayer, True) self.start_states.append( { 'flavor signal': flav_id, \ 'state': st, '_whichgoal':flav_id, \ '_startpos':start_box, 'goal loc':flavor_loc }) # rnd_state = self.start_states[np.random.choice(range(24))] self.curr_sorted_states = self.start_states.copy() def dist(state_): x,y = state_['goal loc'], state_['_startpos'] return abs(x[0]-y[0])+abs(x[1]-y[1]) self.curr_sorted_states.sort(key=dist) rnd_state = np.random.choice(self.start_states) if debug: print('flag 93747') print_state(rnd_state, 'condensed') def _view_state_copy(self, st): sret = {} for key in ('_startpos','flavor signal','_whichgoal'): sret[key] = st[key] sret['state'] = np.copy(st['state']) return sret def get_random_starting_state(self): ''' Public method: get a random state struct with fields: 'state', '_startpos', 'flavor signal', '_whichgoal', 'goal loc'. The three later fields are helper attributes for, say, curricula or presentation. ''' #st = self.start_states[np.random.choice(range(24))] return self._view_state_copy(np.random.choice(self.start_states)) def get_weighted_starting_state(self, envir, pct): pct=float(pct) # print(envir,pct, 0.5*pct,1-pct) if envir=='r-u': raise Exception() assert (envir=='r-u-ru') ps = [0.5*pct, 0.5*pct, 1-pct] return self._view_state_copy(np.random.choice(\ self.curr_sorted_states, p=ps)) # for s in self.start_states: # print([s[x] for x in ['goal loc','_startpas'] ]) # for s in self.curr_sorted_states: # print([s[x] for x in ['goal loc','_startpos'] ]) def get_starting_state(self, curriculum_name, epoch, envir=None): # interface wrapper method for submethods curr = curriculum_name cspl = curriculum_name.split(':') if curr==None: return self.get_random_starting_state()['state'] elif len(curr)>4 and curr[:4]=='FLAT' and len(cspl)==2: return self.get_weighted_starting_state(envir, float(cspl[1]))['state'] elif len(curr)>6 and curr[:6]=='STEP-1' and len(cspl)==4: if epoch >= int(cspl[3]): return self.get_weighted_starting_state(envir, cspl[2])['state'] else: return self.get_weighted_starting_state(envir, cspl[1])['state'] elif len(curr)>8 and curr[:8]=='LINEAR-1' and len(cspl)==4: param = min(1.0, max(0.0, epoch/float(cspl[3]))) pct = param*float(cspl[2])+(1-param)*float(cspl[1]) return self.get_weighted_starting_state(envir, pct)['state'] else: raise exception(curr, cspl, epoch, envir) return curriculum_name, 'error expenv line ~200' # Very hacky: assert(self.experiment_name=='r-u-ru') l1 = len(TEMPLATE_R_U) l2 = len(TEMPLATE_RU) if curriculum_name=='STEP': ps = [0.5, 0.5, 0] if False else False return self._view_state_copy(np.random.choice(self.start_states), p=ps)['state'] def
get_all_starting_states
identifier_name
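get_starting_state in the rows above parses curriculum strings into a sampling percentage that weights the distance-sorted start states (ps = [0.5*pct, 0.5*pct, 1-pct]). A sketch of just the schedule arithmetic for the three formats it accepts:

# FLAT:p           -> constant p
# STEP-1:p0:p1:e   -> p0 before epoch e, p1 from epoch e on
# LINEAR-1:p0:p1:n -> linear ramp from p0 to p1 over n epochs
def curriculum_pct(curr, epoch):
    parts = curr.split(":")
    if parts[0] == "FLAT":
        return float(parts[1])
    if parts[0] == "STEP-1":
        return float(parts[2]) if epoch >= int(parts[3]) else float(parts[1])
    if parts[0] == "LINEAR-1":
        t = min(1.0, max(0.0, epoch / float(parts[3])))
        return t * float(parts[2]) + (1 - t) * float(parts[1])
    raise ValueError(curr)

for e in (0, 500, 1000):
    print(e, curriculum_pct("LINEAR-1:0.9:0.1:1000", e))  # 0.9, 0.5, 0.1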
main.rs
::convert::TryInto; use std::env; use std::fs; use std::fs::File; use std::io::prelude::*; use std::str; use uuid::Uuid; static mut USER_TOKEN: Vec<(String, String)> = Vec::new(); static mut USER_CHALLENGE: Vec<(String, u64)> = Vec::new(); #[derive(Debug)] struct User { username: String, salt: Salt, password_kdf: [u8; 32], secret: String, } #[derive(Serialize, Deserialize, Debug)] struct UserChallenge { username: String, challenge: u64, salt: Salt, } #[derive(Serialize, Deserialize, Debug)] struct Metadata { file_name: String, username: Vec<String>, nonce: [u8; 12], key: Vec<u8>, } #[derive(Deserialize, Debug)] struct ComputedChallenge { challenge: [u8; 32], } lazy_static! { static ref USER_DB: HashMap<&'static str, User> = { let mut map = HashMap::new(); // set up Google Authenticator let auth = GoogleAuthenticator::new(); // This part normally happens on the client but is deliberately // done on the server to simplify the architecture let salt = argon2id13::gen_salt(); let mut key = [0u8; 32]; argon2id13::derive_key( &mut key, "P@ssw0rd".as_bytes(), &salt, argon2id13::OPSLIMIT_SENSITIVE, argon2id13::MEMLIMIT_SENSITIVE, ) .unwrap(); map.insert( "jerome", User { username: "jerome".to_string(), salt: salt, password_kdf: key, secret: auth.create_secret(32), }, ); map }; } #[get("/server/{user_id}")] async fn username(web::Path(user_id): web::Path<String>) -> HttpResponse { // check whether the user is in the DB; if so, send them a challenge to solve match USER_DB.get::<str>(&user_id.to_string()) { Some(username) => { let user_challenge = UserChallenge { username: user_id.to_string(), salt: username.salt, challenge: OsRng.next_u64(), }; unsafe { USER_CHALLENGE.push((user_id, user_challenge.challenge)); } HttpResponse::Ok().body(serde_json::to_string(&user_challenge).unwrap()) } None => HttpResponse::NotFound().finish(), } } #[post("/server/{user_id}")] // <- define path parameters async fn username_post( web::Path(user_id): web::Path<String>, mut body: web::Payload, ) -> HttpResponse { // check whether the user exists in the DB let user = match USER_DB.get::<str>(&user_id.to_string()) { Some(user) => user, None => { return HttpResponse::NotFound().finish(); } }; // read the body to get the submitted challenge let mut bytes = web::BytesMut::new(); while let Some(item) = body.next().await { let item = item.unwrap(); bytes.extend_from_slice(&item); } // deserialize the submitted challenge let computed_challenge: ComputedChallenge = serde_json::from_str(str::from_utf8(&bytes).unwrap()).unwrap(); // retrieve the challenge that was sent to the client let challenge_to_compute: u64; unsafe { let index = USER_CHALLENGE.iter().position(|x| x.0 == user_id).unwrap(); challenge_to_compute = USER_CHALLENGE.get(index).unwrap().1; USER_CHALLENGE.remove(index); } // build the MAC from the KDF output stored in the DB type HmacSha256 = Hmac<Sha256>; let mut mac = HmacSha256::new_varkey(&user.password_kdf).expect("HMAC Error"); mac.update(&challenge_to_compute.to_be_bytes()); let challenge: [u8; 32] = mac .finalize() .into_bytes() .as_slice() .try_into() .expect("Wrong length"); // check whether the values match if challenge == computed_challenge.challenge { return HttpResponse::Ok().finish(); } HttpResponse::NonAuthoritativeInformation().finish() } #[get("/2fa/{user_id}")] async fn get_code(web::Path(user_id): web::Path<String>) -> HttpResponse { // set up Google Authenticator let auth = GoogleAuthenticator::new(); // check whether the user exists in the DB let
user = match USER_DB.get::<str>(&user_id.to_string()) { Some(user) => user, None => { return HttpResponse::NotFound().finish(); } }; // build the QR code let url = auth.qr_code_url( &user.secret, "qr_code", "name", 200, 200, ErrorCorrectionLevel::High, ); HttpResponse::Ok().body(url) } #[post("/2fa/{user_id}")] async fn validate_code(web::Path(user_id): web::Path<String>, req: HttpRequest) -> HttpResponse { // set up Google Authenticator let auth = GoogleAuthenticator::new(); // check whether the user exists in the DB let user = match USER_DB.get::<str>(&user_id.to_string()) { Some(user) => user, None => { return HttpResponse::NotFound().finish();
// get the code from the header let input_code: &str = req.headers().get("Code").unwrap().to_str().unwrap(); if !auth.verify_code(&user.secret, &input_code, 0, 0) { println!("Wrong code."); return HttpResponse::Unauthorized().finish(); } // if OK, send the user a token for subsequent exchanges let user_token: String = Uuid::new_v4().hyphenated().to_string(); unsafe { USER_TOKEN.push((user_id, user_token.clone())); } HttpResponse::Ok().header("Token", user_token).finish() } #[post("/upload")] async fn upload(mut body: web::Payload, req: HttpRequest) -> HttpResponse { // read and check the token if !check_token(&req) { return HttpResponse::NonAuthoritativeInformation().finish(); } // read the body let mut bytes = web::BytesMut::new(); while let Some(item) = body.next().await { let item = item.unwrap(); bytes.extend_from_slice(&item); } let res: Vec<u8> = bytes.to_vec(); // write the data to a file let mut file = File::create(req.headers().get("filename").unwrap().to_str().unwrap()).unwrap(); file.write_all(&res).unwrap(); HttpResponse::Ok().finish() } #[get("/download")] async fn download(req: HttpRequest) -> HttpResponse { // read and check the token let filename: &str = req.headers().get("FileName").unwrap().to_str().unwrap(); if !check_token(&req) { return HttpResponse::NonAuthoritativeInformation().finish(); } let work_file = env::current_dir().unwrap().join(&filename); // open and read the file let mut file = match File::open(work_file) { Ok(result) => result, Err(_) => { return HttpResponse::NoContent().finish(); } }; let mut ciphertext: Vec<u8> = Vec::new(); file.read_to_end(&mut ciphertext).unwrap(); HttpResponse::Ok().body(ciphertext) } #[get("/list")] async fn get_list(req: HttpRequest) -> HttpResponse { // read and check the token if !check_token(&req) { return HttpResponse::NonAuthoritativeInformation().finish(); } let user_name: &str = req.headers().get("Username").unwrap().to_str().unwrap(); // prepare the AES-GCM key and the nonce let key_aes = Key::from_slice(b"an example very very secret key."); let aead = Aes256Gcm::new(key_aes); let nonce = Nonce::from_slice(b"unique nonce"); let mut file_list = String::new(); // read the directory contents let paths = fs::read_dir("./").unwrap(); for path in paths { let file = path.unwrap().path().into_os_string().into_string().unwrap(); // for every metadata-type file if file.contains(".metadata") { let mut current_file = File::open(&file).expect("Unable to open the file"); let mut contents = String::new(); current_file .read_to_string(&mut contents) .expect("Unable to read the file"); let meta: Metadata = serde_json::from_str(&contents).unwrap(); if meta.username.contains(&user_name.to_string()) { file_list.push_str(&file.split(".metadata").collect::<String>());
} };
random_line_split
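The Rust rows above implement password authentication as challenge-response: GET /server/{user_id} returns a random u64 challenge plus the Argon2id salt, the client derives the 32-byte key from password and salt, MACs the big-endian challenge bytes with HMAC-SHA256, and POSTs the result for comparison. A minimal Python sketch of the MAC round-trip; the fixed key stands in for the Argon2id derivation, which is not in the standard library:

import hashlib
import hmac
import secrets

key = bytes(32)                        # stand-in for argon2id13::derive_key output

challenge = secrets.randbits(64)       # server: random u64, as OsRng.next_u64()

# client: HMAC-SHA256 over the big-endian challenge bytes (to_be_bytes() in Rust)
response = hmac.new(key, challenge.to_bytes(8, "big"), hashlib.sha256).digest()

# server: recompute and compare; compare_digest avoids the timing leak
# that the plain == comparison in the Rust code allows
expected = hmac.new(key, challenge.to_bytes(8, "big"), hashlib.sha256).digest()
print(hmac.compare_digest(response, expected))  # True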