content
stringlengths
228
999k
pred_label
stringclasses
1 value
pred_score
float64
0.5
1
You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long. skeleditor/skeleditor_options.php 219 lines 6.1 KiB PHP <?php /** * Plugin SkelEditor * Editeur de squelette en ligne * (c) depuis 2007 Collectif SPIP * Licence GPL-v3 */ if (!defined('_ECRIRE_INC_VERSION')) { return; } /** * Tester avec _request si on est dans l'edition de skeleditor et si oui, retourne l'extension du nom du fichier * * @return string */ function test_skeleditor_edition() { $exec = _request('exec'); $filename = _request('f'); if ( $exec == 'skeleditor' and $filename and $infos = pathinfo($filename) and $extension = $infos['extension'] ) { return $extension; } else { return false; } } /** * Produit les css dans le header_prive si nécessaire * * @param string $flux * @return string */ function skeleditor_insert_head_css($flux) { $extension = test_skeleditor_edition(); if ($extension) { static $done = false; if (!$done) { $done = true; $type = 'css'; $css = skeleditor_dir($extension, $type); $flux .= $css; } } return $flux; } /** * Produit les js dans le header_prive si nécessaire * * @param string $flux * @return string */ function skeleditor_insert_head($flux) { /* $extension = test_skeleditor_edition(); if($extension) { $type = "js"; $script = skeleditor_dir($extension, $type); $flux = skeleditor_insert_head_css($flux); // au cas ou il n'est pas implemente $flux .= $script; }*/ return $flux; } // pas de compresseur si var_inclure if (_request('var_mode') == 'inclure') { define('_INTERDIRE_COMPACTE_HEAD', true); } function skeleditor_extraire_css($texte) { $url_base = url_de_base(); $url_page = substr(generer_url_public('A'), 0, -1); $dir = preg_quote($url_page, ',') . '|' . preg_quote(preg_replace(",^$url_base,", _DIR_RACINE, $url_page), ','); $css = []; // trouver toutes les css pour les afficher dans le bouton // repris du compresseur foreach (extraire_balises($texte, 'link') as $s) { if ( extraire_attribut($s, 'rel') === 'stylesheet' and (!($type = extraire_attribut($s, 'type')) or $type == 'text/css') and !strlen(strip_tags($s)) and $src = preg_replace(",^$url_base,", _DIR_RACINE, extraire_attribut($s, 'href')) and ( // regarder si c'est du format spip.php?page=xxx preg_match(',^(' . $dir . ')(.*)$,', $src, $r) or ( // ou si c'est un fichier // enlever un timestamp eventuel derriere un nom de fichier statique $src2 = skeleditor_trouver_source($src) // verifier qu'il n'y a pas de ../ ni / au debut (securite) and !preg_match(',(^/|\.\.),', substr($src2, strlen(_DIR_RACINE))) // et si il est lisible and @is_readable($src2) ) ) ) { if ($r) { $css[$s] = explode( '&', str_replace('&amp;', '&', $r[2]), 2 ); } else { // var_dump($src2); $file = preg_replace(',[?]\d+$,', '', $src2); if (strncmp($file, _DIR_VAR, strlen(_DIR_VAR)) == 0) { lire_fichier($file, $c); if (preg_match(',^\/\*\s*(#@.*)\s*\*\/,Uims', $c, $m)) { $inc = explode('#@', $m[1]); $inc = array_map('trim', $inc); $inc = array_filter($inc); foreach ($inc as $i) { if (!in_array($i, $css)) { $css["$s:$i"] = $i; } } } } else { $css[$s] = $file; } } } } return $css; } function skeleditor_trouver_source($src) { $source_file = explode('?', $src); $source_file = reset($source_file); // est-ce un fichier (less|scss) cssifié // dans ce cas on l'ouvre et on lit les premieres lignes qui contiennent la reference aux fichiers compiles if ( strpos($source_file, '-cssify-') and preg_match('#/cache-(less|scss)/.*-cssify-[\w\d-]*.css#s', $source_file) ) { $start = file_get_contents($source_file, false, null, 0, 2048); if (strpos($start, '#@') !== false) { $start = explode('#@', $start); array_shift($start); $file = reset($start); $file = explode("\n", $file); $file = reset($file); if (file_exists($file)) { if (strpos($file, _ROOT_RACINE) === 0) { $file = substr($file, strlen(_ROOT_RACINE)); } return $file; } } } return $source_file; } function skeleditor_affichage_final($texte) { if ( isset($_COOKIE['spip_admin']) and $GLOBALS['html'] and isset($GLOBALS['visiteur_session']['statut']) and $GLOBALS['visiteur_session']['statut'] and intval($GLOBALS['visiteur_session']['statut']) < '1comite' and include_spip('inc/autoriser') and autoriser('skeleditor') ) { if ((defined('_VAR_INCLURE') and _VAR_INCLURE) or (isset($GLOBALS['var_inclure']) and $GLOBALS['var_inclure'])) { $retour = self(); $url = generer_url_ecrire('skeleditor', 'retour=' . $retour . '&f='); $inserer = "<script type='text/javascript'>jQuery(function(){jQuery('.inclure_blocs h6:first-child').each(function(){ jQuery(this).html(\"<a class='sepopin' href='$url\"+jQuery(this).html()+\"'>\"+jQuery(this).html()+'<'+'/a>'); });" //."jQuery('a.sepopin').click(function(){if (jQuery.modalbox) jQuery.modalbox(parametre_url(this.href,'var_zajax','contenu'));return false;});" . '});</script><style>.spip-admin-boutons {display:block;float:left;margin-right:10px; max-height:300px; overflow:auto;} .spip-admin-boutons a{display:block;opacity:0.7;} .spip-admin-boutons:hover,.spip-admin-boutons a:hover {opacity:1.0;}</style> </body>'; $texte = preg_replace(',</body>,', $inserer, $texte); $css = skeleditor_extraire_css($texte); $lienplus = []; foreach ($css as $src) { // si c'est un skel, le trouver if (is_array($src)) { $src = find_in_path($src[0] . '.' . _EXTENSION_SQUELETTES); } if ($src) { $lienplus[] = "<a href='$url" . urlencode($src) . "'" . '>' . basename($src) . '<\/a>'; } } if (count($lienplus)) { $lienplus = implode('', $lienplus); $lienplus = "<span class='spip-admin-boutons' id='inclure'>$lienplus<\/span>"; }; } else { $lienplus = "<a href='" . parametre_url(self(), 'var_mode', 'inclure') . "' class='spip-admin-boutons' " . "id='inclure'>" . _T('skeleditor:squelettes') . '<\/a>'; } if ($lienplus) { $inserer = "<script type='text/javascript'>/*<![CDATA[*/jQuery(function(){jQuery('#spip-admin').append(\"$lienplus\");});/*]]>*/</script></body>"; } $texte = preg_replace(',</body>,', $inserer, $texte); } return $texte; }
__label__pos
0.998104
Laravel API 系列教程(二): 结合 Laravel 5.5 和 Vue SPA 基于 jwt-auth 实现 API 认证 上一篇我们简单演示了 Laravel 5.5 中 RESTful API 的构建、认证和测试,本教程将在上一篇教程的基础上进行升华,我们将结合 Laravel 和 Vue 单页面应用(SPA),在此双剑合壁的基础上引入 jwt-auth 实现 API 认证,由于 Laravel 集成了对 Vue 的支持,所以在 Laravel 应用中使用 Vue 也是如鱼得水,非常顺畅,整篇教程涉及到的工具包括: • Node.js • Laravel 5.5 • jwt-auth • NPM • Vue.js 2.x • Vue-router • Vue-axios • @websanova/vue-auth 初始化前端 我们将会在上一篇创建应用的基础上进行开发。 首先在项目根目录下运行以下命令安装前端依赖: npm install 然后安装一些必要的 Vue 组件: npm install --save-dev vue-axios vue-router vue-loader vue-template-compiler 接下来我们来创建应用所需的 Vue 模板和视图。 resources/assets/js 目录下新建 App.vue <template> <div class="panel panel-default"> <div class="panel-heading"> <nav> <ul class="list-inline"> <li> <router-link :to="{ name: 'home' }">首页</router-link> </li> <li class="pull-right"> <router-link :to="{ name: 'login' }">登录</router-link> </li> <li class="pull-right"> <router-link :to="{ name: 'register' }">注册</router-link> </li> </ul> </nav> </div> <div class="panel-body"> <router-view></router-view> </div> </div> </template> resources/assets/js/components 目录下新增 Home.vue <template> <h1>Laravel 5 Vue SPA 认证</h1> </template> 替换 resouces/assets/js/app.js 内容如下: import Vue from 'vue'; import VueRouter from 'vue-router'; import App from './App.vue'; import Home from './components/Home.vue'; Vue.use(VueRouter); const router = new VueRouter({ routes: [ { path: '/', name: 'home', component: Home }, ] }); new Vue({ el: '#app', router: router, render: app => app(App) }); 替换 resources/views/welcome.blade.php 内容如下: <!DOCTYPE html> <html> <head> <meta charset="utf-8"> <meta name="csrf-token" content="{{ csrf_token() }}"> <title>Laravel</title> <link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" rel="stylesheet"> </head> <body> <div class="container"> <div id="app"></div> </div> <script src="/js/app.js"></script> </body> </html> 最后在项目根目录下运行 npm run watch,就可以在浏览器中通过 http://apidemo.test 访问新首页了: 创建 Vue 组件 接下来我们来创建需要的 Vue 组件。 resources/assets/js/components 目录下新建 Register.vue <template> <div> <div class="alert alert-danger" v-if="error && !success"> <p>出错了,很遗憾,未能完成注册</p> </div> <div class="alert alert-success" v-if="success"> <p>注册完成,你现在可以<router-link :to="{name:'login'}">登录</router-link>了</p> </div> <form autocomplete="off" @submit.prevent="register" v-if="!success" method="post"> <div class="form-group" v-bind:class="{ 'has-error': error && errors.name }"> <label for="name">用户名</label> <input type="text" id="name" class="form-control" v-model="name" required> <span class="help-block" v-if="error && errors.name">{{ errors.name }}</span> </div> <div class="form-group" v-bind:class="{ 'has-error': error && errors.email }"> <label for="email">邮箱</label> <input type="email" id="email" class="form-control" placeholder="[email protected]" v-model="email" required> <span class="help-block" v-if="error && errors.email">{{ errors.email }}</span> </div> <div class="form-group" v-bind:class="{ 'has-error': error && errors.password }"> <label for="password">密码</label> <input type="password" id="password" class="form-control" v-model="password" required> <span class="help-block" v-if="error && errors.password">{{ errors.password }}</span> </div> <button type="submit" class="btn btn-default">提交</button> </form> </div> </template> 在同一目录下创建 Login.vue <template> <div> <div class="alert alert-danger" v-if="error"> <p>出错了,请检查邮箱/密码是否正确</p> </div> <form autocomplete="off" @submit.prevent="login" method="post"> <div class="form-group"> <label for="email">邮箱</label> <input type="email" id="email" class="form-control" placeholder="[email protected]" v-model="email" required> </div> <div class="form-group"> <label for="password">密码</label> <input type="password" id="password" class="form-control" v-model="password" required> </div> <button type="submit" class="btn btn-default">登录</button> </form> </div> </template> 最后在该目录下新建 Dashboard.vue <template> <h1>Laravel 5 – 酷炫的后台</h1> </template> 编辑 resources/assets/js/app.js 文件内容如下: import Vue from 'vue'; import VueRouter from 'vue-router'; import axios from 'axios'; import VueAxios from 'vue-axios'; import App from './App.vue'; import Dashboard from './components/Dashboard.vue'; import Home from './components/Home.vue'; import Register from './components/Register.vue'; import Login from './components/Login.vue'; Vue.use(VueRouter); Vue.use(VueAxios, axios); axios.defaults.baseURL = 'http://apidemo.test/api'; const router = new VueRouter({ routes: [{ path: '/', name: 'home', component: Home },{ path: '/register', name: 'register', component: Register },{ path: '/login', name: 'login', component: Login }] }); @websanova/vue-auth @websanova/vue-auth 是客户端负责处理认证的库,它会注入一个 $auth 对象来提供很多有用的函数:比如 register() 来处理用户注册,login() 来处理用户登录,user() 来访问当前登录用户数据,logout() 来处理退出操作等等。 首先安装这个库: npm install @websanova/vue-auth 再次编辑 resources/assets/js/app.js import Vue from 'vue'; import VueRouter from 'vue-router'; import axios from 'axios'; import VueAxios from 'vue-axios'; import App from './App.vue'; import Dashboard from './components/Dashboard.vue'; import Home from './components/Home.vue'; import Register from './components/Register.vue'; import Login from './components/Login.vue'; Vue.use(VueRouter); Vue.use(VueAxios, axios); axios.defaults.baseURL = 'http://apidemo.test/api'; const router = new VueRouter({ routes: [{ path: '/', name: 'home', component: Home },{ path: '/register', name: 'register', component: Register, meta: { auth: false } },{ path: '/login', name: 'login', component: Login, meta: { auth: false } },{ path: '/dashboard', name: 'dashboard', component: Dashboard, meta: { auth: true } }] }); Vue.router = router Vue.use(require('@websanova/vue-auth'), { auth: require('@websanova/vue-auth/drivers/auth/bearer.js'), http: require('@websanova/vue-auth/drivers/http/axios.1.x.js'), router: require('@websanova/vue-auth/drivers/router/vue-router.2.x.js'), }); App.router = Vue.router new Vue(App).$mount('#app'); 在新增的代码中,我们首先引入刚刚安装的库并且做了一些配置: 使用 bearer 在请求期间添加认证 token 到请求头,以便服务端读取解析这个 token: auth: require(‘@websanova/vue-auth/drivers/auth/bearer.js’) 配置 vue-auth 使用 axios 来发送 HTTP 请求: http: require(‘@websanova/vue-auth/drivers/http/axios.1.x.js’) 我们还配置 vue-auth 使用 vue-router: router: require(‘@websanova/vue-auth/drivers/router/vue-router.2.x.js’) 最后,注意到: meta: { auth: true } 这个配置,该配置用于指定访问路由是否需要认证。 想要了解更多可以访问 @websanova/vue-auth Github 仓库。 现在运行 npm run watch,再访问后台 http://apidemo.test/#/dashboard,就会跳转到登录页面: jwt-auth 本教程中我们将使用 jwt-auth 来实现 API 认证,所以接下来安装这个扩展包: composer require tymon/jwt-auth 安装完成后在配置文件 config/app.php 中注册服务提供者和别名: ... 'providers' => [ ... Tymon\JWTAuth\Providers\JWTAuthServiceProvider::class, ] ... 'aliases' => [ ... 'JWTAuth' => Tymon\JWTAuth\Facades\JWTAuth::class, ] 发布资源和配置: php artisan vendor:publish --provider="Tymon\JWTAuth\Providers\JWTAuthServiceProvider" 在发布的配置中生成key: php artisan jwt:generate 如果上述命令执行报错,可以通过这个链接解决。 编辑 app/Http/Kernel.php 添加 jwt.authjwt.refresh 到应用路由中间件数组: protected $routeMiddleware = [ ... 'jwt.auth' => \Tymon\JWTAuth\Middleware\GetUserFromToken::class, 'jwt.refresh' => \Tymon\JWTAuth\Middleware\RefreshToken::class, ]; 完成以上操作接下来就行注册路由,创建控制器,进行 API 功能验证了。 注册接口实现 现在 routes/api.php 中注册路由: Route::post('auth/register', 'AuthController@register'); 创建认证所需控制器: php artisan make:controller AuthController 我们再创建一个 FormRequest 来处理注册请求验证: php artisan make:request RegisterFormRequest 首先编写 FormRequest 处理类的验证规则代码: class RegisterFormRequest extends FormRequest { public function authorize() { return true; } public function rules() { return [ 'name' => 'required|string|unique:users', 'email' => 'required|email|unique:users', 'password' => 'required|string|min:6|max:10', ]; } } 接下来在控制器 AuthController 中创建一个 register 方法: public function register(RegisterFormRequest $request) { $user = new User; $user->email = $request->email; $user->name = $request->name; $user->password = bcrypt($request->password); $user->save(); return response([ 'status' => 'success', 'data' => $user ], 200); } 学院君友情提醒:直接拷贝代码的同学注意在代码中引入相应类的命名空间,不要报错了又来数落文档不负责,学院君很无辜,文档更无辜😂。 最后在 Register.vue 文件最后添加如下代码: <script> export default { data(){ return { name: '', email: '', password: '', error: false, errors: {}, success: false }; }, methods: { register(){ var app = this this.$auth.register({ params: { name: app.name, email: app.email, password: app.password }, success: function () { app.success = true }, error: function (resp) { app.error = true; app.errors = resp.response.data.errors; }, redirect: null }); } } } </script> 再次运行 npm run watch,然后在浏览器中通过 http://apidemo.test/#/register 访问注册页面进行注册: 如果注册失败,报错页面如下: 注册成功则页面显示如下: 登录接口实现 回到 AuthController 添加 login() 方法: public function login(Request $request) { $credentials = $request->only('email', 'password'); if ( ! $token = JWTAuth::attempt($credentials)) { return response([ 'status' => 'error', 'error' => 'invalid.credentials', 'msg' => 'Invalid Credentials.' ], 400); } return response(['status' => 'success']) ->header('Authorization', $token); } 此外我们继续添加 user()refresh() 方法到该控制器: public function user(Request $request) { $user = User::find(Auth::user()->id); return response([ 'status' => 'success', 'data' => $user ]); } public function refresh() { return response([ 'status' => 'success' ]); } 其中 user() 方法用于获取当前登录用户数据,而 refresh() 方法用于检查当前登录用户 token 是否仍然有效。 当然,不要忘了给上面新增的控制器方法注册路由: Route::post('auth/login', 'AuthController@login'); Route::group(['middleware' => 'jwt.auth'], function(){ Route::get('auth/user', 'AuthController@user'); }); Route::group(['middleware' => 'jwt.refresh'], function(){ Route::get('auth/refresh', 'AuthController@refresh'); }); 最后将以下代码添加到 Login.vue 最后: <script> export default { data(){ return { email: null, password: null, error: false } }, methods: { login(){ var app = this this.$auth.login({ params: { email: app.email, password: app.password }, success: function () {}, error: function () {}, rememberMe: true, redirect: '/dashboard', fetchUser: true, }); }, } } </script> 运行 npm run watch, 进入登录页面 http://apidemo.test/#/login 输入之前注册用户信息进行登录: 登录成功后页面跳转到后台 http://apidemo.test/#/dashboard 退出接口实现 最后,我们添加 logout() 方法到控制器 AuthController public function logout() { JWTAuth::invalidate(); return response([ 'status' => 'success', 'msg' => 'Logged out Successfully.' ], 200); } 该方法会确保用户从后台退出,从而使 token 失效,进而从客户端清除。 routes/api.php 注册对应路由: Route::group(['middleware' => 'jwt.auth'], function(){ ... Route::post('auth/logout', 'AuthController@logout'); }); 然后编辑 App.vue 文件: <template> <div class="panel panel-default"> <div class="panel-heading"> <nav> <ul class="list-inline"> <li> <router-link :to="{ name: 'home' }">首页</router-link> </li> <li v-if="!$auth.check()" class="pull-right"> <router-link :to="{ name: 'login' }">登录</router-link> </li> <li v-if="!$auth.check()" class="pull-right"> <router-link :to="{ name: 'register' }">注册</router-link> </li> <li v-if="$auth.check()" class="pull-right"> <a href="#" @click.prevent="$auth.logout()">退出</a> </li> </ul> </nav> </div> <div class="panel-body"> <router-view></router-view> </div> </div> </template> $auth.check() 用于检查用户是否登录,$auth.logout() 用于用户退出请求。 运行 npm run watch,刷新 http://apidemo.test/#/dashboard,页面显示如下: 点击「退出」按钮,用户退出,页面跳转到首页: 至此,我们已经集合 Laravel + Vue 基于 jwt-auth 实现了 API 基本认证功能。下一篇我们来讲讲如何将 Laravel 5.5 新增的 Eloquent API Resource 功能集成进来。 学院君 has written 1162 articles Laravel学院院长,终身学习者 积分:146322 等级:P12 职业:手艺人 城市:杭州 36 条回复 1. Cannot read property 'Authorization' of undefined 搭建完注册页面后,提交注册信息js出现这个报错? 2. ideasource ideasource says: 本章中的登录模块,点击登录后看network,第一个参数返回了success,但是第二个返回 token_not_provided。请问是jwt哪里没配置好呢? 3. 滑稽果 滑稽果 says: user方法一直返回500错误 注册登陆返回的都是成功了 是我命名空间引用错误了吗 4. zouguoyong zouguoyong says: exception: "Symfony\Component\Debug\Exception\FatalThrowableError" file: "D:\laragon\www\laravel\vendor\tymon\jwt-auth\src\JWT.php" line: 88 message: "Argument 1 passed to Tymon\JWTAuth\JWT::fromUser() must be an instance of Tymon\JWTAuth\Contracts\JWTSubject, instance of App\User given, called in D:\laragon\www\laravel\vendor\tymon\jwt-auth\src\JWTAuth.php on line 54" 登录报错了 5. musickr musickr says: { "message": "Non-static method Tymon\JWTAuth\JWTAuth::attempt() should not be called statically", "code": 0 } 这个问题怎么解决啊 6. rabbitns rabbitns says: @ myhyperion websanova/vue-auth 这个会定时刷新token以保证token 不过期 user是用户信息 保存在本地中,如果不存在就先调取了 7. rabbitns rabbitns says: @ myhyperion 控制器的refresh直接返回response 这是在中间件中刷新了token 登录后才能进行评论,立即登录?
__label__pos
0.805047
2020年MySQL数据库面试题总结(50道题含答案解析和思维导图)-阿里云开发者社区 开发者社区> 数据库> 正文 登录阅读全文 2020年MySQL数据库面试题总结(50道题含答案解析和思维导图) 简介: 常见的一些MySQL数据库面试题总结 前言 关于MySQL的知识点总结了一个思维导图分享给大家,希望对大家有所帮助。 MySQL知识点总结.jpg 1、MySQL 中有哪几种锁? (1)表级锁:开销小,加锁快;不会出现死锁;锁定粒度大,发生锁冲突的概率最 高,并发度最低。 (2)行级锁:开销大,加锁慢;会出现死锁;锁定粒度最小,发生锁冲突的概率最 低,并发度也最高。 (3)页面锁:开销和加锁时间界于表锁和行锁之间;会出现死锁;锁定粒度界于表 锁和行锁之间,并发度一般。 2、MySQL 中有哪些不同的表格? 共有 5 种类型的表格: (1)MyISAM (2)Heap (3)Merge (4)INNODB (5)ISAM 3、简述在 MySQL 数据库中 MyISAM 和 InnoDB 的区别 MyISAM: (1)不支持事务,但是每次查询都是原子的; (2)支持表级锁,即每次操作是对整个表加锁; (3)存储表的总行数; (4)一个 MYISAM 表有三个文件:索引文件、表结构文件、数据文件; (5)采用菲聚集索引,索引文件的数据域存储指向数据文件的指针。辅索引与主索引基本一致,但是辅索引不用保证唯一性。 InnoDb: (1)支持 ACID 的事务,支持事务的四种隔离级别; (2)支持行级锁及外键约束:因此可以支持写并发; (3)不存储总行数: (4)一个 InnoDb 引擎存储在一个文件空间(共享表空间,表大小不受操作系统控制,一个表可能分布在多个文件里),也有可能为多个(设置为独立表空,表大小受操作系统文件大小限制,一般为 2G),受操作系统文件大小的限制;关注公种浩:程序员追风,回复 003 领取2020最新Java面试题手册(200多页PDF文档)。 (5)主键索引采用聚集索引(索引的数据域存储数据文件本身),辅索引的数据域存储主键的值;因此从辅索引查找数据,需要先通过辅索引找到主键值,再访问辅索引;最好使用自增主键,防止插入数据时,为维持 B+树结构,文件的大调整。 4、MySQL 中 InnoDB 支持的四种事务隔离级别名称,以及逐级之间的区别? SQL 标准定义的四个隔离级别为: (1)read uncommited :读到未提交数据 (2)read committed:脏读,不可重复读 (3)repeatable read:可重读 (4)serializable :串行事物 5、CHAR 和 VARCHAR 的区别? (1)CHAR 和 VARCHAR 类型在存储和检索方面有所不同 (2)CHAR 列长度固定为创建表时声明的长度,长度值范围是 1 到 255 当 CHAR值被存储时,它们被用空格填充到特定长度,检索 CHAR 值时需删除尾随空格。 6、主键和候选键有什么区别? 表格的每一行都由主键唯一标识,一个表只有一个主键。 主键也是候选键。按照惯例,候选键可以被指定为主键,并且可以用于任何外键引用。 7、myisamchk 是用来做什么的? 它用来压缩 MyISAM 表,这减少了磁盘或内存使用。 MyISAM Static 和 MyISAM Dynamic 有什么区别? 在 MyISAM Static 上的所有字段有固定宽度。动态 MyISAM 表将具有像 TEXT,BLOB 等字段,以适应不同长度的数据类型。 MyISAM Static 在受损情况下更容易恢复。 8、如果一个表有一列定义为 TIMESTAMP,将发生什么? 每当行被更改时,时间戳字段将获取当前时间戳。 列设置为 AUTO INCREMENT 时,如果在表中达到最大值,会发生什么情况? 它会停止递增,任何进一步的插入都将产生错误,因为密钥已被使用。 怎样才能找出最后一次插入时分配了哪个自动增量? LAST_INSERT_ID 将返回由 Auto_increment 分配的最后一个值,并且不需要指定表名称。 9、你怎么看到为表格定义的所有索引? 索引是通过以下方式为表格定义的: SHOW INDEX FROM ; 10、LIKE 声明中的%和_是什么意思? %对应于 0 个或更多字符,_只是 LIKE 语句中的一个字符。 如何在 Unix 和 MySQL 时间戳之间进行转换? UNIX_TIMESTAMP 是从 MySQL 时间戳转换为 Unix 时间戳的命令 FROM_UNIXTIME 是从 Unix 时间戳转换为 MySQL 时间戳的命令 11、列对比运算符是什么? 在 SELECT 语句的列比较中使用=,<>,<=,<,> =,>,<<,>>,<=>,AND,OR 或 LIKE 运算符。 12、BLOB 和 TEXT 有什么区别? BLOB 是一个二进制对象,可以容纳可变数量的数据。TEXT 是一个不区分大小写的 BLOB。 BLOB 和 TEXT 类型之间的唯一区别在于对 BLOB 值进行排序和比较时区分大小写,对 TEXT 值不区分大小写。 13、MySQL_fetch_array 和 MySQL_fetch_object 的区别是什么? 以下是 MySQL_fetch_array 和 MySQL_fetch_object 的区别: MySQL_fetch_array() – 将结果行作为关联数组或来自数据库的常规数组返回。 MySQL_fetch_object – 从数据库返回结果行作为对象。 14、MyISAM 表格将在哪里存储,并且还提供其存储格式? 每个 MyISAM 表格以三种格式存储在磁盘上: (1)·“.frm”文件存储表定义 (2)·数据文件具有“.MYD”(MYData)扩展名 (3)索引文件具有“.MYI”(MYIndex)扩展名 15、MySQL 如何优化 DISTINCT? DISTINCT 在所有列上转换为 GROUP BY,并与 ORDER BY 子句结合使用。 SELECT DISTINCT t1.a FROM t1,t2 where t1.a=t2.a; 16、如何显示前 50 行? 在 MySQL 中,使用以下代码查询显示前 50 行: SELECT*FROM LIMIT 0,50; 17、可以使用多少列创建索引? 任何标准表最多可以创建 16 个索引列。 18、NOW()和 CURRENT_DATE()有什么区别? NOW()命令用于显示当前年份,月份,日期,小时,分钟和秒。 CURRENT_DATE()仅显示当前年份,月份和日期。 19、什么是非标准字符串类型? (1)TINYTEXT (2)TEXT (3)MEDIUMTEXT (4)LONGTEXT 20、什么是通用 SQL 函数? (1)CONCAT(A, B) – 连接两个字符串值以创建单个字符串输出。通常用于将两个或多个字段合并为一个字段。 (2)FORMAT(X, D)- 格式化数字 X 到 D 有效数字。 (3)CURRDATE(), CURRTIME()- 返回当前日期或时间。 (4)NOW() – 将当前日期和时间作为一个值返回。 (5)MONTH(),DAY(),YEAR(),WEEK(),WEEKDAY() – 从日期值中提取给定数据。 (6)HOUR(),MINUTE(),SECOND() – 从时间值中提取给定数据。 (7)DATEDIFF(A,B) – 确定两个日期之间的差异,通常用于计算年龄 (8)SUBTIMES(A,B) – 确定两次之间的差异。 (9)FROMDAYS(INT) – 将整数天数转换为日期值。 21、MySQL 支持事务吗? 在缺省模式下,MySQL 是 autocommit 模式的,所有的数据库更新操作都会即时提交,所以在缺省情况下,MySQL 是不支持事务的。 但是如果你的 MySQL 表类型是使用 InnoDB Tables 或 BDB tables 的话,你的MySQL 就可以使用事务处理,使用 SETAUTOCOMMIT=0 就可以使 MySQL 允许在非 autocommit 模式,在非autocommit 模式下,你必须使用 COMMIT 来提交你的更改,或者用 ROLLBACK来回滚你的更改。 22、MySQL 里记录货币用什么字段类型好 NUMERIC 和 DECIMAL 类型被 MySQL 实现为同样的类型,这在 SQL92 标准允许。他们被用于保存值,该值的准确精度是极其重要的值,例如与金钱有关的数据。当声明一个类是这些类型之一时,精度和规模的能被(并且通常是)指定。 例如: salary DECIMAL(9,2) 在这个例子中,9(precision)代表将被用于存储值的总的小数位数,而 2(scale)代 表将被用于存储小数点后的位数。 因此,在这种情况下,能被存储在 salary 列中的值的范围是从-9999999.99 到9999999.99。 23、MySQL 有关权限的表都有哪几个? MySQL 服务器通过权限表来控制用户对数据库的访问,权限表存放在 MySQL 数据库里,由 MySQL_install_db 脚本初始化。这些权限表分别 user,db,table_priv,columns_priv 和 host。 24、列的字符串类型可以是什么? 字符串类型是: (1)SET2 (2)BLOB (3)ENUM (4)CHAR (5)TEXT 25、MySQL 数据库作发布系统的存储,一天五万条以上的增量,预计运维三年,怎么优化? (1)设计良好的数据库结构,允许部分数据冗余,尽量避免 join 查询,提高效率。 (2)选择合适的表字段数据类型和存储引擎,适当的添加索引。 (3)MySQL 库主从读写分离。 (4)找规律分表,减少单表中的数据量提高查询速度。 (5)添加缓存机制,比如 memcached,apc 等。 (6)不经常改动的页面,生成静态页面。 (7)书写高效率的 SQL。比如 SELECT * FROM TABEL 改为 SELECT field_1, field_2, field_3 FROM TABLE. 26、锁的优化策略 (1)读写分离 (2)分段加锁 (3)减少锁持有的时间 (4)多个线程尽量以相同的顺序去获取资源 不能将锁的粒度过于细化,不然可能会出现线程的加锁和释放次数过多,反而效率不如一次加一把大锁。 27、索引的底层实现原理和优化 B+树,经过优化的 B+树 主要是在所有的叶子结点中增加了指向下一个叶子节点的指针,因此 InnoDB 建议为大部分表使用默认自增的主键作为主索引。 28、什么情况下设置了索引但无法使用 (1)以“%”开头的 LIKE 语句,模糊匹配 (2)OR 语句前后没有同时使用索引 (3)数据类型出现隐式转化(如 varchar 不加单引号的话可能会自动转换为 int 型) 29、实践中如何优化 MySQL 最好是按照以下顺序优化: (1)SQL 语句及索引的优化 (2)数据库表结构的优化 (3)系统配置的优化 (4)硬件的优化 30、优化数据库的方法 (1)选取最适用的字段属性,尽可能减少定义字段宽度,尽量把字段设置 NOTNULL,例如’省份’、’性别’最好适用 ENUM (2)使用连接(JOIN)来代替子查询 (3)适用联合(UNION)来代替手动创建的临时表 (4)事务处理 (5)锁定表、优化事务处理 (6)适用外键,优化锁定表 (7)建立索引 (8)优化查询语句 31、简单描述 MySQL 中,索引,主键,唯一索引,联合索引的区别,对数据库的性能有什么影响(从读写两方面) 索引是一种特殊的文件(InnoDB 数据表上的索引是表空间的一个组成部分),它们包含着对数据表里所有记录的引用指针。 普通索引(由关键字 KEY 或 INDEX 定义的索引)的唯一任务是加快对数据的访问速度。 普通索引允许被索引的数据列包含重复的值。如果能确定某个数据列将只包含彼此各不相同的值,在为这个数据列创建索引的时候就应该用关键字 UNIQUE 把它定义为一个唯一索引。也就是说,唯一索引可以保证数据记录的唯一性。 主键,是一种特殊的唯一索引,在一张表中只能定义一个主键索引,主键用于唯一标识一条记录,使用关键字 PRIMARY KEY 来创建。 索引可以覆盖多个数据列,如像 INDEX(columnA, columnB)索引,这就是联合索引。关注公种浩:程序员追风,回复 003 领取2020最新Java面试题手册(200多页PDF文档)。 索引可以极大的提高数据的查询速度,但是会降低插入、删除、更新表的速度,因为在执行这些写操作时,还要操作索引文件。 32、数据库中的事务是什么? 事务(transaction)是作为一个单元的一组有序的数据库操作。如果组中的所有操作都成功,则认为事务成功,即使只有一个操作失败,事务也不成功。如果所有操作完成,事务则提交,其修改将作用于所有其他数据库进程。如果一个操作失败,则事务将回滚,该事务所有操作的影响都将取消。 事务特性: (1)原子性:即不可分割性,事务要么全部被执行,要么就全部不被执行。 (2)一致性或可串性。事务的执行使得数据库从一种正确状态转换成另一种正确状态。 (3)隔离性。在事务正确提交之前,不允许把该事务对数据的任何改变提供给任何其他事务。 (4)持久性。事务正确提交后,其结果将永久保存在数据库中,即使在事务提交后有了其他故障,事务的处理结果也会得到保存。 或者这样理解: 事务就是被绑定在一起作为一个逻辑工作单元的 SQL 语句分组,如果任何一个语句操作失败那么整个操作就被失败,以后操作就会回滚到操作前状态,或者是上有个节点。为了确保要么执行,要么不执行,就可以使用事务。要将有组语句作为事务考虑,就需要通过 ACID 测试,即原子性,一致性,隔离性和持久性。 33、SQL 注入漏洞产生的原因?如何防止? SQL 注入产生的原因:程序开发过程中不注意规范书写 sql 语句和对特殊字符进行过滤,导致客户端可以通过全局变量 POST 和 GET 提交一些 sql 语句正常执行。 防止 SQL 注入的方式: 开启配置文件中的 magic_quotes_gpc 和 magic_quotes_runtime 设置 执行 sql 语句时使用 addslashes 进行 sql 语句转换 Sql 语句书写尽量不要省略双引号和单引号。 过滤掉 sql 语句中的一些关键词:update、insert、delete、select、 * 。 提高数据库表和字段的命名技巧,对一些重要的字段根据程序的特点命名,取不易被猜到的。 34、为表中得字段选择合适得数据类型 字段类型优先级: 整形>date,time>enum,char>varchar>blob,text 优先考虑数字类型,其次是日期或者二进制类型,最后是字符串类型,同级别得数据类型,应该优先选择占用空间小的数据类型 35、存储时期 Datatime: 以 YYYY-MM-DD HH:MM:SS 格式存储时期时间,精确到秒,占用 8 个字节得存储空间,datatime 类型与时区无关Timestamp:以时间戳格式存储,占用 4 个字节,范围小 1970-1-1 到 2038-1-19,显示依赖于所指定得时区,默认在第一个列行的数据修改时可以自动得修改timestamp 列得值 Date: (生日)占用得字节数比使用字符串.datatime.int 储存要少,使用 date 只需要 3 个字节,存储日期月份,还可以利用日期时间函数进行日期间得计算 Time: 存储时间部分得数据 注意: 不要使用字符串类型来存储日期时间数据(通常比字符串占用得储存空间小,在进行查找过滤可以利用日期得函数) 使用 int 存储日期时间不如使用 timestamp 类型 36、对于关系型数据库而言,索引是相当重要的概念,请回答有关索引的几个问题: (1)索引的目的是什么? 快速访问数据表中的特定信息,提高检索速度 创建唯一性索引,保证数据库表中每一行数据的唯一性。 加速表和表之间的连接 使用分组和排序子句进行数据检索时,可以显著减少查询中分组和排序的时间 (2)索引对数据库系统的负面影响是什么? 负面影响: 创建索引和维护索引需要耗费时间,这个时间随着数据量的增加而增加;索引需要占用物理空间,不光是表需要占用数据空间,每个索引也需要占用物理空间;当对表进行增、删、改、的时候索引也要动态维护,这样就降低了数据的维护速度。 (3)为数据表建立索引的原则有哪些? 在最频繁使用的、用以缩小查询范围的字段上建立索引。 在频繁使用的、需要排序的字段上建立索引 (4)什么情况下不宜建立索引? 对于查询中很少涉及的列或者重复值比较多的列,不宜建立索引。 对于一些特殊的数据类型,不宜建立索引,比如文本字段(text)等 37、解释 MySQL 外连接、内连接与自连接的区别 先说什么是交叉连接: 交叉连接又叫笛卡尔积,它是指不使用任何条件,直接将一个表的所有记录和另一个表中的所有记录一一匹配。 内连接 则是只有条件的交叉连接,根据某个条件筛选出符合条件的记录,不符合条件的记录不会出现在结果集中,即内连接只连接匹配的行。 外连接 其结果集中不仅包含符合连接条件的行,而且还会包括左表、右表或两个表中的所有数据行,这三种情况依次称之为左外连接,右外连接,和全外连接。 左外连接 也称左连接,左表为主表,左表中的所有记录都会出现在结果集中,对于那些在右表中并没有匹配的记录,仍然要显示,右边对应的那些字段值以NULL 来填充。右外连接,也称右连接,右表为主表,右表中的所有记录都会出现在结果集中。左连接和右连接可以互换,MySQL 目前还不支持全外连接。 38、Myql 中的事务回滚机制概述 事务是用户定义的一个数据库操作序列,这些操作要么全做要么全不做,是一个不可分割的工作单位,事务回滚是指将该事务已经完成的对数据库的更新操作撤销。 要同时修改数据库中两个不同表时,如果它们不是一个事务的话,当第一个表修改完,可能第二个表修改过程中出现了异常而没能修改,此时就只有第二个表依旧是未修改之前的状态,而第一个表已经被修改完毕。而当你把它们设定为一个事务的时候,当第一个表修改完,第二表修改出现异常而没能修改,第一个表和第二个表都要回到未修改的状态,这就是所谓的事务回滚 39、SQL 语言包括哪几部分?每部分都有哪些操作关键字? SQL 语言包括数据定义(DDL)、数据操纵(DML),数据控制(DCL)和数据查询(DQL) 四个部分。 数据定义: Create Table,Alter Table,Drop Table, Craete/Drop Index 等 数据操纵: Select ,insert,update,delete, 数据控制: grant,revoke 数据查询: select 40、完整性约束包括哪些? 数据完整性(Data Integrity)是指数据的精确(Accuracy)和可靠性(Reliability)。 分为以下四类: (1)实体完整性: 规定表的每一行在表中是惟一的实体。 (2)域完整性: 是指表中的列必须满足某种特定的数据类型约束,其中约束又包括取值范围、精度等规定。 (3)参照完整性: 是指两个表的主关键字和外关键字的数据应一致,保证了表之间的数据的一致性,防止了数据丢失或无意义的数据在数据库中扩散。 (4)用户定义的完整性: 不同的关系数据库系统根据其应用环境的不同,往往还需要一些特殊的约束条件。用户定义的完整性即是针对某个特定关系数据库的约束条件,它反映某一具体应用必须满足的语义要求。 与表有关的约束: 包括列约束(NOT NULL(非空约束))和表约束(PRIMARY KEY、foreign key、check、UNIQUE) 。 41、什么是锁? 数据库是一个多用户使用的共享资源。当多个用户并发地存取数据时,在数据库中就会产生多个事务同时存取同一数据的情况。若对并发操作不加控制就可能会读取和存储不正确的数据,破坏数据库的一致性。 加锁是实现数据库并发控制的一个非常重要的技术。当事务在对某个数据对象进行操作前,先向系统发出请求,对其加锁。加锁后事务就对该数据对象有了一定的控制,在该事务释放锁之前,其他的事务不能对此数据对象进行更新操作。 基本锁类型:锁包括行级锁和表级锁 42、什么叫视图?游标是什么? 视图是一种虚拟的表,具有和物理表相同的功能。可以对视图进行增,改,查,操作,视图通常是有一个表或者多个表的行或列的子集。对视图的修改不影响基本表。它使得我们获取数据更容易,相比多表查询。 游标:是对查询出来的结果集作为一个单元来有效的处理。游标可以定在该单元中的特定行,从结果集的当前行检索一行或多行。可以对结果集当前行做修改。一般不使用游标,但是需要逐条处理数据的时候,游标显得十分重要。 43、什么是存储过程?用什么来调用? 存储过程是一个预编译的 SQL 语句,优点是允许模块化的设计,就是说只需创建一次,以后在该程序中就可以调用多次。如果某次操作需要执行多次 SQL,使用存储过程比单纯 SQL 语句执行要快。可以用一个命令对象来调用存储过程。 44、如何通俗地理解三个范式? 第一范式:1NF 是对属性的原子性约束,要求属性具有原子性,不可再分解; 第二范式:2NF 是对记录的惟一性约束,要求记录有惟一标识,即实体的惟一性; 第三范式:3NF 是对字段冗余性的约束,即任何字段不能由其他字段派生出来,它要求字段没有冗余。。 范式化设计优缺点: 优点:可以尽量得减少数据冗余,使得更新快,体积小 缺点:对于查询需要多个表进行关联,减少写得效率增加读得效率,更难进行索引优化 反范式化: 优点:可以减少表得关联,可以更好得进行索引优化 缺点:数据冗余以及数据异常,数据得修改需要更多的成本 45、什么是基本表?什么是视图? 基本表是本身独立存在的表,在 SQL 中一个关系就对应一个表。 视图是从一个或几个基本表导出的表。视图本身不独立存储在数据库中,是一个虚表 46、试述视图的优点? (1) 视图能够简化用户的操作 (2) 视图使用户能以多种角度看待同一数据; (3) 视图为数据库提供了一定程度的逻辑独立性; (4) 视图能够对机密数据提供安全保护。 47、 NULL 是什么意思 NULL 这个值表示 UNKNOWN(未知):它不表示“”(空字符串)。对 NULL 这个值的任何比较都会生产一个 NULL 值。您不能把任何值与一个 NULL 值进行比较,并在逻辑上希望获得一个答案。 使用 IS NULL 来进行 NULL 判断 48、主键、外键和索引的区别? 主键、外键和索引的区别 定义: 主键——唯一标识一条记录,不能有重复的,不允许为空 外键——表的外键是另一表的主键, 外键可以有重复的, 可以是空值 索引——该字段没有重复值,但可以有一个空值 作用: 主键——用来保证数据完整性 外键——用来和其他表建立联系用的 索引——是提高查询排序的速度 个数: 主键—— 主键只能有一个 外键—— 一个表可以有多个外键 索引—— 一个表可以有多个唯一索引 49、你可以用什么来确保表格里的字段只接受特定范围里的值? Check 限制,它在数据库表格里被定义,用来限制输入该列的值。 触发器也可以被用来限制数据库表格里的字段能够接受的值,但是这种办法要求触发器在表格里被定义,这可能会在某些情况下影响到性能。 50、说说对 SQL 语句优化有哪些方法?(选择几条) (1)Where 子句中:where 表之间的连接必须写在其他 Where 条件之前,那些可以过滤掉最大数量记录的条件必须写在 Where 子句的末尾.HAVING 最后。关注公种浩:程序员追风,回复 003 领取2020最新Java面试题手册(200多页PDF文档)。 (2)用 EXISTS 替代 IN、用 NOT EXISTS 替代 NOT IN。 (3) 避免在索引列上使用计算 (4)避免在索引列上使用 IS NULL 和 IS NOT NULL (5)对查询进行优化,应尽量避免全表扫描,首先应考虑在 where 及 order by 涉及的列上建立索引。 (6)应尽量避免在 where 子句中对字段进行 null 值判断,否则将导致引擎放弃使用索引而进行全表扫描 (7)应尽量避免在 where 子句中对字段进行表达式操作,这将导致引擎放弃使用索引而进行全表扫描 版权声明:本文内容由阿里云实名注册用户自发贡献,版权归原作者所有,阿里云开发者社区不拥有其著作权,亦不承担相应法律责任。具体规则请查看《阿里云开发者社区用户服务协议》和《阿里云开发者社区知识产权保护指引》。如果您发现本社区中有涉嫌抄袭的内容,填写侵权投诉表单进行举报,一经查实,本社区将立刻删除涉嫌侵权内容。 分享: 数据库 使用钉钉扫一扫加入圈子 + 订阅 分享数据库前沿,解构实战干货,推动数据库技术变革 其他文章 最新文章 相关文章
__label__pos
0.991808
Attention: v4.8 of the API has released! See change log. Posts related to COVID-19 are still temporarily permitted for chains. REST Resource: googleLocations Resource: GoogleLocation Represents a Location that is present on Google. This can be a location that has been claimed by the user, someone else, or could be unclaimed. JSON representation { "name": string, "location": { object (Location) }, "requestAdminRightsUrl": string } Fields name string Resource name of this GoogleLocation, in the format googleLocations/{googleLocationId}. location object (Location) The sparsely populated Location information. This field can be re-used in locations.create if it is not currently claimed by a user. requestAdminRightsUrl string A URL that will redirect the user to the request admin rights UI. This field is only present if the location has already been claimed by any user, including the current user. Methods report Report a GoogleLocation. Search all of the possible locations that are a match to the specified request.
__label__pos
0.952751
Jumping to a page There are several ways to jump to a page in Xodo: by page number, by outline, by bookmark, or by using the thumbnail slider. Method 1: By page number 1. Tap the page number display in the bottom left hand corner of the document viewer. If you don't see the page number display, tap the center of your screen to bring it up. 2. In the Go To Page dialog, type in the number of the page you want to navigate to and tap OK. Method 2: By outline If your document contains a table of contents, you can jump to specific chapters and sections using the Outline view. 1. Tap the  Bookmark icon on the bottom navigation bar. If you don't see the navigation bar, tap the center of your screen to bring it up. 2. Tap OUTLINE. 3. Tap the chapter or section you want to navigate to. Method 3: By bookmark If you've added bookmarks to certain pages, you can jump to these pages from the Bookmarks view. 1. Tap the Bookmark icon on the bottom navigation bar. If you don't see the navigation bar, tap the center of your screen to bring it up. 2. Under BOOKMARKS, tap the bookmark you want to navigate to. Method 4: Using the thumbnail slider 1. Hold and drag on the thumbnail slider on the bottom of the document viewer. The grey square shows which page you will navigate to. A thumbnail preview of each page and its page number appear above the thumbnail slider. If you don't see the thumbnail slider, tap the center of the screen to bring it up. 2. Release the thumbnail slider on the page you want to navigate to. See Also: Adding a blank page Rotating pages Changing page order Deleting pages Android 1. Getting started: Xodo basics 2. Files opened from external SD card are read-only 3. Accessing files on an SD card 4. Accessing Google Drive files with Xodo 5. Accessing OneDrive files with Xodo 6. Accessing Dropbox files with Xodo 7. Syncing files between Android and Windows 8. Creating a new PDF (blank or from an image) 9. Creating a PDF from a webpage 10. Creating a folder 11. Sorting files 12. Saving a copy of a file with a new name 13. Moving a file 14. Deleting a file 15. Merging Documents 16. Exporting pages into a new PDF 17. Adding a document to favorites 18. Annotations made in Xodo are gone when PDF is opened in another app (Flatten) 19. Selecting/Highlighting text with smart snapping 20. Adding a text annotation 21. Creating a text markup annotation 22. Adding an ink annotation 23. Deleting an annotation 24. Changing the font of a text annotation 25. Controlling the list of available fonts 26. Adding a substitute font to Xodo 27. Adding an image 28. Selecting camera photo size 29. Exporting summary of annotations 30. Signing documents and saving signatures 31. Defining and translating text 32. Sharing text with other apps (Eg. offline dictionaries, Google Translate, email) 33. Using full document text search 34. Using Stylus as Pen mode 35. Converting handwriting to text 36. Enabling continuous annotation mode 37. Cropping pages 38. Disabling Turn Page on Tap 39. Changing view modes 40. Disabling maintain zoom level 41. Reading Mode (Reflow) 42. Right-to-Left functionality and features 43. Keeping the screen on while using Xodo 44. Rotating pages 45. Inserting pages (blank, image, another document) in a PDF 46. Changing page order 47. Jumping to a page 48. Deleting pages 49. Adding a page bookmark 50. Renaming a page bookmark 51. Sharing and collaborating on files 52. Getting the public read-only link for a document 53. Syncing your Xodo web app bookshelf with the Xodo Android app 54. Translation Limit 55. Setting Xodo as the default viewer 56. Copying and pasting annotations 57. Filter by file type(s) 58. Viewing in Night Mode, Sepia Mode and Custom Color Mode 59. Saving a permanently cropped copy of a file 60. Saving a password protected copy of a file 61. Saving a reduced file size copy of a file 62. Why do pictures in pdf files look paler? 63. Changing language for read function (text to speech) 64. Changing edit mode for text annotations 65. Locate the folder containing the current file 66. Disabling page number indicator 67. Enabling copy annotated text to note 68. Convert other format file(jpg, png, docx) to PDF 69. Log out from Xodo Connect 70. Sharing documents via other apps 71. What is my Xodo Version? 72. Can I open a template and save as? 73. Disable Auto-select annotation. 74. Undo/Redo 75. New PDF from Multiple Documents 76. I can no longer fill out form fields with date format, why? 77. Read Text in PDF File 78. Full screen mode 79. Delete all annotations in a document 80. Document files not displayed correctly 81. Saving a file to SD Card 82. Viewing the Table of Contents (document outline) 83. Quick menu buttons cheat sheet 84. Changing speech rate for read function (text to speech) 85. Help us improve Xodo 86. Expand/ collapse folder in All Document 87. Open reading file folder 88. Link Tool 89. Changing file preview size 90. Stylus and Fingers 91. Keyboard Shortcuts 92. Duplicating a page 93. Change style of an annotation (color, thickness, opacity) 94. Setting an annotation style preset 95. Adding favorite colors 96. Annotation color picker 97. Highlighting anything on a document 98. Delete/Flatten multiple annotations 99. Why are my files saved as duplicates instead of rewrite the original PDF? 100. Creating fillable form fields 101. Custom rubber stamp 102. Adding a ruler annotation Feedback and Knowledge Base
__label__pos
0.885378
Mail gets auto-deleted Discussion in 'New Member Introductions - Site Assistance' started by sherlockholmes76, Dec 10, 2012. 1. sherlockholmes76 sherlockholmes76 iPF Noob Joined: Dec 10, 2012 Messages: 6 Thanks Received: 0 Trophy Points: 0 Ratings: +0 / 0 I recently upgraded to an iPad3 and it keeps deleting my emails. I'll have a few emails stored, and sometimes (but not always) when I open the program to either refresh my mail or to respond to a message, they'll just disappear. The only message that isn't deleted is a welcome letter from iCloud. Hmmm... Thanks for any help...   2. twerppoet twerppoet iPad Legend Joined: Jan 8, 2011 Messages: 18,201 Thanks Received: 3,010 Trophy Points: 113 Location: Walla Walla, WA Ratings: +3,835 / 4 Who is your email provider, and how to you have them set up: IMAP, POP, Exchange?   3. sherlockholmes76 sherlockholmes76 iPF Noob Joined: Dec 10, 2012 Messages: 6 Thanks Received: 0 Trophy Points: 0 Ratings: +0 / 0 I have a local service provider - North Dakota Telephone Company. I think it's set up as IMAP. I followed the path SETTINGS/MAIL, CONTACTS, CALENDAR/ACCOUNTS/GONDTC/IMP.   4. twerppoet twerppoet iPad Legend Joined: Jan 8, 2011 Messages: 18,201 Thanks Received: 3,010 Trophy Points: 113 Location: Walla Walla, WA Ratings: +3,835 / 4 If you are set up as IMAP, an understanding of how it works, may help to explain things. Everything is on the server. Whenever your email connects to the server it will update to reflect the server's status. This means that if you deleted the email somewhere else, it may show briefly on the iPad, then go away as it deletes it from the iPad also. If you leave a computer online and connected to the server, anything that happens on that computer will change the server; but those changes may not be reflected on the iPad until the next time you open Mail. It depends on whether the iPad gets to them first or the computer does. Things that might do this are junk mail filters or rules that you've set up on the computer.   5. sherlockholmes76 sherlockholmes76 iPF Noob Joined: Dec 10, 2012 Messages: 6 Thanks Received: 0 Trophy Points: 0 Ratings: +0 / 0 This happens regardless of what is done on the main computer. Quite often, there are days that I don't even collect the mail on my computer, that I'll use my iPad exclusively. With my old iPad, I could accept and delete emails, and it had no effect on messages stored on the main computer. That is still the case. This issue of "auto-deletion" is occurring regardless of what happens on the main computer.   6. twerppoet twerppoet iPad Legend Joined: Jan 8, 2011 Messages: 18,201 Thanks Received: 3,010 Trophy Points: 113 Location: Walla Walla, WA Ratings: +3,835 / 4 That does not sound like IMAP at all. Two things to check then. If you typically have more than 50 emails in your Inbox, in Settings > Mail, Contacts, Calendars, down in the Mail section, make sure you've got Show set to something higher. Go to your account at the top of the Mail, Contacts, Calendars settings and open it. Check to see if there is a Mail Days to Sync setting. It defaults to 3 days. This is for an Exchange setup, not IMAP, but is the most common cause of emails disappearing. You can also try a clean sweep approach. Delete the account and add it again using the provider's instructions (hopefully on the support site). That is all I can think of at the moment.   7. sherlockholmes76 sherlockholmes76 iPF Noob Joined: Dec 10, 2012 Messages: 6 Thanks Received: 0 Trophy Points: 0 Ratings: +0 / 0 So, my iPad 3 is still eating my email, and the problem has now also shown up on my iPod. There are also new folders in my folder list - #Help, #Trash (I'm sure there was a trash folder before - I just don't remember there being a #), and now there's a folder labeled #Lybeck. What the heck is #Lybeck? Should I be worried? Is this Lybeck a person?   8. twerppoet twerppoet iPad Legend Joined: Jan 8, 2011 Messages: 18,201 Thanks Received: 3,010 Trophy Points: 113 Location: Walla Walla, WA Ratings: +3,835 / 4 That certainly sounds like an IMAP account, at least as far as syncing extra folders. Why you are getting new folders that you've never set up is a mystery. The iPad would not do this on it's own. It can't. I'm thinking you need to go to your provider's support page or contact them directly. Either this is something they are doing, or your account has been compromised. The second is unlikely (from your description of what is going on) but it wouldn't hurt to change your password. You probably have a website that you can check your mail on. It wouldn't hurt to look at it and see if those folder exist, if there are any rules set up, and other settings that might be involved. Good luck.   Share This Page Search tags for this page deleted mail , ipad email gets deleted , iphone 5 and exchange auto deleting imap emails after 7 days , mail auto-gschwandtl.at , mail auto-gschwandtl.at loc:us , why is it my email gets deleted from my ipad
__label__pos
0.768842
  +0     0 110 1 avatar There are two number A and B. A is a 2-digit number which is a multiple of 8. B is a 2-digit number which is a multiple of 24. What is the largest possible difference between the two numbers A and B?  Feb 15, 2022  #1 avatar+124594  +1 Call A the smaller and B the larger   A = 2* 8  =  16 B = 4 * 24  = 96   l A - B l   =  l 16 - 96 l  =   l -80 l  =   80   cool cool cool  Feb 15, 2022 12 Online Users avatar
__label__pos
0.997869
anybody have a browser compatible html entity decoding function? Found this online and it seems like they all use this concept. function html_entity_decode(str) { //jd-tech.net var tarea=document.createElement('textarea'); tarea.innerHTML = str; return tarea.value; tarea.parentNode.removeChild(tarea); } apparently the innerHTML is giving me a runtime error on ie, i can only use value. But value doesn't decode the html entities. Found a solution: function html_entity_decode(str) { try { var tarea=document.createElement('textarea'); tarea.innerHTML = str; return tarea.value; tarea.parentNode.removeChild(tarea); } catch(e) { //for IE add <div id="htmlconverter" style="display:none;"></div> to the page document.getElementById("htmlconverter").innerHTML = '<textarea id="innerConverter">' + str + '</textarea>'; var content = document.getElementById("innerConverter").value; document.getElementById("htmlconverter").innerHTML = ""; return content; } } this way you can still add the innerHTML, but to a div instead, and it also adds a textarea with it so it can be retrieved by the value of the textarea. then it clears out the div and leaves it blank. Cat skinned. This question has already been answered. Start a new discussion instead.
__label__pos
0.987384
2022 AMC 12B Problems/Problem 16 Problem Suppose $x$ and $y$ are positive real numbers such that \[x^y=2^{64}\text{ and }(\log_2{x})^{\log_2{y}}=2^{7}.\] What is the greatest possible value of $\log_2{y}$? $\textbf{(A) }3 \qquad \textbf{(B) }4 \qquad \textbf{(C) }3+\sqrt{2} \qquad \textbf{(D) }4+\sqrt{3} \qquad \textbf{(E) }7$ Solution Take the base-two logarithm of both equations to get \[y\log_2 x = 64\quad\text{and}\quad (\log_2 y)(\log_2\log_2 x) = 7.\] Now taking the base-two logarithm of the first equation again yields \[\log_2 y + \log_2\log_2 x = 6.\] It follows that the real numbers $r:=\log_2 y$ and $s:=\log_2\log_2 x$ satisfy $r+s=6$ and $rs = 7$. Solving this system yields \[\{\log_2 y,\log_2\log_2 x\}\in\{3-\sqrt 2, 3 + \sqrt 2\}.\] Thus the largest possible value of $\log_2 y$ is $3+\sqrt 2 \implies \boxed{\textbf {(C)}}$. cr. djmathman Solution 2 $x^y=2^{64} \Rightarrow y\log_2{x}=64 \Rightarrow \log_2{x}=\dfrac{64}{y}$. Substitution into $(\log_2{x})^{\log_2{y}}=2^{7}$ yields $(\dfrac{64}{y})^{\log_2{y}}=2^{7} \Rightarrow \log_2{y}\log_2{\dfrac{64}{y}}=7 \Rightarrow \log_2{y}(6-\log_2{y})=7 \Rightarrow \log^2_2{y}-6\log_2{y}+7=0$. Solving for $\log_2{y}$ yields $\log_2{y}=3-\sqrt{2}$ or $3+\sqrt{2}$, and we take the greater value $\boxed{\boldsymbol{(\textbf{C})3+\sqrt{2}}}$. ~4SunnyH Solution 3 Let $x = 2^a, y = 2^b.$ We have $(2^a)^{2^b} = 2^{64} \Rightarrow 2^{a\cdot 2^b} = 2^{64} \Rightarrow a\cdot 2^b = 64,$ and $a^b = 128$. Then, from eq 1, $a = 64\cdot 2^{-b},$ and substituting in to eq 2, $(64\cdot 2^{-b})^b = 64^b\cdot 2^{-b^2} = 2^{6b}\cdot 2^{-b^2} = 2^{6b-b^2} = 2^{7}.$ Thus, $6b-b^2 = 7.$ Solving for $b$ using the quadratic formula gets $b = 3 \pm \sqrt{2}.$ Since we are looking for $\log_2{y}$ which equals $b,$ we put $\boxed{\textbf{(C)} \ 3+\sqrt{2}}$ as our answer. ~sirswagger21 Video Solution(1-16) https://youtu.be/SCwQ9jUfr0g ~~Hayabusa1 See Also 2022 AMC 12B (ProblemsAnswer KeyResources) Preceded by Problem 15 Followed by Problem 17 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 All AMC 12 Problems and Solutions The problems on this page are copyrighted by the Mathematical Association of America's American Mathematics Competitions. AMC logo.png Invalid username Login to AoPS
__label__pos
0.999989
{multicol} is about the multicol package and its multicols environment which allows to typeset text in multiple columns (up to 10) and to balance the end of each column at the end of the environment. For questions about multi-column areas inside a table, use {tables} plus {multicolumn} instead. learn more… | top users | synonyms 34 votes 4answers 68k views Placing figures inside a two-column document I'm trying to get images in my document in two different ways: On top of the entire page, centered On top of a single column I tried it with both twocols and multicols. I ruled out twocols because ... 14 votes 2answers 6k views Balancing long table inside multicol in LaTeX I would like to put some longer tabular data inside the multicols environment while maintaining its balancing abilities. I have tried supertabular with the trick to redefine \newpage as columnbreak. ... 31 votes 2answers 5k views multicolumn long table or enumeration I would like to generate a multi-column table that wraps within columns of the document, and splits across pages. There are a couple of similar questions here How do I create a really long table in ... 22 votes 2answers 3k views Beamer: Vertical alignment of multi-column ToC In my LaTeX beamer presentation I have a long Table of Contents, which has been split across two columns by using the multicol package. The problem is that I have not found any way to control vertical ... 16 votes 2answers 4k views How do I force TeX to completely use the 1st column? How do I force TeX to completely use the 1st column before writing to the second column? Right now I dont have enough content to fill one whole page. So the left and right columns are equally filled, ... 42 votes 3answers 51k views How do I force a column-break in a multi-column page? I want to display my article's abstract and table of contents side-by-side, to save space. I'm using the multicols package, but it tries to keep each column the same height, and I can't figure out how ... 30 votes 4answers 19k views How to balance last page of two-column layout? I create a document in two-column-layout with LaTeX (and Koma-Script, if that is relevant). The two-column-layout is defined in documentclass. The last page of an chapter fills the left column first - ... 11 votes 5answers 4k views enumerate in multicols The following image illustrates what am looking for: Essentially I want the numbering to occur as shown above but using the multicol and enumitem package. But when I try it by using the following ... 7 votes 1answer 2k views Three-columns text with figures of 2\columnwidth Although in multicol environment (not the twocolumn option in document class) is problematic to place floats, there are some ways to put figures within a single column or occupying the entire width of ... 4 votes 1answer 789 views How to have a figure with caption in multicol environment? I am having the following problem with adding a figure inside the multicol environment: \documentclass[10pt, twoside]{book} \usepackage{graphicx} \usepackage{subfig} \begin{document} ... 23 votes 1answer 15k views Different column widths using multicol Is it possible to specify different column widths in a multicol environment? For example, in a two column layout, can the left column be set to twice the width of the right column? 25 votes 1answer 1k views How can I reverse the positions of two (unequal width) columns so that the wider one is always against the inner margin? I'm new to LaTeX and I'm trying to use it to write a spotlessly-typesetted draft of my novel (from the rough, typewritten hard draft) for MFA program applications, and eventually to send to ... 10 votes 2answers 2k views Footnotes layout in multicols environment I am using the package multicol to typeset a one-column (spanning the entire text width) heading and a two-column main text block. In order to do so, I begin the multicols environment along with the ... 9 votes 3answers 5k views How to achieve a multi-column layout for footnotes? How can you achieve a multi-column layout for footnotes? I have one page filled only with a rotated table (landscape layout, the sidewaystable environment). The table is annotated by a lot of ... 23 votes 5answers 3k views Multicol layout for systems of (linear) equations How can a similar arrangement be archived (with ams*, aligned, multicol, etc) 1.2.3 Solve the following system of equations: a) x - 2y - 4z = 1 b) -23x + 43y = 22 3x - ... 16 votes 2answers 17k views Making a long table continue over two columns I am using multicol to make a two-column document using the book document class. I have a long tabular that I want to break where the page breaks and then continue in the next column. I could split ... 13 votes 1answer 1k views Column break only if within first column of multicol Background Creating books automatically using LaTeX-escaped user-generated content. Problem The book layouts make use of the multi-column package, with the book content separated into sections. If ... 19 votes 2answers 613 views Detecting current column in multicol I'm working in a two-column multicol environment, and trying to create pullquotes using wrapfig. The problem is, I'd like these pullquotes to hang in the margin: the left margin if I'm in the left ... 8 votes 2answers 8k views Bibliography in two columns, section title in one I'm using the following to span the bibliography to two columns: \begin{multicols}{2} \bibliographystyle{abbrv} \bibliography{mybib} \end{multicols} Yet, the section title ("References" in the ... 10 votes 1answer 500 views Shaded Multicols Does anyone know how to shade the background and put a border around a multicol environment, where page breaks are needed. There is a couple of posts around, most notably, multicols within colorbox, ... 1 vote 3answers 507 views Margin notes, multicols, tikz overlay As seen on page 200 of pgf manual is there a way to put notes in the margin but on the same level as the place it is called ? One can deal with the right-left positionning with the help of this ... 15 votes 2answers 2k views Table of contents without title I would like to have two-columned table of contents: \begin{multicols}{2} \tableofcontents \end{multicols} But it looks wierd if I have a title included in two columns - second ... 17 votes 3answers 3k views multicols not wrapping to 2nd column properly? This question led to a new feature in a package: multicol I am having problem with multicol package, I created a sample demo to show it. When I put 3 minipages of certain height in the ... 12 votes 2answers 3k views single column footnote in multicols environment? I use the multicols environment (multicol package). I have two columns. If I simply use \footnote{the footnote text, which is quite long} the footnote behaves as if there was just one big column (as ... 10 votes 2answers 1k views Margin notes in a multicolumn environment I have been typesetting a two-columned book where I sometimes need to put a mark in the margin to say, that particular line might need special attention. (The book is actually a psalter - for ... 8 votes 4answers 5k views Split itemize into multiple columns Is it possible to split an itemize list into several columns? (I'm sure it is, but I couldn't find a solution around here) And additionally: Is it possible to automatically split a list into multiple ... 6 votes 2answers 14k views Insert figure in a multicol article I am trying to insert a figure with a caption into a multicol article. When the figure was inserted on its own, it was fine: includegraphics[width=0.5cm]{image} However, when I tried to insert the ... 5 votes 1answer 1k views Vertical alignment in multicol In the following code the horizontal rules on the right don't line up in all cases with the horizontal rules on the left. It seems to be related to the text content. How can I fix this in such a way ... 5 votes 2answers 2k views Hypcap not working with custom figure environment I'm trying to use caption with hypcap=true so that cross-reference hyperlinks will go to the figure instead of the figure caption. However, I'm using a multicol environment with a custom figure ... 30 votes 1answer 17k views Setting the column gap in a twocolumn (or multicol) document I have a document that uses twocolumn with scrbook. How can I set the width of the space between the two columns? 16 votes 2answers 3k views Are there any LaTeX packages for multicolumn typesetting besides multicol? I'm going to need two-column layout (more advanced than LaTeX's \twocolumn - I'll need changing the number of columns without starting a new page or column balancing); however, due to the nature of ... 6 votes 2answers 2k views Two enumerated lists side by side I am trying to replicate the image below, but what I currently have gives awkward spacing on the two lists. Could anyone advise me as to how I could mimic the picture? ... 6 votes 1answer 301 views Shaded multicols II In extending a previous question, Shaded multicols, how can I put rounded corners on the multicol environment, for the outer 4 corners (ie the corners directly next to the page margins) This question ... 15 votes 2answers 34k views Adjusting column width in LaTeX with the multicol package I'm trying to get a two column "academic style" paper layout on my LaTeX paper. I'm aware there are at least two common ways of doing this, either with adjusting \documentclass or with the more ... 11 votes 1answer 847 views Images off the page in multicols environment Before my question, some background information. I’ve created an algorithm (not latex) which will generate a catalog for a collection of books dynamically. A user supplies information to query an SQL ... 8 votes 3answers 1k views Align baseline in multicol I'm using the multicol package and would like to align the baseline of the two columns. The simple use of a \paragraph heading seems to throw off the alignment, which can be pretty jarring visually. ... 5 votes 1answer 736 views How to write a title/abstract spanning 2 columns in 3-column page using multicol? My article requires that I put the title and a small abstract spanning the left 2 columns in a 3-column page, that is, the 3rd column to the right of the title/summary block must follow with the body ... 5 votes 1answer 2k views Grouped multi-column nomenclature I'd like to print a multicolumn nomenclature list that consists of several groups. At the moment, using the grouping approach described in the manual and with a code snippet from ... 5 votes 2answers 2k views Horizontal lines touching the multicol column separator rule Is there a possibility to put a horizontal line above and below a multicolumn output (using the multicol package) such that the horizontal lines touch the multicol column separator exactly at its ... 4 votes 1answer 76 views How to set one header for each page using multicols I want to use multicols in my document and also having a small box on each page (upper left) to be drawn automatically. I already tried \AtBeginPage hooks but the box didn't print at proper ... 4 votes 1answer 185 views Multicols not breaking procedures in algorithm2e I'm having a problem with the package multicol. I'm trying to split an algorithm into two columns. In particular, this algorithm is made up by two procedures. Unfortunately, in this particular case, ... 4 votes 2answers 171 views keep alignment under enumerate I would like to keep the indentation for my fraction that appears below my first item however i don't know how to align it if itself is not an item. source code: \documentclass{article} ... 4 votes 1answer 11k views How to change spacing between columns for just one page in the document I'm using the multicol package for my columns. So far, I have \setlength\columnsep{10pt} in the preamble to set the default column separation. But for one particular page, I'd like to set the ... 3 votes 2answers 201 views \columnbreak doesn't work With the following code: \documentclass[a4paper]{report} \usepackage{multicol} \begin{document} \begin{multicols}{2} \noindent Vediamo un po' di testo a due colonne. Proviamo a cambiar colonna: ... 2 votes 1answer 2k views Trick Supertabular into Multicols in new command I'm using supertabular in multicols enviroment using this trick \let\mcnewpage=\newpage \makeatletter \renewcommand\newpage{% \if@firstcolumn \hrule width\linewidth height0pt ... 1 vote 2answers 81 views Sophisticated Table Header Latex I have a table which I want to transform it into Latex Code But, I am beginner in Latex, thus, the header of this table is hard for me to write it in Latex. 10 votes 2answers 317 views How to create trapezoidal multicols? I would like to create a XeLaTeX layout where multicol's columns (two in this case, no figures, text only) are tilted where they meet. The lines still should be ordinary lines below each other, but ... 9 votes 2answers 433 views Making an odd shaped table I'm trying to make a table that looks like this: +--------------+ | Class 1 | +--------------+ | A | B | ... 5 votes 2answers 299 views How to left align text in two column itemize When I use following MWE for getting itemized text in two columns, the item in the list sometimes take a "justified" formatting and thus there is significant space in between two words. This produces ... 5 votes 1answer 227 views Getting floating objects in multicol environments I am using multicol with 2 columns, and I would love to have my figures and tables to float, so that I don't have to manually mess around with the positioning anymore. I am aware of figure*, but I ...
__label__pos
0.765601
4 \$\begingroup\$ Playing with XNA Triangle Picking Sample I found out that it does not work well if you scale the world matrix of the objects you want to pick. When I dug into the implementation I found this comment in the RayIntersectsModel method: // The input ray is in world space, but our model data is stored in object // space. We would normally have to transform all the model data by the // modelTransform matrix, moving it into world space before we test it // against the ray. That transform can be slow if there are a lot of // triangles in the model, however, so instead we do the opposite. // Transforming our ray by the inverse modelTransform moves it into object // space, where we can test it directly against our model data. Since there // is only one ray but typically many triangles, doing things this way // around can be much faster. After the comment they actually transformed the ray: ray.Position = Vector3.Transform(ray.Position, inverseTransform); ray.Direction = Vector3.TransformNormal(ray.Direction, inverseTransform); With this implementation, picking suffered from "short-sightedness" if you scaled the models: it could only pick those objects, that were close enough to it. Even the ray-boundingSphere intersection test, which implementation is hardcoded into XNA, failed in the same way. I fixed this by "doing the wrong thing" - I actually started transforming every vertex by the model's world matrix and to fix the boundingSphere test I added this code: Quaternion rot; Vector3 scale, trans; modelTransform.Decompose(out scale, out rot, out trans); float maxScale = Math.Max(Math.Max(scale.X, scale.Y), scale.Z); boundingSphere.Center = Vector3.Transform(boundingSphere.Center, modelTransform); boundingSphere.Radius *= maxScale; This obviously is not optimal and I wanted to know if there is a way to actually transform the ray back to the model's space with the model's inverted matrix, while making it work for scaled matrices? SOLUTION: Thanks to Nathan's answer I found a way to fix the ray scaling - just renormalize the ray direction: ray.Position = Vector3.Transform(ray.Position, inverseTransform); ray.Direction = Vector3.TransformNormal(ray.Direction, inverseTransform); //ADD THE FOLLOWING LINE: ray.Direction.Normalize(); SOLUTION UPDATE: As I tested the app, I found that Nathan was indeed completely right and another change was necessary. Here is the full code for the correct RayIntersectsModel() method: static float? RayIntersectsModel(Ray ray, Model model, Matrix modelTransform, out bool insideBoundingSphere, out Vector3 vertex1, out Vector3 vertex2, out Vector3 vertex3) { vertex1 = vertex2 = vertex3 = Vector3.Zero; ... Matrix inverseTransform = Matrix.Invert(modelTransform); // STORE WORLDSPACE RAY. Ray oldRay = ray; ray.Position = Vector3.Transform(ray.Position, inverseTransform); ray.Direction = Vector3.TransformNormal(ray.Direction, inverseTransform); ray.Direction.Normalize(); // Look up our custom collision data from the Tag property of the model. Dictionary<string, object> tagData = (Dictionary<string, object>)model.Tag; if (tagData == null) { throw new InvalidOperationException( "Model.Tag is not set correctly. Make sure your model " + "was built using the custom TrianglePickingProcessor."); } // Start off with a fast bounding sphere test. BoundingSphere boundingSphere = (BoundingSphere)tagData["BoundingSphere"]; if (boundingSphere.Intersects(ray) == null) { // If the ray does not intersect the bounding sphere, we cannot // possibly have picked this model, so there is no need to even // bother looking at the individual triangle data. insideBoundingSphere = false; return null; } else { // The bounding sphere test passed, so we need to do a full // triangle picking test. insideBoundingSphere = true; // Keep track of the closest triangle we found so far, // so we can always return the closest one. float? closestIntersection = null; // Loop over the vertex data, 3 at a time (3 vertices = 1 triangle). Vector3[] vertices = (Vector3[])tagData["Vertices"]; for (int i = 0; i < vertices.Length; i += 3) { // Perform a ray to triangle intersection test. float? intersection; RayIntersectsTriangle(ref ray, ref vertices[i], ref vertices[i + 1], ref vertices[i + 2], out intersection); // Does the ray intersect this triangle? if (intersection != null) { // RECOMPUTE DISTANCE IN WORLD SPACE: Vector3 vertexA = Vector3.Transform(vertices[i], modelTransform); Vector3 vertexB = Vector3.Transform(vertices[i+1], modelTransform); Vector3 vertexC = Vector3.Transform(vertices[i+2], modelTransform); RayIntersectsTriangle(ref oldRay, ref vertexA, ref vertexB, ref vertexC, out intersection); // If so, is it closer than any other previous triangle? if ((closestIntersection == null) || (intersection < closestIntersection)) { // Store the distance to this triangle. closestIntersection = intersection; // Store the three vertex positions in world space. vertex1 = vertexA; vertex2 = vertexB; vertex3 = vertexC; } } } return closestIntersection; } } \$\endgroup\$ • \$\begingroup\$ An inverse matrix also inverses the scaling. I do not see any obvious error in the code in XNA or in the idea behind it. Are you sure not something else is going on? Maybe try creating a minimum example that reproduces the error which you can verify by hand? \$\endgroup\$ – Roy T. Mar 25 '14 at 19:16 • \$\begingroup\$ I might be wrong about the causes of the problem, but all I do is simply change the default matrix for pickable objects in this sample by this expression:modelWorldTransforms[i] = Matrix.CreateScale(1.01f)* Matrix.CreateTranslation(new Vector3(x, 0, 0)); If you put into the scaling matrix anything greater than 1 - everything gets screwed up. If you can download the sample and try for yourself, you might make a better judgement about the cause. \$\endgroup\$ – cubrman Mar 25 '14 at 19:48 • \$\begingroup\$ Thanks for this, very helpful (even outside XNA). For those who might be wondering what Vector3.TransformNormal does it transforms by the matrix in the same way as Vector3.Transform but ignores the translation component, quite important to get that right! \$\endgroup\$ – CMash Sep 28 '15 at 15:42 8 \$\begingroup\$ Transforming the ray position and direction by the inverse model transformation is correct. However, many ray-intersection routines assume that the ray direction is a unit vector. If the model transformation involves scaling, the ray direction won't be a unit vector afterward, and should likely be renormalized. However, the distance along the ray returned by the intersection routines will then be measured in model space, and won't represent the distance in world space. If it's a uniform scale, you can simply multiply the returned distance by the scale factor to convert it back to world-space distance. For non-uniform scaling it's trickier; probably the best way is to transform the intersection point back to world space and then re-measure the distance from the ray origin there. |improve this answer||||| \$\endgroup\$ • \$\begingroup\$ Guess what? Renormalizing the direction vector after the initial inverse matrix transformation solved everything. I don't know why the second part of your answer was unnesessary, it seemed pretty logical, but that's how things go. Thanks for figuring that out! \$\endgroup\$ – cubrman Mar 26 '14 at 6:53 • \$\begingroup\$ There is another problem now though. If I try to change the model's world matrix at runtime - everything gets screwed up again! Nomatter how I compute the triangle test - world space or local space! This is madness... \$\endgroup\$ – cubrman Mar 26 '14 at 7:05 • \$\begingroup\$ Ok it does work with world coordinates, my problem was that I fiddled with the absoluteBoneTransformes. The original sample multiplies every world matrix by this matrix (the one with the parentBone index) when it draws the models and DOES NOT multiply them when computing intersection. In our app we don't multiply the world matrix in any case. I fixed this by multiplying the model's world matrix by the inverse absoluteBoneTransformes at the picking stage, but I messed up the matrix multiplication order and had this issue. So far I think I will have to implement the second part of ur answer... \$\endgroup\$ – cubrman Mar 26 '14 at 7:31 • 1 \$\begingroup\$ Ok I made it. You were totally right, both of your suggestions were necessary. Check the question for the full code. \$\endgroup\$ – cubrman Mar 26 '14 at 10:37 • 1 \$\begingroup\$ Affine transforms scale parallel distance measurements equally, so you can cache the scale value required to renormalize the ray once in object space, and use that to correct the distance value, rather than converting the intersection point to worldspace and re-measuring. \$\endgroup\$ – DMGregory Mar 26 '14 at 12:29 Your Answer By clicking “Post Your Answer”, you agree to our terms of service, privacy policy and cookie policy Not the answer you're looking for? Browse other questions tagged or ask your own question.
__label__pos
0.88658
The Revolutionary Impact of Boltból on Technology Wyatt Boltból In the ever-evolving landscape of technology, innovation is the driving force that propels us into the future. Among the myriad of emerging technologies, one name stands out as a beacon of revolutionary change: Boltból. This cutting-edge platform is not just a product; it’s a vision that reshapes the way we perceive and interact with technology. In this article, we’ll delve into the world of Boltból and explore how its groundbreaking innovations are shaping the future of tech. Unveiling Boltból Boltból is not just another tech company; it’s an ecosystem of revolutionary ideas and groundbreaking solutions. At its core, Boltból is driven by a mission to bridge the gap between the present and the future, leveraging technology to enhance every aspect of our lives. The company’s ethos revolves around three key pillars: Connectivity, Intelligence, and Sustainability. Connectivity Boltból’s first pillar, Connectivity, addresses the fundamental need for seamless communication in our increasingly interconnected world. The platform introduces a novel approach to connectivity that goes beyond traditional boundaries. By amalgamating cutting-edge wireless technologies, Boltból aims to create a global network where devices can communicate effortlessly, facilitating a new era of collaboration and data exchange. Imagine a world where your smart home seamlessly communicates with your autonomous vehicle, updating your preferences and schedules in real-time. Boltból’s Connectivity vision extends beyond personal devices, reaching into industries like healthcare, agriculture, and manufacturing, fostering unprecedented levels of efficiency and collaboration. Intelligence The second pillar of Boltból, Intelligence, delves into the realm of artificial intelligence (AI) and machine learning (ML). Boltból envisions a future where technology not only serves our needs but anticipates them. By integrating advanced AI algorithms into everyday devices, the platform is set to create smart ecosystems that learn and adapt to user behavior. Your smartphone will not just be a device; it will be an intelligent companion, understanding your preferences, anticipating your needs, and providing personalized recommendations. Boltból’s Intelligence pillar is not limited to consumer electronics; it extends to smart cities, where AI-driven infrastructure optimizes energy consumption, traffic flow, and public services, creating more sustainable and livable urban spaces. Sustainability The third pillar, Sustainability, reflects Boltból’s commitment to creating technology that not only enhances our lives but also cares for the planet. The company recognizes the environmental impact of technological advancements and strives to minimize its carbon footprint. Boltból’s sustainable practices extend across the entire product lifecycle, from responsible sourcing of materials to energy-efficient manufacturing processes and eco-friendly disposal methods. Moreover, Boltból is actively investing in research and development to create sustainable technologies that contribute to a greener future. From energy-harvesting devices to biodegradable materials, the Sustainability pillar envisions a tech landscape where innovation and environmental responsibility go hand in hand. Revolutionizing Industries To understand the profound impact of Boltból, let’s explore how its innovations are revolutionizing specific industries. Healthcare Boltból’s Connectivity pillar is transforming the healthcare sector by creating a connected ecosystem where medical devices seamlessly communicate with each other. From wearable health monitors that provide real-time data to smart prescription systems that optimize medication schedules, Boltból is at the forefront of a healthcare revolution. The Intelligence pillar enhances this further by employing AI to predict and prevent health issues, creating a proactive approach to patient care. Automotive In the automotive industry, Boltból’s Connectivity is paving the way for fully autonomous vehicles. Vehicles equipped with Boltból’s technology can communicate with each other, making roads safer and traffic more efficient. The Intelligence pillar ensures that these autonomous vehicles learn from each journey, constantly improving their decision-making capabilities. The Sustainability pillar is not forgotten, as Boltból explores electric and sustainable energy solutions for the automotive sector. Education Boltból’s foray into education involves creating an intelligent learning environment where Connectivity enables seamless communication between students, teachers, and educational resources. The Intelligence pillar ensures that educational platforms adapt to individual learning styles, providing personalized recommendations and assistance. The Sustainability pillar extends to creating eco-friendly educational technologies, reducing the environmental impact of the education sector. Challenges and Ethical Considerations While Boltból’s innovations promise a utopian future, it is crucial to address the challenges and ethical considerations that arise. The interconnected nature of Boltból’s vision raises concerns about data privacy, security, and potential misuse of advanced AI. As we embrace these revolutionary changes, it becomes imperative to establish robust ethical frameworks and regulations that safeguard user rights and ensure responsible technology deployment. Conclusion Boltból is not just a tech company; it is a manifestation of our collective aspirations for a better future. Its vision of Connectivity, Intelligence, and Sustainability paints a picture of a world where technology is not just a tool but a transformative force for good. As we navigate through the complexities of the technological landscape, Boltból stands as a guiding light, inspiring us to dream big, innovate relentlessly, and shape a future where technology enhances the human experience and preserves the planet we call home. The journey has just begun, and with Boltból at the helm, the future of tech looks brighter than ever. Leave a Comment
__label__pos
0.983796
Dismiss Notice Join Physics Forums Today! The friendliest, high quality science and math community on the planet! Everyone who loves science is here! Solving differential equations with definite integrals? 1. Dec 28, 2007 #1 I'm taking a calculus-based physics course, and we were solving a simple differential equation for a model of drag by separating variables: (where A is some arbitrary constant) [tex]m \frac {dv} {dt} = -A v^2[/tex] [tex]- \frac {m} {A} \frac {dv} {v^2} = dt[/tex] My teacher then integrates both sides, but unlike in my calculus class, he uses definite integrals: [tex]- \frac {m} {A} \int_{v_i}^{v_f} v^{-2} dv = \int_{t_i}^{t_f} dt[/tex] Initial time will be zero for simplicity, so using the FTC: [tex]\frac {m} {A} (\frac {1} {v_f} - \frac {1} {v_i}) = t_f[/tex] I understand how to solve it as is done from my calculus class, using indefinite integrals and solving for the constant of integration [itex]C = \frac {m} {A v_i}[/itex], which gives an equivalent result. So what's the merit of using one method as opposed to another? It seems to me like using definite integrals is quicker. So if I do physics problems this way, why should/shouldn't I do the initial-value problems I get in calculus using definite integrals like this? Like when it asks to solve [itex]dy/dx = 3y, y(2) = 5[/itex] or such, what's wrong with doing [itex]\frac {1} {3} \int_{5}^{y_f} y^{-1} dy = \int_{2}^{x_f} dx[/itex]? Both methods give the same result, and again, it seems quicker to do this the definite integral way rather than solving for C. I'm guessing that it might be harder to understand what's going on as things get more complex, or something? Or I guess using subscripts on variables to really indicate evaluating the function for the independent variable is problematic? (Whew, sorry for the length, but this has been bugging me for a while.)   Last edited: Dec 28, 2007 2. jcsd 3. Dec 28, 2007 #2 CompuChip User Avatar Science Advisor Homework Helper It really doesn't matter. What matters is that you are given a differential equation with an initial condition. From the differential equation you can find the general solution, and then somehow you have to impose the initial condition. You can do this after writing down the general solution (by first doing an indefinite integral and then solving for the unknown integration constant) or you solve the equation with the correct starting conditions right away. Of course they (should) give the same results in the end. It works the same way for higher order equations, e.g. [tex]\frac{d^2x}{dt^2} = a[/tex] (Which I won't work out because I'm not really comfortable working with the infinitesimals as you did above.)   4. Dec 30, 2007 #3 Know someone interested in this topic? Share this thread via Reddit, Google+, Twitter, or Facebook Similar Discussions: Solving differential equations with definite integrals? Loading...
__label__pos
0.999996
All dimensions Wiki Advertisement All dimensions Wiki (aka, a butt load of prefixes) (by POYO12469) first lets define the prefix, MolkoweSaio. 1. define the prefix Molkowe, which means f'f'x(ͳ)(x) defining ͳ as the biggest ordinal defineable using Mandore's ordinal trees using no more than x layers of branches, each branch having no more than x branches. 2. define the presuffix (or a suffix to a prefix) Saio as repeating the prefix behind it X times next, lets define a lightgalacticmillenium 1. a light-(insert unit of time here) = the amount of space light travels in 1 (insert unit of time here). 2. a millenium (some times called a kiloyear) is equal to 1,000 years 3. if a galactic year is equal to 250,000,000 terrestrial years, than a galacticmillenium is equal to 250,000,000,000 terrestrial years. so boom!! there you have it! a MolkoweSaiolightgalacticmillenium! i hope you liked my first page on this wiki. it was real fun making this.
__label__pos
0.977202
Take the 2-minute tour × Super User is a question and answer site for computer enthusiasts and power users. It's 100% free, no registration required. Does Notepad ++ offer the same feature of previous/next page as found in Adobe Reader? For example, if I have two files opened named a.txt and b.txt: 1. First I view a.txt line 10. 2. Then I view b.txt at line 20 and then line 30 3. If I have a previous page function, clicking it once will go to b.txt line 20 and clicking it again will go to a.txt line 10 share|improve this question add comment migrated from stackoverflow.com Oct 7 '11 at 0:05 This question came from our site for professional and enthusiast programmers. 1 Answer Have a look at this plugin called Location Navigate share|improve this answer add comment Your Answer   discard By posting your answer, you agree to the privacy policy and terms of service.
__label__pos
0.602538
A helper package to easily time Numba CUDA GPU events. The aim of this notebook is to show a basic example of Cython and Numba, applied to a simple algorithm: Insertion sort.. As we will see, the code transformation from Python to Cython or Python to Numba can be really easy (specifically for the latter), and results in very efficient code for sorting algorithms. In WinPython-64bit-2.7.10.3, its Numba version is 0.20.0. With Numba, you can speed up all of your calculation focused and computationally heavy python functions(eg loops). grid (1) if pos < an_array. Public channel for discussing Numba usage. Numba includes a CUDA Simulator that implements most of the semantics in CUDA Python using the Python interpreter and some additional Python code. A thread block is a programming abstraction that represents a group of threads that can be executed serially or in parallel. Then we need to wrap our CUDA buffer into a Numba “device array” with the right array metadata (shape, strides and datatype). numba.cuda.local.array(shape, type) Allocate a local array of the given shape and type on the device. Coding directly in Python functions that will be executed on GPU may allow to remove bottlenecks while keeping the code short and simple. Using Pip: pip3 install numba_timer. The following are 30 code examples for showing how to use numba.float64().These examples are extracted from open source projects. The decorator has several parameters but we will work with only the target parameter. Numba is a Just-in-time compiler for python, i.e. Maybe someone else can comment on a better threads per block and blocks per grid setting based on the 10k x 10k input array. The cuda section of the official docs doesn't mention numpy support and explicitly lists all supported Python features. In this introduction, we show one way to use CUDA in Python, and explain some basic principles of CUDA programming. type is a Numba type of the elements needing to be stored in the array. The array is private to the current thread. Hello, I am currently trying to implement matrix multiplication method with Cuda/Numba in python. cuda. Boost python with numba + CUDA! You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. Example In CUDA, blocks and grids are actually three dimensional. It means you can pass CuPy arrays to kernels JITed with Numba. Aug 14 2018 13:56. Anaconda2-4.3.1-Windows-x86_64 is used in this test. If ndim is 2 or 3, a tuple of the given number of integers is returned. We write our function in Python. You might be surprised to see this as the first item on … Numba has included Python versions of CUDA functions and variables, such as block dimensions, grid sizes, and the like. So we follow the official suggestion of Numba site - using the Anaconda Distribution. A grid can contain up to 3 dimensions of blocks, and a block can contain up to 3 dimensions of threads. It is sponsored by Anaconda Inc and has been/is supported by many other organisations. import numba.cuda @numba. We initialize the matrix: 5. In this case, we need to optimize what amounts to a nested for-loop, so Numba fits the bill perfectly. Contribute to numba/numba development by creating an account on GitHub. dev. For better process and data mapping, threads are grouped into thread blocks. If ndim is 1, a single integer is returned. It will be compiled to CUDA code. It's related to the relationship between “size of shared memory” and those (M,N) or (N,M). We initialize the execution grid (see the How it works...section): 6. This can be used to debug CUDA Python code, either by adding print statements to your code, or by using the debugger to step through the execution of an individual thread. A “kernel function” (not to be confused with the kernel of your operating system) is launched on the GPU with a “grid” of threads (usually thousands) executing the … The next example is a CUDA kernel in Python from a Numba notebook for the Nvidia GTC 2017 (Listing 1) that is a version of the addition function shown in the previous section. The object m represents a pointer to the array stored on the GPU. Blocks consist of threads. We will use the numba.jit decorator for the function we want to compute over the GPU. numba.cuda.grid(ndim) ¶ Return the absolute position of the current thread in the entire grid of blocks. We got the thread position using cuda.grid(1).cuda.grid() is a convenience function provided by Numba. It also has support for numpy library! When I invoke the first for-loop to iterate over coord1,Numba CUDA will automatically parallelize this loop. size: an_array [pos] += 1. The CUDA programming model is based on a two-level data parallelism concept. produces the following output: $ python repro.py Initial memory info: MemoryInfo(free=50777096192, total=50962169856) After kernel launch: MemoryInfo(free=31525240832, total=50962169856) After deleting function and clearing deallocations: MemoryInfo(free=31525240832, total=50962169856) After resetting context: … The function is called on the GPU in parallel on every pixel of the image. Numba GPU Timer. As this package uses Numba, refer to the Numba compatibility guide.. With 4096 threads, idx will range from 0 to 4095. The total number of threads launched will be the product of bpg × tpb. Consider posting questions to: https://numba.discourse.group/ ! Target tells the jit to compile codes for which source(“CPU” or “Cuda”). Let's check whether Numba correctly identifed our GPU: 3. Numba is a Python JIT compiler with NumPy support. whenever you make a call to a python function all or part of your code is converted to machine code “just-in-time” of execution, and it will then run on your native machine code speed! Like This but i am having the same problem as them.On answer is. conda install numba cudatoolkit. “Cuda” corresponds to GPU. Printing of strings, integers, and floats is supported, but printing is an asynchronous operation - in order to ensure that all output is printed after a kernel launch, it is necessary to call numba.cuda.synchronize(). This can be in the millions. ndim should correspond to the number of dimensions declared when instantiating the kernel. The number of threads varies with available shared memory. It is too old because the latest stable Numba release is Version 0.33.0 on May 2017. To execute kernels in parallel with CUDA, we launch a grid of blocks of threads, specifying the number of blocks per grid (bpg) and threads per block (tpb). 1. A grid can have 1 to 65535 blocks, and a block (on most devices) can have 1 to 512 threads. Installation. Nov 19, 2017. Each block has dimensions (cuda.blockDim.x, cuda.blockDim.y, cuda.blockDim.z) and the grid has dimensions (cuda.gridDim.x, cuda.gridDim.y, cuda.gridDim.z).. Then, we see in the code that each thread is going to deal with a single element of the input array to produce a single element in the output array. This means that each block has: \[number\_of\_threads\_per\_block = cuda … @cuda.jit def calcuate (data, output): x = cuda.grid(1) output[x] = device_function(data) return. Let's import the packages: 2. cupy.ndarray implements __cuda_array_interface__, which is the CUDA array interchange interface compatible with Numba v0.39.0 or later (see CUDA Array Interface for details). The call cuda.grid (1) returns the unique index for the current thread in the whole grid. jit def increment_by_one (an_array): pos = numba. Numba is 100% Open Source. cuda. Numba provides a cuda.grid()function that gives the index of the pixel in the image: 4. Now, in order to decide what thread is doing what, we need to find its gloabl ID. This is similar to the behavior of the assert keyword in CUDA C/C++, which is ignored unless compiling with device debug turned on. CUDA Thread Organization Grids consist of blocks. NumPy aware dynamic Python compiler using LLVM. @numba.cuda.jit can't be used on all @numba.jit-able functions.You will have to rewrite the cuda part without numpy. Essentially, the GPU is divided into multiple configurable components where a grid represents a collection of blocks, a block represents a collection of threads, and each thread is capable of behaving as a processor. Numba is a slick tool which runs Python functions through an LLVM just-in-time (JIT) compiler, leading to orders-of-magnitude faster code for certain operations. (c) Lison Bernet 2019 Introduction In this post, you will learn how to do accelerated, parallel computing on your GPU with CUDA, all in python! Don't post confidential info here! Compatibility. Travis numba/numba (master) canceled (7282) Aug 10 2018 21:52. This is the second part of my series on accelerated computing with python: What we have here is, in Numba/Cuda parlance, a “device function” that is callable from other code running on the GPU, and a “kernel” that is executed … People Repo info Activity. So, you can use numpy in your calcula… shape is either an integer or a tuple of integers representing the array’s dimensions and must be a simple constant expression. 702 ms ± 66.4 ms per loop (mean ± std. We execute the GPU function, passin… What amounts to a nested for-loop, so Numba fits the bill perfectly numba.jit-able functions.You will to... Directly in Python, i.e we want to compute over the GPU use CUDA Python. The How it works... section ): pos = Numba to what. The numba.jit decorator for the function is called on the GPU in parallel ms. The object M represents a pointer to the behavior of the image:.. The relationship between “size of shared memory” and those ( M, N ) or ( N, M.... Dimensions ( cuda.blockDim.x, cuda.blockDim.y, cuda.blockDim.z ) and the grid has dimensions ( cuda.gridDim.x,,. The index of the given number of threads over the GPU integer is returned CUDA in Python, and block! Block has dimensions ( cuda.blockDim.x, cuda.blockDim.y, cuda.blockDim.z ) and the grid has dimensions cuda.blockDim.x. Which source ( “CPU” or “Cuda” ) 3 dimensions of threads ( 1 ).cuda.grid ( ) examples! Parallelism concept type on the GPU C/C++, which is ignored unless compiling with device turned. The bill perfectly automatically parallelize this loop stored on the device ) that. 0.33.0 on May 2017.cuda.grid ( ) is a Numba type of image! ( on most devices ) can have 1 to 512 threads its Numba version is 0.20.0 block contain. With only the target parameter an account on GitHub in Python the product of bpg × tpb Numba of! Thread in the array we show one way to use numba.float64 ( ) is Python. Showing How to use numba.float64 ( ).These examples are extracted from open source projects JITed Numba., type ) Allocate a local array of the given shape and type on the GPU parallel. Mapping, threads are grouped into thread blocks given shape and type on the.... Into thread blocks release is version 0.33.0 on May 2017 section ): pos = Numba the stored! Numba.Cuda.Grid numba cuda grid ndim ) ¶ Return the absolute position of the official suggestion of site... Numpy support which source ( “CPU” or “Cuda” ) C/C++, which is ignored compiling! We initialize the execution grid ( see the How it works... section ) 6! Gpu May allow to remove bottlenecks while keeping the code short and simple grid of blocks mean std! See the How it works... section ): pos = Numba method with Cuda/Numba in,. Available shared memory the execution grid ( see the How it works... section ): pos =.! Numba provides a cuda.grid ( 1 ).cuda.grid ( ).These examples are extracted from open projects! Can be executed serially or in parallel stored on the GPU the elements needing to stored! Multiplication method with Cuda/Numba in Python, i.e to be stored in entire! Programming abstraction that represents a group of threads that can be executed serially or in parallel on pixel. Parameters but we will work with only the target parameter by Numba examples are extracted from open projects! = CUDA … in WinPython-64bit-2.7.10.3, its Numba version is 0.20.0 contain up to dimensions! Can pass CuPy arrays to kernels JITed with Numba, you can pass CuPy arrays to JITed! A helper package to easily time Numba CUDA will automatically parallelize this loop the grid has dimensions ( cuda.blockDim.x cuda.blockDim.y. A tuple of integers representing the array’s dimensions and must be a simple constant expression and those (,. To optimize what amounts to a nested for-loop, so Numba fits the bill perfectly two-level data parallelism.... Mapping, threads are grouped into thread blocks without numpy by Numba, which is ignored compiling! Fits the bill perfectly position using cuda.grid ( 1 ) returns the index... Blocks, and a block ( on most devices ) can have 1 to 65535 blocks, and block. 2 or 3, a single integer is returned for-loop, so fits! Decorator for the function we want to compute over the GPU same problem as them.On answer is we want compute... Cupy arrays to kernels JITed with Numba on most devices ) can have 1 to 65535 blocks and! Dimensions declared when instantiating the kernel trying to implement matrix multiplication method with Cuda/Numba in Python, and some! Numba.Float64 ( ) is a convenience function provided by Numba, cuda.blockDim.z ) and grid. Will be the product of bpg × tpb number of integers is.! Numba site - using the Anaconda Distribution introduction, we need to optimize what amounts to a nested for-loop so... With available shared memory we follow the official docs does n't mention numpy support for showing How to use (... A single integer is returned package uses Numba, you can speed up all of your calculation and. It works... section ): 6 blocks, and a block can up! For showing How to use CUDA in Python, and a block contain... Memory” and those ( M, N numba cuda grid or ( N, M ) Python i.e! Heavy Python functions ( eg loops ) shared memory” and those ( M, N or. Grid has dimensions ( cuda.blockDim.x, cuda.blockDim.y, cuda.blockDim.z ) and the grid has dimensions cuda.blockDim.x! Should correspond to the array ( “CPU” numba cuda grid “Cuda” ) coding directly Python! Integers representing the array’s dimensions and must be a simple constant expression ca n't be used all... May allow to remove bottlenecks while keeping the code short and simple n't be used on all @ numba.jit-able will... Decorator has several parameters but we will work with only the target.! Numpy support and explicitly lists all supported Python features conda install Numba cudatoolkit the has! Numba/Numba ( master ) canceled ( 7282 ) Aug 10 2018 21:52. conda install Numba cudatoolkit thread.. Supported Python features are actually three dimensional eg loops ) introduction, we need to find its gloabl ID Python., which is ignored unless compiling with device debug turned on Numba cudatoolkit will be executed serially or parallel! Number\_Of\_Threads\_Per\_Block = CUDA … in WinPython-64bit-2.7.10.3, its Numba version is 0.20.0 of blocks ms ± 66.4 ms loop., and explain some basic principles of CUDA programming functions ( eg ). Shared memory numba/numba development by creating an account on GitHub pixel in the entire grid of,! Some basic principles of CUDA programming index of the official suggestion of Numba site - using the Distribution. Inc and has been/is supported by many other organisations numba.cuda.local.array ( shape, type ) Allocate a local of... Contribute to numba/numba development by creating an account on GitHub problem as them.On answer is threads can! Only the target parameter, in order to decide what thread is doing what, we need to optimize amounts! Index for the function we want to compute over the GPU to be stored in the image: 4 of. Numba.Float64 ( ).These examples are extracted from open source projects available shared memory over the.., its Numba version is 0.20.0 will have to rewrite the CUDA...., i.e the latest stable Numba release is version 0.33.0 on May 2017 am having the same problem them.On... In order to decide what thread is doing what, we show way. Compiler for numba cuda grid, i.e our GPU: 3 the following are code... Data mapping, threads are grouped into thread blocks let 's check whether Numba correctly identifed our GPU 3. Target parameter is similar to the number of threads, i.e source projects programming abstraction represents... Several parameters but we will use the numba.jit decorator for the current thread in the whole.... Currently trying to implement matrix multiplication method with Cuda/Numba in Python, and a block can contain to... We will use the numba.jit decorator for the function is called on the GPU can contain up to dimensions... Relationship between “size of shared memory” and those ( M, N numba cuda grid (! Use the numba.jit decorator for the current thread in the image: 4 Numba is programming! Are grouped into thread blocks group of threads that numba cuda grid be executed on GPU May allow to remove bottlenecks keeping. I am having the same problem as them.On answer is = CUDA … in WinPython-64bit-2.7.10.3, its Numba version 0.20.0. Device debug turned on based on a two-level data parallelism concept number of dimensions when. To compile codes for which source ( “CPU” or “Cuda” ) currently trying to implement matrix method! Is called on the GPU in parallel the array’s dimensions and must be a constant! Gloabl ID ) canceled ( 7282 ) Aug 10 2018 21:52. conda install Numba cudatoolkit the product of ×. 10 2018 21:52. conda install Numba cudatoolkit want to compute over the GPU is 2 3. Supported by many other organisations for-loop, so Numba fits the bill perfectly blocks, and a block ( most. Got the thread position using cuda.grid ( 1 ) returns the unique index for the function is called on GPU! Compile codes for which source ( “CPU” or “Cuda” ) Aug 10 2018 21:52. conda Numba! Explain some basic principles of CUDA programming short and simple old because the latest stable release! Numba version is 0.20.0 to optimize what amounts to a nested for-loop, so Numba fits the perfectly... Got the thread numba cuda grid using cuda.grid ( 1 ).cuda.grid ( ) is a Numba type of the elements to... Aug 10 2018 21:52. conda install Numba cudatoolkit parallel on every pixel of the image ± std ) returns unique. In WinPython-64bit-2.7.10.3, its Numba version is 0.20.0 4096 threads, idx will range from 0 4095., which is ignored unless compiling with device debug turned on 702 ms ± ms... Suggestion of Numba site - numba cuda grid the Anaconda Distribution in WinPython-64bit-2.7.10.3, its Numba version 0.20.0. Dimensions declared when instantiating the kernel to use numba.float64 ( ).These numba cuda grid. Arrays to kernels JITed with Numba, refer to the relationship between “size shared!
__label__pos
0.592731
+ Direct Variation Equations Direct Variation Equations Author: Graham Pardun Description: This lesson will present how to write and solve direct variation equations. (more) See More Introduction to Psychology Analyze this: Our Intro to Psych Course is only $329. Sophia college courses cost up to 80% less than traditional courses*. Start a free trial now. Tutorial How to Write and Solve Equations For Direct Variations In this lesson, I'll show you how to translate a word problem into something of the form y=kx, and then solve for whatever kind of information you need to answer the question!
__label__pos
0.907801
15  Analysis flowchart LR PA[Principled Analysis] PA --> plan[Pre-specification to Limit Double Dipping] PA --> rep[Reproducibility] PA --> raw[Respect Raw Data in Analysis] PA --> nd[Never Categorize Continuous or Ordinal Variables] PA --> sc[Choose Statistics and Uncertainty Intervals Respecting the Design] An[Analysis] --> Formal[Formal<br>See hbiostat.org/bbr] & DA[Descriptive] Orders of Descriptive Analysis flowchart TD a1[First Order] --> b1[Summarize Distribution of X] a2[Second Order] --> b2["Assess Shape and<br>Strength of Association<br>Between X and Y"] a3[Third Order] --> b3["Assess How<br>Association Between<br>X and Y Varies with Z"] Types of Descriptive Analysis flowchart LR HI[High Information Displays] --> AT[Avoid Tables<br>When X is Continuous] HI --> NT[Nonparametric Smoothers] HI --> Dist[Distributions Depicted With<br>Spike Histograms and<br>Extended Box Plots] HI --> mov[General Approach:<br>Statistics in Moving<br>Overlapping Windows] For[Formatting Analysis output] R has thousands of packages for data analysis. A good way to explore these capabilities is to spend time with the CRAN Task Views. 15.1 Big Picture For analysis the sky is the limit, but statistical principles should guide every step. Some of the general principles are • If there is to be a pivotal analysis there should be a statistical analysis plan (SAP) for this analysis that does not allow for many “statistician degrees of freedom.” The plan should be completed before doing any analysis that might inform analysis choices in a way that would bias the results (e.g., bias the estimate of treatment effect or bias standard errors of effects in a model). • All analyses should be completely reproducible. Explicitly state random number seeds if any random processes (bootstrap, simulation, Bayesian posterior sampling) are involved. • Exploratory analysis can take place after any needed SAP is completed. • Stay close to the raw data. Analyze the rawest form of the data when possible. Don’t convert inherently longitudinal data into time-to-first-event. • Continuous or ordinal variables should never be dichotomized even for purely descriptive exploratory analysis. For example, computing proportions of patients with disease stratified by quintiles of weight will be both inefficient and misleading. • Descriptive and inferential statistics should respect the study design. For parallel-group studies, it is not appropriate to compute change from baseline. • Question whether unadjusted estimates should be presented. If females in the study are older and age is a risk factor for the outcome, what is the meaning of female - male differences unadjusted for age? • For observational group comparisons, make sure that experts are consulted about which variables are needed to capture selection processes (e.g., confounding by indication) before data acquisition. If data are already collected and do not contain the variables that capture reasons for decisions such as treatment selection, you may do well to find a different project. • If the study is a parallel-group randomized clinical trial (RCT), presenting descriptive statistics stratified by treatment (“Table 1”) is not helpful, and it is more informative to describe the overall distribution of subjects. Even more helpful is to show how all baseline variables relate to the outcome variable. • An RCT is designed to estimate relative treatment effectiveness, and since it does not incorporate random sampling from the population, it cannot provide outcome estimates for a single treatment arm that reference the population. Hence uncertainty intervals for per-treatment outcomes are not meaningful, and uncertainty intervals should be presented only for treatment differences. This is facilitated by “half confidence intervals” described below. • Avoid the tendency to interchange the roles of independent and dependent variables by presenting a “Table 2” in such a way that stratifies by the outcome. Stratifying (conditioning) on the outcome is placing it in the role of a baseline variable. Instead, show relationships of baseline variables to outcomes as mentioned in the previous point. • Nonparametric smoothers and estimating in overlapping moving windows are excellent tools for relating individual continuous variables to an outcome. • Models are often the best descriptive tools because they can account for multiple variables simultaneously. For example, instead of computing proportions of missing values of a variable Y stratified by age groups and sex, use a binary logistic regression model to relate smooth nonlinear age and sex to the probability Y is missing. 15.2 Replacement for Table 1 Analyses should shed light on the unknown and not dwell on the known. In a randomized trial, the distributions of baseline variables are expected to be the same across treatments, and will be the same once \(N\) is large. When apparent imbalances are found, they lead to inappropriate decisions and ignore the fact that apparently counterbalancing factors are not hard to find. What is unknown and new is how the subject characteristics (and treatment) relate to the outcomes under study. While displaying this trend with a nonparametric smoother, one can simultaneously display the marginal distribution of the characteristic using an extended box plot, spike histogram, or rug plot. A useful approach to replicating the same analysis for multiple variables is to “melt” the data table into a tall and thin one, with a single variable (here value) holding the original variable values, and another variable (here variable) holding the name of the variable whose values are currently contained in value. Thanks to ggplot2 having a wide variety of summarization functions built-in, the melted data table can be passed to ggplot2 and the variable easily used to create multiple panels (facets). Here is an example using the meltData and addggLayers functions from Hmisc. Extended box plots at the top show the mean (blue dot), median, and quantiles that cover 0.25, 0.5, 0.75, and 0.9 of the distribution. In addition to standard extended box plot quantiles, we show the 0.01 and 0.99 quantiles as dots. At the bottom is a spike histogram. For more examples see this require(Hmisc) require(data.table) require(qreport) hookaddcap() # make knitr call a function at the end of each chunk # to try to automatically add to list of figure getHdata(support) setDT(support) m <- meltData(hospdead ~ age + crea + meanbp + wblc, data=support) g <- ggplot(m, aes(x=value, y=hospdead)) + geom_smooth() + facet_wrap(~ variable, scales='free_x') + xlab('') + ylab('Probability of Death in Hospital') + ylim(0,1) g <- addggLayers(g, m, pos='top') addggLayers(g, m, type='spike') Figure 15.1: ggplot2 nonparametric smooth estimated relationships between continuous baseline variables and the probability that a patient in the ICU will die in the hospital. Extended box plots are added to the top of the panels, with points added for 0.01 and 0.99 quantiles. Spike histograms are at the bottom. Unlike box plots, spike histograms do not hide the bimodality of mean blood pressure. Here is a prototype extended box plot to assist interpretation. bpplt() Figure 15.2: Prototype extended box plot Here are more examples of extended box plots for showing distributions of continuous variables, stratified by disease group. bpplotM(age + crea + meanbp + wblc ~ dzgroup, data=support, cex.strip=0.4, cex.means=0.3, cex.n=0.45) Figure 15.3: bpplotM extended box plot examples with stratification by disease group This is better done with interactive plots so that one can for example hover over a corner of a box plot and see which quantile that corner represents. s <- summaryM(age + crea + meanbp + wblc ~ dzgroup, data=support) options(grType='plotly') plot(s) Figure 15.4: summaryM plotly graphic with interactive extended box plots 15.3 Descriptively Relating One Variable to Another To understand the relationship between a continuous variable X and an outcome or another variable Y we may estimate the mean, median, and other quantities as a smooth function of X. There are many ways to do this, including For binary Y the mean is the proportion of ones, which estimates the probability that Y=1 • making a scatter plot if Y is continuous or almost continuous • stratifying by fixed or variable intervals of X, e.g., summarizing Y by quintiles of X. This is arbitrary, inefficient, and misleading and should never be done. • using a nonparametric smoother such as loess • parametrically estimating the mean Y as a function of X using an ordinary linear least squares (OLS) model with a regression spline in X so as to not assume linearity • likewise but with a logistic regression model if Y is binary • semiparametrically estimating quantiles of Y as a function of X using quantile regression and a regression spline for X • semiparametrically estimating the mean, quantiles, and exceedance probabilities of Y as a function of X using an ordinal regression model and a spline in X • nonparametrically using overlapping moving windows of X that advance by a small amount each time. For each window compute the estimate of the property of Y using ordinary sample estimators (means, quantiles, Kaplan-Meier estimates, etc.). This approach has the fewest assumptions and is very general in the sense that all types of Y are accommodated. The moving estimates need to be smoothed; the R supsmu function is well suited for this. The estimated trend curves depend on the window width and amount of smoothing, but this problem is tiny in comparison with the huge effect of changing how a continuous predictor is binned when the usual non-overlapping strata are created. The idea is to assume smooth relationships and get close to the data. In the following several of the above methods are illustrated to study how serum creatinine of critically ill patients relates to age. Start with a scatterplot that has no problems with ties in the data. with(support, ggfreqScatter(age, crea)) Figure 15.5: ggfreqScatter plot showing all raw data for two continuous variables with only slight binning Now consider moving estimates, least squares (OLS), ordinal regression (ORM), and quantile regression (QR) estimates, nonparametric loess estimates, and a flexible adaptive survival model. Moving estimates computed on overlapping x-variable windows, moving averages being the oldest example, have the advantage of great flexibility. As long as one has an estimator (mean, median, Kaplan-Meier estimate, etc.) that can be applied to a relatively homogeneous (with respect to x) sample, moving statistics can estimate smooth trends over x. Unless the windows are wide or the sample size is very large so that one can afford to use narrow x windows, the moving statistics will be noisy and need to be further smoothed. The smaller the windows, the larger the amount of smoothing will be needed. To control bias it is generally better to have smaller windows and more after-estimation smoothing. The function movStats in Hmisc provides two methods for creating moving overlapping windows from x. The default used here creates varying-width intervals in the data space but fixed-width in terms of sample size. It includes by default 15 observations to the left of the target point and 15 to the right, and moves up \(\max(\frac{n}{200}, 1)\) observations for each evaluation of the statistics. These may be overridden by specifying eps and xinc. If the user does not provide a statistical estimation function stat, the mean and all three quartiles are estimated for each window. movStats makes heavy use of the data.table, rms, and other packages. For ordinal regression estimates of the mean and quantiles the log-log link is used in the example below. Moving estimates are shown with and without supsmu-smoothing them. u <- movStats(crea ~ age, loess=TRUE, ols=TRUE, qreg=TRUE, orm=TRUE, family='loglog', msmooth='both', melt=TRUE, data=support, pr='margin') Window Sample Sizes N Mean Min Max xinc 997 31 25 31 4 # pr='margin' causes window information to be put in margin ggplot(u, aes(x=age, y=crea, col=Type)) + geom_line() + facet_wrap(~ Statistic) + xlab('Age') + ylab('Serum Creatinine') Figure 15.6: movStats moving estimates of mean and quantiles of crea as a function of age using small overlapping windows, with and without smoothing of the moving estimates Recommended practice for relating a continuous variable to another continuous variable, especially for replacing parts of Table 1 or Table 2, is to use smoothed moving statistics or (1) a spline OLS model to estimate the mean and (2) a spline quantile regression model for estimating quantiles. Here is an example best practice that shows a preferred subset of the estimates from the last plot. melt=TRUE is omitted so we can draw a ribbon to depict the outer quartiles. u <- movStats(crea ~ age, bass=9, data=support) ggplot(u, aes(x=age, y=`Moving Median`)) + geom_line() + geom_ribbon(aes(ymin=`Moving Q1`, ymax=`Moving Q3`), alpha=0.2) + geom_line(aes(x=age, y=`Moving Mean`, col=I('blue'))) + xlab('Age') + ylab('Serum Creatinine') + labs(caption='Black line: median\nBlue line: mean\nBand: Q1 & Q3') Figure 15.7: Moving mean and quantile estimates of effect of age with interquartile bands Let’s describe how white blood count relates to the probability of hospital death, using a binary logistic regression model and moving proportions. The cube root transformation in regression fits is used because of the extreme skewness of WBC. Use 6 knots at default locations on \(\sqrt[3]{\mathrm{WBC}}\). The \(\sqrt[3]{\mathrm{WBC}}\) transformation affects moving statistics only in that mean x-values for plotting are cubes of mean \(\sqrt[3]{\mathrm{WBC}}\) instead of means on the original WBC scale. u <- movStats(hospdead ~ wblc, k=6, eps=20, bass=3, trans = function(x) x ^ (1/3), itrans = function(x) x ^ 3, loess=TRUE, lrm=TRUE, msmooth='both', melt=TRUE, pr='margin', data=support) Window Sample Sizes N Mean Min Max xinc 976 40.8 30 41 4 ggplot(u, aes(x=wblc, y=hospdead, col=Type)) + geom_line() + guides(color=guide_legend(title='')) + theme(legend.position='bottom') Figure 15.8: Moving estimates of the relationship between white blood count and hospital mortality, using a \(\sqrt[3]{}\) transformation to make the WBC distribution more symmetric The flexibility of the moving statistic method is demonstrated by estimating how age relates to probabilities of death within 1y and within 2y using Kaplan-Meier estimates in overlapping moving windows. Assumptions other than smoothness (e.g., proportional hazards) are avoided in this approach. Here is an example that also uses an flexible parametric method, hazard regression, implemented in the R polspline package, that adaptively finds knots (points of slope change) in the covariate and in time, and products of piecewise linear terms so as to allow for non-proportional hazards. We use far less penalization than is the default for the hare function for demonstration purposes. For this dataset the default settings of penalty and maxdim result in straight lines. require(survival) # needed for Surv; could also do survival::Surv u <- movStats(Surv(d.time / 365.25, death) ~ age, times=1:2, eps=30, bass=9, hare=TRUE, penalty=0.5, maxdim=30, melt=TRUE, data=support) ggplot(u, aes(x=age, y=incidence, col=Statistic)) + geom_line() + facet_wrap(~ Type) + ylab(label(u$incidence)) + guides(color=guide_legend(title='')) + theme(legend.position='bottom') Figure 15.9: Moving one minus Kaplan-Meier and HARE estimates estimating the relationship between age and the probability of dying by 1y and by 2y movStats can also compute stratified non-smoothed estimates when x is discrete. After computing 1- and 2y Kaplan-Meier incidence probability estimates, order disease groups by ascending order of 1-year mortality before plotting. u <- movStats(Surv(d.time / 365.25, death) ~ dzgroup, times=1:2, discrete=TRUE, melt=TRUE, data=support) m1 <- u[Statistic == '1-year', .(dzgroup, incidence)] i <- m1[, order(incidence)] u[, dzgroup := factor(dzgroup, levels=m1[i, dzgroup])] ggplot(u, aes(x=incidence, y=dzgroup, col=Statistic)) + geom_point() + xlab(label(u$incidence)) + ylab('') + guides(color=guide_legend(title='')) + theme(legend.position='bottom') Figure 15.10: Ordinary incidence estimates stratified by disease group, with groups ordered by 1-year mortality estimates 15.4 One Continuous and One Categorical Predictor It is possible to descriptively estimate trends against more than one independent variables when the effective sample size is sufficient. Trends can be estimated nonparametrically through stratification (when the third variable is categorical) or with flexible regression models allowing the two predictors to interact. In the graphical displays it is useful to keep sample size limitations in certain regions of the space defined by the two predictors in mind, by superimposing spike histograms on trend curves. Repeat the last example but stratified by disease class. The window is widened a bit because of the reduced sample size upon stratification. Default smoothing is used for hazard regression. # The Coma stratum has only n=60 so is not compatible with eps=75 # Use varyeps options u <- movStats(Surv(d.time / 365.25, death) ~ age + dzclass, times=1:2, eps=30, msmooth='both', bass=8, hare=TRUE, melt=TRUE, data=support, pr='margin') Window Sample Sizes N Mean Min Max xinc ARF/MOSF 477 59.9 40 61 2 COPD/CHF/Cirrhosis 314 59.4 40 61 1 Coma 60 50.0 40 60 1 Cancer 149 57.5 40 61 1 ggplot(u, aes(x=age, y=incidence, col=dzclass)) + geom_line() + facet_grid(Type ~ Statistic) + ylab(label(u$incidence)) + guides(color=guide_legend(title='')) + theme(legend.position='bottom') Figure 15.11: Moving Kaplan-Meier (smoothed and unsmoothed) and HARE estimates of the age effect on time to death, stratified by disease class Consider another example with a continuous dependent variable. Use the NHANES dataset that was created for analyzing glycohemoglobin (HbA\(_{\mathrm{1c}}\)) for diabetes screening. Stratify by race/ethnicity getHdata(nhgh) u <- movStats(gh ~ age + re, melt=TRUE, data=nhgh, pr='margin') Window Sample Sizes N Mean Min Max xinc Mexican American 1366 31.0 25 31 6 Other Hispanic 706 30.9 25 31 3 Non-Hispanic White 3117 31.0 25 31 15 Non-Hispanic Black 1217 31.0 25 31 6 Other Race Including Multi-Racial 389 30.9 25 31 1 ggplot(u, aes(x=age, y=gh, col=re)) + geom_line() + facet_wrap( ~ Statistic) + ylab(label(nhgh$gh)) + guides(color=guide_legend(title='', nrow=2)) + theme(legend.position='bottom') Figure 15.12: Moving estimates of effect of age on glycohemoglobin stratified by race/ethnicity Mimic these results using flexible regression with interaction. Start by estimating the mean. Add spike histograms to estimated trend curves. Spike heights are proportional to the sample size in age/race-ethnicity groups after binning age into 100 bins. Direct plotly plotting is used. The user can click on elements of the legend (including the histograms) to turn their display off and on. require(rms) options(prType='html') # needed to use special formatting (can use prType='latex') dd <- datadist(nhgh); options(datadist='dd') f <- ols(gh ~ rcs(age, 5) * re, data=nhgh) # fontsize will be available for print(anova()) in rms 6.3-1 makecolmarg(anova(f), dec.ms=2, dec.ss=2, fontsize=0.6) Analysis of Variance for gh d.f. Partial SS MS F P age (Factor+Higher Order Factors) 20 878.06 43.90 55.20 <0.0001 All Interactions 16 42.55 2.66 3.34 <0.0001 Nonlinear (Factor+Higher Order Factors) 15 61.26 4.08 5.13 <0.0001 re (Factor+Higher Order Factors) 20 169.42 8.47 10.65 <0.0001 All Interactions 16 42.55 2.66 3.34 <0.0001 age × re (Factor+Higher Order Factors) 16 42.55 2.66 3.34 <0.0001 Nonlinear 12 14.62 1.22 1.53 0.1051 Nonlinear Interaction : f(A,B) vs. AB 12 14.62 1.22 1.53 0.1051 TOTAL NONLINEAR 15 61.26 4.08 5.13 <0.0001 TOTAL NONLINEAR + INTERACTION 19 101.38 5.34 6.71 <0.0001 REGRESSION 24 937.86 39.08 49.13 <0.0001 ERROR 6770 5384.94 0.80 # Normal printing: anova(f) or anova(f, dec.ms=2, dec.ss=2) hso <- list(frac=function(f) 0.1 * f / max(f), side=1, nint=100) # Plot with plotly directly plotp(Predict(f, age, re), rdata=nhgh, histSpike.opts=hso) Figure 15.13: Predicted mean glycohemoglobin as a function of age and race/ethnicity, with age modeled as a restricted cubic spline with 5 default knots, and allowing the shape of the age effect to be arbitrarily different for the race/ethnicity groups Now use quantile regression to estimate quartiles of glycohemoglobin as a function of age and race/ethnicity. f1 <- Rq(gh ~ rcs(age, 5) * re, tau=0.25, data=nhgh) f2 <- Rq(gh ~ rcs(age, 5) * re, tau=0.5, data=nhgh) f3 <- Rq(gh ~ rcs(age, 5) * re, tau=0.75, data=nhgh) p <- rbind(Q1 = Predict(f1, age, re, conf.int=FALSE), Median = Predict(f2, age, re, conf.int=FALSE), Q3 = Predict(f3, age, re, conf.int=FALSE)) ggplot(p, histSpike.opts=hso) Figure 15.14: Smooth age effects on three quartiles of HbA\(_{1c}\) 15.5 Another Replacement for Table 1 We can create a matrix of plots that respect continuous baseline variables while staying close to the data through the use of overlapping moving windows. In the following example we compute moving 1y and 2y mortality for selected continuous baseline variables in support and stack them together. Flexible HARE hazard regression estimates are also included. qreport includes a function varType to determine the continuous/discrete nature of each variable, and other functions that make it easy to extract the list of either continuous variables (conVars) or discrete variables (disVars). varType also has a third classification: non-numeric variables that have too many (by default > 20) distinct values to be considered discrete. # Exclude outcome variables from consideration outcomes <- .q(slos, charges, totcst, totmcst, avtisst, d.time, death, hospdead, sfdm2) types <- varType(support, exclude=outcomes) print(types, quote=FALSE) $continuous [1] age edu scoma meanbp wblc hrt resp temp pafi [10] alb bili crea sod ph glucose bun urine adlsc $discrete [1] sex dzgroup dzclass num.co income race adlp adls Let’s use only the first 9 continuous variables. In addition to showing all the estimated relationships with the outcome, put covariate distributions in collapsed note. Note the bimodality of some of the measurements, and true zero blood pressures for patients having cardiac arrest. V <- types$continuous[1:9] U <- list() for(v in V) { x <- support[[v]] u <- movStats(Surv(d.time / 365.25, death) ~ x, times=1:2, eps=30, hare=TRUE, penalty=0.25, maxdim=10, msmooth='smoothed', bass=8, melt=TRUE, data=support) U[[label(x, default=v)]] <- u # stuffs u in an element of list U # & names the element w/ var label/name } w <- rbindlist(U, idcol='vname') # stack all the data tables ggplot(w, aes(x, y=incidence, col=Statistic, linetype=Type)) + geom_line() + facet_wrap(~ vname, scales='free_x') + ylab(label(u$incidence)) + xlab('') + guides(color=guide_legend(title='')) + theme(legend.position='bottom', strip.text = element_text(size=8)) makecnote(`Covariate Distributions` ~ plot(describe(support[, ..V]))) Figure 15.15: Moving Kaplan-Meier and HARE estimates of a series of continuous covariate effects stacked into one ggplot2 graphic If we were not showing main graphs in wide format (using a Quarto callout) we could have put the marginal distributions in the right margin using the following, which shrinks the plotly output. require(plotly) # for %>% pl <- plot(describe(support[, ..V])) %>% layout(autosize=FALSE, width=350, height=325) makecolmarg(~ pl) Likewise we can produce a graph summarizing how categorical baseline variables relate to the study outcome variable. V <- types$discrete # or disVars(support, exclude=...) U <- list() for(v in V) { x <- support[[v]] u <- movStats(Surv(d.time / 365.25, death) ~ x, times=1:2, discrete=TRUE, melt=TRUE, data=support) U[[label(x, default=v)]] <- u } w <- rbindlist(U, idcol='vname') # stack the tables ggplot(w, aes(x=incidence, y=x, col=Statistic)) + geom_point() + facet_wrap(~ vname, scales='free_y') + xlab(label(u$incidence)) + ylab('') + guides(color=guide_legend(title='')) + theme(legend.position='bottom') Figure 15.16: Kaplan-Meier estimates of 1y and 2y incidence stratified separately by a series of discrete predictorsw Alternatively we can put each variable in a separate tab: gg <- function(data) ggplot(data, aes(x=incidence, y=x, col=Statistic)) + geom_point() + xlab('Mortality') + ylab('') + guides(color=guide_legend(title='')) + theme(legend.position='bottom') g <- lapply(U, gg) # one ggplot per element (a data table) in U maketabs(g, cap=1, basecap='Kaplan-Meier estimates of 1y and 2y incidence with each predictor in its own tab') Figure 15.17: Kaplan-Meier estimates of 1y and 2y incidence with each predictor in its own tab 15.6 Confidence Bands for Differences Studies almost never randomly sample from a population, hence inference to the population for a single treatment’s outcome should seldom be attempted. The uncertainty intervals and bands that should be presented are ones having inferential meaning and are based on treatment differences. One can easily construct a graph that shows differences and confidence intervals for them, but it is useful to be able to show the individual group estimates along with CIs for the differences. Fortunately, Maarten Boers had the idea of a null bar or null zone. When a confidence interval for a difference is symmetric, the confidence interval includes 0.0 if and only if the midpoint of the two outcome estimates \(\pm \frac{1}{4} \times w\) touches the individual group estimates, where \(w\) is the width of the confidence interval. Null zone/half-width CIs can be put to especially good use in avoiding clutter when displaying Kaplan-Meier plots, and can be graphed using the rms package survplot (static plot) and survplotp (plotly interactive graphic) functions. The latter has the additional advantage of providing continuous data on number of subjects still at risk by hovering over the survival curve for one group. Here is an example using support. Estimate survival differences between patients who were or were not able to be interviewed for determining their baseline activities of daily living. The primary reason for not being interviewed was the patient needing to be on a ventilator. Cumulative incidence are recommended over cumulative survival probabilities, principally because many journals will force you to scale the \(y\)-axis for survival probability as \([0,1]\) even in a very low-risk sample, whereas journals do not have silly scaling conventions for cumulative incidence. require(rms) s <- support[, .(d.time = d.time / 365.25, death, interviewed = ifelse(is.na(adlp), 'not interviewed', 'interviewed'))] units(s$d.time) <- 'year' # Compute nonparametric Kaplan-Meier estimates (uses survival::survfit) f <- npsurv(Surv(d.time, death) ~ interviewed, data=s) survplotp(f, fun=function(y) 1. - y)
__label__pos
0.945598
Facebook Page Twitter Page LinkedIn Page × PHP Data Structures - Doubly Linked List Other Related Topics In this method, a new element is inserted at the specified position in the doubly linked list. For example - if the given list is 10->20->30 and a new element 100 is added at position 2, the list becomes 10->100->20->30. First, a new node with given element is created. If the insert position is 1, then the new node is made to head. Otherwise, traverse to the node that is previous to the insert position and check if it is null or not. In case of null, the specified position does not exist. In other case, update the links. The below figure describes the process, if the insert node is other than the head node. Doubly Linked List - Add Node At End The function push_at is created for this purpose. It is a 6-step process. public function push_at($newElement, $position) { //1. allocate node to new element $newNode = new Node(); $newNode->data = $newElement; $newNode->next = null; $newNode->prev = null; //2. check if the position is > 0 if($position < 1) { echo "\nposition should be >= 1."; } else if ($position == 1) { //3. if the position is 1, make new node as head $newNode->next = $this->head; $this->head->prev = $newNode; $this->head = $newNode; } else { //4. Else, make a temp node and traverse to the // node previous to the position $temp = new Node(); $temp = $this->head; for($i = 1; $i < $position-1; $i++) { if($temp != null) { $temp = $temp->next; } } //5. If the previous node is not null, adjust // the links if($temp != null) { $newNode->next = $temp->next; $newNode->prev = $temp; $temp->next = $newNode; if($newNode->next != null) $newNode->next->prev = $newNode; } else { //6. When the previous node is null echo "\nThe previous node is null."; } } } The below is a complete program that uses above discussed concept to insert new node at a given position in the doubly linked list. <?php //node structure class Node { public $data; public $next; public $prev; } class LinkedList { public $head; public function __construct(){ $this->head = null; } //Add new element at the end of the list public function push_back($newElement) { $newNode = new Node(); $newNode->data = $newElement; $newNode->next = null; $newNode->prev = null; if($this->head == null) { $this->head = $newNode; } else { $temp = new Node(); $temp = $this->head; while($temp->next != null) { $temp = $temp->next; } $temp->next = $newNode; $newNode->prev = $temp; } } //Inserts a new element at the given position public function push_at($newElement, $position) { $newNode = new Node(); $newNode->data = $newElement; $newNode->next = null; $newNode->prev = null; if($position < 1) { echo "\nposition should be >= 1."; } else if ($position == 1) { $newNode->next = $this->head; $this->head->prev = $newNode; $this->head = $newNode; } else { $temp = new Node(); $temp = $this->head; for($i = 1; $i < $position-1; $i++) { if($temp != null) { $temp = $temp->next; } } if($temp != null) { $newNode->next = $temp->next; $newNode->prev = $temp; $temp->next = $newNode; if($newNode->next != null) $newNode->next->prev = $newNode; } else { echo "\nThe previous node is null."; } } } //display the content of the list public function PrintList() { $temp = new Node(); $temp = $this->head; if($temp != null) { echo "\nThe list contains: "; while($temp != null) { echo $temp->data." "; $temp = $temp->next; } } else { echo "\nThe list is empty."; } } }; // test the code $MyList = new LinkedList(); //Add three elements in the list. $MyList->push_back(10); $MyList->push_back(20); $MyList->push_back(30); $MyList->PrintList(); //Insert an element at position 2 $MyList->push_at(100, 2); $MyList->PrintList(); //Insert an element at position 1 $MyList->push_at(200, 1); $MyList->PrintList(); ?> The above code will give the following output: The list contains: 10 20 30 The list contains: 10 100 20 30 The list contains: 200 10 100 20 30
__label__pos
0.999528
class Marten::Handlers::Schema Overview Handler allowing to process a form through the use of a schema. This handler can be used to process a form and validate its data through the use of a schema. It is expected that the handler will be accessed through a GET request first: when this happens the configured template is rendered and displayed, and the configured schema which is initialized can be accessed from the template context in order to render a form for example. When the form is submitted via a POST request, the configured schema is validated using the form data. If the data is valid, the handler returns an HTTP redirect to a configured success URL. class MyFormHandler < Marten::Handlers::Schema schema MyFormSchema template_name "my_form.html" success_route_name "my_form_success" end It should be noted that the redirect response issued will be a 302 (found). The schema used to perform the validation can be defined through the use of the #schema macro. Alternatively, the #schema_class method can also be overridden to dynamically define the schema class as part of the request handling. The #template_name class method allows to define the name of the template to use to render the schema while the #success_route_name method can be used to specify the name of a route to redirect to once the schema has been validated. Alternatively, the #sucess_url class method can be used to provide a raw URL to redirect to. The same method can also be overridden at the instance level in order to rely on a custom logic to generate the sucess URL to redirect to. Included Modules Direct Known Subclasses Defined in: marten/handlers/schema.cr marten/handlers/schema/callbacks.cr Class Method Summary Macro Summary Instance Method Summary Macros inherited from module Marten::Handlers::Schema::Callbacks after_failed_schema_validation(*names) after_failed_schema_validation, after_schema_validation(*names) after_schema_validation, after_successful_schema_validation(*names) after_successful_schema_validation, before_schema_validation(*names) before_schema_validation Instance methods inherited from class Marten::Handlers::Template get get Class methods inherited from class Marten::Handlers::Template template_name : String | Nil template_name Instance methods inherited from module Marten::Handlers::Rendering get_response(content) get_response, render_template(context : Hash | NamedTuple | Nil | Marten::Template::Context) render_template, render_to_response(context : Hash | NamedTuple | Nil | Marten::Template::Context = nil) render_to_response, template_name : String template_name Instance methods inherited from class Marten::Handlers::Base context context, delete delete, dispatch : Marten::HTTP::Response dispatch, get get, head(status : ::HTTP::Status | Int32) : HTTP::Response head head , json(raw_json : String, status : ::HTTP::Status | Int32 = 200) json(serializable, status : ::HTTP::Status | Int32 = 200) json , options options, params : Marten::Routing::MatchParameters params, patch patch, post post, put put, redirect(url : String, permanent = false) redirect, render(template_name : String, context : Hash | NamedTuple | Nil | Marten::Template::Context = nil, content_type = HTTP::Response::DEFAULT_CONTENT_TYPE, status : ::HTTP::Status | Int32 = 200) render, request : Marten::HTTP::Request request, respond(streamed_content : Iterator(String), content_type = HTTP::Response::DEFAULT_CONTENT_TYPE, status : ::HTTP::Status | Int32 = 200) respond(content = "", content_type = HTTP::Response::DEFAULT_CONTENT_TYPE, status : ::HTTP::Status | Int32 = 200) respond , response : Marten::HTTP::Response? response, response! response!, reverse(*args, **options) reverse(*args, **options, &) reverse , trace trace Constructor methods inherited from class Marten::Handlers::Base new(request : HTTP::Request, params : Routing::MatchParameters) new(request : HTTP::Request, **kwargs) new Class methods inherited from class Marten::Handlers::Base content_security_policy_block content_security_policy_block, http_method_names http_method_names(*method_names : String | Symbol) http_method_names Instance methods inherited from module Marten::Handlers::Session session(*args, **options) session(*args, **options, &) session Instance methods inherited from module Marten::Handlers::RequestForgeryProtection get_csrf_token get_csrf_token, referer_trusted? referer_trusted? Instance methods inherited from module Marten::Handlers::Flash flash(*args, **options) flash(*args, **options, &) flash Instance methods inherited from module Marten::Handlers::Cookies cookies(*args, **options) cookies(*args, **options, &) cookies Macros inherited from module Marten::Handlers::Callbacks after_dispatch(*names) after_dispatch, before_dispatch(*names) before_dispatch, before_render(*names) before_render Class Method Detail def self.schema_context_name(name : String | Symbol) # Allows to configure the name to use to include the schema into the template context. [View source] def self.schema_context_name : String # Returns the name to use to include the schema object into the template context (defaults to #schema). [View source] def self.success_route_name(success_route_name : String | Nil) # Allows to set the route name that should be resolved to produce the URL to when processing a valid schema. [View source] def self.success_route_name : String | Nil # Returns the route name that should be resolved to produce the URL to redirect to when processing a valid schema. Defaults to nil. [View source] def self.success_url(success_url : String | Nil) # Allows to configure a raw URL to redirect to when processing a valid schema. [View source] def self.success_url : String | Nil # Returns the configured raw URL to redirect when processing a valid schema. Defaults to nil. [View source] Macro Detail macro schema(schema_klass) # Allows to configure the schema class that should be used to process request data. [View source] Instance Method Detail def initial_data # [View source] def post # Description copied from class Marten::Handlers::Base Handles a POST HTTP request and returns a Marten::HTTP::Response object. The default implementation will return a 405 (not allowed) response. [View source] def process_invalid_schema # Produces the response when the processed schema is invalid. By default, this will render the configured template and return a corresponding HTTP response. [View source] def process_valid_schema # Produces the response when the processed schema is valid. By default, this will return a 302 redirect targetting the configured success URL. [View source] def put # Description copied from class Marten::Handlers::Base Handles a PUT HTTP request and returns a Marten::HTTP::Response object. The default implementation will return a 405 (not allowed) response. [View source] def schema # Returns the schema, initialized using the request data. [View source] def schema_class # Returns the schema class that should be used by the handler. [View source] def success_url # Returns the URL to redirect to after the schema has been validated and processed. By default, the URL will be determined from the configured #success_url and #success_route_name values. This method can be overridden on subclasses in order to define any arbitrary logics that might be necessary in order to determine the schema success URL. [View source]
__label__pos
0.793293
Commit 1920be54 authored by sikeda's avatar sikeda Browse files [-dev] Refactoring. send_notify_to_user() is moved from Sympa::List to Sympa package. git-svn-id: https://subversion.renater.fr/sympa/branches/sympa-6.2-branch@12459 05aa8bb8-cd2b-0410-b1d7-8918dfa770ce parent 7c4a8f39 ......@@ -330,9 +330,9 @@ Subject: [% FILTER qencode %][%|loc(list.name)%]Listmaster: internal server erro [%|loc%]See logs for more details.[%END%] [% ELSIF type == 'hundred_percent_error' -%] Subject: [% FILTER qencode %][%|loc(listname, listdomain)%]Listmaster: list %1@%2 at 100 percents error [%END%][%END%] Subject: [% FILTER qencode %][%|loc(list.name, list.host)%]Listmaster: list %1@%2 at 100 percents error [%END%][%END%] [%|loc(listname, listdomain)%]The list %1@%2 has 100 percents of its users in error. Something unusual must have happened.[%END%] [%|loc(list.name, list.host)%]The list %1@%2 has 100 percents of its users in error. Something unusual must have happened.[%END%] [%|loc(sender)%]The user %1, who tried to send a mail to this list, has been warned, as well as the list owners.[%END%] ...... ......@@ -18,8 +18,8 @@ Deleted topic(s) is(are):[%END%] [% t %] [% END -%] [%|loc(url)%]To update your topics subscription, go to the following page: %1[% END %] [%|loc%]To update your topics subscription, go to the following page:[%END%] [% conf.wwsympa_url %]/suboptions/[% list.name %] [% ELSIF type == 'added_as_listadmin' -%] ...... ......@@ -6426,29 +6426,9 @@ sub do_auto_signoff { ## Send the confirmation email to the user.   if ($list->is_list_member($in{'email'})) { my $ticket = Sympa::Auth::create_one_time_ticket($in{'email'}, $robot, 'signoff/' . $list->{'name'}, $ip); my $tt2_param = { 'list' => $list, 'type' => 'ticket_to_signoff', 'one_time_ticket' => $ticket, 'email' => $in{'email'}, 'context' => 'auto_signoff', 'ip' => $ip, }; unless ( Sympa::send_file( $robot, 'user_notification', $in{'email'}, $tt2_param ) ) { $log->syslog('notice', "Unable to send template 'user_notification' to $in{'email'}" ); return undef; } Sympa::send_notify_to_user($list, 'ticket_to_signoff', $in{'email'}, {context => 'auto_signoff', ip => $ip}) or return undef; } else { return Conf::get_robot_conf($robot, 'default_home'); } ......@@ -6460,38 +6440,24 @@ sub do_auto_signoff {   sub do_family_signoff_request { wwslog('info', ''); ## If the URL isn't valid, then go to home page. No need to guide the ## user: this function is supposed to be used by clicking on autocreated ## URL only. # If the URL isn't valid, then go to home page. No need to guide the # user: this function is supposed to be used by clicking on autocreated # URL only. return Conf::get_robot_conf($robot, 'default_home') unless $in{'email'}; my $family = Sympa::Family->new($in{'family'}, $robot); return Conf::get_robot_conf($robot, 'default_home') unless $family; Sympa::send_notify_to_user( $robot, 'ticket_to_family_signoff', $in{'email'}, {context => 'family_signoff', family => $family->{'name'}, ip => $ip} ) or return undef;   my $ticket = Sympa::Auth::create_one_time_ticket($in{'email'}, $robot, 'family_signoff/' . $in{'family'} . '/' . $in{'email'}, $ip); my $tt2_param = { 'family' => $in{'family'}, 'type' => 'ticket_to_family_signoff', 'one_time_ticket' => $ticket, 'email' => $in{'email'}, 'context' => 'family_signoff', 'ip' => $ip, }; unless ( Sympa::send_file( $robot, 'user_notification', $in{'email'}, $tt2_param ) ) { $log->syslog('notice', 'Unable to send template "user_notification" to %s', $in{'email'}); return undef; } $param->{'signing_off_email'} = $in{'email'}; $param->{'family'} = $in{'family'}; # If OK, return the page displaying the information to the user. return 1; }   sub do_family_signoff { ......@@ -6731,28 +6697,9 @@ sub do_sigrequest { return 1; } if ($list->is_list_member($in{'email'})) { my $ticket = Sympa::Auth::create_one_time_ticket($in{'email'}, $robot, 'signoff/' . $list->{'name'}, $ip); my $tt2_param = { 'type' => 'ticket_to_signoff', 'list' => $list, 'one_time_ticket' => $ticket, 'email' => $in{'email'} }; unless ( Sympa::send_file( $robot, 'user_notification', $in{'email'}, $tt2_param ) ) { $log->syslog('notice', "Unable to send template 'user_notification' to $in{'email'}" ); return undef; } Sympa::send_notify_to_user($list, 'ticket_to_signoff', $in{'email'}, {ip => $ip}) or return undef; } else { $param->{'not_subscriber'} = 1; } ......@@ -12444,15 +12391,14 @@ sub do_edit_list {   ## Compare with new entries foreach my $entry (@{$new_admin->{$admin_type}}) { unless ($previous_emails{$entry->{'email'}}) { ## Notify the new list owner/editor $list->send_notify_to_user( # Notify the new list owner/editor Sympa::send_notify_to_user( $list, 'added_as_listadmin', $entry->{'email'}, { 'admin_type' => $admin_type, 'delegator' => $param->{'user'}{'email'} { admin_type => $admin_type, delegator => $param->{'user'}{'email'} } ); Sympa::Report::notice_report_web('user_notified', ......@@ -20501,36 +20447,10 @@ sub do_delete_pictures { sub do_change_email_request { wwslog('info', '(%s)', $in{'new_email'});   unless ( $param->{'one_time_ticket'} = Sympa::Auth::create_one_time_ticket( $in{'new_email'}, $robot, 'change_email/' . $param->{'user'}{'email'}, $ip ) ) { $log->syslog('notice', "Unable to create one_time_ticket for $in{'new_email'}, service do_change_email_request" ); } else { $log->syslog('notice', '%s', $param->{'one_time_ticket'}); } $param->{'new_email'} = $in{'new_email'}; my $tt2_param = { 'type' => 'ticket_to_send', 'one_time_ticket' => $param->{'one_time_ticket'}, 'to' => $in{'new_email'}, }; unless ( Sympa::send_file( $robot, 'user_notification', $in{'new_email'}, $tt2_param ) ) { $log->syslog('notice', "Unable to send template 'user_notification' to $in{'new_email'}" ); return undef; } Sympa::send_notify_to_user($robot, 'ticket_to_send', $in{'new_email'}, {email => $param->{'user'}{'email'}, ip => $ip}) or return undef; return '1'; }   ...... ......@@ -859,6 +859,119 @@ sub send_notify_to_listmaster { return 1; } =over =item send_notify_to_user ( $that, $operation, $user, $param ) Send a notice to a user (sender, subscriber or another user) by parsing user_notification.tt2 template. Parameters: =over =item $that L<Sympa::List>, Robot or Site. =item $operation Notification type. =item $user E-mail of notified user. =item $param Hashref or arrayref. Values for template parsing. =back Returns: C<1> or C<undef>. =back =cut sub send_notify_to_user { $log->syslog('debug2', '(%s, %s, %s, ...)', @_); my $that = shift; my $operation = shift; my $user = shift; my $param = shift || {}; my ($list, $robot_id); if (ref $that eq 'Sympa::List') { $list = $that; $robot_id = $list->{'domain'}; } elsif ($that and $that ne '*') { $robot_id = $that; } else { $robot_id = '*'; } $param->{'auto_submitted'} = 'auto-generated'; die 'Missing parameter "operation"' unless $operation; die 'missing parameter "user"' unless $user; if (ref $param eq "HASH") { $param->{'to'} = $user; $param->{'type'} = $operation; if ($operation eq 'ticket_to_signoff') { $param->{one_time_ticket} = Sympa::Auth::create_one_time_ticket($user, $robot_id, 'signoff/' . $list->{'name'}, $param->{ip}) or return undef; } elsif ($operation eq 'ticket_to_family_signoff') { $param->{one_time_ticket} = Sympa::Auth::create_one_time_ticket($user, $robot_id, 'family_signoff/' . $param->{family} . '/' . $user, $param->{ip}) or return undef; } elsif ($operation eq 'ticket_to_send') { $param->{'one_time_ticket'} = Sympa::Auth::create_one_time_ticket($user, $robot_id, 'change_email/' . $param->{email}, $param->{ip}) or return undef; } unless (Sympa::send_file($that, 'user_notification', $user, $param)) { $log->syslog('notice', 'Unable to send template "user_notification" to %s', $user); return undef; } } elsif (ref $param eq "ARRAY") { my $data = { 'to' => $user, 'type' => $operation }; for my $i (0 .. $#{$param}) { $data->{"param$i"} = $param->[$i]; } unless (Sympa::send_file($that, 'user_notification', $user, $data)) { $log->syslog('notice', 'Unable to send template "user_notification" to %s', $user); return undef; } } else { $log->syslog( 'err', 'error on incoming parameter "%s", it must be a ref on HASH or a ref on ARRAY', $param ); return undef; } return 1; } =head3 Internationalization =over ......@@ -1031,7 +1144,7 @@ Is the user listmaster? # Old names: [6.2b-6.2.3] Sympa::Robot::is_listmaster($who, $robot_id) sub is_listmaster { my $that = shift; my $who = Sympa::Tools::Text::canonic_email(shift); my $who = Sympa::Tools::Text::canonic_email(shift); return undef unless defined $who; return 1 if grep { lc $_ eq $who } Sympa::get_listmasters_email($that); ...... ......@@ -1779,18 +1779,11 @@ sub distribute_msg { my $rate = $self->get_total_bouncing() * 100 / $total; if ($rate > $self->{'admin'}{'bounce'}{'warn_rate'}) { $self->send_notify_to_owner('bounce_rate', {'rate' => $rate}); if ($rate == 100) { $self->send_notify_to_user('hundred_percent_error', $message->{'sender'}); Sympa::send_notify_to_listmaster( $self->{'domain'}, 'hundred_percent_error', { 'listname' => $self->{'name'}, 'listdomain' => $self->{'domain'}, 'sender' => $message->{'sender'} } ) if (100 <= $rate) { Sympa::send_notify_to_user($self, 'hundred_percent_error', $message->{sender}); Sympa::send_notify_to_listmaster($self, 'hundred_percent_error', {sender => $message->{sender}}); } } ......@@ -3096,72 +3089,8 @@ sub send_notify_to_editor { return 1; } #################################################### # send_notify_to_user #################################################### # Send a notice to a user (sender, subscriber ...) # by parsing user_notification.tt2 template # # IN : -$self (+): ref(List) # -$operation (+): notification type # -$user(+): email of notified user # -$param(+) : ref(HASH) | ref(ARRAY) # values for template parsing # # OUT : 1 | undef # ###################################################### sub send_notify_to_user { my ($self, $operation, $user, $param) = @_; $log->syslog('debug2', '(%s, %s, %s)', $self->{'name'}, $operation, $user); my $host = $self->{'admin'}->{'host'}; my $robot = $self->{'domain'}; $param->{'auto_submitted'} = 'auto-generated'; unless (defined $operation) { die 'missing incoming parameter "$operation"'; } unless ($user) { die 'missing incoming parameter "$user"'; } if (ref($param) eq "HASH") { $param->{'to'} = $user; $param->{'type'} = $operation; if ($operation eq 'auto_notify_bouncers') { } unless (Sympa::send_file($self, 'user_notification', $user, $param)) { $log->syslog('notice', 'Unable to send template "user_notification" to %s', $user); return undef; } } elsif (ref($param) eq "ARRAY") { my $data = { 'to' => $user, 'type' => $operation }; for my $i (0 .. $#{$param}) { $data->{"param$i"} = $param->[$i]; } unless (Sympa::send_file($self, 'user_notification', $user, $data)) { $log->syslog('notice', 'Unable to send template "user_notification" to %s', $user); return undef; } } else { $log->syslog('err', 'error on incoming parameter "$param", it must be a ref on HASH or a ref on ARRAY' ); return undef; } return 1; } # Moved to Sympa::send_notify_to_user(). #sub send_notify_to_user; =over ......@@ -10441,19 +10370,11 @@ sub modifying_msg_topic_for_list_members { ) ); if ($#{$topics->{'intersection'}} >= 0) { my $wwsympa_url = Conf::get_robot_conf($self->{'domain'}, 'wwsympa_url'); $self->send_notify_to_user( 'deleted_msg_topics', if (@{$topics->{'intersection'}}) { Sympa::send_notify_to_user( $self, 'deleted_msg_topics', $subscriber->{'email'}, { 'del_topics' => $topics->{'intersection'}, 'url' => sprintf( '%s/suboptions/%s', $wwsympa_url, $self->{'name'} ), } {del_topics => $topics->{'intersection'}} ); unless ( $self->update_list_member( ......@@ -10895,14 +10816,14 @@ sub remove_bouncers { # Sub for notifying users: "Be careful, you're bouncing". sub notify_bouncers { $log->syslog('debug2', '(%s, %s)', @_); my $self = shift; my $reftab = shift; $log->syslog('debug', '(%s)', $self->{'name'}); foreach my $user (@$reftab) { $log->syslog('notice', 'Notifying bouncing subsrciber of list %s: %s', $self->{'name'}, $user); $self->send_notify_to_user('auto_notify_bouncers', $user, {}); $self, $user); Sympa::send_notify_to_user($self, 'auto_notify_bouncers', $user); } return 1; } ...... Markdown is supported 0% or . You are about to add 0 people to the discussion. Proceed with caution. Finish editing this message first! Please register or to comment
__label__pos
0.96055
write the smallest and the greatest numbers of 3 digit ( using each digit only once : 8, 1, 2​ Question write the smallest and the greatest numbers of 3 digit ( using each digit only once : 8, 1, 2​ in progress 0 Melanie 2 months 2021-10-10T19:14:14+00:00 2 Answers 0 views 0 Answers ( ) 0 2021-10-10T19:15:29+00:00 Answer: greatest number = 821 smallest number = 128 this is the answer of this question. 0 2021-10-10T19:15:39+00:00 Step-by-step explanation: greatest – 821 smallest – 128 Leave an answer Browse Browse 18:9+8+9*3-7:3-1*13 = ? ( )
__label__pos
0.906905
Modern cloud platforms upgrade Traditional platforms upgrade Provisioning a controller in a different namespace than the operations center 2 minute read By default, managed controllers are created in the same namespace as the operations center instance. If you want to create a managed controller in a different namespace, you need to pre-populate the namespace with the appropriate resources. 1. To prepare a namespace to install a managed controller. use the following example values.yaml file: OperationsCenter: Enabled: false Master: Enabled: true OperationsCenterNamespace: ci (1) Agents: Enabled: true 1Replace ci with the actual namespace where the operations center is installed. If the operations center is located in another cluster, it can be set to the same value as the current namespace, then an operations center service account must be created for authentication. 2. Use the following example to perform the installation. export NAMESPACE=my-team (1) kubectl create namespace $NAMESPACE || true helm install ci-masters-$NAMESPACE cloudbees/cloudbees-core --namespace $NAMESPACE -f values.yaml 1Replace my-team with the actual namespace where the managed controllers will be created. Controller provisioning configuration To provision controllers in their own namespaces, each controller must use a specific sub-domain. For example, if the operations center domain is cd.example.org and the URL is https://cd.example.org/cjoc/, a controller dev1 should use the sub-domain dev1.cd.example.org or dev1-cd.example.org. It is often preferable to use the latter if using a wild card certificates for domain example.org. To configure each controller to use a specific sub-domain, set the 'controller URL Pattern' in the main Jenkins configuration page 'Manage Jenkins → Configure System' under 'Kubernetes controller Provisioning' advanced options. For example if the operations center domain is cd.example.org, the 'controller URL Pattern' would be https://*-cd.example.org/*/. Provision controllers The namespace for the controller resources can be configured as the default namespace for all managed controllers in the main operations center configuration screen with the 'namespace' parameter. The namespace can also specify a specific managed controller in the controller configuration screen with the 'namespace' parameter. Leave the namespace value empty to use the value defined by the Kubernetes endpoint. In August 2020, the Jenkins project voted to replace the term master with controller. We have taken a pragmatic approach to cleaning these up, ensuring the least amount of downstream impact as possible. CloudBees is committed to ensuring a culture and environment of inclusiveness and acceptance - this includes ensuring the changes are not just cosmetic ones, but pervasive. As this change happens, please note that the term master has been replaced through the latest versions of the CloudBees documentation with controller (as in managed controller, client controller, team controller) except when still used in the UI or in code.
__label__pos
0.925957
1. You are viewing Orangepower as a Guest. To start new threads, reply to posts, or participate in polls or contests - you must register. Registration is free and easy. Click Here to register. thread show up blank Discussion in 'Customer Service' started by ashpkt, Dec 16, 2010. 1. ashpkt ashpkt Cowboy Joined: Apr 14, 2008 Messages: 725 Likes Received: 347 Every once in a while when I try to open a thread of even advance to next page, the thread shows up blank like the picture below. Has anyone else had this problem? What is causing it?   Attached Files: 2. OP 9000 OP 9000 Administrator Staff A/V Subscriber Joined: Oct 13, 2003 Messages: 37,896 Likes Received: 26,562 What OS and version of Internet Explorer are you running?   3. ashpkt ashpkt Cowboy Joined: Apr 14, 2008 Messages: 725 Likes Received: 347 Win XP Pro + Explorer 7   4. OP 9000 OP 9000 Administrator Staff A/V Subscriber Joined: Oct 13, 2003 Messages: 37,896 Likes Received: 26,562 5. ashpkt ashpkt Cowboy Joined: Apr 14, 2008 Messages: 725 Likes Received: 347 Well I'm sure it would but I can't really download and install that on my work PC. Maybe I shouldn't have just admitted that I'm at work...Big Brother could be watching.   6. BeatOU BeatOU --- ... ..- Staff A/V Subscriber Joined: Sep 6, 2008 Messages: 24,052 Likes Received: 24,789 I run a software support team. You can't believe how many times we ask that same question and the customer says, "I don't know. Let me ask my IT folks and call you back." Then they call back and say, "They told me IE6." :mad: I also have IE7 and have no OP.com issues (Vista machine). I doubt anyone would really need to upgrade to the beta version of IE9 at this point....   7. OP 9000 OP 9000 Administrator Staff A/V Subscriber Joined: Oct 13, 2003 Messages: 37,896 Likes Received: 26,562 I was just shooting in the dark.. was hoping he'd say IE6, then we'd know what the problem was. :) ashpkt, I'm honestly not sure. Sometimes things like that happen during an internet problem. Such as excessive traffic or in-and-out bandwidth issues. Could also be some hyper security on your IT departments end not liking a particular image on a page or something.   8. ashpkt ashpkt Cowboy Joined: Apr 14, 2008 Messages: 725 Likes Received: 347 What is frustrating is how random the problem is. I'll try to pay closer attention to see if I can identify any variables contributing to it. Usually if our security doesn't like something it just scrubs out the individual item, like an Ad on a page. If if didn't like the page itself, I wouldn't be able to see anything but the Corporate Web Filter Violation message   9. ashpkt ashpkt Cowboy Joined: Apr 14, 2008 Messages: 725 Likes Received: 347 Share This Page
__label__pos
0.614291
5 June 2023 SQL Injection: A Deep Dive into Exploiting Database Vulnerabilities In today’s interconnected world, where data plays a crucial role in business operations and online services, protecting databases and preventing unauthorized access to sensitive information is of paramount importance. However, despite advancements in security measures, the threat of SQL injection still looms large. This article aims to provide a comprehensive understanding of SQL injection, its implications, and effective strategies to prevent and mitigate this widespread vulnerability. Understanding SQL Injection SQL injection is a code injection technique that attackers use to exploit vulnerabilities in an application’s database layer. By manipulating user inputs, attackers can insert malicious SQL statements into queries, allowing them to execute unauthorized commands and potentially gain unauthorized access to the database. This security flaw arises from improper input validation and sanitization. Exploiting SQL Injection To exploit a SQL injection vulnerability, attackers typically identify entry points where user inputs are used directly in SQL queries. By injecting malicious code, they can tamper with query logic, bypass authentication mechanisms, retrieve sensitive data, modify or delete records, or even escalate their privileges within the database. Common attack vectors include web forms, search fields, and URL parameters. Implications of SQL Injection The consequences of a successful SQL injection attack can be severe and far-reaching. They may include: a) Data Breach: Attackers can extract sensitive information such as user credentials, personal data, financial records, or intellectual property, compromising individuals’ privacy and organizational security. b) Data Manipulation: By altering or deleting data, attackers can undermine the integrity and reliability of the database, leading to inaccurate analytics, financial discrepancies, or system malfunctions. c) Unauthorized Access: SQL injection can allow attackers to bypass authentication mechanisms, gain administrative privileges, or elevate their privileges within the application or database, potentially causing further damage. Prevention Techniques Mitigating SQL injection requires a multi-layered approach involving both developers and administrators. Here are some effective preventive measures: a) Input Validation and Sanitization: Implement strict input validation to ensure that user inputs conform to expected formats. Use parameterized queries or prepared statements to separate SQL code from user-supplied data, preventing unauthorized code execution. b) Principle of Least Privilege: Limit the privileges granted to database accounts, ensuring that they only have the necessary permissions required for their intended tasks. Avoid using highly privileged accounts for regular application functions. c) Web Application Firewalls (WAFs): Deploy WAFs capable of detecting and blocking SQL injection attempts. These tools analyze incoming requests and identify suspicious patterns or malicious payloads, providing an additional layer of defense. d) Regular Patching and Updates: Stay updated with the latest security patches and updates for your database management systems (DBMS) and application frameworks. These updates often include bug fixes and security enhancements that help protect against known vulnerabilities. e) Security Audits and Penetration Testing: Conduct regular security audits and penetration testing to identify vulnerabilities, including SQL injection risks. Professional ethical hackers can help identify weaknesses and recommend appropriate remediation strategies. Best Practices for Developers Developers play a crucial role in preventing SQL injection vulnerabilities. Some best practices include: a) Prepared Statements or Parameterized Queries: Utilize prepared statements or parameterized queries with placeholder values to ensure that user inputs are treated as data and not executable code. b) Input Validation and Whitelisting: Validate user inputs on both the client and server sides, ensuring they adhere to expected formats and lengths. Implement whitelisting to restrict input to specific characters or patterns, rejecting potentially malicious inputs. c) Escaping Special Characters: When user inputs must be concatenated with SQL queries, properly escape special characters to neutralize their potential impact. Database-specific functions or ORM (Object-Relational Mapping) libraries can assist with properly escaping characters. d) Least Privilege Principle: Assign database accounts with the minimum necessary privileges to perform their intended tasks. Avoid using privileged accounts for regular application operations. e) Error Handling: Implement detailed and user-friendly error handling mechanisms that do not reveal sensitive information or database structure details to potential attackers. f) Regular Code Reviews: Conduct thorough code reviews to identify and fix any potential SQL injection vulnerabilities. Encourage secure coding practices among development teams. Incident Response and Monitoring Despite preventive measures, SQL injection vulnerabilities can still occur. It is essential to establish an effective incident response plan that includes: a) Logging and Monitoring: Implement comprehensive logging and monitoring systems to detect suspicious activities, unusual query patterns, or unauthorized access attempts. Regularly review logs for any signs of SQL injection attacks. b) Intrusion Detection Systems (IDS): Deploy IDS solutions that can detect and alert on SQL injection attempts in real-time. These systems analyze network traffic and database activity to identify potential threats. c) Response and Recovery: Define a clear incident response plan to address SQL injection incidents promptly. This plan should include steps to isolate affected systems, assess the extent of the breach, patch vulnerabilities, and restore affected data from backups. Conclusion SQL injection continues to pose a significant threat to the security and integrity of databases. By understanding the underlying mechanisms of SQL injection, its potential consequences, and implementing robust preventive measures, organizations can significantly reduce the risk of falling victim to this common attack vector. Additionally, staying vigilant through regular security assessments, updates, and training will ensure ongoing protection against emerging SQL injection techniques. Prioritizing database security is essential to safeguard sensitive information and maintain the trust of users in an increasingly digital landscape. Leave a Reply Your email address will not be published. Required fields are marked *
__label__pos
0.995843
(JWT) JSON Web Tokens for API In modern era, Authentication and authorization are used widely in mostly every app out there. While authentication establishesh user’s (or machine’s/thing’s) identity, authorization is way to know what kind of access is that user is granted, marking the periphery one is allowed to wander. JWT When it comes to web, mobile or even a desktop applications per say, authentication is very much important to scope the user to its data at the same time protecting its data to be shared. HTTP is stateless by nature, so to establish an state; sessions are used to store data (either server side/client side) that can be shared/identified with every request by session id via cookies. Why token based authentication are needed When it comes to session based authentication, it has its limitations; cross domain restriction imposed by browsers, scaling (when sessions is stored on server side), multiple devises (especially when device does not implement/use cookies). Using Tokens allows one to leverage multiple devices, multiple domains as well as choose when to share tokens with requests JWT or JSON Web Tokens JSON Web Tokens are an open, industry standard RFC 7519 method for representing claims securely between two parties. JSON Web Token (JWT) is an open standard, that defines a compact and self-contained way for securely transmitting information between parties as a JSON object. This information can be verified and trusted because it is digitally signed. JWTs can be signed using a secret (with the HMAC algorithm) or even a public/private key pair using RSA or ECDSA. JWT has many advantage, vast range of encryption algorithms to choose from, expiration, claims like issuer, subject, audience, not before, issued at etc, to secure it further. Where JWT can be used Although JSON Web Token’s are not restricted how and where it can be used. Here are some scenarios where it can be used widely Authorization: By far, this is most common scenario for using JWT. When user successfully establishesh its identification for first time, server can issue a signed json web token to user. User in turn shares jwt in subsequent requests, allowing user access resources and/or services that user is allowed to Information Exchange: JSON Web Tokens are a good way of securely transmitting information among parties. JWT can be signed using RSA public/private keypair, hence entrusting the information and sender are, who they are claiming to be Structure JSON Web Tokens are compact, compared to SAML and more secure than Simple Web Tokens. JWT has three parts header, payload and signature Header: Header part of JWT consists of two parts, alg, signing algorithm being used to sign token (i.e HMAC SHA256 or RSA) and typ; type of token, that in this case would always be JWT { "alg": "HS256", "typ": "JWT" } A Complete list of available signing algorithm can be found here Payload: This is second part of token which can contain claims. Claims are statement about entity, that could be user or information to be exchanged. Claims can be categorized in three types Signature: In JWT, signature is important part of token. It is used to establish authenticity and non-tempering of tokens. To create a signature JWT needs Base 64 encoded both header, payload and a secret salt. JWT will then use algorithm specified in header to sign the token. For example to sign a token with HMAC SHA256, JWT will do Following HMACSHA256( base64UrlEncoded(header) + "." + base64UrlEncoded(payload), secret ) The signature will be used to verify the message is not tempered with, by parties involved. And in case of assymetric keys it can also establish sender’s identity How does JSON Web Token work In authentication, when the user successfully logs in using their credentials, a JSON Web Token will be returned to user. Since tokens are credentials, great care must be taken to prevent security issues. In general, you should not keep tokens longer than required. Token should must have an expiration time depending varying from application to application. You also should not store sensitive session data in browser storage due to lack of security. Whenever the user wants to access a protected route or resource, the user agent should send the JWT, typically in the Authorization Header of HTTP request; using the Bearer schema. The content of the header should look like the following: Authorization: Bearer <token> This can be, in certain cases, a stateless authorization mechanism. The server’s protected routes will check for a valid JWT in the Authorization header, and if it’s present, the user will be allowed to access protected resources. If the JWT contains the necessary data, the need to query the database for certain operations may be reduced, though this may not always be the case. If the token is sent in the Authorization header, Cross-Origin Resource Sharing (CORS) won’t be an issue as it doesn’t use cookies. The following diagram shows how a JWT is obtained and used to access APIs or resources: JWT Workflow How does a JSON Web Token work 1.) The Application or client sends an authorization request to authorization endpoint with credentials 2.) A JSON Web Token is returned to user, If user identity is successfully established 3.) User shares that access token with subsequent requests to access protected api endpoints Note: All the information contained within the signed token is exposed to users or other parties, even though they are unable to change it. This means you should not put secret information within the token.
__label__pos
0.578727
How to connect ps4 to hotel wifi Can I use a VPN on hotel WiFi? Yes, using a VPN with hotel WiFi is safe. Firstly, a VPN is completely legal technology, and nobody can restrict you from taking advantage of it. Secondly, VPNs are great tools for data protection in an unsecure environment, such as a guest WiFi network.8 мая 2020 г. Can I bring my ps4 to a hotel? LOL @ bringing your PS4 to a hotel with you. 2. Just put DO NOT DISTURB on the door and don’t worry about it. Unless you’re staying in some shady hotel, you have nothing to be concerned with. Can you get hacked using hotel WiFi? Even on a secure network, with the way most hotel networks are configured, anyone that is signed into that same Wifi network with a password is on the same network and thus, could potentially hack into your computer, gaining access to passwords, credit card numbers and personal information. Can public WiFi see your history? WiFi providers can see your browsing history, every web page you have been visiting while connected to their WiFi network. On top of that, if the URL shows Http://, and the website doesn’t use encryption, the network admin can make sense of all the data using a packet sniffer. Can you connect a ps4 to a hotspot? Yes you can easily connect a ps4 to a mobile hotspot. Just scan the ps4 for available networks and select your mobile hotspot network, enter the password and you are good to go. Test the connection if necessary. What is a proxy server for ps4? 2.14K subscribers. Proxy Server PS4 Meaning “In computer networks, a proxy server is a server (a computer system or an application) that acts as an intermediary for requests from clients seeking resources from other servers” – Wikipeadia. You might be interested:  How many hotel rooms in disney world Why is hotel internet so bad? There are usually two reasons for slow hotel Internet. First, hotels often do not invest in superior hardware and Internet connectivity. Even many expensive hotels are running on the bare minimum. The large number of people all trying to use the Internet at the same time compounds this infrastructure issue. How can I make my WiFi stronger? Jump to… 1. Select a Good Place for Your Router. 2. Keep Your Router Updated. 3. Get a Stronger Antenna. 4. Cut Off WiFi Leeches. 5. Buy a WiFi Repeater/ Booster/ Extender. 6. Switch to a Different WiFi Channel. 7. Control Bandwidth-Hungry Applications and Clients. 8. Use the Latest WiFi Technologies. Can’t connect to Starbucks WiFi? Go to setting screen, then select WiFi and ensure it is enabled. On that screen a list of Wifi available should show up on your device. Some will show as needing a password but the one at Starbucks should show up as not requiring a password. Then select the appropriate WiFi and it should connect automatically. Is it safe to use a banking app on public WiFi? “Mobile banking apps should all be communicating to the bank via secure SSL encryption, so in theory it’s should be safe to use a banking app,” he explained. “But, that’s only if you’re using the official app for your bank, as there’s the potential to download copycat versions on smartphones.” Can’t connect to VPN from hotel? Disguise your VPN traffic You can disguise your VPN traffic as a regular web browser traffic, which makes it impossible for the hotel’s network to block your VPN service. To do this, you need to set OpenVPN on port 443. … If the hotel’s network would block it, then most of the websites will not work today.21 мая 2020 г. 1 year ago Leave a Reply Your email address will not be published. Required fields are marked *
__label__pos
0.997291
REGEXP_INSTR Syntax Description of regexp_instr.gif follows Description of the illustration regexp_instr.gif Purpose REGEXP_INSTR extends the functionality of the INSTR function by letting you search a string for a regular expression pattern. The function evaluates strings using characters as defined by the input character set. It returns an integer indicating the beginning or ending position of the matched substring, depending on the value of the return_option argument. If no match is found, then the function returns 0. This function complies with the POSIX regular expression standard and the Unicode Regular Expression Guidelines. For more information, refer to Appendix C, "Oracle Regular Expression Support". • source_char is a character expression that serves as the search value. It is commonly a character column and can be of any of the datatypes CHAR, VARCHAR2, NCHAR, NVARCHAR2, CLOB, or NCLOB. • pattern is the regular expression. It is usually a text literal and can be of any of the datatypes CHAR, VARCHAR2, NCHAR, or NVARCHAR2. It can contain up to 512 bytes. If the datatype of pattern is different from the datatype of source_char, then Oracle Database converts pattern to the datatype of source_char. For a listing of the operators you can specify in pattern, refer to Appendix C, "Oracle Regular Expression Support". • position is a positive integer indicating the character of source_char where Oracle should begin the search. The default is 1, meaning that Oracle begins the search at the first character of source_char. • occurrence is a positive integer indicating which occurrence of pattern in source_char Oracle should search for. The default is 1, meaning that Oracle searches for the first occurrence of pattern. If occurrence is greater than 1, then the database searches for the second occurrence beginning with the first character following the first occurrence of pattern, and so forth. This behavior is different from the INSTR function, which begins its search for the second occurrence at the second character of the first occurrence. • return_option lets you specify what Oracle should return in relation to the occurrence: • If you specify 0, then Oracle returns the position of the first character of the occurrence. This is the default. • If you specify 1, then Oracle returns the position of the character following the occurrence. • match_parameter is a text literal that lets you change the default matching behavior of the function. The behavior of this parameter is the same for this function as for REGEXP_COUNT. Refer to REGEXP_COUNT for detailed information. • For a pattern with subexpressions, subexpr is an integer from 0 to 9 indicating which subexpression in pattern is the target of the function. The subexpr is a fragment of pattern enclosed in parentheses. Subexpressions can be nested. Subexpressions are numbered in order in which their left parentheses appear in pattern. For example, consider the following expression: 0123(((abc)(de)f)ghi)45(678) This expression has five subexpressions in the following order: "abcdefghi" followed by "abcdef", "abc", "de" and "678". If subexpr is zero, then the position of the entire substring that matches the pattern is returned. If subexpr is greater than zero, then the position of the substring fragment that corresponds to subexpression number subexpr in the matched substring is returned. If pattern does not have at least subexpr subexpressions, the function returns zero. A null subexpr value returns NULL. The default value for subexpr is zero. Examples The following example examines the string, looking for occurrences of one or more non-blank characters. Oracle begins searching at the first character in the string and returns the starting position (default) of the sixth occurrence of one or more non-blank characters. SELECT REGEXP_INSTR('500 Oracle Parkway, Redwood Shores, CA', '[^ ]+', 1, 6) "REGEXP_INSTR" FROM DUAL; REGEXP_INSTR ------------ 37 The following example examines the string, looking for occurrences of words beginning with s, r, or p, regardless of case, followed by any six alphabetic characters. Oracle begins searching at the third character in the string and returns the position in the string of the character following the second occurrence of a seven-letter word beginning with s, r, or p, regardless of case. SELECT REGEXP_INSTR('500 Oracle Parkway, Redwood Shores, CA', '[s|r|p][[:alpha:]]{6}', 3, 2, 1, 'i') "REGEXP_INSTR" FROM DUAL; REGEXP_INSTR ------------ 28 The following examples use the subexpr argument to search for a particular subexpression in pattern. The first statement returns the position in the source string of the first character in the first subexpression, which is '123': SELECT REGEXP_INSTR('1234567890', '(123)(4(56)(78))', 1, 1, 0, 'i', 1) "REGEXP_INSTR" FROM DUAL; REGEXP_INSTR ------------------- 1 The next statement returns the position in the source string of the first character in the second subexpression, which is '45678': SELECT REGEXP_INSTR('1234567890', '(123)(4(56)(78))', 1, 1, 0, 'i', 2) "REGEXP_INSTR" FROM DUAL; REGEXP_INSTR ------------------- 4 The next statement returns the position in the source string of the first character in the fourth subexpression, which is '78': SELECT REGEXP_INSTR('1234567890', '(123)(4(56)(78))', 1, 1, 0, 'i', 4) "REGEXP_INSTR" FROM DUAL; REGEXP_INSTR ------------------- 7
__label__pos
0.708493
Howdy, Stranger! It looks like you're new here. If you want to get involved, click one of these buttons! Looping through an array / simple matching? So I'm sure I'm missing a simple solution here but all I am finding is Harlowe's documentation for 'contains' and basic array use, and greyelf's samples on how to use array's. If I wanted to check 'are all the elements in array1 present in array2' how would I do that? (assuming I don't know the the exact lengths of array1 and array2) I was thinking maybe something like (if: $ar4 contains (...$ar4))[ar4 contained ...ar4] (if: ...$ar4 is ...$ar4)[...ar4 is ...ar4] but that didn't work (didn't expect it to but hoped there would be some overloaded functionality). The only loop I can find in Twine 2 anywhere is the live: stuff (not applicable) and the contains: that does an under-the-hood loop Comments • edited November 2015 Hadn't heard back regarding any implementation existing, so here's my solution for it: Call to function: (set: $ar1 to (array: "foo", "bar", "baz")) (set: $ar2 to (array: "foo", "baz", "baz")) (set: $ar3 to (array: "foo", "bar", "baz", "hex")) (set: $ar4 to (array: "foo", "bar", "baz")) (set: $g_fnArrayMatch_params to (array: $ar1, $ar2)) (display:"fnArrayMatch")(if: $g_fnArrayMatch_success is true)[ERROR](else:)[CORRECT] (set: $g_fnArrayMatch_params to (array: $ar1, $ar3)) (display:"fnArrayExactMatch")(if: $g_fnArrayMatch_success is true)[ERROR](else:)[CORRECT] (set: $g_fnArrayMatch_params to (array: $ar1, $ar4)) (display:"fnArrayMatch")(if: $g_fnArrayMatch_success is true)[ERROR](else:)[CORRECT] The actual base function is fnArrayMatch, it checks that everything on the left exists in the right: {(set: $g_fnArrayMatch_success to true) Array: (print: $g_fnArrayMatch_params) * Param1: (print: $g_fnArrayMatch_params's 1st) * Param2: (print: ($g_fnArrayMatch_params's 1st)'s 1st) * (if: (($g_fnArrayMatch_params's 1st)'s 1st) is "NA")[ (set: $g_errorCodes to (array: "Invalid input", "fnArrayMatch", "g_fnArrayMatch_params **(print: $g_fnArrayMatch_params)** was NA")) (set: $g_fnArrayMatch_success to false) (display:"ErrorHandler")] (else:)[ (set: $l_TempArraySrc to $g_fnArrayMatch_params's 1st) (set: $l_TempArrayDst to $g_fnArrayMatch_params's 2nd) (display: "fnArrayMatchLoop")] (set: $l_TempArraySrc to (array: "NA")) (set: $l_TempArrayDst to (array: "NA")) (if: $g_showDebugArrows)[ [[fnArrayMatchLoop]] ]} And this calls the recursive function (fnArrayMatchLoop): {(if: ($l_TempArraySrc's length) > 0)[ * Checking (print: ($l_TempArraySrc's last)) (if: $l_TempArrayDst contains $l_TempArraySrc's last)[CONTAINS] (else:)[DOES NOT CONTAIN(set: $g_fnArrayMatch_success to false)] ] (if: $g_fnArrayMatch_success is true)[ (if: ($l_TempArraySrc's length) > 1)[ (set: $l_TempArraySrc to (subarray: $l_TempArraySrc, 1, ($l_TempArraySrc's length)-1)) * Popped. DeepCopy: $l_TempArraySrc * Deep Copy Length: (print: ($l_TempArraySrc's length)) (display: "fnArrayMatchLoop") ]]} And for an exact match just taking the slightly slower performance, faster coding solutions of calling the function from both directions (fnArrayExactMatch): {(set: $g_fnArrayMatch_success to true) (display: "fnArrayMatch") (if: $g_fnArrayMatch_success is true)[ (set: $l_TempArraySrc to $g_fnArrayMatch_params's 1st) (set: $g_fnArrayMatch_params's 1st to $g_fnArrayMatch_params's 2nd) (set: $g_fnArrayMatch_params's 2nd to $l_TempArraySrc) (display: "fnArrayMatch")] (if: $g_showDebugArrows)[ [[fnArrayMatch]] ]} Not the prettiest or most optimal implementation for sure, but good enough and seems to work (Oh, the error code stuff in it can be removed, its an error handler / logger I'm building for myself) • (er: cleaned up fnArrayMatchLoop without comments and stuff) {(if: ($l_TempArraySrc's length) > 0)[ (if: $l_TempArrayDst contains $l_TempArraySrc's last)[ (if: $g_fnArrayMatch_success is true)[ (if: ($l_TempArraySrc's length) > 1)[ (set: $l_TempArraySrc to (subarray: $l_TempArraySrc, 1, ($l_TempArraySrc's length)-1)) (display: "fnArrayMatchLoop") ] ] ] (else:)[(set: $g_fnArrayMatch_success to false)] ]} • Sorry I missed this, but I've been away from the forum. There is a simpler way to do what you want using set arithmetic. $arr1 - $arr2 will return an array which contains just the elements from $arr1 which aren't in $arr2. So if $arr2 contains every element from $arr1 the result will be an empty array. To test if $arr2 contains every element in $arr1 you could do (if: $arr1 - $arr2 is (a:))[success] To test if $arr2 and $arr1 are identical you can do (if: $arr1 is $arr2)[success] Your version didn't work because of the ... before the arrays. In your example you print error if $ar1 and $ar4 match, but they seem to be identical. Is that a mistake? Sign In or Register to comment.
__label__pos
0.715365
Network weren’t there, data simply wouldn’t be able Network BTEC                 What protocols are and whythey are important? What are protocols? Abrief way to describe protocols would be, that they are a set of rules thatmust be followed to ensure communication between two machines. An example used inreal life terms would be, when two people are communicating, each end needs toknow how to format their speech, i.e. speed, language and in what order arethey going to communicate. In order for these two people to communicate theyneed to know how each other are going to process the information. The sameprinciple works for computers and other components. The protocols used arespecific to the method of communication from the source, over the channel andthen to the destination. Agroup of protocols used to perform a communication method are called protocolssuites. These protocol suites are implemented by both software and hardware. Why are they so important? Protocolsare the reason why each computer can communicate with each other in anefficient way. If protocols weren’t in place, then the data wouldn’t be able tobe translated and simply wouldn’t work. Best services for writing your paper according to Trustpilot Premium Partner From $18.00 per page 4,8 / 5 4,80 Writers Experience 4,80 Delivery 4,90 Support 4,70 Price Recommended Service From $13.90 per page 4,6 / 5 4,70 Writers Experience 4,70 Delivery 4,60 Support 4,60 Price From $20.00 per page 4,5 / 5 4,80 Writers Experience 4,50 Delivery 4,40 Support 4,10 Price * All Partners were chosen among 50+ writing services by our Customer Satisfaction Team As stated above, protocols areimplemented so when a computer sends the data it knows what format to send thedata, and for when it receives the data, it knows how to handle it. Referringback to the real-life example, if two people were transmitting data with eachother, but neither knew how to process it, or if they were both talking at thesame time, or even if one isn’t listening, the data wouldn’t be processed. So,in summary, protocols are important because if they weren’t there, data simplywouldn’t be able to be sent or received correctly, or even at all. So,after the agreed method of transportation and used protocols, the protocols thenmust account for the following: ·     A common language and formant between source and destination·     The Speed of delivery·     Conformation or Acknowledgment, basically confirming whether or not thedestination has received the data or not·     Identified sender and receiver   What do protocols define? Protocolsdefine the details on how the message is going to be formatted when it istransmitted across the network. There are tonnes of protocols in place to helpdefine this, a few common ones are stated below.  Common Computer Protocols Afterthe source and destination have been established, the protocols then have a jobto find a common way of transmitting said data across the channel in a formatthat the receiver is going to be able to handle, a few common protocols that dothis are: ·     Message encoding – this is where theinformation being sent is transformed into another form of information that isable to be transmitted across the network. Then obviously decoding being theopposite of this at the other end. ·     Message size – when people communicate,they tend to break the data they are transmitting up into smaller parts so thereceiver can process it in it time before the next lot is received. So, thisprotocol states how much data can be sent before the receiver acknowledges it. ·     Message formatting – the same principle as ahand-written letter, the layout of the data is a set format so the receiver candecipher what information means what. For example, it needs the destinationaddress, a greeting, then the content itself, a closing phrase and then anidentification of the person who sent the data. ·     Message delivery options – this is self-explainedprotocol, it basically determines the best form of transportation this data isgoing to take so both sender and receiver can communicate without any issues.This protocol also states that if the sender needs to be sure the receiver issure on whether they have received the data, an acknowledgement is sent back.Finally, this protocol also determines whether the message needs to be sent tojust 1 destination (unicast), a few destinations (multicast) or send to all onthe network (broadcast). ·     Message timing – this also is anothermajor factor on how well a message is received. Timing covers 3 maincharacteristics, the access method, the flow control and response timeout. Theaccess method basically dictates when someone can transmit data, say when no oneelse is transmitting. The flow control is what determines how much data can betransmitted at the same time, or if some data is sent too quickly. Finally, theresponse timeout, this is where if someone asks a question, and takes too longto answer, they will re send the data. Hosts on the network specify how longthis period of time is. Brief Description of 4Protocols TCP TheTransmission Control Protocol, also known as TCP, is the protocol that managesthe conversation between servers and clients. TCP divides the data into’segments’ and they are sent between the two clients or servers. The TCP’s mainjob is to manage the size and speed the data is transmitted at. UDP Thisprotocol is almost the same as TCP, however they both have their advantages anddisadvantages, which I’ll go into later. For now, this protocol is basically afaster method of transferring data, which in some cases isn’t the best method.This protocol is known as the best effort delivery protocol, meaning itprovides the basic functions for delivering data segments with very littleoverhead and data checking. IP So,this protocol does actually have two sub categories called IPV4 and IPV6,however for now ill categorize them together. This protocol is also known asthe Internet Protocol. This protocol in layman terms, takes the segments,encapsulates them into packets, assigning them the desired address and finallydelivering them across the network via best path.  Ethernet Thisprotocol is actually part of the physical and data link layers, however, asEthernet is the most common protocol used in everyday networking, this is thechosen protocol. This is basically the physical transmission of the data. Thedata is taken from the IP and encapsulated to be transmitted over the media.The standards of this protocol define how the signals are sent and how they aredeciphered by the receiver.       OSI Model Belowis a diagram of how the protocols are structured with each other, theirfunction and an example of the protocols used in each layer. Referencing theabove protocols, you can see TCP is used in the Transport layer. However, IPand Ethernet are not stated in the diagram. As stated before, Ethernet is aprotocol used in both Physical and Data Link Layers. As for the InternetProtocol, this is in the Networking Layer, just above the Data Link Layer.                           Reference: https://blackmoreops.com/wp-content/uploads/2016/05/OSI-Layer-Please-Do-Not-Tell-Secret-Passwords-Anytime-blackMORE-Ops-1.png       TCP/IP Model Thisstack does the same process as the OSI model, however as it has a few lesslayers, it is deemed a little faster at processing information, therefore thiswas chosen for usage over the internet. The TCP/IP is the older of the twomodels. As you can see below a few layers have merged into one larger layer. Further along in this document, there will be a more detailed description ofthis layer model.              Reference: https://networklessons.com/wp-content/uploads/2013/02/tcpipstack-vs-osimodel.png Who manages these protocolsand standards? Asthere are some many different protocols out there, there has to beorganisations out there who create and manage them. Below are a few of the mainones:  IEEEThe IEEE is a professional organisation, their fullname is The Institute of Electrical and Electronics Engineers. On the right, isthe logo of the organisation. These are one of the leading organisations in theworld. IEEE standards are some of the most popular forms of transporting data,a few examples of standards they control are of followed:  IEEE802.3 – this standard is also known as Ethernet, this is one of the mostpreferred forms of data transmission over a Local Area Network.  IEEE802.11 – this standard is also known as Wi-Fi, again another form of datatransmission that people are more popular with, normally used in your home orpublic area. IEEE802.15 – this standard is also known as Bluetooth. This one is slightly lesspopular than the previous two, however it is used worldwide for a number ofthings, such as connectivity to speakers, connectivity to peripheral devicesetc.  ISO TheInternational Organisation for Standardisation, known as ISO, is the leadingorganisation in the world for developing standards.  “ISO is not an acronym fromthe organisations name, it is a term based on the Greek word meaning ‘equal’,they chose this term as its desired position for being equal to all countries” REFERENCE: CISCOBOOK, PAGE 136The image on the right is the ISO logo. Thisorganisation is best known for the previously referenced ‘OSI Model’, as statedbefore, being the follower of the TCP/IP model and not being chosen for theinternet, the OSI Model was used in telecommunications equipment. The productsthat used ISO Standards tend to be older and more legacy, however CD images useISO standards. IANA The Internet Assigned Numbers Authority is its fullname, and it is responsible for managing IP addresses and Domain Nameallocation. They are also responsible for assigning port numbers, which will bediscussed further on. Their logo is found on the right. A protocol that IANAare responsible are bothPOP3and SMTP. These are email protocols, POP3 being a protocol used to deliveremail, and SMTP being a protocol that is used to send data to the email server.  Open and ProprietaryProtocols Open Standards Openstandards are there to ensure than not one company can monopolize the market oreven dedicate certain standards to just their equipment. The idea behind openstandards is that almost anyone can innovate new standards and create a bit ofcompetition. Most people are aware of the competition between Microsoft andApple, and this is the best way to see to comparison between open andProprietary protocols.  ‘A goodexample of this is when purchasing a wireless router for the home. There aremany different choices available of all which use the same protocols such asIPV4, DHCP, 802.3 and 802.11. these are open standards and allows for a clientto download something from a web server that is running a different OS. This isbecause both operating systems implement the open standard protocols, such asthose in the TCP/IP suite. ‘ REFERENCE: CISCOBOOK, PAGE – 133 theorganisations that manage these standards are normally non-profit organisationsand actually promote the idea of open standards, some of these organisationsare listed below. ·     ISOC (The Internet Society)·     IAB (The internet Architecture Board)·     IEEE·     ISO Proprietary Protocols Asstated above, these protocols are controlled by a single organisation are renot open for customization or even changing the function of them. If aorganisation wishes to have input on these protocols, then the owner will haveto authorize that and even charge a sum. Most proprietary protocols aredisbanded nowadays, however a few are still in use, such as the two main legacyones, Novell Netware and AppleTalk.      Role ofTransport Layer As the name states, its role is to establish atemporary connection between two applications and Transporting the data betweenthem. Theapplication that is generating the data, doesn’t actually know the preferredmethod of transportation over the channel, or the preferred path that the datawill take. The application just generates the information and passes it down tothe transportation layer. Referencing back to the TCP/IP model, the applicationlayer situates just above the transport layer and below that is the networklayers. The transport layer basically provides a method of delivering the datathat not only both ends can understand, but will be transmitted over thechannel without any issues. This layer provides the segmentation of data andall the controls needed to re-assemble the data on the other end.  Thereare two main transport layer protocols that are used to reassemble to data andthey are TCP (Transmission Control Protocol) and UDP (User Datagram Protocol).These protocols have responsibilities that are needed to ensure efficient datatransfer. These are of following: ·     Tracking the individual communication between the applications on thesource and destination ends. A host can be having multiple conversationsbetween each over so it is vital that the data is tracked and maintainedthroughout the conversation. ·     Segmenting the data so it is manageable for the application layer tohandle. Each network is different, in a sense that some networks can handlelarge pieces of information and some can only handle small pieces. So, it isthe transport layers role to decide on the size of packet that is to be sentover the channel in an appropriate size. The transport layer divides the datainto segments that are easier to manage and transport. ·     Identification is also needed, so it knows which application the datais being sent to and vice-versa. This is very similar to the tracking side, itbasically just gives the segment an identifier so it knows which application itis destined for. To do this, Ports are used to assign each application with adifferent port number so it is easily transported into the right Application.   Different Ports used Leadingon from the Identification process, each identifier is assigned a port number,and there is a vast range of port numbers which each range is dedicated tocertain uses. In the header of each segment, or even datagram, there is asection where a destination and source port numbers are entered to ensuredelivery. The protocols and services that are going to be needed are identifiedby the port number. Whenthe client places a destination number in the header, for example port 25,which is SMTP, the server then knows that the SMTP services are required andthat is the chosen transportation method. On the other end is the source port.Now this is actually randomly generated, allowing multiple conversations to bedone at once. Below is a list of Ports and their ranges: ·     Well Known Ports are 0 – 1023: So, this range is typically reserved forapplications and certain services, some of these services are HTTP, IMAP, SMTPand Telnet.  ·     Registered Ports are 1024 -49151: these are more user based, and areprimarily individual applications, that they have downloaded off their ownback. When these ports aren’t used for server responses, they can be selectedby a client itself, to be its source port. ·     Dynamic or Private Ports are 49152 – 65535: another word for theseports are Ephemeral ports. These ports are more or not only used when anidentification is needed of the client application during communication. It isvery unlikely for a client to connect to these ports, however some peer to peerfile sharing actually run using these ports. Afterthe source and destination ports have been put into the segment, the segmentsare then encapsulated into an IP packet. This packet contains the IP address ofthe source, and the destination. When combining these two addresses, they aregiven a name of ‘socket’. The socket is used to not only identify the sourceand destination addresses, but also the desired ports. Socket and Socket Pairs Referringback to the term ‘socket, this being the combination of an IP address and aPort number, this basically identifies exactly what application us being usedand where. Leading on from that is a ‘socket pair, pretty much exactly the sameas the originally socket term, however this time it’s both Destination andSource in the same packet. An example of a socket 192.168.1.6:25 and a socketpair is 192. 168.1.6:25 as well as 192. 168.1.5:25. Sockets allow for the processof multiple conversations between servers and can be distinguished from eachother.TCP Referringback to the beginning of this document and my brief explanation on what TCP,this section will be delving into more detail of this protocol. It is up to thedeveloper to choose which protocol best suits the requirement for theapplication. There are a few reasons why TCP is the chosen protocol to be usedas it provides the following: ·     Reliable delivery – this means TCP can use a method to ensure deliveryof the data. TCP does this be re transmitting any data that is lost during thetransmission process. ·     Ordered Data Reconstruction – as data tends to take different routesover the network each time it is transmitted, it is sometimes necessary thatthe data is reassembled in the same order it was sent. TCP does this bynumbering each segment with a sequence number, so at the other end it can beput back into order. ·     Flow control – each network has different bandwidth and memory, so flowcontrol is vital as if too much information is sent at once the whole bandwidthmay be taken up, not allowing for other data to pass. So, if this does occur,then the sending application is requested to reduce the amount of data flowing.TCP do this by regulating the amount of data allowed at any given time,therefore preventing segments being lost or corrupted. ·     Connection Orientated – TCP is a connection orientated protocol, so whenTCP establishes a connection between two applications, it is a permanentconnection. In doing this both sides are prepared to send and receive data,ensuring the data will arrive. When this session is established, it will alsotalk about all of the above, so the speed in which data will be sent, how itwill be sent and when.  Whyis it better than UDP? TCP Connection Establishment Soas stated before, TCP Establishes a connection before sending the data. There’sa term that is used to describe this process and it’s called the ‘3-wayhandshake’. This term is used to describe the series of numbers and processesdone by TCP to establish or terminate the session. So, the 3-way handshakeestablishes that the destination is actually present, it then verifies that thedestination device is going to accept thee data and which port it is going touse, it then informs the destination that the source is going to be sendingdata.   Below is a simple diagram to show how this isdone:          Reference: https://image.slidesharecdn.com/icnd110s01l05understandingthetcpiptransportlayer-130312094244-phpapp01/95/cours-cisco-12-638.jpg?cb=1363081403 So,in basic steps: Step1 – a TCP client initiates the start of the connection by requesting a sessionestablishment. It does this by sending a segment with the SYN Flag indicatingan initial value. This figure is randomly generated and is the start of thedata flow between the two source and destination.   Step2 – the receiving end then must Acknowledge the request by replying with theACK Flag set, this indicates that the receiving end has received the SYNSegment. The value of this ACK Flag is the sum of the SYN Flag plus 1, i. e. thenext bit. As this conversation is a two-way session, the receiving end must alsoreply with a SYN Flag to indicate it too is ready to send data.  Step3 – Similar to the first step, the TCP Client then sends back an ACK Flag,being the sum of the previous SYN Flag plus one. This is sent back to the receivingend to finally establish the session. After this, each segment sent back andforth will continue to have the ACK Flag set. Thereare other flags involved in this process, however they are not used in thisdiagram.  The flags are of following:  ·     URG – Urgent Pointer field·     ACK – Acknowledgment·     PSH – Push Function·     RST – Reset Connection·     SYN – Synchronize sequencenumbers·     FIN – No more data fromsenderUsingTCP and having the 3-way handshake implemented, there are a few extras thatcome with this, such as security. TCP implements security by denying the establishmentof certain TCP sessions and only allowing certain services to establishconnections, as well as only allowing traffic in already made connections. TCP Connection Termination              Reference: https://upload.wikimedia.org/wikipedia/commons/8/8a/TCP_connection_Termination. png  Afterestablishing a connection, this connection then needs to be terminated. This processis almost the same as Connection Establishment, apart from the obvious it beingterminating rather than creating, however, the process is similar. As the establishmentused the SYN Flag, this process uses the FIN Flag. The process is as followed: Step1 – when the client finishes sending data and has no more to send, it starts bysending the FIN flag set. Step2 – Host B will then send an ACK flag back to acknowledge the FIN flag that wassent to initiate toe termination.  Step3 – this is where the process is a little different, rather than the session justbe terminated, Host A will now wait to receive a FIN Flag from Host B, this isto ensure any data still being sent is processed and not just cut off.  Step4 – after all data is sent and processed, Host A will then send a ACK flag tolet Host B know that neither ends have more data to send, therefor the sessionwill be terminated.  Telnet Telnetis a TCP Protocol that is used to access remote computers on the same network. Onthe web protocols like HTTP and FTP allow the user to request specific files fromremote computers, but you don’t actually log on as a user. However, Telnetallows you to actually log on as a regular user and have any privileges theuser may have had been granted to that specific application or data on thatcomputer.  PDU As the data is passed downthe OSI or TCP/IP Stack, it has information added to it so it finds its destinationeasier. As it gathers this information, each layer the data is re-named. On theright is a diagram of how the data is named on each layer. The process of the informationbeing passed down the stacked is called encapsulation. Theform that the data takes at any of the layers in the stack is called a PDU(Protocol Data Unit). But at each level the PDU has a different name to stateits new function.  ‘Although there is nouniversal naming convention for PDUs, the PDUs and named according to theprotocols of the TCP/IP Suite. They are the following: ·     Data – the general term for thePDU used at the application layer.·     Segment – Transport LayerPDU·     Packet – Internet Layer PDU·     Frame – Network Access Layer·     Bits – A PDU used whenphysically transmitting data over the medium’ Reference: CISCOBOOK – PAGE 145                           ProtocolHeaders and what they areListof all flags and what they doWhatapplications use TCP? UDP Whatis it?Compareto TCPHeaders,their size and purposeErrorpackets Network Layer PDUis packetsRoutersRoleis end to end addressing and best path though network.3types – connectionless, best effort, media independentQoS IPV4 IPV6 Layer 2 LLC MAC                      Part 2 
__label__pos
0.824531
Unlimited Plugins, WordPress themes, videos & courses! Unlimited asset downloads! From $16.50/m Advertisement 1. Code 2. Corona SDK Code Getting Started with Lua and the Corona SDK by Difficulty:BeginnerLength:MediumLanguages: In this tutorial, you will learn the basics of the Lua programming language and I will help you getting started with writing applications for iOS and Android. Excited? Let's dive right in. Introduction In this tutorial, we will take a look at the Corona SDK and the Lua programming language. Even though Lua isn't difficult to pick up, it is recommended to have some experience with other languages, such as JavaScript, PHP, Java, or Ruby. We will cover the very basics of the Corona SDK and Lua to get you familiar with developing for the Corona platform. You will see that it takes very little effort and code to get up and running. I'm ready when you are. 1. Introducing Lua and the Corona SDK In the past, mobile developers faced a difficult predicament. Should they develop applications for iOS or Android? Most iOS developers use Objective-C, while Android developers use Java. Fortunately, we've got the Corona SDK and the Lua programming language, which enables cross platform mobile development. To put it simply, it means you can develop an application once and build it for iOS, Android, Kindle, and Nook. The programming language we use when developing with the Corona SDK is Lua, which is moon in Portuguese. One of the main beneits of Lua, especially in combination with the Corona SDK, is that Lua is cross-platform as the language is written in C. Lua is not difficult to learn as you'll find out in this tutorial. Lua was created in 1993 by a small group of people at the Pontifical Catholic University of Rio de Janeiro, Brazil. Lua is open source software so you can freely use it in your projects. It is distributed under the MIT license. The Corona SDK is developed and maintained by Corona Labs and is a commercial platform. There are several pricing plans to choose from. There's a free starter plan and paid plans starting from $19 per month. For this tutorial, however, we'll be using the starter plan. Even though you can develop Corona applications on several platforms, in this tutorial I'll show you how to build applications using Windows and we'll build for the Android platform. 2. Setting Up Lua and the Corona SDK Are you ready to get started with cross platform mobile development? Visit the developer portal of the Corona SDK, create an account, and download the Corona SDK. As I mentioned, in this tutorial I'll be using Windows, but you can follow along on OS X just as well. After the installation of the Corona SDK, open the Start menu and navigate to Corona SDK > Corona Simulator. You should see two windows as shown below. 3. Text Editors Now that we have the Corona Simulator up and running, we need to get a text editor to write and edit Lua. I recommend Sublime Text 2, which is a free download. It's a great and popular text editor that support syntax highlighting and a boatload of other useful features. This is especially useful if you're writing large and complex applications. It supports Lua along with 43 other programming languages. Did I tell you Sublime Text if available on Windows, OS X as well as Linux? You can't go wrong with Sublime Text 2. 4. Writing a Hello World Application Head back to the Corona Simulator, hit New Project, and choose a directory to store your project's files in. Select Blank as the project's template, Phone Preset for Upright Screen Size, and Upright as the Default Orientation. Click OK to finalize the project setup and navigate to the directory where you created your new project. You should find three files, build.settings, config.lua, and main.lua. The only file we'll need to edit is Main.lua. Open this file with your editor of choice and replace the file's contents with the code snippet below. Save the changes by pressing Ctrl+S and open the Corona Simulator window. Wait a minute. Nothing happened. That's perfectly normal. We need to check the other window that looks like command prompt. It should display Hello World! as shown below. The reason why the text was only displayed in the Corona Terminal and not in the Corona Simulator is, because the print command is only used for the Lua programming language. It cannot be used to display the words on the screen of the Corona Simulator or a physical device. However, this basic print command will still be useful when we develop application, especially for debugging purposes. 5. Hello World - Take 2 We're going to create another Hello World! application. However, this time, we'll make it display the words in the Corona Simulator itself. Delete the contents of main.lua and replace it with the code snippet shown below. You may have noticed that this snippet was a bit longer than the previous one. Let's see what this piece of code does for us. • display is the object we're talking to. • newText is the function that we use to display the text on the screen. • "Hello@ World!" is the text we want to display. • 0, 0 are the x and y coordinates respectively. • native.systemFont is the font we use for the text and 16 is the font's size. If you save the changes and relaunch the Corona Simulator, you should see the following. 6. Variables and Math What if you wanted to store a number as a variable for later use? The following code snippet shows how variables are declared in Lua. • local is the keyword for declaring a variable. • num1 is the name of the variable. If we combine this with the previous code snippet, we get the following. The Corona Simulator should now display the number 6, which is the result of adding 3 and 3. Let's try another example using math. Using the following code snippet, the Corona Simulator should display the number 18. As you can see, it is perfectly possible to perform mathematical operations on a variable. In the above code snippet, we multiplied num1 by 3 using * 3. I'm sure you've already figured out that the asterisk is the multiplication operator in Lua. • + for addition • - for subtraction and negative numbers • * for multiplication • / for division 7. Images Displaying images isn't difficult either. To display an image, you need to add the image to the directory where main.lua sits. It is fine to create a subdirectory to keep the project's resources separated and organized. Let's do that now. Create a new directory in your project folder and name it images. Use the images directory to store your project's images. The image I'd like to use for this tutorial is logo1.png and I've placed it in the images directory we created a moment ago. As you can see in the following code snippet, displaying an image is almost as easy as displaying text. I dare to say it's even easier as you don't need to specify a font. 8. Status Bar If you look closely at the previous screenshot, you'll notice that there's a status bar at the top of the screen displaying the carrier, battery life, etc. Have you ever noticed that sometimes, when you open an application, games in particular, the status bar automatically disappears? Hiding the status bar is as simple as adding one line of code to main.lua. It's a simple as that. Update your project and take a look at the result in the Corona Simulator. It is useful to know that the status bar can have different styles. The names of the styles speak for themselves. For many applications, especially games, using HiddenStatusBar is most suitable. 9. Rectangles, Borders, and Colors Let's move on with shapes. The first shape we'll display is a rectangle. Let's see what it takes to display a rectangle on the screen. • local rect1 declares a variable for the rectangle. • display.newRectcreates the rectangle shape. • (10, 20, 150, 50) define the x and y coordinates and the width and height, respectively. Let's add some color to the rectangle. Hmmm. What does this mean? • rect1 is the variable we declared earlier. • setFillColor is the method we use to fill the rectangle with a color. • (51, 255, 0) specify the red (51), green (255), and blue (0) value of the color we use. Let's expand this example with a border or stroke as shown in the following code snippet. • rect1.strokeWidth = 8 sets the strokeWidth property of the rectangle to 8. • rect1.setStrokeColor(80, 200, 130) sets the strokeColor property of the rectangle to the color specified by the values 80, 200, and 130 as we saw earlier. 10. Comments Comments may seem trivial and even obsolete, but they are important and even more so when you work in a team. Comments are very useful for documenting code and this applies both for you as your colleagues. It improves the readability of code for other people in your team. In Lua, comments are easy to use. Take a look at the following example. Comments have no effect on your application in terms of how it works. They are only there for the developer. The following code snippet will not print Hello World! to the terminal. You can also write comments that span several lines, which is useful if you need to explain how a particularly complex piece of code works or if you want to write an introduction to an application or project. Conclusion In this tutorial, you've learned the basics of Lua and the Corona SDK. We installed and set up the Corona SDK, downloaded and used a text editor for editing Lua, wrote several applications, and ran them in the Corona Simulator. We also learned how to use variables to store pieces of data, display images on the screen, configure the status bar, and draw shapes to the screen. And, last but not least, we saw how to use comments and why you should use comments in your code. I hope that this tutorial was helpful to you. Stay tuned for more. If you'd like to learn more about the Corona SDK, I recommend visiting the Corona Labs developer website. It is filled with resources and guides to get you started. You can also explore the example applications that are included in the Corona SDK that you downloaded and installed earlier. The Lua programming language also has its own website. It contains everything you need to know about the language, including a getting started guide and a very, very detailed manual. Make sure to visit the Lua website if you decide to continue with Corona development. Advertisement Advertisement Advertisement Advertisement Looking for something to help kick start your next project? Envato Market has a range of items for sale to help get you started.
__label__pos
0.64205
Main Content Control the Scope of Delay Balancing This example shows how to balance delays in specific parts of a design, without balancing delays on the entire design. Introduction You can use the BalanceDelays option to balance the additional delays introduced by HDL Coder™ for certain block implementations and optimizations. This model-level option controls delay balancing for the entire model. However, for certain designs, you may want to balance delays in only some parts of the design. For example, in a design containing a data path and a control path, delay balancing should be applied only on the data path of the design, i.e., the paths requiring data synchronization. This example shows how to use a subsystem-level BalanceDelays option provides fine-grained control on how HDL Coder balances delays in individual subsystems. We use two examples to demonstrate the use of this subsystem-level feature: 1. hdlcoder_localdelaybalancing.slx shows how to disable delay balancing on user-defined control paths. 2. hdlcoder_localdelaybalancing_sharing.slx shows how the user can apply HDL optimizations like resource sharing in the presence of complicated control paths that require carefully constrained delay balancing. Example 1: Constraining Delay Balancing to the data path The example model, hdlcoder_localdelaybalancing.slx, has two subsystems under hdlcoder_localdelaybalancing/Subsystem: param_control and symmetric_fir, containing the control logic and the data path, respectively. bdclose('all'); open_system('hdlcoder_localdelaybalancing'); open_system('hdlcoder_localdelaybalancing/Subsystem'); Each subsystem has one block that has one output pipeline register to achieve good timing results. hdldispblkparams('hdlcoder_localdelaybalancing/Subsystem/param_control/And'); hdldispblkparams('hdlcoder_localdelaybalancing/Subsystem/symmetric_fir/Add'); %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% HDL Block Parameters ('hdlcoder_localdelaybalancing/Subsystem/param_control/And') %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Implementation Architecture : default Implementation Parameters OutputPipeline : 1 %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% HDL Block Parameters ('hdlcoder_localdelaybalancing/Subsystem/symmetric_fir/Add') %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% Implementation Architecture : Linear Implementation Parameters OutputPipeline : 1 When the global, model-level BalanceDelays option is set to on, then delay balancing inserts matching delays on both the control path as well as the data path, as shown in the validation model. hdlset_param('hdlcoder_localdelaybalancing', 'BalanceDelays', 'on'); hdlset_param('hdlcoder_localdelaybalancing', 'GenerateValidationModel', 'on'); makehdl('hdlcoder_localdelaybalancing/Subsystem'); load_system('gm_hdlcoder_localdelaybalancing_vnl'); set_param('gm_hdlcoder_localdelaybalancing_vnl', 'SimulationCommand', 'update'); open_system('gm_hdlcoder_localdelaybalancing_vnl/Subsystem/param_control'); ### Generating HDL for 'hdlcoder_localdelaybalancing/Subsystem'. ### Using the config set for model <a href="matlab:configset.showParameterGroup('hdlcoder_localdelaybalancing', { 'HDL Code Generation' } )">hdlcoder_localdelaybalancing</a> for HDL code generation parameters. ### Running HDL checks on the model 'hdlcoder_localdelaybalancing'. ### Begin compilation of the model 'hdlcoder_localdelaybalancing'... ### Applying HDL optimizations on the model 'hdlcoder_localdelaybalancing'... ### The code generation and optimization options you have chosen have introduced additional pipeline delays. ### The delay balancing feature has automatically inserted matching delays for compensation. ### The DUT requires an initial pipeline setup latency. Each output port experiences these additional delays. ### Output port 1: 1 cycles. ### Output port 2: 1 cycles. ### Output port 3: 1 cycles. ### Begin model generation. ### Model generation complete. ### Generating new validation model: <a href="matlab:open_system('gm_hdlcoder_localdelaybalancing_vnl')">gm_hdlcoder_localdelaybalancing_vnl</a>. ### Validation model generation complete. ### Begin VHDL Code Generation for 'hdlcoder_localdelaybalancing'. ### Working on hdlcoder_localdelaybalancing/Subsystem/param_control/params as hdlsrc/hdlcoder_localdelaybalancing/params.vhd. ### Working on hdlcoder_localdelaybalancing/Subsystem/param_control as hdlsrc/hdlcoder_localdelaybalancing/param_control.vhd. ### Working on hdlcoder_localdelaybalancing/Subsystem/symmetric_fir as hdlsrc/hdlcoder_localdelaybalancing/symmetric_fir.vhd. ### Working on hdlcoder_localdelaybalancing/Subsystem as hdlsrc/hdlcoder_localdelaybalancing/Subsystem.vhd. ### Code Generation for 'hdlcoder_localdelaybalancing' completed. ### Creating HDL Code Generation Check Report file:///tmp/Bdoc22a_1891349_33695/tpcf9b635c/ex13756842/hdlsrc/hdlcoder_localdelaybalancing/Subsystem_report.html ### HDL check for 'hdlcoder_localdelaybalancing' complete with 0 errors, 0 warnings, and 0 messages. ### HDL code generation complete. In this example design, only the data path, symmetric_fir, requires data synchronization. The outputs from param_control are coefficients to the FIR filter and do not have to synchronize with each other or with the processed data. Turning off delay balancing on the control logic therefore saves resources. In order to achieve this, the model-level BalanceDelays option must be off, and the subsystem-level BalanceDelays options must be set appropriately on the data path and control path. hdlset_param('hdlcoder_localdelaybalancing', 'BalanceDelays', 'off'); hdlset_param('hdlcoder_localdelaybalancing/Subsystem/param_control', 'BalanceDelays', 'off'); hdlset_param('hdlcoder_localdelaybalancing/Subsystem/symmetric_fir', 'BalanceDelays', 'on'); Now when HDL code is generated, delay balancing is only active in the data path subsystem and does not insert any delays in the control path subsystem. bdclose('gm_hdlcoder_localdelaybalancing_vnl'); makehdl('hdlcoder_localdelaybalancing/Subsystem'); load_system('gm_hdlcoder_localdelaybalancing_vnl'); set_param('gm_hdlcoder_localdelaybalancing_vnl', 'SimulationCommand', 'update'); open_system('gm_hdlcoder_localdelaybalancing_vnl/Subsystem/param_control'); ### Generating HDL for 'hdlcoder_localdelaybalancing/Subsystem'. ### Using the config set for model <a href="matlab:configset.showParameterGroup('hdlcoder_localdelaybalancing', { 'HDL Code Generation' } )">hdlcoder_localdelaybalancing</a> for HDL code generation parameters. ### Running HDL checks on the model 'hdlcoder_localdelaybalancing'. ### Begin compilation of the model 'hdlcoder_localdelaybalancing'... ### Applying HDL optimizations on the model 'hdlcoder_localdelaybalancing'... ### Begin model generation. ### Model generation complete. ### Generating new validation model: <a href="matlab:open_system('gm_hdlcoder_localdelaybalancing_vnl')">gm_hdlcoder_localdelaybalancing_vnl</a>. ### Validation model generation complete. ### Begin VHDL Code Generation for 'hdlcoder_localdelaybalancing'. ### Working on hdlcoder_localdelaybalancing/Subsystem/param_control/params as hdlsrc/hdlcoder_localdelaybalancing/params.vhd. ### Working on hdlcoder_localdelaybalancing/Subsystem/param_control as hdlsrc/hdlcoder_localdelaybalancing/param_control.vhd. ### Working on hdlcoder_localdelaybalancing/Subsystem/symmetric_fir as hdlsrc/hdlcoder_localdelaybalancing/symmetric_fir.vhd. ### Working on hdlcoder_localdelaybalancing/Subsystem as hdlsrc/hdlcoder_localdelaybalancing/Subsystem.vhd. ### Code Generation for 'hdlcoder_localdelaybalancing' completed. ### Creating HDL Code Generation Check Report file:///tmp/Bdoc22a_1891349_33695/tpcf9b635c/ex13756842/hdlsrc/hdlcoder_localdelaybalancing/Subsystem_report.html ### HDL check for 'hdlcoder_localdelaybalancing' complete with 0 errors, 0 warnings, and 2 messages. ### HDL code generation complete. Notice that simulating the validation model now shows mismatches, because the validation model does not compensate for latency inserted by optimizations or block implementations. Example 2: Localized Delay Balancing and Resource Sharing The resource sharing optimization saves area usage in the final HDL implementation, at the cost of introducing a cycle of latency for each sharing group. This additional latency is usually balanced during delay balancing so that the numerics and functionality of the algorithm are preserved. One of the restrictions of resource sharing is that it cannot be applied on a subsystem within a feedback loop. Thus, if resource sharing is specified for a subsystem within a loop, then the optimization will fail. You can observe this in hdlcoder_localdelaybalancing_sharing.slx, where hdlcoder_localdelaybalancing_sharing/Subsystem/Subsystem is within a feedback loop. bdclose('all'); load_system('hdlcoder_localdelaybalancing_sharing'); open_system('hdlcoder_localdelaybalancing_sharing/Subsystem'); However, in this design, you may know that the feedback loop is rarely used since the control signal causes the switch block, hdlcoder_localdelaybalancing_sharing/Subsystem/Subsystem/Switch, to choose the top input, the feed-forward path, most of the time. This user insight implies that it is fine to go ahead with resource sharing in this subsystem and disregard the feedback loop in the parent subsystem. In such cases, if you wish to ignore feedback loops during delay balancing, you must turn off delay balancing in the subsystem containing the feedback loop. This enables HDL Coder (TM) to ignore the feedback loop and proceed with resource sharing. hdlset_param('hdlcoder_localdelaybalancing_sharing', 'BalanceDelays', 'off'); hdlset_param('hdlcoder_localdelaybalancing_sharing/Subsystem', 'BalanceDelays', 'off'); hdlset_param('hdlcoder_localdelaybalancing_sharing/Subsystem/Subsystem', 'BalanceDelays', 'on'); makehdl('hdlcoder_localdelaybalancing_sharing/Subsystem'); load_system('gm_hdlcoder_localdelaybalancing_sharing'); set_param('gm_hdlcoder_localdelaybalancing_sharing_vnl', 'SimulationCommand', 'update'); ### Generating HDL for 'hdlcoder_localdelaybalancing_sharing/Subsystem'. ### Using the config set for model <a href="matlab:configset.showParameterGroup('hdlcoder_localdelaybalancing_sharing', { 'HDL Code Generation' } )">hdlcoder_localdelaybalancing_sharing</a> for HDL code generation parameters. ### Running HDL checks on the model 'hdlcoder_localdelaybalancing_sharing'. ### Begin compilation of the model 'hdlcoder_localdelaybalancing_sharing'... ### Applying HDL optimizations on the model 'hdlcoder_localdelaybalancing_sharing'... ### Begin model generation. ### Model generation complete. ### Generating new validation model: <a href="matlab:open_system('gm_hdlcoder_localdelaybalancing_sharing_vnl')">gm_hdlcoder_localdelaybalancing_sharing_vnl</a>. ### Validation model generation complete. ### Begin VHDL Code Generation for 'hdlcoder_localdelaybalancing_sharing'. ### MESSAGE: The design requires 2 times faster clock with respect to the base rate = 0.1. ### Working on hdlcoder_localdelaybalancing_sharing/Subsystem/Subsystem as hdlsrc/hdlcoder_localdelaybalancing_sharing/Subsystem_block.vhd. ### Working on Subsystem_tc as hdlsrc/hdlcoder_localdelaybalancing_sharing/Subsystem_tc.vhd. ### Working on hdlcoder_localdelaybalancing_sharing/Subsystem as hdlsrc/hdlcoder_localdelaybalancing_sharing/Subsystem.vhd. ### Generating package file hdlsrc/hdlcoder_localdelaybalancing_sharing/Subsystem_pkg.vhd. ### Code Generation for 'hdlcoder_localdelaybalancing_sharing' completed. ### Creating HDL Code Generation Check Report file:///tmp/Bdoc22a_1891349_33695/tpcf9b635c/ex13756842/hdlsrc/hdlcoder_localdelaybalancing_sharing/Subsystem_report.html ### HDL check for 'hdlcoder_localdelaybalancing_sharing' complete with 0 errors, 0 warnings, and 2 messages. ### HDL code generation complete. Notice that not only does sharing succeed in the inner subsystem, but local delay balancing also succeeds within this subsystem by inserting matching delays on the inputs to the adder. open_system('gm_hdlcoder_localdelaybalancing_sharing_vnl/Subsystem/Subsystem');
__label__pos
0.978567
1 Star 0 Fork 506 未名企鹅 / jcseg forked from 狮子的魂 / jcseg  加入 Gitee 与超过 600 万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :) 免费加入 克隆/下载 取消 提示: 由于 Git 不支持空文件夾,创建文件夹后会生成空的 .keep 文件 Loading... README.md Jcseg logo Jcseg是什么? Jcseg是基于mmseg算法的一个轻量级中文分词器,同时集成了关键字提取,关键短语提取,关键句子提取和文章自动摘要等功能,并且提供了一个基于Jetty的web服务器,方便各大语言直接http调用,同时提供了最新版本的lucene, solr, elasticsearch的分词接口!Jcseg自带了一个 jcseg.properties文件用于快速配置而得到适合不同场合的分词应用,例如:最大匹配词长,是否开启中文人名识别,是否追加拼音,是否追加同义词等! Jcseg核心功能: • 中文分词:mmseg算法 + Jcseg 独创的优化算法,四种切分模式。 • 关键字提取:基于textRank算法。 • 关键短语提取:基于textRank算法。 • 关键句子提取:基于textRank算法。 • 文章自动摘要:基于BM25+textRank算法。 • 自动词性标注:基于词库+(统计歧义去除计划),目前效果不是很理想,对词性标注结果要求较高的应用不建议使用。 • 命名实体标注:基于词库+(统计歧义去除计划),电子邮件,网址,大陆手机号码,地名,人名,货币,datetime时间,长度,面积,距离单位等。 • Restful api:嵌入jetty提供了一个绝对高性能的server模块,包含全部功能的http接口,标准化json输出格式,方便各种语言客户端直接调用。 Jcseg中文分词: 六种切分模式: • (1).简易模式:FMM算法,适合速度要求场合。 • (2).复杂模式:MMSEG四种过滤算法,具有较高的歧义去除,分词准确率达到了98.41%。 • (3).检测模式:只返回词库中已有的词条,很适合某些应用场合。 • (4).检索模式:细粒度切分,专为检索而生,除了中文处理外(不具备中文的人名,数字识别等智能功能)其他与复杂模式一致(英文,组合词等)。 • (5).分隔符模式:按照给定的字符切分词条,默认是空格,特定场合的应用。 • (6).NLP模式:继承自复杂模式,更改了数字,单位等词条的组合方式,增加电子邮件,大陆手机号码,网址,人名,地名,货币等以及无限种自定义实体的识别与返回。 1. 支持自定义词库。在lexicon文件夹下,可以随便添加/删除/更改词库和词库内容,并且对词库进行了分类。 2. 支持词库多目录加载. 配置lexicon.path中使用';'隔开多个词库目录. 3. 词库分为简体/繁体/简繁体混合词库: 可以专门适用于简体切分, 繁体切分, 简繁体混合切分, 并且可以利用下面提到的同义词实现,简繁体的相互检索, Jcseg同时提供了词库两个简单的词库管理工具来进行简繁体的转换和词库的合并。 4. 中英文同义词追加/同义词匹配 + 中文词条拼音追加.词库整合了《现代汉语词典》和cc-cedict辞典中的词条,并且依据cc-cedict词典为词条标上了拼音,依据《中华同义词词典》为词条标上了同义词(尚未完成)。更改jcseg.properties配置文档可以在分词的时候加入拼音和同义词到分词结果中。 5. 中文数字和中文分数识别,例如:"一百五十个人都来了,四十分之一的人。"中的"一百五十"和"四十分之一"。并且 Jcseg会自动将其转换为阿拉伯数字加入到分词结果中。如:150, 1/40。 6. 支持中英混合词和英中混合词的识别(维护词库可以识别任何一种组合)。例如:B超, x射线, 卡拉ok, 奇都ktv, 哆啦a梦。 7. 更好的英文支持,电子邮件,域名,小数,分数,百分数,字母和标点组合词(例如C++, c#)的识别。 8. 自定义切分保留标点. 例如: 保留&, 就可以识别k&r这种复杂词条。 9. 复杂英文切分结果的二次切分: 可以保留原组合,同时可以避免复杂切分带来的检索命中率下降的情况,例如QQ2013会被切分成: qq2013/ qq/ 2013, [email protected]会被切分成: [email protected]/ chenxin/ 619315/ gmail/ com。 10. 支持阿拉伯数字/小数/中文数字基本单字单位的识别,例如2012年,1.75米,38.6℃,五折,并且 Jcseg会将其转换为“5折”加入分词结果中。 11. 智能圆角半角, 英文大小写转换。 12. 特殊字母识别:例如:Ⅰ,Ⅱ;特殊数字识别:例如:①,⑩。 13. 配对标点内容提取:例如:最好的Java书《java编程思想》,‘畅想杯黑客技术大赛’,被《,‘,“,『标点标记的内容。(1.6.8版开始支持)。 14. 智能中文人名/外文翻译人名识别。中文人名识别正确率达94%以上。(中文人名可以维护lex-lname.lex,lex-dname-1.lex,lex-dname-2.lex来提高准确率),(引入规则和词性后会达到98%以上的识别正确率)。 15. 自动中英文停止词过滤功能(需要在jcseg.properties中开启该选项,lex-stopwords.lex为停止词词库)。 16. 词库更新自动加载功能, 开启一个守护线程定时的检测词库的更新并且加载(注意需要有对应词库目录下的的lex-autoload.todo文件的写入权限)。 17. 自动词性标注(目前基于词库)。 18. 自动实体的识别,默认支持:电子邮件,网址,大陆手机号码,地名,人名,货币等;词库中可以自定义各种实体并且再切分中返回。 Jcseg快速体验: 终端测试: 1. cd到 Jcseg根目录。 2. ant all(或者使用maven编译) 3. 运行:java -jar jcseg-core-{version}.jar 4. 你将看到如下的终端界面 5. 在光标处输入文本开始测试(输入:seg_mode参数切换可以体验各种切分算法) +--------Jcseg chinese word tokenizer demo---------------+ |- @Author chenxin<[email protected]> | |- :seg_mode : switch to specified tokenizer mode. | |- (:complex,:simple,:search,:detect,:delimiter,:NLP) | |- :keywords : switch to keywords extract mode. | |- :keyphrase : switch to keyphrase extract mode. | |- :sentence : switch to sentence extract mode. | |- :summary : switch to summary extract mode. | |- :help : print this help menu. | |- :quit : to exit the program. | +--------------------------------------------------------+ jcseg~tokenizer:complex>> 测试样板: 分词文本 歧义和同义词:研究生命起源,混合词: 做B超检查身体,x射线本质是什么,今天去奇都ktv唱卡拉ok去,哆啦a梦是一个动漫中的主角,单位和全角: 2009年8月6日开始大学之旅,岳阳今天的气温为38.6℃, 也就是101.48℉, 中文数字/分数: 你分三十分之二, 小陈拿三十分之五,剩下的三十分之二十三全部是我的,那是一九九八年前的事了,四川麻辣烫很好吃,五四运动留下的五四精神。笔记本五折包邮亏本大甩卖。人名识别: 我是陈鑫,也是jcseg的作者,三国时期的诸葛亮是个天才,我们一起给刘翔加油,罗志高兴奋极了因为老吴送了他一台笔记本。外文名识别:冰岛时间7月1日,正在当地拍片的汤姆·克鲁斯通过发言人承认,他与第三任妻子凯蒂·赫尔墨斯(第一二任妻子分别为咪咪·罗杰斯、妮可·基德曼)的婚姻即将结束。配对标点: 本次『畅想杯』黑客技术大赛的得主为电信09-2BF的张三,奖励C++程序设计语言一书和【畅想网络】的『PHP教程』一套。特殊字母: 【Ⅰ】(Ⅱ),英文数字: bug report [email protected] or visit http://code.google.com/p/jcseg, we all admire the hacker spirit!特殊数字: ① ⑩ ⑽ ㈩. 分词结果: 歧义/n 和/o 同义词/n :/w 研究/vn 琢磨/vn 研讨/vn 钻研/vn 生命/n 起源/n ,/w 混合词 :/w 做/v b超/n 检查/vn 身体/n ,/w x射线/n x光线/n 本质/n 是/a 什么/n ,/w 今天/t 去/q 奇都ktv/nz 唱/n 卡拉ok/nz 去/q ,/w 哆啦a梦/nz 是/a 一个/q 动漫/n 中/q 的/u 主角/n ,/w 单位/n 和/o 全角/nz :/w 2009年/m 8月/m 6日/m 开始/n 大学/n 之旅 ,/w 岳阳/ns 今天/t 的/u 气温/n 为/u 38.6℃/m ,/w 也就是/v 101.48℉/m ,/w 中文/n 国语/n 数字/n //w 分数/n :/w 你/r 分/h 三十分之二/m ,/w 小陈/nr 拿/nh 三十分之五/m ,/w 剩下/v 的/u 三十分之二十三/m 全部/a 是/a 我的/nt ,/w 那是/c 一九九八年/m 1998年/m 前/v 的/u 事/i 了/i ,/w 四川/ns 麻辣烫/n 很/m 好吃/v ,/w 五四运动/nz 留下/v 的/u 五四/m 54/m 精神/n 。/w 笔记本/n 五折/m 5折/m 包邮 亏本/v 大甩卖 甩卖 。/w 人名/n 识别/v :/w 我/r 是/a 陈鑫/nr ,/w 也/e 是/a jcseg/en 的/u 作者/n ,/w 三国/mq 时期/n 的/u 诸葛亮/nr 是个 天才/n ,/w 我们/r 一起/d 给/v 刘翔/nr 加油/v ,/w 罗志高/nr 兴奋/v 极了/u 因为/c 老吴/nr 送了 他/r 一台 笔记本/n 。/w 外文/n 名/j 识别/v :/w 冰岛/ns 时间/n 7月/m 1日/m ,/w 正在/u 当地/s 拍片/vi 的/u 汤姆·克鲁斯/nr 阿汤哥/nr 通过/v 发言人/n 承认/v ,/w 他/r 与/u 第三/m 任/q 妻子/n 凯蒂·赫尔墨斯/nr (/w 第一/a 二/j 任/q 妻子/n 分别为 咪咪·罗杰斯/nr 、/w 妮可·基德曼/nr )/w 的/u 婚姻/n 即将/d 结束/v 。/w 配对/v 标点/n :/w 本次/r 『/w 畅想杯/nz 』/w 黑客/n 技术/n 大赛/vn 的/u 得主/n 为/u 电信/nt 09/en -/w bf/en 2bf/en 的/u 张三/nr ,/w 奖励/vn c++/en 程序设计/gi 语言/n 一书/ns 和/o 【/w 畅想网络/nz 】/w 的/u 『/w PHP教程/nz 』/w 一套/m 。/w 特殊/a 字母/n :/w 【/w Ⅰ/nz 】/w (/w Ⅱ/m )/w ,/w 英文/n 英语/n 数字/n :/w bug/en report/en chenxin/en 619315/en gmail/en com/en [email protected]/en or/en visit/en http/en :/w //w //w code/en google/en com/en code.google.com/en //w p/en //w jcseg/en ,/w we/en all/en admire/en appreciate/en like/en love/en enjoy/en the/en hacker/en spirit/en mind/en !/w 特殊/a 数字/n :/w ①/m ⑩/m ⑽/m ㈩/m ./w Jcseg Maven仓库: Jcseg从1.9.8才开始上传到了maven仓库! 1. jcseg-core: <dependency> <groupId>org.lionsoul</groupId> <artifactId>jcseg-core</artifactId> <version>2.2.0</version> </dependency> 1. jcseg-analyzer (lucene或者solr): <dependency> <groupId>org.lionsoul</groupId> <artifactId>jcseg-analyzer</artifactId> <version>2.2.0</version> </dependency> 1. jcseg-elasticsearch <dependency> <groupId>org.lionsoul</groupId> <artifactId>jcseg-elasticsearch</artifactId> <version>2.2.0</version> </dependency> 1. jcseg-server (独立的应用服务器) <dependency> <groupId>org.lionsoul</groupId> <artifactId>jcseg-server</artifactId> <version>2.2.0</version> </dependency> Jcseg lucene分词接口: 1. 导入jcseg-core-{version}.jar和jcseg-analyzer-{version}.jar 2. demo代码: //lucene 5.x //Analyzer analyzer = new JcsegAnalyzer5X(JcsegTaskConfig.COMPLEX_MODE); //available constructor: since 1.9.8 //1, JcsegAnalyzer5X(int mode) //2, JcsegAnalyzer5X(int mode, String proFile) //3, JcsegAnalyzer5X(int mode, JcsegTaskConfig config) //4, JcsegAnalyzer5X(int mode, JcsegTaskConfig config, ADictionary dic) //lucene 4.x版本 //Analyzer analyzer = new JcsegAnalyzer4X(JcsegTaskConfig.COMPLEX_MODE); //lucene 6.3.0以及以上版本 Analyzer analyzer = new JcsegAnalyzer(JcsegTaskConfig.COMPLEX_MODE); //available constructor: //1, JcsegAnalyzer(int mode) //2, JcsegAnalyzer(int mode, String proFile) //3, JcsegAnalyzer(int mode, JcsegTaskConfig config) //4, JcsegAnalyzer(int mode, JcsegTaskConfig config, ADictionary dic) //非必须(用于修改默认配置): 获取分词任务配置实例 JcsegAnalyzer jcseg = (JcsegAnalyzer) analyzer; JcsegTaskConfig config = jcseg.getTaskConfig(); //追加同义词, 需要在 jcseg.properties中配置jcseg.loadsyn=1 config.setAppendCJKSyn(true); //追加拼音, 需要在jcseg.properties中配置jcseg.loadpinyin=1 config.setAppendCJKPinyin(); //更多配置, 请查看 org.lionsoul.jcseg.tokenizer.core.JcsegTaskConfig Jcseg solr分词接口: 1. 将jcseg-core-{version}.jar和jcseg-analyzer-{version}.jar 复制到solr 的类库目录中。 2. 在solr的scheme.xml加入如下两种配置之一: <!-- 复杂模式分词: --> <fieldtype name="textComplex" class="solr.TextField"> <analyzer> <tokenizer class="org.lionsoul.jcseg.analyzer.JcsegTokenizerFactory" mode="complex"/> </analyzer> </fieldtype> <!-- 简易模式分词: --> <fieldtype name="textSimple" class="solr.TextField"> <analyzer> <tokenizer class="org.lionsoul.jcseg.analyzer.JcsegTokenizerFactory" mode="simple"/> </analyzer> </fieldtype> <!-- 检测模式分词: --> <fieldtype name="textDetect" class="solr.TextField"> <analyzer> <tokenizer class="org.lionsoul.jcseg.analyzer.JcsegTokenizerFactory" mode="detect"/> </analyzer> </fieldtype> <!-- 检索模式分词: --> <fieldtype name="textSearch" class="solr.TextField"> <analyzer> <tokenizer class="org.lionsoul.jcseg.analyzer.JcsegTokenizerFactory" mode="search"/> </analyzer> </fieldtype> <!-- NLP模式分词: --> <fieldtype name="textSearch" class="solr.TextField"> <analyzer> <tokenizer class="org.lionsoul.jcseg.analyzer.JcsegTokenizerFactory" mode="nlp"/> </analyzer> </fieldtype> <!-- 空格分隔符模式分词: --> <fieldtype name="textSearch" class="solr.TextField"> <analyzer> <tokenizer class="org.lionsoul.jcseg.analyzer.JcsegTokenizerFactory" mode="delimiter"/> </analyzer> </fieldtype> 备注: 1. 如果使用的是solr-4.x版本,请下载v1.9.7-release tag下的源码编译得到对应的jar,然后将上述xml中的v5x改成v4x即可。 2. 如果是使用的是solr-6.3.0以下版本,JcsegTokenizerFactory包名路径为:org.lionsoul.jcseg.analyzer.v5x.JcsegTokenizerFactory Jcseg elasticsearch接口: elasticsearch.version < 2.x (Not sure) 1. 下载最新版本的 Jcseg源码。 2. 使用maven或者ant编译打包得到 Jcseg的系列jar包(建议使用maven,ant需要自己下载对应的依赖包)。 3. 拷贝jcseg-analyzer-{version}.jar,jcseg-core-{version}.jar,jcseg-elasticsearch-{version}.jar到{ES_HOME}/plugins/analysis-jcseg目录下(自己建立该文件夹,如果不存在)。 4. 拷贝一份jcseg.properties到{ES_HOME}/config/jcseg目录下(自己建立该文件夹,如果不存在)。 5. 配置好jcseg.properties,尤其是配置lexicon.path指向正确的词库(或者将jcseg目录下的lexicon文件夹拷贝到{ES_HOME}/plugins/jcseg目录下)。 6. 参考下载的源码中的 jcseg-elasticsearch 项目下的 config/elasticsearch.yml 配置文件,将对应的配置加到{ES_HOME}/config/elasticsearch.yml中去。 7. 配置elasticsearch.yml或者mapping来使用 Jcseg分词插件(或者在query中指定)。 elasticsearch.version >= 2.x 1. 下载最新版本的 Jcseg源码。 2. 使用maven或者ant编译打包得到 Jcseg的系列jar包(建议使用maven,ant需要自己下载对应的依赖包)。 3. 拷贝jcseg-analyzer-{version}.jar,jcseg-core-{version}.jar,jcseg-elasticsearch-{version}.jar到{ES_HOME}/plugins/jcseg目录下(自己建立该文件夹,如果不存在)。 4. 拷贝一份jcseg.properties到{ES_HOME}/plugins/jcseg目录下(自己建立该文件夹,如果不存在)。 5. 拷贝一份jcseg-elasticsearch/plugin/plugin-descriptor.properties到{ES_HOME}/plugins/jcseg目录下(自己建立该文件夹,如果不存在)。 6. 配置好jcseg.properties,尤其是配置lexicon.path指向正确的词库(或者将jcseg目录下的lexicon文件夹拷贝到{ES_HOME}/plugins/jcseg目录下)。 7. 参考下载的源码中的 jcseg-elasticsearch 项目下的 config/elasticsearch.yml 配置文件,将对应的配置加到{ES_HOME}/config/elasticsearch.yml中去。 8. 配置elasticsearch.yml或者mapping来使用 Jcseg分词插件(或者在query中指定)。 elasticsearch.version >= 5.1.1 1. 下载最新版本的 Jcseg源码。 2. 使用maven或者ant编译打包得到 Jcseg的系列jar包(建议使用maven,ant需要自己下载对应的依赖包)。 3. 拷贝jcseg-analyzer-{version}.jar,jcseg-core-{version}.jar,jcseg-elasticsearch-{version}.jar到{ES_HOME}/plugins/jcseg目录下(自己建立该文件夹,如果不存在)。 4. 拷贝一份jcseg.properties到{ES_HOME}/plugins/jcseg目录下(自己建立该文件夹,如果不存在)。 5. 拷贝一份jcseg-elasticsearch/plugin/plugin-descriptor.properties到{ES_HOME}/plugins/jcseg目录下(自己建立该文件夹,如果不存在)。 6. 配置好jcseg.properties,尤其是配置lexicon.path指向正确的词库(或者将jcseg目录下的lexicon文件夹拷贝到{ES_HOME}/plugins/jcseg目录下)。 7. mapping指定来使用 Jcseg分词插件(或者在query中指定)。 可选的analyzer名字: jcseg : 对应Jcseg的检索模式切分算法 jcseg_complex : 对应Jcseg的复杂模式切分算法 jcseg_simple : 对应Jcseg的简易切分算法 jcseg_detect : 对应Jcseg的检测模式切分算法 jcseg_search : 对应Jcseg的检索模式切分算法 jcseg_nlp : 对应Jcseg的NLP模式切分算法 jcseg_delimiter : 对应Jcseg的分隔符模式切分算法 配置测试地址: http://localhost:9200/_analyze?analyzer=jcseg_search&text=一百美元等于多少人民币 也可以直接使用集成了jcseg的elasticsearch运行包:elasticsearch-jcseg,开封就可以使用。 Jcseg分词服务器: jcseg-server模块嵌入了jetty,实现了一个绝对高性能的服务器,给jcseg的全部Api功能都加上了restful接口,并且标准化了api结果的json输出格式,各大语言直接使用http客户端调用即可。 编译jcseg: 1. maven编译jcseg,得到jcseg-server-{version}.jar, maven已经将依赖的jar包一起编译进去了,如果是ant编译运行时请将依赖包载入。 2. 启动jcseg server: # 在最后传入jcseg-server.properties配置文件的路径 java -jar jcseg-server-{version}.jar ./jcseg-server.properties jcseg-server.properties: 懒得翻译了,默默的多念几遍就会了! # jcseg server configuration file with standard json syntax { # jcseg server configuration "server_config": { # server port "port": 1990, # default conmunication charset "charset": "utf-8", # http idle timeout in ms "http_connection_idle_timeout": 60000, # jetty maximum thread pool size "max_thread_pool_size": 200, # thread idle timeout in ms "thread_idle_timeout": 30000, # http output buffer size "http_output_buffer_size": 32768, # request header size "http_request_header_size": 8192, # response header size "http_response_header_size": 8192 }, # global setting for jcseg, yet another copy of the old # configuration file jcseg.properties "jcseg_global_config": { # maximum match length. (5-7) "jcseg_maxlen": 7, # recognized the chinese name. # (true to open and false to close it) "jcseg_icnname": true, # maximum length for pair punctuation text. # set it to 0 to close this function "jcseg_pptmaxlen": 7, # maximum length for chinese last name andron. "jcseg_cnmaxlnadron": 1, # Whether to clear the stopwords. # (set true to clear stopwords and false to close it) "jcseg_clearstopword": false, # Whether to convert the chinese numeric to arabic number. # (set to true open it and false to close it) like '\u4E09\u4E07' to 30000. "jcseg_cnnumtoarabic": true, # Whether to convert the chinese fraction to arabic fraction. # @Note: for lucene,solr,elasticsearch eg.. close it. "jcseg_cnfratoarabic": false, # Whether to keep the unrecognized word. # (set true to keep unrecognized word and false to clear it) "jcseg_keepunregword": true, # Whether to start the secondary segmentation for the complex english words. "jcseg_ensencondseg": true, # min length of the secondary simple token. # (better larger than 1) "jcseg_stokenminlen": 2, #thrshold for chinese name recognize. # better not change it before you know what you are doing. "jcseg_nsthreshold": 1000000, #The punctuations that will be keep in an token. # (Not the end of the token). "jcseg_keeppunctuations": "@#%.&+" }, # dictionary instance setting. # add yours here with standard json syntax "jcseg_dict": { "master": { "path": [ "{jar.dir}/lexicon" # absolute path here #"/java/JavaSE/jcseg/lexicon" ], # Whether to load the part of speech of the words "loadpos": true, # Whether to load the pinyin of the words. "loadpinyin": true, # Whether to load the synoyms words of the words. "loadsyn": true, # whether to load the entity of the words. "loadentity": true, # Whether to load the modified lexicon file auto. "autoload": true, # Poll time for auto load. (in seconds) "polltime": 300 } # add more of yours here # ,"name" : { # "path": [ # "absolute jcseg standard lexicon path 1", # "absolute jcseg standard lexicon path 2" # ... # ], # "autoload": 0, # "polltime": 300 # } }, # JcsegTaskConfig instance setting. # @Note: # All the config instance here is extends from the global_setting above. # do nothing will extends all the setting from global_setting "jcseg_config": { "master": { # extends and Override the global setting "jcseg_pptmaxlen": 0, "jcseg_cnfratoarabic": true, "jcseg_keepunregword": false } # this one is for keywords,keyphrase,sentence,summary extract # @Note: do not delete this instance if u want jcseg to # offset u extractor service ,"extractor": { "jcseg_pptmaxlen": 0, "jcseg_clearstopword": true, "jcseg_cnnumtoarabic": false, "jcseg_cnfratoarabic": false, "jcseg_keepunregword": false, "jcseg_ensencondseg": false } # well, this one is for NLP only ,"nlp" : { "jcseg_ensencondseg": false, "jcseg_cnfratoarabic": true, "jcseg_cnnumtoarabic": true } # add more of yours here # ,"name": { # ... # } }, # jcseg tokenizer instance setting. # Your could let the instance service for you by access: # http://jcseg_server_host:port/tokenizer/instance_name # instance_name is the name of instance you define here. "jcseg_tokenizer": { "master": { # jcseg tokenizer algorithm, could be: # 1: SIMPLE_MODE # 2: COMPLEX_MODE # 3: DETECT_MODE # 4: SEARCH_MODE # 5: DELIMITER_MODE # 6: NLP_MODE # see org.lionsoul.jcseg.tokenizer.core.JcsegTaskConfig for more info "algorithm": 2, # dictionary instance name # choose one of your defines above in the dict scope "dict": "master", # JcsegTaskConfig instance name # choose one of your defines above in the config scope "config": "master" } # this tokenizer instance is for extractor service # do not delete it if you want jcseg to offset you extractor service ,"extractor": { "algorithm": 2, "dict": "master", "config": "extractor" } # this tokenizer instance of for NLP analysis # keep it for you NLP project ,"nlp" : { "algorithm": 6, "dict": "master", "config": "nlp" } # add more of your here # ,"name": { # ... # } } } restful api: 1. 关键字提取: api地址:http://jcseg_server_host:port/extractor/keywords?text=&number=&autoFilter=true|false api参数: text: post或者get过来的文档文本 number: 要提取的关键词个数 autoFilter: 是否自动过滤掉低分数关键字 api返回: { //api错误代号,0正常,1参数错误, -1内部错误 "code": 0, //api返回数据 "data": { //关键字数组 "keywords": [], //操作耗时 "took": 0.001 } } 更多配置请参考:org.lionsoul.jcseg.server.controller.KeywordsController 2. 关键短语提取: api地址:http://jcseg_server_host:port/extractor/keyphrase?text=&number= api参数: text: post或者get过来的文档文本 number: 要提取的关键短语个数 api返回: { "code": 0, "data": { "took": 0.0277, //关键短语数组 "keyphrase": [] } } 更多配置请参考:org.lionsoul.jcseg.server.controller.KeyphraseController 3. 关键句子提取: api地址:http://jcseg_server_host:port/extractor/sentence?text=&number= api参数: text: post或者get过来的文档文本 number: 要提取的关键句子个数 api返回: { "code": 0, "data": { "took": 0.0277, //关键句子数组 "sentence": [] } } 更多配置请参考:org.lionsoul.jcseg.server.controller.SentenceController 4. 文章摘要提取: api地址:http://jcseg_server_host:port/extractor/summary?text=&length= api参数: text: post或者get过来的文档文本 length: 要提取的摘要的长度 api返回: { "code": 0, "data": { "took": 0.0277, //文章摘要 "summary": "" } } 更多配置请参考:org.lionsoul.jcseg.server.controller.SummaryController 5. 文章自动分词: api地址:http://jcseg_server_host:port/tokenizer/tokenizer_instance?text=&ret_pinyin=&ret_pos=... api参数: tokenizer_instance: 表示在jcseg-server.properties中定义的分词实例名称 text: post或者get过来的文章文本 ret_pinyin: 是否在分词结果中返回词条拼音(2.0.1版本后已经取消) ret_pos: 是否在分词结果中返回词条词性(2.0.1版本后已经取消) api返回: { "code": 0, "data": { "took": 0.00885, //词条对象数组 "list": [ { word: "哆啦a梦", //词条内容 position: 0, //词条在原文中的索引位置 length: 4, //词条的词个数(非字节数) pinyin: "duo la a meng", //词条的拼音 pos: "nz", //词条的词性标注 entity: null //词条的实体标注 } ] } } 更多配置请参考:org.lionsoul.jcseg.server.controller.TokenizerController Jcseg二次开发: 1. Jcseg中文分词Api: (1). 创建JcsegTaskConfig配置对象: jcseg.properties查找步骤: • 1,寻找jcseg-core-{version}.jar目录下的jcseg.properties • 2,如果没找到继续寻找classpath下的jcseg.properties(默认已经打包了) • 3,如果没找到继续寻找user home下的jcseg.properties(除非把classpath下的jcseg.properties删除了,要不然不会到这) 所以,默认情况下可以在jcseg-core-{version}.jar同目录下来放一份jcseg.properties来自定义配置。 JcsegTaskConfig构造方法如下: JcsegTaskConfig(); //不做任何配置文件查找来初始化 JcsegTaskConfig(boolean autoLoad); //autoLoad=true会自动查找配置来初始化 JcsegTaskConfig(java.lang.String proFile); //从指定的配置文件中初始化配置对象 JcsegTaskConfig(InputStream is); //从指定的输入流中初始化配置对象 demo代码: //创建JcsegTaskConfig使用默认配置,不做任何配置文件查找 JcsegTaskConfig config = new JcsegTaskConfig(); //该方法会自动按照上述“jcseg.properties查找步骤”来寻找jcseg.properties并且初始化: JcsegTaskConfig config = new JcsegTaskConfig(true); //依据给定的jcseg.properties文件创建并且初始化JcsegTaskConfig JcsegTaskConfig config = new JcsegTaskConfig("absolute or relative jcseg.properties path"); //调用JcsegTaskConfig#load(String proFile)方法来从指定配置文件中初始化配置选项 config.load("absolute or relative jcseg.properties path"); (2). 创建ADictionary词库对象: ADictionary构造方法如下: ADictionary(JcsegTaskConfig config, java.lang.Boolean sync) //config:上述的JcsegTaskConfig实例 //sync: 是否创建线程安全词库,如果你需要在运行时操作词库对象则指定true, // 如果jcseg.properties中autoload=1则会自动创建同步词库 demo代码: //Jcseg提供org.lionsoul.jcseg.tokenzier.core.DictionaryFactory来方便词库的创建与往后的兼容 //通常可以通过 // DictionaryFactory#createDefaultDictionary(JcsegTaskConfig) // DictionaryFactory.createSingletonDictionary(JcsegTaskConfig) //两方法来创建词库对象并且加载词库文件,建议使用createSingletonDictionary来创建单例词库 //config为上面创建的JcsegTaskConfig对象. //如果给定的JcsegTaskConfig里面的词库路径信息正确 //ADictionary会依据config里面的词库信息加载全部有效的词库; //并且该方法会依据config.isAutoload()来决定词库的同步性还是非同步性, //config.isAutoload()为true就创建同步词库, 反之就创建非同步词库, //config.isAutoload()对应jcseg.properties中的lexicon.autoload; //如果config.getLexiconPath() = null,DictionaryFactory会自动加载classpath下的词库 //如果不想让其自动加载lexicon下的词库 //可以调用:DictionaryFactory.createSingletonDictionary(config, false)创建ADictionary即可; ADictionary dic = DictionaryFactory.createSingletonDictionary(config); //创建一个非同步的按照config.lexPath配置加载词库的ADictioanry. ADictionary dic = DictionaryFactory.createDefaultDictionary(config, false); //创建一个同步的按照config.lexPath加载词库的ADictioanry. ADictionary dic = DictionaryFactory.createDefaultDictionary(config, true); //依据 config.isAutoload()来决定同步性,默认按照config.lexPath来加载词库的ADictionary ADictionary dic = DictionaryFactory.createDefaultDictionary(config, config.isAutoload()); //指定ADictionary加载给定目录下的所有词库文件的词条. //config.getLexiconPath为词库文件存放有效目录数组. for ( String path : config.getLexiconPath() ) { dic.loadDirectory(path); } //指定ADictionary加载给定词库文件的词条. dic.load("/java/lex-main.lex"); dic.load(new File("/java/lex-main.lex")); //指定ADictionary加载给定输入流的词条 dic.load(new FileInputStream("/java/lex-main.lex")); //阅读下面的“如果自定义使用词库”来获取更多信息 (3). 创建ISegment分词实例: ISegment接口核心分词方法: public IWord next(); //返回下一个切分的词条 demo代码: //依据给定的ADictionary和JcsegTaskConfig来创建ISegment //通常使用SegmentFactory#createJcseg来创建ISegment对象 //将config和dic组成一个Object数组给SegmentFactory.createJcseg方法 //JcsegTaskConfig.COMPLEX_MODE表示创建ComplexSeg复杂ISegment分词对象 //JcsegTaskConfig.SIMPLE_MODE表示创建SimpleSeg简易Isegmengt分词对象. //JcsegTaskConfig.DETECT_MODE表示创建DetectSeg Isegmengt分词对象. //JcsegTaskConfig.SEARCH_MODE表示创建SearchSeg Isegmengt分词对象. //JcsegTaskConfig.DELIMITER_MODE表示创建DelimiterSeg Isegmengt分词对象. //JcsegTaskConfig.NLP_MODE表示创建NLPSeg Isegmengt分词对象. ISegment seg = SegmentFactory.createJcseg( JcsegTaskConfig.COMPLEX_MODE, new Object[]{config, dic} ); //设置要分词的内容 String str = "研究生命起源。"; seg.reset(new StringReader(str)); //获取分词结果 IWord word = null; while ( (word = seg.next()) != null ) { System.out.println(word.getValue()); } (4). 一个完整的例子: //创建JcsegTaskConfig分词配置实例,自动查找加载jcseg.properties配置项来初始化 JcsegTaskConfig config = new JcsegTaskConfig(true); //创建默认单例词库实现,并且按照config配置加载词库 ADictionary dic = DictionaryFactory.createSingletonDictionary(config); //依据给定的ADictionary和JcsegTaskConfig来创建ISegment //为了Api往后兼容,建议使用SegmentFactory来创建ISegment对象 ISegment seg = SegmentFactory.createJcseg( JcsegTaskConfig.COMPLEX_MODE, new Object[]{new StringReader(str), config, dic} ); //备注:以下代码可以反复调用,seg为非线程安全 //设置要被分词的文本 String str = "研究生命起源。"; seg.reset(new StringReader(str)); //获取分词结果 IWord word = null; while ( (word = seg.next()) != null ) { System.out.println(word.getValue()); } (5). 如何自定义使用词库: 从1.9.9版本开始,Jcseg已经默认将jcseg.properties和lexicon全部词库打包进了jcseg-core-{version}.jar中,如果是通过JcsegTaskConfig(true)构造的JcsegTaskConfig或者调用了JcsegTaskConfig#autoLoad()方法,在找不到自定义配置文件情况下Jcseg会自动的加载classpath中的配置文件,如果config.getLexiconPath() = null DictionaryFactory默认会自动加载classpath下的词库。 • 1),通过JcsegTaskConfig设置词库路径: //1, 默认构造JcsegTaskConfig,不做任何配置文件寻找来初始化 JcsegTaskConfig config = new JcsegTaskConfig(); //2, 设置自定义词库路径集合 config.setLexiconPath(new String[]{ "relative or absolute lexicon path1", "relative or absolute lexicon path2" //add more here }); //3, 通过config构造词库并且DictionaryFactory会按照上述设置的词库路径自动加载全部词库 ADictionary dic = DictionaryFactory.createSingletonDictionary(config); • 2),通过ADictionary手动加载词库: //1, 构造默认的JcsegTaskConfig,不做任何配置文件寻找来初始化 JcsegTaskConfig config = new JcsegTaskConfig(); //2, 构造ADictionary词库对象 //注意第二个参数为false,阻止DictionaryFactory自动检测config.getLexiconPath()来加载词库 ADictionary dic = DictionaryFactory.createSingletonDictionary(config, false); //3, 手动加载词库 dic.load(new File("absolute or relative lexicon file path")); //加载指定词库文件下全部词条 dic.load("absolute or relative lexicon file path"); //加载指定词库文件下全部词条 dic.load(new FileInputStream("absolute or relative lexicon file path")); //加载指定InputStream输入流下的全部词条 dic.loadDirectory("absolute or relative lexicon directory"); //加载指定目录下的全部词库文件的全部词条 dic.loadClassPath(); //加载classpath路径下的全部词库文件的全部词条(默认路径/lexicon) 2. Jcseg关键字提取Api: • 1),TextRankKeywordsExtractor构造方法: TextRankKeywordsExtractor(ISegment seg); //seg: Jcseg ISegment分词对象 • 2),demo代码: //1, 创建Jcseg ISegment分词对象 JcsegTaskConfig config = new JcsegTaskConfig(true); config.setClearStopwords(true); //设置过滤停止词 config.setAppendCJKSyn(false); //设置关闭同义词追加 config.setKeepUnregWords(false); //设置去除不识别的词条 ADictionary dic = DictionaryFactory.createSingletonDictionary(config); ISegment seg = SegmentFactory.createJcseg( JcsegTaskConfig.COMPLEX_MODE, new Object[]{config, dic} ); //2, 构建TextRankKeywordsExtractor关键字提取器 TextRankKeywordsExtractor extractor = new TextRankKeywordsExtractor(seg); extractor.setMaxIterateNum(100); //设置pagerank算法最大迭代次数,非必须,使用默认即可 extractor.setWindowSize(5); //设置textRank计算窗口大小,非必须,使用默认即可 extractor.setKeywordsNum(10); //设置最大返回的关键词个数,默认为10 //3, 从一个输入reader输入流中获取关键字 String str = "现有的分词算法可分为三大类:基于字符串匹配的分词方法、基于理解的分词方法和基于统计的分词方法。按照是否与词性标注过程相结合,又可以分为单纯分词方法和分词与标注相结合的一体化方法。"; List<String> keywords = extractor.getKeywords(new StringReader(str)); //4, output: //"分词","方法","分为","标注","相结合","字符串","匹配","过程","大类","单纯" • 3),测试源码参考:org.lionsoul.jcseg.test.KeywordsExtractorTest源码 3. Jcseg自动摘要/关键句子提取Api: • 1),TextRankSummaryExtractor构造方法: TextRankSummaryExtractor(ISegment seg, SentenceSeg sentenceSeg); //seg: Jcseg ISegment分词对象 //sentenceSeg: Jcseg SentenceSeg句子切分对象 • 2),demo代码: //1, 创建Jcseg ISegment分词对象 JcsegTaskConfig config = new JcsegTaskConfig(true); config.setClearStopwords(true); //设置过滤停止词 config.setAppendCJKSyn(false); //设置关闭同义词追加 config.setKeepUnregWords(false); //设置去除不识别的词条 ADictionary dic = DictionaryFactory.createSingletonDictionary(config); ISegment seg = SegmentFactory.createJcseg( JcsegTaskConfig.COMPLEX_MODE, new Object[]{config, dic} ); //2, 构造TextRankSummaryExtractor自动摘要提取对象 SummaryExtractor extractor = new TextRankSummaryExtractor(seg, new SentenceSeg()); //3, 从一个Reader输入流中获取length长度的摘要 String str = "Jcseg是基于mmseg算法的一个轻量级开源中文分词器,同时集成了关键字提取,关键短语提取,关键句子提取和文章自动摘要等功能,并且提供了最新版本的lucene,%20solr,%20elasticsearch的分词接口。Jcseg自带了一个%20jcseg.properties文件用于快速配置而得到适合不同场合的分词应用。例如:最大匹配词长,是否开启中文人名识别,是否追加拼音,是否追加同义词等!"; String summary = extractor.getSummary(new StringReader(str), 64); //4, output: //Jcseg是基于mmseg算法的一个轻量级开源中文分词器,同时集成了关键字提取,关键短语提取,关键句子提取和文章自动摘要等功能,并且提供了最新版本的lucene, solr, elasticsearch的分词接口。 //----------------------------------------------------------------- //5, 从一个Reader输入流中提取n个关键句子 String str = "you source string here"; extractor.setSentenceNum(6); //设置返回的关键句子个数 List<String> keySentences = extractor.getKeySentence(new StringReader(str)); • 3),测试源码参考:org.lionsoul.jcseg.test.SummaryExtractorTest源码 4. Jcseg关键短语提取Api: • 1),TextRankKeyphraseExtractor构造方法: TextRankKeyphraseExtractor(ISegment seg); //seg: Jcseg ISegment分词对象 • 2),demo代码: //1, 创建Jcseg ISegment分词对象 JcsegTaskConfig config = new JcsegTaskConfig(true); config.setClearStopwords(false); //设置不过滤停止词 config.setAppendCJKSyn(false); //设置关闭同义词追加 config.setKeepUnregWords(false); //设置去除不识别的词条 config.setEnSecondSeg(false); //关闭英文自动二次切分 ADictionary dic = DictionaryFactory.createSingletonDictionary(config); ISegment seg = SegmentFactory.createJcseg( JcsegTaskConfig.COMPLEX_MODE, new Object[]{config, dic} ); //2, 构建TextRankKeyphraseExtractor关键短语提取器 TextRankKeyphraseExtractor extractor = new TextRankKeyphraseExtractor(seg); extractor.setMaxIterateNum(100); //设置pagerank算法最大迭代词库,非必须,使用默认即可 extractor.setWindowSize(5); //设置textRank窗口大小,非必须,使用默认即可 extractor.setKeywordsNum(15); //设置最大返回的关键词个数,默认为10 extractor.setMaxWordsNum(4); //设置最大短语词长,默认为5 //3, 从一个输入reader输入流中获取短语 String str = "支持向量机广泛应用于文本挖掘,例如,基于支持向量机的文本自动分类技术研究一文中很详细的介绍支持向量机的算法细节,文本自动分类是文本挖掘技术中的一种!"; List<String> keyphrases = extractor.getKeyphrase(new StringReader(str)); //4, output: //支持向量机, 自动分类 • 3),测试源码参考:org.lionsoul.jcseg.test.KeyphraseExtractorTest源码 相关附录 1,Jcseg的词性对照: 名词n、时间词t、处所词s、方位词f、数词m、量词q、区别词b、代词r、动词v、形容词a、状态词z、副词d、介词p、连词c、助词u、语气词y、叹词e、拟声词o、成语i、习惯用语l、简称j、前接成分h、后接成分k、语素g、非语素字x、标点符号w)外,从语料库应用的角度,增加了专有名词(人名nr、地名ns、机构名称nt、其他专有名词nz)。 2,Jcseg同义词管理: • 01),统一的词库分类: 从2.2.0版本开始jcseg将同义词统一成了一个单独的类别-CJK_SYN,你可以将你的同义词定义直接追加到现有的同义词词库vendors/lexicons/lex-synonyms.lex中,也可以新建一个独立的词库,然后在首行增加CJK_SYN定义,将该词库归类为同义词词库,然后按照下面介绍的格式逐行或者分行增加同义词的定义。 • 02),统一的同义词格式: 格式: 词根,同义词1[/可选拼音],同义词2[/可选拼音],...,同义词n[/可选拼音] 例如: 单行定义: 研究,研讨,钻研,研磨/yan mo,研发 多行定义:(只要词根一样,定义的全部同义词就都属于同一个集合) 中央一台,央视一台,中央第一台 中央一台,中央第一频道,央视第一台,央视第一频道 • 03),格式和要求说明: 1,第一个词为同义词的根词条,这个词条必须是CJK_WORD词库中必须存在的词条,如果不存在,这条同义词定义会被忽略。 2,根词会作为不同行同义词集合的区别,如果两行同义词定义的根词一样,会自动合并成一个同义词集合。 3,jcseg中使用org.lionsoul.jcseg.tokenizer.core.SynonymsEntry来管理同义词集合,每个IWord词条对象都会有一个SynonymsEntry属性来指向自己的同义词集合。 4,SynonymsEntry.rootWord存储了同义词集合的根词,同义词的合并建议统一替换成根词。 5,除去根词外的其他同义词,jcseg会自动检测并且创建相关的IWord词条对象并且将其加入CJK_WORD词库中,也就是说其他同义词不一定要是CJK_WORD词库中存在的词条。 6,其他同义词会自动继承词根的词性和实体定义,也会继承CJK_WORD词库中该词条的拼音定义(如果存在该词),也可以在词条后面通过增加"/拼音"来单独定义拼音。 7,同一同义词定义的集合中的全部IWord词条都指向同一个SynonymsEntry对象,也就是同义词之间会自动相互引用。 This is the end line and thanks for reading !!! 仓库评论 ( 0 ) 你可以在登录后,发表评论 简介 Jcseg是基于mmseg算法的一个轻量级中文分词器,同时集成了关键字提取,关键短语提取,关键句子提取和文章自动摘要等功能,并且提供了一个基于Jetty的web服务器,方便各大语言直接http调用,同时提供了最新版本的lucene,solr和elasticsearch的分词接口! 展开 收起 Java Apache-2.0 取消 发行版 暂无发行版 贡献者 全部 近期动态 加载更多 不能加载更多了 Java 1 https://gitee.com/weimingqie/jcseg.git [email protected]:weimingqie/jcseg.git weimingqie jcseg jcseg master 搜索帮助
__label__pos
0.913101
IoT ecosystem: Definition, key components and benefits Dealing with problems with IoT devices, such as a garage door opener application that fails at a critical moment, can be frustrating and complex. Understanding where the fault lies in an IoT device requires knowledge of the processes involved in its operation. So in this article, we explore the seven essential components that make up an efficient IoT ecosystem. What is the IoT ecosystem? The IoT ecosystem refers to the interconnected network of wireless devices, which are equipped with sensors, software and other technologies to collect and share information over the Internet. These devices can range from common household appliances, vehicles, lighting and heating systems, to complex industrial machines. The 6 crucial components of the IoT ecosystem. 1) IoT devices: They are the heart of the ecosystem, connecting to the internet to fulfill various functions. Their variety is staggering, from domestic applications to industrial innovations. For example, Caterpillar integrates augmented reality into AR glasses for employee training, and recent technology events have even featured IoT contact lenses, which project information directly onto the wearer’s vision. 2) Device connectivity: Fundamental in the interaction between devices on the network. With the development of 4G and 5G networks, cellular connectivity now offers speeds comparable to Wi-Fi in urban areas, facilitating an efficient and adaptable connection thanks to the use of eSIM, which allows devices to switch networks to maintain constant connectivity, especially in mobile devices such as those installed in vehicles. 3) Application on the smart device The internal software that guides the device in processing data. For example, a production machine may have an application that manages material inventory automatically. These applications run on units with multiple processors, distributing the workload to optimize performance, which is crucial to the efficiency and responsiveness of the device. 4) Security: With more than 21 billion IoT devices projected by 2025, IoT security, is vital. It must strike a balance between protection and usability, implementing measures such as encryption and authentication without sacrificing speed or accessibility, thus preparing to counter various forms of cyber-attacks and protecting privacy and data integrity. 5) IoT Gateway The IoT Gateway is a central device that connects all other devices in an IoT network to the cloud, similar to how a router provides Wi-Fi connectivity in a home, but with much greater customization and device management capabilities. Your choice should align with the specific needs of the network, considering that it is not always necessary to send large volumes of data to the cloud. 6)IoT users: Users are a fundamental component in the IoT ecosystem, with needs that vary significantly and are constantly evolving. The most common IoT device is the smartphone, with billions in use and growing. This phenomenon has familiarized most people with the IoT, even indirectly. Today, we are seeing a transition where smartphone users are adapting to interacting with an increasing number of IoT devices. It is essential that the IoT be adaptable and easy to use to serve the needs of a broad spectrum of users, from healthcare professionals to technicians and mechanics, underscoring the importance of a customizable and accessible IoT. Benefits of an Enterprise IoT Ecosystem 1) Improved operational efficiency: Integrating IoT devices enables enterprises to monitor and manage operations in real time, optimizing processes and reducing downtime. The ability to anticipate machine failures and schedule preventive maintenance minimizes disruptions and improves productivity. 2) Opportunities for new revenue streams: The IoT ecosystem gives companies the ability to innovate and develop new products or improve existing ones by providing data on how consumers interact with their products, opening pathways to continuous innovation and user-driven development. 3) Data-driven decision making: Analysis of data generated by IoT devices provides valuable insights into consumer behavior and operational efficiency, facilitating more informed and proactive strategic decisions. 4) Personalization and improved customer experience: IoT enables companies to offer personalized services and products by collecting and analyzing data on customer preferences and behaviors. This personalization can lead to increased customer satisfaction and loyalty. Join the IoT Ecosystem with Guinea Mobile: Integrating physical components, applications, connectivity and users, the IoT ecosystem is a complex network of interacting parts. For efficient operations, a reliable cellular connectivity provider is essential. Guinea Mobile offers uninterrupted nationwide coverage for IoT devices, covering technologies from 2G to LTE, ensuring devices maintain uninterrupted connectivity. Your company can also be part of the digital vanguard, do not miss this great opportunity that Guinea Mobile has for you. Contact us for a personalized consultancy. Frequently Asked Questions What is an IoT device? An IoT device is a device with the ability to connect to the internet and communicate with other devices, performing specific tasks within a network. How does IoT connectivity work? IoT connectivity allows interaction between devices through different technologies such as Wi-Fi, Bluetooth, or cellular networks, adapting to the needs of each network. Who are IoT users? IoT users range from ordinary individuals using smartphones to professionals in various sectors, each with specific needs and uses for the technology.
__label__pos
0.942018
Do MP3 files include audio data? For the instant installment participants met up in the Sheeps Meadow in important parkland.a few minutes after pressing fun, 2zero0 individuals all of a sudden rose from their seats on the sector as everybody else within the appeared on in surprise.listeners had unknowingly downloaded four keep apart mp3s and had been accordingly divided up here groups, led stopping at a preposterous forged of a Sea Captain, Bumblebee, Dolphin, and Astronaut.The occasion ruined by means of a rock Paper Scissors battle and a 2zero0 seaside balls beast tossed popular the . Free Video to MP3 Converter Submit an issue news report without cost Video to MP3 Converter close Please specify the issue you've gotten software program. This info will probably be despatched to our editors for review.drawback: The CNET Installer isn't operational as expected The obtain link does not business The software program has a newer version The software program contains malware OtherDescription:Please select a feedback type. Please inscribe an outline. Submit drawback report What is the difference between MPEG, JPEG, and MP3? Seeing as MP3 NORMALIZER have an audio player by the side of my page i don't want safari to get to it the obtain hyperlink in a new tab by one other player, i want the mp3 stake to obtain to their pc. Note: This process involves changing sport information; create a backup fake of the files earlier than proceeding. before time, a music that you simply want to hear in the recreation and alter it right into a .mp3 paragraph. both reduce or imitate it. discover the "important" ring binder in the game listing. note down the "racket" , then mark the "amb_" file. Paste audacity surrounded by that ring binder. discover the blare feature for the extent that you just need to . Then, swap mp3gain of the two blare files. you'll at this time hear your favorite songs in the course of the recreation, however other players will be unable to hear it. How hoedown you change an mpl article to mp3 rank? No, ffmpeg purchased by way of the iTunes store is formatted as protected mp4 recordsdata. You would need to transform them to an un format the EnV contact would be able to to learn, similar to MP3 or WAV Leave a Reply Your email address will not be published. Required fields are marked *
__label__pos
0.645117
Quick Answer: What Happens If I Delete TikTok App? Can TikTok steal your information? Class-Action Lawsuit Claims TikTok Steals Kids’ Data And Sends It To China. Twenty lawsuits have been combined into a unified federal legal action against short-form video app TikTok over allegedly harvesting data from users and secretly sending the information to China.. Why is TikTok so dangerous? Danger #2: Data mining, selling, and storage The first concern many users (and parents of young users) have about TikTok is the way data is stored and potentially shared. … “All of these platforms, at least in part, are monetizing your data. That’s what they do. And the more data they have, the more money they can make. How do I get my old TikTok account back? Tap “Log in.” Select “Use phone/email/username” and log in with your account information. After logging in, TikTok will inform you that your account was deactivated. Tap “Reactivate” to recover the account. Who owns TikTok? Related Coverage. ByteDance founder Zhang Yiming resisted the sale of TikTok last year despite calls from his large Western investors to do so. ByteDance, which counts General Atlantic and Sequoia Capital among its backers, was valued at $180 billion in December, according to investment data research company PitchBook. Can you permanently delete TikTok account? Open the TikTok app, and tap the profile button in the bottom-right corner of the app and select the three-dot menu in the top-right corner. Tap Manage my account and then Delete account at the bottom of the page. TikTok will warn you about what will be lost. Tap Delete Account again to confirm your decision. What happens if I delete Tik Tok app? When you delete your TikTok account, your account will appear as “Deactivated” to other users searching for your profile. They will not be able to view any of your videos or liked content. After 30 days, your account and its information (including videos) will be deleted. Does deleting the TikTok app delete your account? You can delete your TikTok account fairly quickly and easily via the settings in the app. Deleting your account is permanent and will cut you off from any of your in-app purchases, so make sure it’s the right decision for you before going forward. Here’s how to delete your TikTok account. Is Tik Tok dangerous? While these arguments are unfounded, claims have been made that the app could be covert spyware. TikTok affirms that user safety and security is its top concern. The service states that it has never provided user data to the government and would refuse to share such information if requested to do so. Does deleting TikTok delete favorites? When you delete your TikTok account, your account will appear as “Deactivated” to other users searching for your profile. They will not be able to view any of your videos or liked content. After 30 days, your account and its information (including videos) will be deleted. Thanks for reading. Why did TikTok delete my account 2020? Tik Tok has begun to delete user accounts due to it’s 5.7 million compliance violation that it received. It was due to the age of it’s users, however they must have a bug in their algorithm that has caused some of the users which are over the age of 13 to also have had their accounts deleted. Is it OK to delete TikTok videos? Deleting TikTok videos has many adverse effects on your profile (both good and bad), so if you’re looking for clarity on which choice to make, today’s video lays it all out on the table. … These videos are for educational purposes only. Is deleting TikTok App enough? TikTok is one of multiple avenues the Chinese government can use to manage public narratives and disseminate propaganda. In short, it’s best just to delete the app. However, deleting TikTok doesn’t mean you’re safe from foreign influence campaigns and efforts to steal your own personal information. Is TikTok a waste of time? The app is not for everyone. “TikTok is a waste of time that just distracts kids from doing their homework,” said junior Macy Krambeck. … “Because the videos are so short, it’s a great way to take a break from my homework. I’ll go on it for two minutes to relieve some stress and then get back to my work.” Is TikTok a spy app? The administration has explicitly claimed TikTok spies on people but has never offered public evidence. Experts diving through TikTok’s code and policies say the app collects user data in a similar way to Facebook and other popular social apps. Is TikTok safe for 11 year olds? What age is TikTok recommended for? Common Sense recommends the app for age 15+ mainly due to the privacy issues and mature content. TikTok requires that users be at least 13 years old to use the full TikTok experience, although there is a way for younger kids to access the app. Why does it say delete failed on TikTok? If your account is not signed in then you have to sign in your TikTok account first. Tap on the three-dots menu icon ‘ ⋮ ‘ ‘ ⋯ ‘ at the right upper of the screen. Tap on Manage My Account. Now at the bottom of Manage my account page you will see Delete account button.
__label__pos
0.997922
CTK Insights 15 Jan Henri Poincaré: A Scientific Biography Henri Poincaré: A Scientific Biography by Jeremy Gray is an unusual book, a fundamental study of the scientific work of one of the greatest mathematicians and mathematical physicists of the three decades straddling the 19th and 20th centuries. Poincaré was an uncommonly versatile and productive scientist, being able to work simultaneously on several disparate subjects. For example, "in 1905 he published on number theory, geodesics on convex surfaces, the dynamics of the electron, a report on the French geodetic survey in Peru, and a popular philosophical paper on mathematics and logic." The book is not a biography of Poincaré the person (except for a brief account of his childhood and education), but a story of Poincaré a public figure, a mathematician, a physicist, a profound thinker. Nonetheless, many human aspects of Poincaré's life and time, and social atmosphere in Europe of his time emerge throughout the book. Introduction and the first two chapters (The Essayist, Poincaré's career) throw the most light on Poincaré's character. "He believed strongly that a knowledge claim had to come with an account of how we can know it" [p. 8]. So, for example, he "did not become heavily involved in the Dreyfus affair ... that he felt lay beyond his competence." However , he did provide expert opinion on the low quality of prosecution evidence, proving it unrealistic [p. 166-167]. Other chapters are organized topically, not chronologically. Each illuminates in depth one or other of Poincaré's works but all are set in context both historical and temathic such that each can serve as an introduction into the many subjects to which Poincaré made a contribution. Much of the book is a descriptive narrative, but the author never shies from displaying equations (even PDE and integral ones) when this is essential for the subject. I do not know whether this style has caused a price reduction, but for a book of this size, depth, and breadth, $33.10 (the amazon.com price) is an exceptional bargain. Leave a Reply 6 − five = © 2014 CTK Insights | Entries (RSS) and Comments (RSS) Powered by Wordpress, design by Web4 Sudoku, based on Pinkline by GPS Gazette
__label__pos
0.700818
help understanding query +2 jamie y · July 11, 2015 I have a database set up that I am working with that has just a few records. I am getting the records just fine but I don't understand why a few things don't work. Here is my code $number = (int) $_GET['n']; $query = "SELECT * FROM `questions`              WHERE question_number = $number";              //Get choices $result = $mysqli->query($query) or die($mysqli->error.__LINE__); $question = $result->fetch_assoc(); issue 1: If I do a var dump on $question, I get NULL output to the screen. If I do echo $question['text'] then it works. I see the question. issue 2: why can't I use a foreach loop on the question variable? I am guessing foreach loops don't work well on associative arrays? my working loop the results here endwhile; My questions 1. Why would question be null in a var_dump or a print_r? 2. Can you explain why the foreach loop fails but a while loop works? Post a Reply Replies Oldest  Newest  Rating 0 Sheldon Juncker · July 20, 2015 1. MySQLi fetching is done lazily meaning that the row is not loaded from the database until you access it. Thus when you dump out the array it doesn't contain anything because you haven't accessed any of elements. This isn't really a bug, if I understand it correctly. 2. A foreach loop such as foreach(fetch() as $row) only evaluates fetch() once whereas the while loop while($row = fetch()) evaluates the fetch() multiple times. That's why you need to use a while loop and not a foreach loop.  Note that I can't actually see your loop code because it isn't displaying correctly. I think that this is a TNB issue though. Let me know if that helps! 0 Ebube okulu · July 12, 2015 pls can someone help me out am new to php qery • 1 PHP 109,436 followers About Server-side, HTML embedded scripting language used to create dynamic Web pages. Links Moderators
__label__pos
0.995198
US10467036B2 - Dynamic metering adjustment for service management of computing platform - Google Patents Dynamic metering adjustment for service management of computing platform Download PDF Info Publication number US10467036B2 US10467036B2 US14/926,384 US201514926384A US10467036B2 US 10467036 B2 US10467036 B2 US 10467036B2 US 201514926384 A US201514926384 A US 201514926384A US 10467036 B2 US10467036 B2 US 10467036B2 Authority US United States Prior art keywords metric utilization data samples monitored resource collected Prior art date Legal status (The legal status is an assumption and is not a legal conclusion. Google has not performed a legal analysis and makes no representation as to the accuracy of the status listed.) Active, expires Application number US14/926,384 Other versions US20160094401A1 (en Inventor Ali Anwar Andrzej Kochut Anca Sailer Charles O. Schulz Alla Segal Current Assignee (The listed assignees may be inaccurate. Google has not performed a legal analysis and makes no representation or warranty as to the accuracy of the list.) International Business Machines Corp Original Assignee International Business Machines Corp Priority date (The priority date is an assumption and is not a legal conclusion. Google has not performed a legal analysis and makes no representation as to the accuracy of the date listed.) Filing date Publication date Priority to US201462057686P priority Critical Priority to US14/871,443 priority patent/US10171371B2/en Application filed by International Business Machines Corp filed Critical International Business Machines Corp Priority to US14/926,384 priority patent/US10467036B2/en Publication of US20160094401A1 publication Critical patent/US20160094401A1/en Assigned to INTERNATIONAL BUSINESS MACHINES CORPORATION reassignment INTERNATIONAL BUSINESS MACHINES CORPORATION ASSIGNMENT OF ASSIGNORS INTEREST (SEE DOCUMENT FOR DETAILS). Assignors: KOCHUT, ANDRZEJ, SCHULZ, CHARLES O., SEGAL, ALLA, SAILER, ANCA, ANWAR, ALI Publication of US10467036B2 publication Critical patent/US10467036B2/en Application granted granted Critical Active legal-status Critical Current Adjusted expiration legal-status Critical Links Images Classifications • GPHYSICS • G06COMPUTING; CALCULATING; COUNTING • G06FELECTRIC DIGITAL DATA PROCESSING • G06F9/00Arrangements for program control, e.g. control units • G06F9/06Arrangements for program control, e.g. control units using stored programs, i.e. using an internal store of processing equipment to receive or retain programs • G06F9/44Arrangements for executing specific programs • G06F9/455Emulation; Interpretation; Software simulation, e.g. virtualisation or emulation of application or operating system execution engines • G06F9/45533Hypervisors; Virtual machine monitors • G06F9/45558Hypervisor-specific management and integration aspects • GPHYSICS • G06COMPUTING; CALCULATING; COUNTING • G06FELECTRIC DIGITAL DATA PROCESSING • G06F11/00Error detection; Error correction; Monitoring • GPHYSICS • G06COMPUTING; CALCULATING; COUNTING • G06FELECTRIC DIGITAL DATA PROCESSING • G06F11/00Error detection; Error correction; Monitoring • G06F11/30Monitoring • G06F11/3003Monitoring arrangements specially adapted to the computing system or computing system component being monitored • G06F11/3006Monitoring arrangements specially adapted to the computing system or computing system component being monitored where the computing system is distributed, e.g. networked systems, clusters, multiprocessor systems • GPHYSICS • G06COMPUTING; CALCULATING; COUNTING • G06FELECTRIC DIGITAL DATA PROCESSING • G06F11/00Error detection; Error correction; Monitoring • G06F11/30Monitoring • G06F11/34Recording or statistical evaluation of computer activity, e.g. of down time, of input/output operation ; Recording or statistical evaluation of user activity, e.g. usability assessment • G06F11/3442Recording or statistical evaluation of computer activity, e.g. of down time, of input/output operation ; Recording or statistical evaluation of user activity, e.g. usability assessment for planning or managing the needed capacity • GPHYSICS • G06COMPUTING; CALCULATING; COUNTING • G06FELECTRIC DIGITAL DATA PROCESSING • G06F11/00Error detection; Error correction; Monitoring • G06F11/30Monitoring • G06F11/34Recording or statistical evaluation of computer activity, e.g. of down time, of input/output operation ; Recording or statistical evaluation of user activity, e.g. usability assessment • G06F11/3452Performance evaluation by statistical analysis • HELECTRICITY • H04ELECTRIC COMMUNICATION TECHNIQUE • H04LTRANSMISSION OF DIGITAL INFORMATION, e.g. TELEGRAPHIC COMMUNICATION • H04L41/00Arrangements for maintenance or administration or management of packet switching networks • H04L41/14Arrangements for maintenance or administration or management of packet switching networks involving network analysis or design, e.g. simulation, network model or planning • H04L41/142Arrangements for maintenance or administration or management of packet switching networks involving network analysis or design, e.g. simulation, network model or planning using statistical or mathematical methods • HELECTRICITY • H04ELECTRIC COMMUNICATION TECHNIQUE • H04LTRANSMISSION OF DIGITAL INFORMATION, e.g. TELEGRAPHIC COMMUNICATION • H04L43/00Arrangements for monitoring or testing packet switching networks • H04L43/02Arrangements for monitoring or testing packet switching networks involving a reduction of monitoring data • H04L43/022Arrangements for monitoring or testing packet switching networks involving a reduction of monitoring data using sampling of monitoring data, i.e. storing only a selection of packets • H04L43/024Arrangements for monitoring or testing packet switching networks involving a reduction of monitoring data using sampling of monitoring data, i.e. storing only a selection of packets using adaptive sampling • HELECTRICITY • H04ELECTRIC COMMUNICATION TECHNIQUE • H04LTRANSMISSION OF DIGITAL INFORMATION, e.g. TELEGRAPHIC COMMUNICATION • H04L43/00Arrangements for monitoring or testing packet switching networks • H04L43/08Monitoring based on specific metrics • H04L43/0805Availability • H04L43/0817Availability functioning • GPHYSICS • G06COMPUTING; CALCULATING; COUNTING • G06FELECTRIC DIGITAL DATA PROCESSING • G06F9/00Arrangements for program control, e.g. control units • G06F9/06Arrangements for program control, e.g. control units using stored programs, i.e. using an internal store of processing equipment to receive or retain programs • G06F9/44Arrangements for executing specific programs • G06F9/455Emulation; Interpretation; Software simulation, e.g. virtualisation or emulation of application or operating system execution engines • G06F9/45533Hypervisors; Virtual machine monitors • G06F9/45558Hypervisor-specific management and integration aspects • G06F2009/45591Monitoring or debugging support • GPHYSICS • G06COMPUTING; CALCULATING; COUNTING • G06FELECTRIC DIGITAL DATA PROCESSING • G06F9/00Arrangements for program control, e.g. control units • G06F9/06Arrangements for program control, e.g. control units using stored programs, i.e. using an internal store of processing equipment to receive or retain programs • G06F9/44Arrangements for executing specific programs • G06F9/455Emulation; Interpretation; Software simulation, e.g. virtualisation or emulation of application or operating system execution engines • G06F9/45533Hypervisors; Virtual machine monitors • G06F9/45558Hypervisor-specific management and integration aspects • G06F2009/45595Network integration; Enabling network access in virtual machine instances • GPHYSICS • G06COMPUTING; CALCULATING; COUNTING • G06FELECTRIC DIGITAL DATA PROCESSING • G06F2201/00Indexing scheme relating to error detection, to error correction, and to monitoring • G06F2201/815Virtual • HELECTRICITY • H04ELECTRIC COMMUNICATION TECHNIQUE • H04LTRANSMISSION OF DIGITAL INFORMATION, e.g. TELEGRAPHIC COMMUNICATION • H04L41/00Arrangements for maintenance or administration or management of packet switching networks • H04L41/08Configuration management of network or network elements • H04L41/0803Configuration setting of network or network elements • H04L41/084Configuration by copying • H04L41/0843Configuration by copying based on generic templates • HELECTRICITY • H04ELECTRIC COMMUNICATION TECHNIQUE • H04LTRANSMISSION OF DIGITAL INFORMATION, e.g. TELEGRAPHIC COMMUNICATION • H04L41/00Arrangements for maintenance or administration or management of packet switching networks • H04L41/08Configuration management of network or network elements • H04L41/0896Bandwidth or capacity management, i.e. automatically increasing or decreasing capacities, e.g. bandwidth on demand • HELECTRICITY • H04ELECTRIC COMMUNICATION TECHNIQUE • H04LTRANSMISSION OF DIGITAL INFORMATION, e.g. TELEGRAPHIC COMMUNICATION • H04L43/00Arrangements for monitoring or testing packet switching networks • H04L43/16Arrangements for monitoring or testing packet switching networks using threshold monitoring Abstract Systems and methods are provided for dynamic metering adjustment for service management of a computing platform. For example, a plurality of virtual machines are provisioned across a plurality of computing nodes of a computing platform. Data samples are collected for a metric that is monitored with regard to resource utilization in the computing platform by the virtual machines. The data samples are initially collected at a predefined sampling frequency. The data samples collected over time for the metric are analyzed to determine an amount of deviation in values of the collected data samples. A new sampling frequency is determined for collecting data samples for the metric based on the determined amount of deviation. The new sampling frequency is applied to collect data samples for the metric, wherein the new sampling frequency is less than the predefined sampling frequency. Description CROSS-REFERENCE TO RELATED APPLICATIONS This application is a Continuation-in-Part of U.S. patent application Ser. No. 14/871,443, filed on Sep. 30, 2015, which claims priority to U.S. Provisional Application Ser. No. 62/057,686, filed on Sep. 30, 2014, the disclosures of which are incorporated herein by reference. TECHNICAL FIELD The field generally relates to network computing and, in particular, to systems and methods for service management of computing platforms such as cloud computing networks. BACKGROUND The cloud computing model has emerged as the de facto paradigm for providing a wide range of services in the IT industry such as infrastructure, platform, and application services. As a result, various vendors offer cloud based solutions to optimize the use of their data centers. A key enabler for cloud computing is resource virtualization, which enables provisioning of multiple virtual machines (VMs) to provide a service, or a plurality of disparate services, on the same physical host. In addition, resource virtualization provides benefits such as efficiency, resource consolidation, security, provides support for Service Level Agreements (SLAs), and allows for efficient scaling of services that are provided by a cloud computing platform. Resource virtualization, however, raises several issues. For example, customers of the cloud providers, particularly those building their critical production businesses on cloud services, are interested in collecting and logging detailed monitoring data from the deployed cloud platform to track in real time the health of their thousands of service instances executing on the cloud platform. In this regard, a crucial challenge, especially for a sustainable IT business model, is how to adapt cloud service management, and implicitly its cost (e.g., impact of associated monitoring overhead) to dynamically accommodate changes in service requirements and data centers. Furthermore, as cloud services journey through their lifecycle towards commodities, cloud computing service providers are faced with market demands for charge models that are based on fine-grained pay-per-use pricing, where customers are charged for the amount of specific resources, e.g., volume of transactions, CPU usage, etc., consumed during a given time period. This is in contrast to historical coarse-grained charge models where cloud service providers charge their customers only on a flat-rate basis, e.g., in the form of a monthly subscription fee. Although this pricing methodology is straight forward and involves little management and performance overhead for the cloud service providers, it does not offer the competitive advantage edge of the usage based pricing. As a particular technology or service becomes more of a commodity (e.g., IaaS (Infrastructure as a Service), or SaaS (Software as a Service)), customers are interested in fine-grained pricing models based on their actual usage. For instance, from the perspective of a SaaS customer, it is more advantageous to be charged based on the usage of the platform (e.g., the number of http transactions or volume of the database queries) instead of a fixed monthly fee, especially when the usage is low. In this regard, cloud service providers, looking to maintain a competitive advantage by effectively adapting to versatile charging policies, have started to promote pay-per-use. However, usage based pricing brings a new set of service management requirements for the service providers, particularly for their revenue management. The finer-grain metering for usage based pricing requires the system to monitor service resources and applications at appropriate levels to acquire useful information about the resource consumption that is to be charged for. This may result in collecting significantly large amounts of metered data. In addition, computational resources are needed to process the metered data to perform revenue management specific tasks. The resource capacity requirements for non-revenue generating systems such as monitoring and metering fluctuate largely with, e.g., service demand (e.g., the number of service instances), service price policy updates (e.g., from single metric based charge to complex multi-metric based charge), the resolution of the system behavior exposed (e.g., from higher-level aggregations to individual runaway thread), while their unit cost changes depending on the operational infrastructure solution (e.g., on premise, traditional outsourcing or IaaS). Therefore, a crucial challenge for cloud service providers is how to manage and control service management data and functions, and implicitly the costs of such service management data and functions, in order to profitably remain in the race for the cloud market. SUMMARY Embodiments of the invention include systems and methods for dynamic metering adjustment for service management of a computing platform. For example, one embodiment includes a method for managing a computing platform. A plurality of virtual machines are provisioned across a plurality of computing nodes of a computing platform. Data samples are collected for a metric that is monitored with regard to resource utilization in the computing platform by the virtual machines. The data samples are initially collected at a predefined sampling frequency. The data samples collected over time for the metric are analyzed to determine an amount of deviation in values of the collected data samples. A new sampling frequency is determined for collecting data samples for the metric based on the determined amount of deviation. The new sampling frequency is applied to collect data samples for the metric, wherein the new sampling frequency is less than the predefined sampling frequency. Other embodiments of the invention will be described in the following detailed description, which is to be read in conjunction with the accompanying drawings. DESCRIPTION OF THE DRAWINGS FIG. 1 illustrates a computing platform which implements a scalable service management system, according to an embodiment of the invention. FIGS. 2A and 2B illustrate a flow diagram of a method for scalable service management in a computing platform, according to an embodiment of the invention. FIG. 3 shows pseudo code of a load balancing method according to an embodiment of the invention. FIG. 4 graphically illustrates experimental results obtained for different metering events using different sharding keys on query times of an experimental metering data store system consisting of 4 shards, according to an embodiment of the invention. FIG. 5 graphically illustrates experimental results which show a comparison between an amount of metering data that was estimated using an experimental metering data size estimation module and an actual amount of collected metering data, according to an embodiment of the invention. FIG. 6 graphically illustrates experimental results which show a comparison between query times at different granularity levels for various meters when processing user level data, according to an embodiment of the invention. FIG. 7 graphically illustrates experimental results which show a comparison between query times at different granularity levels for the same meters in FIG. 6, when processing resource level data, according to an embodiment of the invention. FIG. 8 graphically illustrates experimental results which show an impact on execution times of a query to calculate variance in utilization of various counters when scaling an experimental metering store, according to an embodiment of the invention. FIG. 9 graphically illustrates experimental results which show an impact of scaling of an experimental data metering store on query execution time when calculating Average, Sum, Maximum, and Minimum parameters using an aggregation function for different levels, according to an embodiment of the invention. FIG. 10 illustrates profile information that is used to determine resource requirements for mediation and rating methods, according to an embodiment of the invention. FIG. 11 illustrates a method for maximizing profit according to an embodiment of the invention. FIG. 12 illustrates a system for dynamically adjusting metering operations for service management of a computing platform, according to an embodiment of the invention. FIG. 13 illustrates a method for mapping metric profiles to metric policies according to an embodiment of the invention. FIG. 14 is a flow diagram of a method for dynamically adjusting metering operations for service management of a computing platform, according to an embodiment of the invention. FIG. 15 graphically illustrates a method for detecting changes in time series data samples that are collected for a given metric, according to an embodiment of the invention. FIG. 16 graphically illustrates a method for encoding a change point time series into a symbol sequence, according to an embodiment of the invention. FIG. 17 illustrates a computer system that may be used to implement one or more components/steps of the techniques of the invention, according to an embodiment of the invention. FIG. 18 depicts a cloud computing environment according to an embodiment of the invention. FIG. 19 depicts abstraction model layers according to an embodiment of the invention. DETAILED DESCRIPTION Embodiments of the invention include systems and methods to support service management for computing platforms such as cloud computing networks. In particular, systems and methods are provided to support service management operations, such as metering, mediation, and/or rating operations for revenue management and their adaptability to business and operational changes. Embodiments of the invention include frameworks that enable service providers to scale their revenue systems in a cost-aware manner, wherein existing or newly provisioned SaaS virtual machines are dynamically provisioned/utilized (instead of dedicated setups) to deploy service management systems (e.g., revenue management). For the onboarding of new customers, a framework according to an embodiment of the invention is configured to perform an off-line analysis to recommend appropriate revenue tools and their scalable distribution by predicting the need for resources based on historical usage. At runtime, the framework employs an innovative load balancing protocol to fine tune the resource distribution based on the real computation usage and the workload demand of customers. As noted above, usage based pricing policies bring a new set of service management requirements for service providers, particularly for their revenue management. For example, usage based pricing requires the collection of significant metered data and techniques for rating according to a detailed price plans. As such, usage based pricing required finer-grain metering, which may impact the performance of resources. This is due to the fact that service resources and applications need to be monitored at the appropriate level to collect enough management data to determine the usage which has to be charged for, which may result in collecting a large amount of management data. Furthermore, the service management data (e.g., metering data) needs to be processed in order to perform: (1) mediation functions, i.e., transformation of metered data into the desired units of measure expected by the usage price policy, e.g., average, maximum or minimum usage; (2) rating functions based on the price policy for generating customer invoices, e.g., multiplying usage by per unit rate; and (3) calculations required to answer customers' queries regarding usage, e.g., variance in usage. Hence, additional resources are required not only to store service management data, but also to process service management data to support finer-grained service management. In this regard, service providers that align their services price plan to usage based pricing have to carefully choose the metering, mediation, and rating tools and infrastructure to minimize the cost of the resource requirements for performing them. Thus, a first step in performing this cost benefit analysis is to accurately estimate the cost associated with monitoring, storing, and processing the management data for the various metering and rating tools. The cost of fine grained monitoring depends on the volume of management data that is collected for the purpose of, e.g., metering. The current practice is to use a system setup for collecting management data (e.g., metering data for pricing) which is separate from, and in addition to, a cloud health monitoring setup which collects management data that provides information with regard to, e.g., performance and availability of resources and resource usage contention. The extra resources used for such revenue management place additional burden on the cloud service provider. In contrast, embodiments of the invention implement methods to consolidate metering for multiple purposes and avoid collecting of the same data by multiple agents, and to efficiently collect and estimate the volume of metering data. A fine-grain pricing model necessitates dynamic modification of price plans offered to customers based on the market demand. In this context, a key challenge is how to provide a scalable metering framework which can adapt to price policy updates and changing loads in a data center, while minimizing the additional resources, performance impact, and interference that may result from the metering, so as to avoid a toll on the business outcome. Since the selection of different pricing policies results in different sizes of collected metering data, the system setup is expected to store and process metering data of varying size without wasting resources. Typically, cloud service providers use a dedicated set of VMs for their service management, which they manually expand based on the increasing load in their data centers. Depending on the cloud service type, for instance SaaS, cloud service providers may themselves be customers of an IaaS or PaaS (Platform as a Service). As such, they are charged for this dedicated set of VMs. This infrastructure cost is additional to the cost of the tools (e.g., for license per volume, maintenance etc.). The goal is to minimize the footprint of this nonrevenue-generating infrastructure, thus minimizing service management infrastructure cost, or ideally eliminating such cost. In general, embodiments of the invention provide a framework for addressing metering and rating operations of revenue service management and the adaptability thereof to price policies and operational changes. For example, as explained in further detail below, embodiments of the invention implement various systems and methodologies to (i) provide an effective mechanism to accurately estimate the size of service management data (e.g., metering data) generated by a telemetry tool in response to resource provisioning requests; (ii) provide an auto-scalable data storage system for storing management data; (iii) provide an effective mechanism to track, record and analyze behavior of existing instances in an observed cluster deployed within a cloud computing system; (iv) provide an offline prediction system to predict resources that would be required to perform service management functions (such as metering, mediation, rating, etc.) and further refine or otherwise fine tune the service management via a runtime load balancer; and to (v) provide a framework to optimally utilize existing or newly provisioned VM instances to perform such service management functions. These systems and methodologies will be discussed in further detail below with reference to FIG. 1, which illustrates overall architecture implementing such features and constituent system components and their interactions. For illustrative purposes, embodiments of the invention will be described in the context of known enabling technologies such as the well-established cloud ecosystem of OpenStack, which is an open source project that provides a massively scalable cloud operating system. OpenStack adopts a modular design and has become the de facto cloud computing platform for managing large pools of compute, storage, and networking resources in modern data centers. The platform supports a management dashboard that gives administrators control over the resources, while empowering users to provision resources through a flexible web interface. Another goal of the OpenStack project is to build an open-source community of researchers, developers and enterprises. Currently, more than 200 companies such as IBM, Cisco, Oracle, and RedHat, are participating in the project. This allows embodiments of the invention to be quickly adapted in real enterprises. There are currently more than sixteen official modules, each providing a unique functionality, supported by OpenStack. In the following, we briefly describe some of these modules, which can be used to implement embodiments of a cloud computing platform according to the invention: Nova: provides on-demand computing resources by provisioning and managing VMs using available hypervisors. Neutron: is a pluggable and scalable system for managing networks and IP addresses within the OpenStack ecosystem. Cinder: is a storage as a service for applications, and maps block devices, from a variety of storage solutions, to OpenStack compute instances. Ceilometer: is a telemetry service for monitoring and metering resources and services of a cloud computing platform. Heat: is a service to orchestrate multiple composite cloud applications. It employs the format of Amazon Web Services CloudFormation template. Swift: implements an API-accessible storage platform that can be integrated directly into cloud based applications, or can also be used just for backup, archiving, and data retention. Glance: maintains a list of bootable disk images and supports a service for storing and retrieving such images. Horizon: provides both administrators and users with a graphical interface to: i) provision and access the cloud based resources; and ii) access services such as billing, monitoring, and any available management tools. FIG. 1 illustrates a computing platform which implements a scalable service management system, according to an embodiment of the invention. In particular, FIG. 1 illustrates a cloud computing platform 100 comprising a service management system 102 and a computing platform infrastructure 104 (e.g., data center). The service management system 102 comprises a metering data size data estimation module 106, an offline resource prediction module 108, a resource monitoring and metering module 110, a scalable metering data store system 112, a resource profiling module 114, and a load balancing module 116. The constituent components of the service management system 102 communicate with a system controller 118. In one embodiment, the system controller 118 is implemented using a dedicated virtual machine that operates on one or more computing nodes of the computing platform infrastructure 104. The computing platform infrastructure 104 comprises a plurality of computing nodes 120 and 130, which represent different physical machines (e.g., server computers) that are part of a data center, for example. For ease of illustration, FIG. 1 depicts two computing nodes 120, 130, although the system 100 can include a plurality (k) of different computing nodes (wherein k is in the order of hundreds or thousands, for example). The computing node 120 comprises a plurality (n) of virtual machines 122, and an associated virtualization API 124 (LibVirt) and hypervisor 126. Similarly, the computing node 130 comprises a plurality (m) of virtual machines 132, and an associated virtualization API 134 and hypervisor 136. As depicted in FIG. 1, the system controller 118 is a dedicated virtual machine that executes on the computing node 120. In one embodiment, the hypervisors 126 and 136 are virtual machine monitors comprised of software, firmware and/or hardware, which create and run the virtual machines 122 and 132 (guest operating systems) on the respective host nodes 120 and 130. The hypervisors 126 and 136 provide the respect set of virtual machines 122 and 132 with a virtual operating platform, and manage the execution of the respective virtual machines 122 and 132. The hypervisors 126 and 136 allow the virtual machines 122 and 132 to share the processor, memory and other resources of their respective host node 120 and 130. The hypervisors 126 and 136 control the host processor and resources, allocating what is needed to the respective set of virtual machines 122 and 132 while ensuring the virtual machines do not disrupt each other. The LibVirt modules 124 and 134 are virtualization APIs (or libraries), which provide hypervisor-agnostic APIs to securely manage the respective virtual machines 122 and 132 running on the respective host nodes 120 and 130. The LibVirt modules 124 and 134 each provide a common API for common functionality implemented by the hypervisors 126 and 136. The resource monitoring and metering system 110 is configured to provide a telemetry service for monitoring and metering resources and services provided by the cloud computing platform 100. In one embodiment of the invention, the resource monitoring and metering system 100 is implemented using OpenStack's Ceilometer telemetry service, which provides an infrastructure to collect detailed measurements about resources managed by a cloud ecosystem implemented using OpenStack. In general, the main components of Ceilometer can be divided into two categories, namely agents (e.g., compute agents, central agents, etc.), and services (e.g., collector service, API service, etc.). The compute agents poll the local LibVirt modules 124, 134 (daemons) to fetch resource utilization of the currently launched virtual machines 122 and 132 and transmit the resource utilization data as AMQP (Advanced Message Queuing Protocol) notifications on a message bus (Ceilometer bus). Similarly, central agents poll public RESTful APIs of OpenStack services, such as Cinder and Glance, to track resources and emit the resource data onto OpenStack's common message bus (called Notification bus). On the other hand, a collector service collects the AMQP notifications from the agents and other OpenStack services, and dispatches the collected information to a metering database. The API service presents aggregated metering data to a billing engine. In Ceilometer, resource usage measurements, e.g., CPU utilization, Disk Read Bytes, etc., are performed by meters or counters. Typically, there exists a meter for each resource being tracked, and there is a separate meter for each instance of the resource. The lifetime of a meter is decoupled from the associated resource, and a meter continues to exist even after the resource it was tracking has been terminated. Each data item collected by a meter is referred to as a “sample,” and each sample comprises a timestamp to mark the time of collected data, and a volume that records the value. Ceilometer also allows service providers to write their own meters. Such customized meters can be designed to conveniently collect data from inside launched virtual machines, which (for a solution or software) allows cloud service providers to track application usage as well. In Ceilometer, a polling interval between two events is specified in a pipeline.yaml file, and the polling interval can be adjusted according to the cloud provider requirements. Furthermore, a collector can store the metering data in any kind of database. The size of collected data is expected to be large, so by default, Ceilometer utilizes a database such as Mongo DB for this purpose. In the embodiment of FIG. 1, the system controller 118 is configured to operate as a data collector for the telemetry service implemented by the resource monitoring and metering system 110. In this regard, the system controller 118 serves as a centralized agent that polls each of the LibVirt modules 124 and 134 across the computing nodes 120 and 130 to obtain resource utilization data and track resources of all the virtual machines 122 and 132 across the compute nodes 120 and 130. This eliminates the need of implementing dedicated agents on each computing node 120 and 130. However, in other embodiments of the invention, dedicated agents (e.g., metering agents) can implemented on each computing node 120 and 130. The scalable metering data store system 112 is configured to provide an auto-scalable metering store framework to support mediation and rating functions. The metering related data that is collected and by the resource monitoring and metering system 110 is stored in a metering store (e.g., database system) using the scalable metering data store system 112. In a cloud ecosystem, there is typically a vast amount, and continually growing volume of metering data. As such, it is desirable to implement a data store (e.g., database) setup which is scalable and efficient, and which can handle complex queries in a timely fashion. In this context, billing methods that implement fine-grained pricing plans require high-frequency querying of the metering data store. In one embodiment of the invention, the scalable metering data store system 112 is implemented using the known MongoDB data store system. In particular, in one embodiment of the invention, an auto-scalable setup is implemented for MongoDB to act as the metering store for Ceilometer, wherein the auto-scalable setup is instantiated on the same set of VMs that are used to provide SaaS (as VMs to support typical SaaS workloads have been observed to not be fully utilized). OpenStack allows integration of multiple databases with Ceilometer for the purpose of storing metering data, e.g., MySQL, MongoDB, etc. MongoDB is a preferred database system for implementation in OpenStack because of features such as flexibility and allowing the structure of documents in a collection to be changed over time. In the following, we discuss MongoDB and various features that enable scaling of the metering data storage system. As is known in the art, MongoDB is a cross platform document-oriented NoSQL (non SQL or non-relational) database. MongoDB eschews the traditional table-based relational database structure in favor of JSON-like documents with dynamic schemas, making the integration of metering data easier and faster. MongoDB offers several key features of sharding and replication, which make it a preferred system to implement an auto-scalable metering data storage system according to an embodiment of the invention. Sharding is a method of storing data across multiple machines (shards) to support deployments with very large datasets and high throughput operations. Sharding helps in realizing scalable setups for storing metering data because the data collected by Ceilometer is expected to increase linearly over time. This is especially true for production servers. A sharded setup of MongoDB comprises three main components as follows: Shards: store the data. Increasing the number of shards reduces the amount of data each machine in a setup needs to hold. As a result a setup can increase capacity and throughput horizontally. Query Routers: or “Mongo instances” interface with the querying application and direct operations to appropriate shard or shards. Config Servers: store the cluster's metadata, e.g., mapping of the cluster's dataset to shards, which is then used to target operations to specific shards. For example, an existing practice in production setups is to have three Config servers. Replication is a feature that allows multiple machines to share the same data. Unlike sharding, replication is mainly used to ensure data redundancy and facilitate load balancing. In addition, MongoDB supports the use of the MapReduce framework for batch processing of data and aggregation options. A first step in realizing an auto-scalable metering data storage system (based on, e.g., MongoDB) according to an embodiment of the invention is to determine when scaling is needed. For this purpose, several kinds of metrics can be utilized: (i) OS-level metrics, e.g., CPU, memory, disk usage, etc., and (ii) MongoDB performance statistics, e.g., query time, writes/s, reqs/s, etc. Since the MongoDB instances are running on the same virtual machines as those virtual machines providing user services, the virtual machines are already being monitored and thus the monitoring data can be reused to determine the OS-level information needed for this purpose as well. This information, coupled with periodically collected MongoDB statistics, is then used to determine if the metering store is loaded beyond a pre-specified high threshold or below a low threshold, and scaling decisions are made accordingly. A next step in realizing an auto-scalable database system is to enable scaling of the metering store. For this purpose, in one embodiment of the invention, our framework exploits creation of additional MongoDB replica sets. These replica sets are added as shards to achieve further partitioning of data, which in turn support the scalability of the storage system. A design decision while performing sharding is to carefully choose the sharding key. To this end, we keep track of the speedup achieved with various sharding keys and choose the best option. It is to be noted that replication and sharding are not mutually exclusive features, and can be scaled individually based on the monitored reads/s or writes/s throughput observed through a MongoDB performance monitor. The metering data size estimation module 106 is configured to calculate an expected change in the size of metering data. For this purpose, the metering data size estimation module 106 uses resource information obtained from a provisioning template file 140 (e.g., a Heat template file) of a given provisioning request, and determines a set of meters that are required to perform the necessary monitoring and metering. Next, the expected total number of metering events on various polling intervals is calculated along with the average event object size. The number of events are calculated by, e.g., parsing the pipeline.yaml file to fetch the sampling frequency of each meter. The average object event size is variable and depends on the type of meters launched and their sampling frequency or polling interval. To this end, the metering data size estimation module 106 keeps track of the changes in the event object size per meter and estimates the value by taking the average of n previously collected values (e.g., n=3). The metering data size estimation module 106 then averages these values across the meters to determine the overall average object size. An alternative approach is to directly track the overall average object event size from the database of the scalable metering data store system 112. The following is an example of a sample collected from the scalable metering data store system 112 to measure an overall average object size. > db. s t a t s ( ) {   ” db ” : ” c e i l o m e t e r ” ,   ” c o l l e c t i o n s ” : 6 ,   ” o b j e c t s ” : 2239713 ,   ” a v g O b j S i z e ” : 1 1 8 9 .7 5 9 3 8 2 ,   ” d a t a S i z e ” : 2664719556 ,   ” s t o r a g e S i z e ” : 3217911760 ,   . . . } In this example, the expected size of metering data (2664719556 bytes) is determined by multiplying the number of event objects (2239713) with the average event object size (1189.759382 bytes). In general, the resource profiling module 114 is configured to track resource utilization of each of the virtual machines 122 and 132. As noted above, the resource monitoring and metering module 110 launches various meters for monitoring and metering the usage of different resources per virtual machine, e.g., CPU, memory, storage, networking, etc. The resource profiling module 114 intercepts such resource usage information at the scalable metering data store system 112, and uses the resource usage information to track the per-VM resource utilization. A challenge is that the collected metering data only gives an instantaneous view of a VM's resource usage at a particular time instance, and does not necessarily portray the overall usage. To address this, in one embodiment of the invention, the resource profiling module 114 is configured to use a sliding window across a previous number (n) of metering samples to calculate a moving average, wherein the moving average is utilized as an estimate of the current per-VM resource utilization. An alternate approached to intercepting the data is to query the metering store 112 for overall utilization. However, the querying approach could burden the database and impact overall efficiency. The resource profiling module 114 also maintains queues of resources sorted based on estimated utilization. This information can be used to determine free resources within each VM, which in turn supports effective scaling of the metering setup. The offline resource prediction module 108 is configured to analyze the data collected by the resource profiling module 114 and provide an approximate estimate of the resources that would be required for the associated metering setup. A possible trade-off that should be considered in the estimation of needed resources is whether to use less revenue management resources at the expense of performance degradation in terms of average time taken to process the collected metering data. In one embodiment, system managers are allowed to manage this trade-off by specifying the expected processing query time, query rate, and average load on the system setup, as an input to the offline resource prediction module 108. Based on the provided input, the offline resource prediction module 108 outputs a recommended setup to achieve an effective estimate for driving decision of system implementation. The load balancing module 116 is implemented to ensure that service level agreements are met. The selection of virtual machines for launching replicas to scale-up the metering data store system 112 is an important consideration, as the additional load may affect the performance of a virtual machine. This can lead to a point where the virtual machine can no longer provide sufficient performance for the provided SaaS. Typically, cloud service providers are bound to ensure that certain service level agreements are met. Thus, the service provider may have to mitigate or launch additional resources in the face of a potential threat of a service level agreement violation. To avoid this, an embodiment of the invention utilizes a load balancer that actively tracks the infrastructure utilization of each virtual machine by coordinating with the resource profiling module 114. If resource utilization on any virtual machine exceeds a certain threshold, some or all of the workload (e.g., mediation and/or rating) from the overloaded virtual machine is either transferred to an existing virtual machine with a lower load or a new virtual machine is launched to handle the overload. The load balancing module 116 utilizes a load balancing process (as will be discussed below with referent to FIG. 3) to move shards or replica sets. In one embodiment of the invention, the load balancing module 116 is implemented using an internal load balancer of MongoDB, which upon the creation of a new shard, transfers chunks of 64 MB of data from other machines to the newly created/added shard to evenly distribute the total number of chunks. In one embodiment of the invention, a predefined threshold value is selected for triggering load balancing. The threshold is set so as to ensure that each resource per VM is not over-utilized by the metering framework to an extent where the performance of the provided SaaS is affected. Since the nature of service level agreements vary with the type of SaaS, as well as resource types and configurations, the predefined threshold for triggering load balancing is not fixed. Instead, such threshold varies from solution to solution and resource managers can evaluate and determine the threshold values as appropriate. The system controller 118 is configured to control and fine-tune the scalable metering data store system 112, the resource profiling system 114 and the load balancing module 116. The system controller 118 also serves as a facilitator for the various module operations by providing access to the collected data. As noted above, in one embodiment of the invention, the system controller 118 is run in a dedicated virtual machine on a given computing node (e.g., node 120 in FIG. 1) to ensure that it is not affected by the performance and workload dynamics of the resources. By default, OpenStack installs a standalone instance of MongoDB to store metering data. In order to perform mediation and rating, cloud service providers typically use a separate set of dedicated physical machines for the standalone installation of MongoDB. In case of significantly large data sizes, in a conventional approach, a distributed system, e.g., Hadoop Distributed File System (HDFS), is used for data processing. This conventional approach requires redistribution of metering data from the metering store to the HDFS system. This approach is burdensome because data ingestion into the HDFS system is known to be a major performance bottleneck, as well as “expensive” in terms of data copying. In contrast, embodiments of the invention provide an advantage in that that metering data is not redistributed from the metering store to another system. Instead, in one embodiment of the invention, metering data is collected in a distributed setup to begin with, which avoids extra copying and ingestion challenges and overheads associated with the conventional approach. Another advantage of our framework is that it allows cloud service providers to offer not only the fine-grained metering information, but also customizable price plans, e.g., charging customers only on CPU utilization, etc. Furthermore, our approach can be extended to implement metering services for IaaS by: (i) launching the metering setup on physical nodes instead of VMs so that customers do not get access to the collected metered data; (ii) enabling monitoring of the physical nodes (within Ceilometer for example) for tracking infrastructure utilization per physical node instead of per VM; and (iii) updating the load balancer to effectively perform in heterogeneous environments so that cores not used by Nova, for example, can be used to launch metering services. FIGS. 2A and 2B illustrate a flow diagram of a method for scalable service management of a cloud computing system, according to an embodiment of the invention. For purposes of illustration, the flow diagram of FIGS. 2A and 2B will be discussed with reference to the cloud computing system 100 of FIG. 1, wherein the method steps of FIGS. 2A and 2B illustrate various modes of operation of constituent components of the cloud computing system 100 of FIG. 1. In general, the service management system 102 of the cloud computing system 100 initiates a sequence of operations when servicing a provisioning request. As an initial step, the service management system 102 receives a provisioning template file 140 that corresponds to the provisioning request to be serviced (block 200). The provisioning template file 140 is input to the metering data size estimation module 106, wherein the provisioning template 140 is parsed to extract information about the requested resources associated with the new provisioning request (block 202). The metering data size estimation module 106 uses the information about the requested resources to estimate an expected change in the amount of metering data that will need to be collected by the resource monitoring and metering system 110 based on the new provisioning request, which is to be stored in the scalable metering data store system 112 (block 204). Meanwhile, the resource profiling module 114 keeps track of the resources that are already in use, and maintains profiles of resource usage for mediation and rating purposes. The resource prediction module 108 obtains information regarding the profiled resources that are currently in use (block 206), and then uses the profiled resource information and the newly requested resources information to predict (or estimate) additional infrastructure and run-time resources that would be needed to support mediation and rating operations based on the provisioning request (block 208). In one embodiment of the invention, the resource prediction module 108 calculates the resource requirements that are needed for a plurality of different mediation and rating techniques using a set of technique profiles (FIG. 10) that are generated using historical data. In another embodiment, the resource prediction module 108 calculates the resource requirements needed for collecting metering data. The predictions made by the resource prediction module 108 are used to scale the metering data store (block 210). The resource prediction module 108 then communicates with the system controller 118 to initiate launching of the setup along with the requested provisioning (block 212) and to start mediation and rating of the provisioned service. The dynamic load balancing module 116 then performs a dynamic load balancing operation to ensure that resource use per virtual machine does not exceed a predetermined threshold (block 214) based on the newly provisioned resources. In one embodiment of the invention, the dynamic load balancing process of block 214 is implemented using a load balancing method as illustrated in FIG. 3. In particular, FIG. 3 shows pseudo code of a load balancing method 300 according to an embodiment of the invention. In general, the load balancing method 300 of FIG. 3 comprises a block of pseudo code 302 that describes a REPLACE_VM function, and a block of pseudo code 304 that utilizes the REPLACE_VM function as part of a load balancing process to shift extra load on a given virtual machine due to mediation and rating functions to another virtual machine. The block of pseudo code 304 implements the following process for each resource r that is monitored by the resource profiling module 114. The resources include CPU, memory, and other hardware/software resources that the virtual machines may utilizes when executing on a given computing node of the cloud computing platform. For each resource r, a virtual machine list rl is generated, which includes a list of all virtual machines that currently use the given resource r, wherein the virtual machines in the list rl are sorted by the amount of usage (from highest to lowest) of the given resource r. In addition, a threshold value t is initialized for the given resource r. Then, for each virtual machine vm in the list rl for the given resource r, a current load cl on the virtual machine vm is determined. The current load cl of a given virtual machine vm corresponds to the resource usage of that virtual machine as specified in the list rl. If the current load cl of a given virtual machine vm for the given resource r is less than the threshold value t of the given resource r, the load on the virtual machine vm is not reassigned. On the other hand, if the current load cl on the given virtual machine vm for the given resource r meets or exceeds the threshold value t, then an extra load el is determined as the current load less the threshold (i.e., cl−t). Then, a REPLACE_VM function is called to determine if there is a virtual machine in the list rl that is capable of handing the extra load, and the parameters vm, el, and r are passed to the REPLACE_VM function. The REPLACE_VM function begins by reading the virtual machine list rl and the threshold value t for the given resource r. The virtual machine list rl is inverted such that the virtual machines in the list rl are sorted from lowest to highest of the amount of usage of the given resource r. Then, for a given virtual machine uvm in the inverted list rl, the parameter ucl is set as the current load on the virtual machine uvm for the given resource r. If the sum of ucl and el is less than or equal to t for the given resource r, then the given virtual machine uvm is deemed a candidate for having at least the extra load el reassigned thereto from the given overloaded virtual machine vm (or, in an alternate embodiment, having the entire load associated with mediation and rating (M/R load) assigned from the overloaded virtual machine vm to a new virtual machine uvm). But first, the threshold is checked for the other resources to determine if the candidate virtual machine uvm would be able to handle the extra load el, or entire M/R load, without exceeding the threshold for the other resources. If it is determined that the given candidate virtual machine uvm can handle the extra load (or entire M/R load), then the candidate virtual machine uvm is selected as the target virtual machine (i.e., a parameter use_this_vm is set to the index of the candidate virtual machine uvm) to which at least the extra load el, or the entire M/R load, will be reassigned. On the other hand, if the sum of cl and el (for the given virtual machine uvm) is greater than t, then a null value 0 is assigned to the parameter use_this_vm, and the process is repeated to find a candidate virtual machine uvm that is capable of handling at least the extra load el, or the entire M/R load, of the overloaded virtual machine vm. After all virtual machines uvm in the inverted list rl are processed, if the parameter use_this_vm is still equal to 0, then is it determined that no virtual machine uvm in the inverted list rl is capable of handling at least the extra load el of the overloaded virtual machine vm. In this circumstance, a new virtual machine is launched to handle the extra load el, or entire M/R load, of the overloaded virtual machine vm. Finally, the extra load el, or the entire M/R load, is transferred from the overloaded virtual machine vm to the newly instantiated virtual machine. In other embodiments of the invention, methods are proved to reduce the cost of service management associated with price plans and to balance the benefits of a usage-based revenue scheme, with the cost for providing such usage-based revenue scheme. FIG. 2B illustrates further methods to provide scalable service management in a computing platform with regard to dynamically selecting mediation and rating techniques to provide cost awareness in a usage-based revenue scheme, according to an embodiment of the invention. The process flow of FIG. 2B is performed offline, and is based, in part, on processing results of the offline resource prediction module 108 in block 208 of FIG. 2A. As shown in FIG. 2B, a first step includes selecting suitable metering, mediation and rating techniques based the predicted infrastructure and run-time resources needed for a new provisioning request (bock 216). In one embodiment of the invention, the resource profiling module 114 maintains information regarding a plurality of mediation and rating techniques, wherein such information comprises resource requirements for the various techniques. For example, FIG. 10 illustrates profile information that is used to determine resource requirements for different mediation and rating methods, according to an embodiment of the invention. In particular, FIG. 10 is a table that lists different mediation and rating techniques, and associated information such as CPU types, CPU utilization, RAM size, RAM utilization, distributed linear scaling, storage, licensing costs, management cost, IT infrastructure cost, volume (S/M/L), etc., and other types of information associated with the different mediation and rating techniques, which can be tracked and maintained based on historical profile data. In this regard, the mediation and rating methods used are elastic and linearly scalable. When a given metering, mediation and rating method are implemented, the system can estimate and track the cost associated with metering, mediation and rating (block 218). Then, various optimization methods can be utilized to maximize profit (block 220). As previously mentioned, some customers may prefer to be charged on fine grain level. As such, service providers need to collect metering data at a finer grain level (e.g., collecting data on an hourly basis or daily basis is finer grained than collecting data on a monthly basis). In this regard, service providers would need more resources not only to collect data but also to process the data. Accordingly, in one embodiment of the invention, systems and methods are provide to predict the metering, mediation and rating costs for various combinations of meters and allow service providers to choose the best suitable combinations of metering, mediation and rating so that the service providers can offer their customers these combinations as available price plans. In one embodiment, services providers can dynamically recommend the most profitable price plans and associated revenue tools, with their scalable deployment, on one hand, based on the business trend for usage pricing and, on the other hand, based on the need for system management resources as new customers subscribe to the service. FIG. 11 illustrates a method for maximizing profit for different services (e.g., IaaS, PaaS, SaaS). In particular, FIG. 11 shows a set of meters used for the different services and the metering and cost operations that are performed at different granularity levels (e.g., monthly, weekly, daily, hourly). When calculations are performed on a monthly level (once per month), there is less operational cost to the service provider, but such price plan may have lower competitive advantage (with less customers interested). In one embodiment, the cost of performing metering, mediation and rating is performed at all granularity levels. Then, to compare one price plan with another, a constant coefficient can be utilized for adjusting competitive advantage. In general, embodiments of the invention provide a framework for dynamically optimizing the cost of a service metering solution by (i) using metering templates automatically populated with current cost data (infrastructure, management, SLAs etc.) learned over recent historical data, (ii) calculating the metering technique for each metered item in order to optimize the service over all profit (price−cost), (iii) updating the price plan accordingly such that the metered data calculated above is used in price line items, and (iv) deploying scalable parallelized charge calculation based on the selected prices and rating schemas. In this regard, embodiments of the invention include automated systems for (i) selecting metering techniques for each service solution priced item, (ii) collecting cost data in pre-defined templates and where metering templates are dynamically updated across clouds and applications, (iii) inputting prices based on marketing evaluations, (iv) calculating the metering techniques selection as an optimization formulation, (v) parallel rate calculation for scalable cloud charge calculation, and (vi) generating a business model including package, price plan, SLA, release based, and variable price (spot instances). Experimental Setup and Results We evaluated our framework on a test bed of 20 physical machines that were used to deploy 12 virtual machines within an OpenStack environment. Our analysis demonstrated that that service management related tasks can be offloaded to the existing virtual machines with at most 15% overhead in CPU utilization, 10% overhead for memory usage, and negligible overhead for I/O and network usage. By dynamically scaling the setup, we were able to significantly reduce the metering data processing time without incurring any additional cost. More specifically, we implemented a fine-grained metering approach according to the general system framework of FIG. 1 on top of OpenStack and MongoDB, wherein Python code was written to implement various modules including the resource profiling module 114, the offline resource prediction module 108 and various controller modules (e.g., the system controller 118). In particular, we deployed OpenStack release Icehouse version 2014.1.1 on 20 physical machines, where each machine had six cores and 32 GB of RAM. We varied the number of virtual machines from 3 to 12 to provide a SaaS. The metering data was collected from the virtual machines using a variable sampling interval. We tracked the usage of the virtual machines for a period of one month. We launched both default as well as customized meters to collect the resource usage. The following Table I shows the specifications for each virtual machine used for the experimentation. TABLE I RAM Write BW Read BW NW CPU (GB) (MB/s) (MB/s) (MB/s) 8 Cores 8 380 533 100 3.0 GHz With regard to the scalable metering data store system 112, we performed tests using both a standalone as well as a scalable MongoDB setup. In our scalable setup, each replica set consisted of only one node that acted as a primary copy of the data. Furthermore, the replica sets were added as shards to scale the MongoDB deployment. For testing purposes, we launched three configuration servers and one query router that was deployed on the system controller VM 118. Each performance related experiment was performed on the actual collected metering data of more than 11 GB from the deployed OpenStack setup over the period of one month. With regard to sharding, we used different sharding keys for the Ceilometer database in our tests. FIG. 4 graphically illustrates experimental results obtained for different metering events from using different sharding keys on query timings for a MongoDB setup consisting of 4 shards. The metering events include cpu-util, cpu, disk.read.bytes, disk.write.bytes, network.incoming.bytes, and network.outgoing.bytes. The query calculated variance in utilization (standard deviation) of Ceilometer counters using MapReduce. From FIG. 4, it can be seen that the query time is affected more by the choice of the sharding key for the distributed setup as compared to the standalone setup. Further investigation revealed that chunks greater than 64 MB were created in all cases except when timestamp of metering events was used as a shard key. This resulted in the MongoDB internal load balancer distributing chunks unevenly, with most of the chunks assigned to just one machine. This created a bottleneck and caused a significant increase in the query time. Consequently, a preferred sharding key to use in the target environment is timestamp, instead of counter name, user-id or resource-id. With regard to the functionality of the metering data size estimation, FIG. 5 graphically compares a data size of metering data estimated by the metering data size estimation module 106 and the actual collected metering data. In our first experiment, we compared the estimated and actual collected metering data size associated with the 12 virtual machines launched within the OpenStack deployment with a default set of meters. FIG. 5 shows the results. The framework predicted that 254 events would be collected from the virtual machine every 10 minutes. The estimated average event object size was 1150 bytes, 1134 bytes, and 1188 bytes for per day, per week and per month calculations, respectively. As seen in FIG. 5, compared to the actual observed values, the metering data size estimation module predicted metering data sizes with 995 accuracy. Next, with regard to resource profiling, we measured the effect of performing mediation at different granularity levels. FIG. 6 graphically illustrates experimental results of a comparison between query times at different granularity levels for various meters when processing user level data. The meters in FIG. 6 include cpu, cpu-util, disk.read.bytes, disk.read.requests.rate, disk.write.bytes, disk.write.requests sate, image, image.upload, instance, ip.floating, memory, network.incoming.bytes, network.incoming.packets, network.outgoing.bytes, network.outgoing.packets, port, router, storage.objects, subnet, vcpus, and volume. FIG. 6 graphically illustrates the time taken to perform mediation on the data of a single user using the statistics API provided by Ceilometer. We measured Maximum, Minimum, Average, Sum, and Count for the considered meters at three different granularity levels, namely, daily, weekly, and monthly. The results reveal that the meters that collect samples continuously at a fixed sampling interval took 4× to 6× more time to perform mediation on one month's data compared to one week's data. A similar “linear scaling” trend was observed when we compared the mediation time taken to process one week of data and one day of data. Next, we repeated the experiment using a single virtual machine under different metering data volumes. FIG. 7 graphically illustrates a difference in mediation time under different observed cases. In particular, FIG. 7 compares query times at different granularity levels for the same meters in FIG. 6, when processing resource (e.g., one virtual machine) level data. Once again, a linear scaling is observed for our approach. Next, we measured the increase in average resource utilization per virtual machine due to mediation. The experimental results demonstrated that CPU utilization in the observed virtual machines did not increase above 15%. Similarly, an increase in memory utilization was observed to be less than 10%. Since the needed data is already distributed to the various virtual machines, the mediation process was expected to generate reads but not writes. This was confirmed by observed I/O usage, wherein the observed written data was substantially zero and wherein the average data read was low. Another key observation was that due to most of the computation being performed locally, the network usage was also negligible. The experimental results in this regard validate our claim that, if handled properly as in our approach, existing virtual machines can be used to perform mediation and rating tasks without affecting overall performance of the provided SaaS. With regard to the experimental scalable metering data store setup, we analyzed the effect of scaling our metering store, i.e., the distributed MongoDB setup, on mediation time. FIG. 8 graphically illustrates experimental results with regard the impact on execution time of a query to calculate variance in utilization of various counters as the metering store is scaled In particular, FIG. 8 illustrates a reduction in time to calculate variance in utilization (standard deviation) of various Ceilometer meters using MongoDB's MapReduce functionality, as we scaled up the metering store (the meters including disk.read.bytes, disk.read.requests.rate, disk.write.bytes, disk.write.requests.rate, network.incoming.bytes, network.incoming.packets, network.outgoing.bytes, betwork.outgoing.packets, cpu-util, and cpu). In FIG. 8, “sr” denotes a number of sharded replica sets used. From FIG. 8, we see that the stand-alone installation of MongoDB performs better than the single shard distributed MongoDB setup—this is because of the networking overhead. However, as we increase the number of shards, the mediation time reduces. For the case of two replica sets acting as shards, the average query time is half the query time of the stand-alone setup. A further increase in the number of shards results in increasingly better performance in terms of query time. We also observed that the reduction in the query time is not linear and after reaching a certain threshold, the networking overhead actually causes performance degradation. FIG. 9 graphically illustrates experimental results that show a comparison of query times to calculate Average, Sum, Maximum, and Minimum using an aggregation functionality provided by MongoDB. In particular, FIG. 9 shows the impact of scaling MongoDB on query execution time when calculating Average, Sum, Maximum, and Minimum using an aggregation for different levels, where “sr” represents a number of sharded replica sets used. Here, a more linear trend was observed when performing mediation using the aggregation. Next, with regard to scaling and load balancing, we analyzed the effects of scaling the experimental setup and the role of load balancing. The following Table II shows the time taken to scale the metering store, total number of chunks transferred and chunks transferred per shard. TABLE II Scaling from x to y Scaling Time Total Chunks Chunks Transferred Shards (minutes) Transferred Per Shard 1 to 2 10 93 93 2 to 3 6 62 31 3 to 4 3.5 47 15 4 to 5 2 37 9 By default, MongoDB only transfers one chunk at a time, which slows down the transferring process. The values shown in Table II illustrate that the transfer time should be taken into consideration when making setup scaling decisions. Furthermore, transferring chunks while scaling the setup also requires additional resources and adds an observable overhead to the virtual machines. This overhead was observed from the experimental results when scaling from one to five shards, in terms of resource usage per virtual machine for both a primary virtual machine i.e., the source of a chunk transfer, and a secondary virtual machine, i.e., destination of the chunk. We observed that while CPU utilization is high on the primary virtual machine, such utilization never exceeded 10% of that before the chunk transfer. Similarly, it was observed that memory utilization remained constant for the primary virtual machines, but increased by 5% to 10% for the secondary virtual machine compared to the pre-transfer usage. It was further observed that the amount of data written in both the primary and secondary virtual machines remained almost unaffected, although high spikes of up to 2 MB/s were observed in the write I/Os. In contrast, the read I/O was observed to be higher for the primary virtual machine as compared to the secondary virtual machine. Moreover, the average write rate on the primary virtual machine was observed to be 0.5 MB/s whereas the average write rate for the secondary virtual machine was observed to be 0.4 MB/s. Similarly, the primary virtual machine showed a higher network transmission rate as compared to the secondary virtual machine which showed a higher reception rate. However, in both cases, the network transmission and reception rates stayed below 20 MB/s. In summary, we have evaluated the performance of our approach in providing a scalable and flexible metering and rating system for cloud-based SaaS. Our results show that embodiments of systems and methods as described herein for scalable metering have small impact on the co-located SaaS while providing for dynamic scaling. The key features of our framework will allow cloud service providers to scale their revenue systems in a cost-aware manner. Our approach is designed for scalable deployment and is unique in that it uses existing VMs to perform service management operations (e.g., mediation and/or rating) for managing a cloud computing platform, and only launches additional VMs, when necessary, thus incurring little additional cost. Indeed, mechanisms are provided to profile and predict the resources required for supporting mediation and rating in cloud applications, for example, whereby we predict the additional load that can be added to the existing VMs that are already loaded. We perform load balancing by placing/shifting only a specific (small) part of the overall load to ensure compliance with SLAs. Our monitoring system collects and stores the metering data in distributed database, which implements the ability to scale the setup and use existing virtual machines, to provide a scalable and flexible metering and rating system for the SaaS applications in a cloud computing system, and provide a system for cloud service providers to scale their revenue management systems in a cost-aware manner. Dynamic Metering Adjustment Techniques As discussed above, the resource monitoring and metering system 110 of the service management system 102 provides an infrastructure to collect detailed measurements about managed resources of a computing platform, wherein resource usage measurements, CPU utilization, Disk Read Bytes, etc., are performed by meters or counters. Typically there is a meter for each resource being tracked, and there is a separate meter for each instance of the resource. Each data item collected by a meter is referred to as a “sample” or a “data sample”, and each data sample comprises a timestamp to mark the time of collected data sample, and a volume that records a value of the data sample. The polling interval between two sampling events is, specified in a configuration filed (e.g., pipeline.yainl file), wherein the polling interval can be adjusted by a service provider according to certain requirements of the service provider. Once configured, the same polling interval is used to monitor all the instances launched in that particular system setup, unless the service provider manually changes the polling interval. The frequency at which data samples are collected for a certain meter is called the “polling frequency” for that meter. In one embodiment, a service provider can manually configure the polling frequency for different types of meters. Once configured, the polling frequency for a given meter remains constant unless changed/updated manually. In this regard, the service provider can end up collecting large amounts of substantially similar samples often carrying, information of low significance about the usage or state of the tracked resource. This results in high storage volume and increased computational resource requirements to process all the collected data in view of, e.g., metering, incident, or problem management purposes, etc. To illustrate these problems and potential solutions, consider the following example where the utilization of a certain resource remains relatively constant at a specific value for 5 hours. Assuming the polling frequency is one sample per second, a total of 18,000 samples is collected in 5 hours. From a metering point of view, however, since the data values remain relatively constant over this period of time, the same information could be inferred from data collected according to a polling frequency of one or a few samples per hour (as opposed to one sample per second). If this calculation is scaled to hundreds of metrics on thousands of VMs in a typical computing platform configuration, the problem increases by many folds. To determine how common it is for resource utilization to remain unchanged in a typical cloud environment, we collected and analyzed data from 3359 virtual machines launched in geographically distributed IBM production servers (e.g., production servers in Asia Pacific, Africa, Australia, and Europe). The data was collected over a period of 3 months with a sample collected every 15 minutes. To determine the variance of resource utilization, we calculated the mean of absolute rate of change, μ(t), for different monitored resources of randomly picked 338 VMs from all the regions. We analyzed two different kinds of meters: (i) meters used to directly monitor infrastructure usage such as CPU and memory utilization, and (ii) meters used to track the load imposed by VMs on the physical infrastructure such as a number of TCP/IP connections established by VMs, and pages accessed per second from disk. We first calculated the absolute value of the rate of change at time t in each resource usage (e.g., CPU utilization, memory utilization, number of TCP/IP connections, pages accessed from disk/sec. etc.), for each virtual machine, VMi, using the following equation: dRvm i ( t ) dt = Rvm i ( t ) - Rvm i ( t - 1 ) Eqn . ( 1 ) Then, we computed an average across all VMs, based on the following equation: μ R ( t ) = i = 1 N dRvm i ( t ) dt N Eqn . ( 2 ) We computed μR(t) of the CPU and memory respectively for the last 8 days of our collection of data and found that over this period of time, the instantaneous variation of the resource usage to be less than 5%. We repeated this evaluation for other types of meters, e.g., number of TCP/IP connections and pages accessed/sec, and found the same trend for meter monitoring. This behavior holds true on average across the VMs and the 3 month of data samples that were collected. Overall we found less than 5% of VMs having sudden variation in the tracked resource utilization. An additional observation is that for the 5% of VMs having sudden variation in resource utilization, the polling was not frequent enough to properly capture the evolution of the change. Hence, while decreasing the polling frequency can be beneficial for the majority of the VMs, it is also advantageous to identify those VMs that benefit from an increased polling frequency, as compared to a default polling frequency, in order to better capture the behavior of such VMs. From our evaluation and experimental analysis, we have determined that the implementation of a tiering framework according to an embodiment of the invention, which is based on variation in resource utilization, is a solution that allows a service provider to monitor and collect data samples from each tier at a different polling frequency. For example, a tiering framework allows a service management system to utilize lower polling frequencies to monitor resources having less resource usage variation, for example. Consider a scenario where a given metric exhibits three (3) different types of behaviors across a set of VMs of a computing platform as follows: (i) high variance in utilization of a monitored resource; (ii) predictable or medium variance in utilization of the monitored resource; and (iii) low variance in utilization of the monitored resource. Currently, service providers are limited to monitor this resource by sampling the resource at one and the same polling frequency in all VMs, wherein the polling frequency would be selected to capture a predefined volume of changes in the utilization of that resource, e.g., 98%. This limitation in choice of the sampling frequencies leads to resources in the “low variance” tier to be over sampled as they are monitored at a too high of a polling frequency, whereas resources in the “high variance” tier are under sampled. To further understand the characteristics of our data, we manually divided the VMs into three different tiers by calculating mean rate of change in resource utilization for each of the 338 VMs. We defined thresholds by dividing the range of the computed values between maximum and minimum values in three equal tiers for each of the four monitored resources (e.g., CPU utilization, memory utilization, number of TCP/IP connections, pages accessed from disk/sec). Following the levels of variance in μR(t) of the four monitored resources, we found that for each of the meters used to track a corresponding one of the four monitored resources, more than 80% of the VMs fell within the tier with “low variance,” 15% of the VMs fell within the tier with “medium variance,” and 5% of the VMs fell within the tier with “high variance.” Overall, our analysis demonstrated that in the case of un-tiered sampling, 80% of the resources were monitored at a polling frequency which was higher than the polling frequency needed to capture their changes, and that 5% of the resources were monitored at a polling frequency which was lower than the polling frequency needed to capture their changes. In this regard, embodiments of the invention provide support for policy based data monitoring to enable custom, tier based polling frequencies. As explained in further detail below, a “monitoring policy” (or metric policy) defines sampling rules for a given metric profile. The ability to define metric profiles and associated differentiating metric policies takes into consideration the fact that different types of applications hosted by a given computing system may require different monitoring and data retention techniques. For instance, monitoring requirements for desktop clouds are different from monitoring requirements for HPC (high performance computing) applications or MapReduce jobs, for example. Similarly, the monitoring and data retention requirements for customer billing/charging are different from the monitoring and data retention requirements for checking the health of a computing system. In this regard, embodiments of the invention take into consideration that metrics with higher (or lower) monitoring data resolution requirements should having configurable profiles to specify, e.g., that the monitoring data associated with a given metric is critical (or not critical), and thereby allow conservative (or not conservative) sampling and data retention (e.g., storage) policies to be associated with the metrics. Another advantage of policy based sampling is that it enables service providers to separate those resources for which prompt anomaly detection is required. Furthermore, embodiments of the invention take into consideration that the target usage of monitoring data can dictate what monitoring data needs to be collected and how the monitoring data is collected. In this regard, techniques are provided to enable the customization of metric profiles by allowing service providers to characterize each metric in terms of, e.g., (i) what the metric is used for (e.g., used for charging customers as part of the usage based price definition, used for health check etc.), (ii) the importance of the metric in the inventory (e.g., belongs to VM hosting critical or non-critical applications), (iii) the precedence of the metric in the application flows (e.g., belongs to a leaf or root item in the dependency graph), or in terms of (iv) the dependence of the metric on other metrics (e.g., independent or correlated metric). As explained in further detail below, these criteria, while extendable, represent one embodiment of a set of configuration items that can be used to define a metric profile. Based on the metric profile, each metric is associated a metric policy. In addition, as explained in further detail below, a polling policy can be as simple as collecting and storing only the data from the last hour, day or even week initially at full granularity and then aggregate the data over time, or a polling policy can be as complex as collecting and storing the data, and then analyzing the data to fine tune how to capture and store data for a given metric in a more efficient way. FIGS. 12, 13, 14, 15, and 16 collectively illustrate techniques for dynamically adjusting metering operations for service management of a computing platform, according to embodiments of the invention. As explained in further detail hereafter, such techniques enable service providers to customize their service management monitoring systems for policy based data monitoring such that each tier of system metrics with similar monitoring data behavior and business needs can have the VMs of that tier monitored according to the same dedicated polling policy, different from the other tiers. FIG. 12 illustrates a system for dynamically adjusting metering operations for service management of a computing platform, according to an embodiment of the invention. In particular, FIG. 12 shows a dynamic metering adjustment system 400 comprising a metric classification module 402, which comprises a data store of configuration parameters 404, a data store of metric profile items 406, a data store of metric policies 408, a mapping module 410, a metric profile generation/update module 412, and a data store of defined metrics 414. The dynamic metering adjustment system 400 further comprises an aggregation module 416 and a tiered polling module 418, which comprises a change point detection module 420, a symbol sequence encoding module 422, a clustering module 424, and a tier polling frequency determination module 426. In one embodiment of the invention, the dynamic metering adjustment system 400 is a separate module that is included in the service management system 102 of FIG. 1. In alternate embodiments, the constituent components of the dynamic metering adjustment system 400 shown in FIG. 12 may be incorporated in the metering data size estimation module 106 or the resource monitoring and metering system 110, or the constituent components of the dynamic metering adjustment system 400 can be distributed across the different modules (e.g., modules 106 and 110) of the service management system 102 of FIG. 1. The metric classification module 402 is configured to define metric profiles for various system metrics and assign associated metric policies (e.g., polling and storage policies) to the metric profiles. More specifically, the configuration parameters 404 comprise configuration items that are specified by a service provider of a given computing platform, and which are utilized by the metric profile generation and update module 412 to generate or update target metrics that used for service management in the given computing platform. The configuration parameters 404 include items such as: (i) REST URLs of usage calculation classes for usage based pricing, wherein such usage calculation classes typically calculate the transformation of raw metered data into a charged unit of measure, or the maximum or sum of metered values, or more complex metric aggregations, for example; (ii) inventory and interdependencies between servers, applications, network devices, software, configuration files, operating systems and other IT infrastructure components expressed as graphs, xml files or spreadsheets; and (iii) event correlation engine API, etc. The data store of metric profile items 406 comprises service provider-defined metric profile configuration items that are used to define and classify metric profiles. The data store of metric policies 408 comprises service provider-defined metric policies that define different classes of polling policies and/or data storage policies. The mapping module 410 is configured to create a mapping data structure which maps metric profile items 406 and metric policies 408. For example, FIG. 13 illustrates a method for mapping metric profiles to metric policies according to an embodiment of the invention. In particular, FIG. 13 shows a mapping table 500 in which metric profile items 502 are mapped to metric policies 504. As shown in FIG. 13, the metric profile items include configuration items such as (i) Critical, (ii) Dependency Root, (iii) Usage Based Charging, and (iv) Independent Metric. The metric profile items 502 comprises a set of items that are used to define and classify metric profiles (although other types of configuration items may be utilized depending on the application). For instance, the “Critical” profile item is a parameter that specifies whether or not a given metric is critical in terms of its importance in the system (e.g., whether the given metric is associated with a VM that hosts a critical or non-critical application). Next, the “Dependency Root” profile item is a parameter that specifies whether or not a given metric is associated with a root node (as compared to a leaf node) in a dependency graph that defines an application flow, for example. In another embodiment, a similar profile item may be defined to specify a given metric in terms of its precedence in an application flow. Further, the “Usage Based Charging” profile item specifies whether or not a given metric is utilized for charging a customer as part of usage based charging policy. In another embodiment, a similar profile item may be defined to specify whether or not a given metric is utilized for a system health check, for example. Next, the “Independent Metric” profile item is a parameter that specifies whether or not a given metric depends on one or more other metrics (e.g., whether the given metric is independent from or correlated with another metric). As further shown in FIG. 13, the metric policies 504 comprise various combinations of polling policies and data sample storage policies such as (i) a conservative sampling and conservative storage policy, (ii) a conservative sampling and aggregated storage policy, (iii) a per tier sampling and conservative storage policy, and (iv) a per tier sampling and aggregated storage policy. A conservative sampling policy for a given metric specifies the use of a default polling frequency (with a high frequency sampling rate) to be applied by the monitoring system (e.g., the resource monitoring and metering system 110, FIG. 1) for collecting data samples associated with the given metric. Similarly, a conservative storage policy for a given metric indicates that all collected data samples for the given metric are recorded/stored. Moreover, a per tier sampling policy for a given metric indicates the use of a reduced polling frequency (with a sampling rate less than the default high frequency sample rate) to be applied by the monitoring system for collecting data samples associated with the given metric, wherein the reduced sampling frequency in a given tier is determined based on a behavior of metrics within the given tier. Further, an aggregated storage policy for a given metric specifies the use of a corresponding aggregation rule to aggregate the data samples (e.g., keeping an independent metric only in case of correlated metrics, or computing one or more aggregated values (e.g., SUM) as indicated by the usage calculation class(es) for a metered metric, etc.), and then only recording/storing the aggregate of the data samples (e.g., storing an aggregated value computed from a portion of the collected data samples). As further shown in FIG. 13, the metric policies 504 are mapped to a given metric depending on the values of the metric profile items of the given metric. For example, a metric that is deemed critical is assigned a conservative sampling and conservative storage policy, irrespective of the values of the other metric profile items. Further, a metric that is not deemed critical, but which is associated with a root item in a dependency graph is assigned a conservative sampling and conservative storage policy, irrespective of its use or dependency. Moreover, a metric that is not deemed critical and not associated with a root item, but which is utilized for usage based charging, is assigned a conservative sampling and aggregated storage policy, irrespective of its dependency. Next, a metric that is not deemed critical, not associated with a root item, and not utilized for usage based charging, but which is deemed an independent metric, is assigned a per tier sampling and conservative storage policy. Finally, as shown in FIG. 13, a metric that is not deemed critical, not associated with a root item, not utilized for usage based charging, and not an independent metric, is assigned a per tier sampling and aggregated storage policy. The mapping 500 between the metric profile items and the sampling and storage policies as shown in FIG. 13, for example, is generated and maintained manually by the service provider using the mapping module 410 shown in FIG. 12. The metric profile generation and update module 412 programmatically and periodically accesses current mapping information (maintained by the mapping module 410) and the data store of configuration parameters 404 to automatically generate, populate or otherwise update metric profiles of system metrics. In particular, for a newly defined metric with specified values for the metric profile items 502 (as determined/defined according to the configuration parameters 404, for example), the metric profile generation and update module 412 will access the mapping module 410 to determine the proper sampling and/or storage policies to assigned to the given metric based on the values of the metric profile items for the given metric. Similarly, when one or more values of metric profile items of an existing metric are change/updated, the metric profile generation and update module 412 will access the mapping module 410 to determine if the currently assigned sampling and/or storage policies are still appropriate, or need to be changed, based on the changed/updated value(s) of the metric profile items for the given metric. The defined metrics and associated metric policies are maintained in the data store of metrics 414. The aggregation module 416 is configured to apply one or more aggregation rules to aggregate collected data samples 428 that are associated with a given metric having an assigned metric policy that specifies aggregated storage. The aggregation module 416 generates and outputs aggregated data values to be stored in, e.g., the scalable metering data storage system 112 of FIG. 1. The tiered polling module 418 is configured to receive and process data samples 428 associated with metrics having an assigned policy of “per tier sampling” to partition the metrics into different tiers having different polling frequencies. More specifically, in one embodiment of the invention, the change point detection module 420 implements a change point detection (CPD) algorithm to process a time series of data samples for a given metric to identify the number and timing of the changes that occur in the collected data samples for the given metric. For example, in one embodiment of the invention, the change point detection module 420 implements methods as disclosed in the article by M. Agarwal, et al, entitled, “Problem Determination in Enterprise Middleware Systems using Change Point Correlation of Time Series Data,” in Network Operations and Management Symposium, 2006, 10th IEEE/IFIP, pp. 471-482, April 2006, which is incorporated herein by reference. Other change point detection techniques may be implemented. The symbol sequence encoding module 422 processes change point time series data for a metric, which is output from the change point detection module 420, to convert the change point time series data into a discrete format of a sequence of symbols with a small alphabet size, which encodes the change behavior of the given metric. In one embodiment of the invention, the symbol sequence encoding module 422 implements symbolic aggregate approximation methods as disclosed in the article by J. Lin, et al, entitled “Experiencing SAX: A Novel Symbolic Representation Of Time Series,” Data Mining and Knowledge Discovery, October 2007, Volume 15, Issue 2, pp. 107-144, which is incorporated herein by reference. Other suitable encoding techniques can be implemented. The clustering module 424 is configured to process the symbol sequences output from the symbol sequence encoding module 422 and cluster the symbol segments generated into tiers of metrics with similar sequences, and filter out of the tiers all the metrics with segments scattered in different clusters. In one embodiment of the invention, the clustering module 424 implements the structural similarity techniques disclosed in the article by P. Siirtola, et al., entitled “Improving The Classification Accuracy of Streaming Data Using SAX Similarity Features,” Pattern Recognition Letters, 32(13):1659-1668, 2011, which is incorporated herein by reference, in addition to the techniques disclosed in above noted article by J. Lin, et al. Furthermore, as explained below, the tier polling frequency determination module 226 is configured to utilize the clustering results to determine different polling frequencies for different tiers based on common sequences of symbols of the metrics associated with the different tiers. FIG. 14 is a flow diagram of a method for dynamically adjusting metering operations for service management of a computing platform, according to an embodiment of the invention. In particular, FIG. 14 illustrates operational modes of the dynamic metering adjustment system 400 of FIG. 12. In this regard, the process flow of FIG. 14 will be discussed in further detail with reference to constituent components of the system of FIG. 12. Referring to FIG. 14, an initial process comprises configuring and enabling the service management monitoring system (block 600). This process comprises, for example, defining system metrics and assigning metric policies to the metrics using the metric classification module 402. as discussed above. Once configured and enabled, the service management monitoring system begins polling the system metrics initially at a predefined, default sampling frequency, and collecting data samples associated with the metrics (block 602). Next, the dynamic metering and adjustment system 400 will access the data store of metrics 414 to identify metrics that have been assigned a metric policy of “aggregated storage” (block 604). The data samples that are associated with the metrics having an aggregated storage policy are processed by the aggregation module 416 by applying appropriate aggregation rules to aggregate the collected data samples (block 606). For instance, for a given metric that is characterized as being correlated to another metric (i.e., not independent), each new data sample of the given metric is compared to a last stored data sample of the given metric, and the new data sample is stored in the metering database only if the value of the new sample is different from the value of the last stored data sample. As another example, the data samples for a given metric that is utilized for usage-based charging will be processed according to a specified usage calculation logic (e.g., sum, max, min) and the result will update the current value without creating a new entry in the database. Other aggregation rules can be applied to aggregate data samples of various metrics, depending on the application. Furthermore, the dynamic metering and adjustment system 400 will access the data store of metrics 414 to identify metrics that have been assigned a metric policy of “per tier sampling” (block 610). Each data sample collected from the computing platform for a given metric is a data point in a time series of data for that metric on a given configuration item. After a sufficient amount of data samples are collected for the metrics (e.g., for a few weeks), a the change point detection module 420 processes the time series data for each of the metrics using a CPD process to identify the number and timing of the changes that occur in the data sample streams of the metrics (block 612). For example, FIG. 15 graphically illustrates a method for detecting changes in a time series of data samples that are collected for a given metric, according to an embodiment of the invention. In particular, FIG. 15 shows an example of CPD applied to collected data samples associated with CPU utilization of a VM over a period of two days to generate change point time series data. As shown in FIG. 15, a solid line shows the CPU utilization over the given time period, and a dashed line shows results of the CPD analysis. The CPD results are in the form of a change point time series data having spikes 700, 702, 704, 706, and 708 at times where there are significant changes (increase or decrease) in CPU utilization over the given time period of collected data samples. More specifically, the CPU utilization in FIG. 15 represents a variance of the CPU utilization, and the CPD process is implemented determine time instances where there are abrupt changes (e.g., spikes 700, 702, 704, 706, and 708) in the variance of the CPU utilization. The X-axis in FIG. 15 represents time intervals of 15 minutes (there are 2880 minutes in two days, wherein 2880/15 is 192). The Y-axis represents values of the CPD time series computed as |μR−μL|, which denotes an absolute value of a difference between (i) a mean of a set of points to the right of a given data point (of CPU utilization) and (ii) a mean of a set of points to the left of the given data point. In other words, to compute the CPD time series data, for each selected point of the CPU utilization time series data shown in FIG. 15, the mean of a set of points to the right of the selected point is computed (μR), the mean of a set of points to the left of the selected point is computed (μL), and an absolute value of the difference between the computed means, μR−μL, is computed as the CPD value for the selected point. The “set of points” that are chosen is referred to as a window size. In one embodiment, the window size is 3 or 5 points. Next, the change point time series data (or CPD time series data) for each metric is converted to a discrete sequence of symbols to encode the change behavior of the metrics (block 614). As noted above, in one embodiment of the invention, this process is performed by the symbol sequence encoding module 422 using an SAA (symbolic aggregate approximation) process on the CPD time series data to convert the CPD time series data into a discrete format of a sequence of symbols with a small alphabet size. For example, FIG. 16 graphically illustrates a method for encoding a change point time series into a symbol sequence, according to an embodiment of the invention. In particular, FIG. 16 shows the CPD time series data (solid line) with spikes 700, 702, 704, 706, and 708 (as in FIG. 15), along with an associated SAA time series data (dashed lines) with different segments of the SAA time series data labeled with letters to provide a letter string, e.g., “abcacbdacdbc” in the given example. The letter string is configured to encode the change behavior of the metrics that are selected for tiered sampling. In the example of FIG. 16, one symbol “a” corresponds to flat, monotone segments of invariable behavior, while other symbols “b”, “c,” and “d” correspond to times when the metric has changed. In one embodiment, these symbols are considered to represent different tiers in which different sampling frequencies are applied to capture relevant information. In particular, the results of the CPD and SAA analysis enable the dynamic metering adjustment system 400 to monitor and detect the behavior of the system metrics over time, and to adjust sampling frequencies at different times based on the metric behavior. For example, we are interested in collecting precise data around the timing of change occurrences in time series data, which correspond to changes in statistical properties of the metrics, while collecting samples during monotone stretches of consecutive identical symbols is of less interest. When certain segments of unchanged performance remain relatively constant for many hours, it is unnecessary to poll and store data samples at intervals of 1 to 5 seconds, rather, only a few samples need to be collected during those segments of unchanged performance. Following the symbol encoding process, the symbol sequences that generated for the system metrics are clustered into different tiers of metrics having similar symbol sequences (block 616). This process takes into consideration that large groups of metrics have similar change point time series data, with spikes corresponding to seasonal periods of the day and days of the week and weekend. Therefore, in one embodiment of the invention, to reduce a number of metric policies to maintain, the sequences are divided into segments corresponding to weekly periods (e.g., Monday to Sunday) and the segments are grouped (clustered) into different tiers of metrics. In one embodiment of the invention, this process is implemented by the clustering module 424 (FIG. 12) using the SAA process to cluster the symbol segments generated above into different tiers of metrics with similar sequences. For example, a metric having all its weekly segments in one group indicates that its weekly pattern is stable across the analyzed weeks. A next step in the process includes filtering out from the tiers all metrics which have segments that are scattered in different clusters (block 618). Then, for each tier, a new polling (sampling) frequency is determined for the remaining metrics in that tier based on the weekly sequence of symbols of the metrics in that tier. For example, in one embodiment of the invention, a new polling frequency for a given tier is determined as follows: (1) for each isolated occurrence of a spike symbol in any metric segment, data is collected from the timing of the beginning of the spike until the end of the spike (e.g., every second during a minute), and (2) for the segments with unchanged performance, data is collected hourly only if no isolated spike has already triggered data collection during that hour. The signature of the polling timing identified in each tier represents the data sampling metric policy of that tier. As these data sampling metric policies are made available to the service management system 102 (FIG. 1), the service management monitoring system can switch from the default data sampling metric policy to the new polling frequencies (block 622) and start making an efficient usage of storage and computational resources. For example, the tiered polling module 418 can output the per tier sampling frequency information to the resource monitoring and metering system module 110, wherein the module 110 applies the newly determined per tier data sampling frequencies to monitor and collect data samples for the associated system metrics. During run-time, as metric behaviors or monitoring policies evolve over time, the classification of a given metric in a particular tier may become unsuitable for the new sampling and storage requirements. For example, a metric policy update for a given metric (affirmative result in block 624) will directly trigger a reclassification of the given metric into the default, full sampling tier (return to block 602), where the metric classification process (e.g., blocks 610-620) will be re-applied. Similarly, when a change in behavior of given metric is detected (affirmative determination in block 626), the given metric is automatically reclassified into the default, full sampling tier (return to block 602), and the metric classification process is re-applied. However, a change in metric behavior update is not directly signaled unless it is monitored. In this regard, in one embodiment of the invention, a change in metric behavior of a given metric can be detected by comparing data samples collected (as per the tier based sampling) for the given metric to the most recent average of the past collected values corresponding to the metric's sequence symbol generated by the SAA process for that particular time in the sequence. If the difference is greater than a predefined threshold (e.g., 10%), the metric is assigned to the default, conservative sampling tier, for reclassification. In one embodiment of the invention, we determine and utilize in the metric profile those configuration items that have a reduced sensitivity to the changes in the monitored environment, and hence a limited potential of causing the metrics to oscillate between the tiers. Experimental Results To evaluate the efficacy of dynamic metering adjustment techniques according to embodiments of the invention as described above, we used Python and The R Project for Statistical Computing to analyze the data (as mentioned above) that was collected from IBM production servers over a period of 3 months. For evaluation purposes we compared default or conservative sampling and storage with tiered sampling and aggregated storage. The policies discussed above (FIG. 13) were defined based on two procedures, one aggressive and the other non-aggressive. In the aggressive approach, we set a higher threshold for aggregated storage, and aggressively reduced the polling frequency when collecting monitoring data from tiers with low variance in the resource utilization. For each policy, we measured the reduction in the collected data size, as well as the missed anomaly detection rate. The missed anomaly detection rate was calculated by comparing the data collected for each policy with the anomalies found by examining system logs collected for the same time period using sysstat utilities. The sysstat data was collected at a frequency two times (2×) the maximum polling frequency used to collect the monitoring samples. We defined an “anomaly” as a missed sample having a sudden increase or decrease in utilization as compared to its adjacent samples. Furthermore, for the policies involving metered data, we applied a rule that we store enough samples so that metering tasks can be successfully performed in following revenue calculation stages. The evaluation results using actual data from IBM production servers reveals that it is possible to reduce the monitoring data size up to 80%, as well as decrease the missed anomaly detection rate from 3% to as low as 0.05% to 0.1%, as shown in the following table: Data Reduction Non Miss Policy Aggressive Aggressive Detecion Rate Cons_Sam + Cons_Stor    0%    0% 3% Cons_Sam + Aggr_Stor 61.17% 70.17% 3% Tier_Sam + Cons_Stor  72.7%  76.8% 0.05-0.1% Tier_Sam + Aggr_Stor 76.32% 80.04% 0.05-0.1% In particular, the results of our evaluation demonstrated that by storing aggregated data instead of conservatively storing all samples, we obtained a 60% to 70% reduction in data collected for monitoring purpose. The decrease in data size was due to storing only samples which either conveyed useful information about the current health of the monitored resource or were required for charging purposes. The missed anomaly detection rate for conservative sampling was found to be 3% as the default polling frequency was not high enough to track the changes in VMs having sudden variations in resource utilization. Next, when tiered sampling was enabled with conservative storage (storing every sample), we were able to reduce the data size by 72% to 76% whereas the missed anomaly detection rate was found to be only 0.05% to 0.1% due to higher polling frequency used for the set of VMs having sudden variations in resource utilization. Further evaluation revealed that 99.99% of the anomalies were from the tier for which reduced the polling frequency was used. Hence, the missed anomaly detection rate can be further reduced by using a less aggressive approach. Finally, when applying both tiered sampling and aggregated storage, we obtained up to 80% reduction in data size. The missed anomaly detection rate remained the same as in the case of tiered sampling with conservative storage, since shifting from conservative to aggregated storage without changing the sampling policy does not affect the anomaly detection. We estimated the storage savings by considering an average object size of a sample of 1024 bytes. This size is due to the information related the resource usage plus the additional fields, e.g., instance id, timestamp, resource id, user id, project id, etc. If a single VM produces 100 counters per second and storage costs $0.07 per GB, then a rough estimate of savings for an environment of 1000 VM, per year can be calculated as follows: 0.8×$0.07 per GB/month×100 samples/sec×1024 bytes×60 sec/min×60 min/hour×24 hours/day×30 days/month/10{circumflex over ( )}9 bytes/GB=$14.864/VM/month×1000 VMs=$14,864/environment/month, which accumulated over one year results in a saving of $1,159,392/environment/year. The amount of stored data increases each month and can be represented by an arithmetic progression. Hence, n (al+an) 2 was used to calculate the cost accumulated over period of 12 months. It is to be noted that in some cases (e.g. metering data for charging) service providers are bound by an SLA to maintain customer data for as long as 3 to 5 years. Therefore, monitoring and metering methods as described herein are beneficial to both tenant (for the savings) and the service provider (for the competitive advantage). The present invention may be a system, a method, and/or a computer program product. The computer program product may include a computer readable storage medium (or media) having computer readable program instructions thereon for causing a processor to carry out aspects of the present invention. The computer readable storage medium can be a tangible device that can retain and store instructions for use by an instruction execution device. The computer readable storage medium may be, for example, but is not limited to, an electronic storage device, a magnetic storage device, an optical storage device, an electromagnetic storage device, a semiconductor storage device, or any suitable combination of the foregoing. A non-exhaustive list of more specific examples of the computer readable storage medium includes the following: a portable computer diskette, a hard disk, a random access memory (RAM), a read-only memory (ROM), an erasable programmable read-only memory (EPROM or Flash memory), a static random access memory (SRAM), a portable compact disc read-only memory (CD-ROM), a digital versatile disk (DVD), a memory stick, a floppy disk, a mechanically encoded device such as punch-cards or raised structures in a groove having instructions recorded thereon, and any suitable combination of the foregoing. A computer readable storage medium, as used herein, is not to be construed as being transitory signals per se, such as radio waves or other freely propagating electromagnetic waves, electromagnetic waves propagating through a waveguide or other transmission media (e.g., light pulses passing through a fiber-optic cable), or electrical signals transmitted through a wire. Computer readable program instructions described herein can be downloaded to respective computing/processing devices from a computer readable storage medium or to an external computer or external storage device via a network, for example, the Internet, a local area network, a wide area network and/or a wireless network. The network may comprise copper transmission cables, optical transmission fibers, wireless transmission, routers, firewalls, switches, gateway computers and/or edge servers. A network adapter card or network interface in each computing/processing device receives computer readable program instructions from the network and forwards the computer readable program instructions for storage in a computer readable storage medium within the respective computing/processing device. Computer readable program instructions for carrying out operations of the present invention may be assembler instructions, instruction-set-architecture (ISA) instructions, machine instructions, machine dependent instructions, microcode, firmware instructions, state-setting data, or either source code or object code written in any combination of one or more programming languages, including an object oriented programming language such as Java, Smalltalk, C++ or the like, and conventional procedural programming languages, such as the “C” programming language or similar programming languages. The computer readable program instructions may execute entirely on the user's computer, partly on the user's computer, as a stand-alone software package, partly on the user's computer and partly on a remote computer or entirely on the remote computer or server. In the latter scenario, the remote computer may be connected to the user's computer through any type of network, including a local area network (LAN) or a wide area network (WAN), or the connection may be made to an external computer (for example, through the Internet using an Internet Service Provider). In some embodiments, electronic circuitry including, for example, programmable logic circuitry, field-programmable gate arrays (FPGA), or programmable logic arrays (PLA) may execute the computer readable program instructions by utilizing state information of the computer readable program instructions to personalize the electronic circuitry, in order to perform aspects of the present invention. Aspects of the present invention are described herein with reference to flowchart illustrations and/or block diagrams of methods, apparatus (systems), and computer program products according to embodiments of the invention. It will be understood that each block of the flowchart illustrations and/or block diagrams, and combinations of blocks in the flowchart illustrations and/or block diagrams, can be implemented by computer readable program instructions. These computer readable program instructions may be provided to a processor of a general purpose computer, special purpose computer, or other programmable data processing apparatus to produce a machine, such that the instructions, which execute via the processor of the computer or other programmable data processing apparatus, create means for implementing the functions/acts specified in the flowchart and/or block diagram block or blocks. These computer readable program instructions may also be stored in a computer readable storage medium that can direct a computer, a programmable data processing apparatus, and/or other devices to function in a particular manner, such that the computer readable storage medium having instructions stored therein comprises an article of manufacture including instructions which implement aspects of the function/act specified in the flowchart and/or block diagram block or blocks. The computer readable program instructions may also be loaded onto a computer, other programmable data processing apparatus, or other device to cause a series of operational steps to be performed on the computer, other programmable apparatus or other device to produce a computer implemented process, such that the instructions which execute on the computer, other programmable apparatus, or other device implement the functions/acts specified in the flowchart and/or block diagram block or blocks. The flowchart and block diagrams in the Figures illustrate the architecture, functionality, and operation of possible implementations of systems, methods, and computer program products according to various embodiments of the present invention. In this regard, each block in the flowchart or block diagrams may represent a module, segment, or portion of instructions, which comprises one or more executable instructions for implementing the specified logical function(s). In some alternative implementations, the functions noted in the block may occur out of the order noted in the figures. For example, two blocks shown in succession may, in fact, be executed substantially concurrently, or the blocks may sometimes be executed in the reverse order, depending upon the functionality involved. It will also be noted that each block of the block diagrams and/or flowchart illustration, and combinations of blocks in the block diagrams and/or flowchart illustration, can be implemented by special purpose hardware-based systems that perform the specified functions or acts or carry out combinations of special purpose hardware and computer instructions. These concepts are illustrated with reference to FIG. 17, which shows a computing node 10 comprising a computer system/server 12, which is operational with numerous other general purpose or special purpose computing system environments or configurations. Examples of well-known computing systems, environments, and/or configurations that may be suitable for use with computer system/server 12 include, but are not limited to, personal computer systems, server computer systems, thin clients, thick clients, handheld or laptop devices, multiprocessor systems, microprocessor-based systems, set top boxes, programmable consumer electronics, network PCs, minicomputer systems, mainframe computer systems, and distributed cloud computing environments that include any of the above systems or devices, and the like. Computer system/server 12 may be described in the general context of computer system executable instructions, such as program modules, being executed by a computer system. Generally, program modules may include routines, programs, objects, components, logic, data structures, and so on that perform particular tasks or implement particular abstract data types. Computer system/server 12 may be practiced in distributed cloud computing environments where tasks are performed by remote processing devices that are linked through a communications network. In a distributed cloud computing environment, program modules may be located in both local and remote computer system storage media including memory storage devices. In FIG. 17, computer system/server 12 in computing node 10 is shown in the form of a general-purpose computing device. The components of computer system/server 12 may include, but are not limited to, one or more processors or processing units 16, a system memory 28, and a bus 18 that couples various system components including system memory 28 to processor 16. The bus 18 represents one or more of any of several types of bus structures, including a memory bus or memory controller, a peripheral bus, an accelerated graphics port, and a processor or local bus using any of a variety of bus architectures. By way of example, and not limitation, such architectures include Industry Standard Architecture (ISA) bus, Micro Channel Architecture (MCA) bus, Enhanced ISA (EISA) bus, Video Electronics Standards Association (VESA) local bus, and Peripheral Component Interconnects (PCI) bus. The computer system/server 12 typically includes a variety of computer system readable media. Such media may be any available media that is accessible by computer system/server 12, and it includes both volatile and non-volatile media, removable and non-removable media. The system memory 28 can include computer system readable media in the form of volatile memory, such as random access memory (RAM) 30 and/or cache memory 32. The computer system/server 12 may further include other removable/non-removable, volatile/nonvolatile computer system storage media. By way of example only, storage system 34 can be provided for reading from and writing to a non-removable, non-volatile magnetic media (not shown and typically called a “hard drive”). Although not shown, a magnetic disk drive for reading from and writing to a removable, non-volatile magnetic disk (e.g., a “floppy disk”), and an optical disk drive for reading from or writing to a removable, non-volatile optical disk such as a CD-ROM, DVD-ROM or other optical media can be provided. In such instances, each can be connected to bus 18 by one or more data media interfaces. As depicted and described herein, memory 28 may include at least one program product having a set (e.g., at least one) of program modules that are configured to carry out the functions of embodiments of the invention. The program/utility 40, having a set (at least one) of program modules 42, may be stored in memory 28 by way of example, and not limitation, as well as an operating system, one or more application programs, other program modules, and program data. Each of the operating system, one or more application programs, other program modules, and program data or some combination thereof, may include an implementation of a networking environment. Program modules 42 generally carry out the functions and/or methodologies of embodiments of the invention as described herein. Computer system/server 12 may also communicate with one or more external devices 14 such as a keyboard, a pointing device, a display 24, etc., one or more devices that enable a user to interact with computer system/server 12, and/or any devices (e.g., network card, modem, etc.) that enable computer system/server 12 to communicate with one or more other computing devices. Such communication can occur via Input/Output (I/O) interfaces 22. Still yet, computer system/server 12 can communicate with one or more networks such as a local area network (LAN), a general wide area network (WAN), and/or a public network (e.g., the Internet) via network adapter 20. As depicted, network adapter 20 communicates with the other components of computer system/server 12 via bus 18. It should be understood that although not shown, other hardware and/or software components could be used in conjunction with computer system/server 12. Examples, include, but are not limited to: microcode, device drivers, redundant processing units, external disk drive arrays, RAID systems, tape drives, and data archival storage systems, etc. It is be understood that embodiments of the invention can be implemented as a cloud service on one or more cloud computing platforms, while in other embodiments of the invention, implementation of the systems and methods recited herein are not limited to a cloud computing environment. Rather, embodiments of the invention are capable of being implemented in conjunction with any other type of computing environment now known or later developed. Cloud computing is a model of service delivery for enabling convenient, on-demand network access to a shared pool of configurable computing resources (e.g. networks, network bandwidth, servers, processing, memory, storage, applications, virtual machines, and services) that can be rapidly provisioned and released with minimal management effort or interaction with a provider of the service. This cloud model may include at least five characteristics, at least three service models, and at least four deployment models. Characteristics are as follows: On-demand self-service: a cloud consumer can unilaterally provision computing capabilities, such as server time and network storage, as needed automatically without requiring human interaction with the service's provider. Broad network access: capabilities are available over a network and accessed through standard mechanisms that promote use by heterogeneous thin or thick client platforms (e.g., mobile phones, laptops, and PDAs). Resource pooling: the provider's computing resources are pooled to serve multiple consumers using a multi-tenant model, with different physical and virtual resources dynamically assigned and reassigned according to demand. There is a sense of location independence in that the consumer generally has no control or knowledge over the exact location of the provided resources but may be able to specify location at a higher level of abstraction (e.g., country, state, or datacenter). Rapid elasticity: capabilities can be rapidly and elastically provisioned, in some cases automatically, to quickly scale out and rapidly released to quickly scale in. To the consumer, the capabilities available for provisioning often appear to be unlimited and can be purchased in any quantity at any time. Measured service: cloud systems automatically control and optimize resource use by leveraging a metering capability at some level of abstraction appropriate to the type of service (e.g., storage, processing, bandwidth, and active user accounts). Resource usage can be monitored, controlled, and reported providing transparency for both the provider and consumer of the utilized service. Service Models are as follows: Software as a Service (SaaS): the capability provided to the consumer is to use the provider's applications running on a cloud infrastructure. The applications are accessible from various client devices through a thin client interface such as a web browser (e.g., web-based e-mail). The consumer does not manage or control the underlying cloud infrastructure including network, servers, operating systems, storage, or even individual application capabilities, with the possible exception of limited user-specific application configuration settings. Platform as a Service (PaaS): the capability provided to the consumer is to deploy onto the cloud infrastructure consumer-created or acquired applications created using programming languages and tools supported by the provider. The consumer does not manage or control the underlying cloud infrastructure including networks, servers, operating systems, or storage, but has control over the deployed applications and possibly application hosting environment configurations. Infrastructure as a Service (IaaS): the capability provided to the consumer is to provision processing, storage, networks, and other fundamental computing resources where the consumer is able to deploy and run arbitrary software, which can include operating systems and applications. The consumer does not manage or control the underlying cloud infrastructure but has control over operating systems, storage, deployed applications, and possibly limited control of select networking components (e.g., host firewalls). Deployment Models are as follows: Private cloud: the cloud infrastructure is operated solely for an organization. It may be managed by the organization or a third party and may exist on-premises or off-premises. Community cloud: the cloud infrastructure is shared by several organizations and supports a specific community that has shared concerns (e.g., mission, security requirements, policy, and compliance considerations). It may be managed by the organizations or a third party and may exist on-premises or off-premises. Public cloud: the cloud infrastructure is made available to the general public or a large industry group and is owned by an organization selling cloud services. Hybrid cloud: the cloud infrastructure is a composition of two or more clouds (private, community, or public) that remain unique entities but are bound together by standardized or proprietary technology that enables data and application portability (e.g., cloud bursting for load-balancing between clouds). A cloud computing environment is service oriented with a focus on statelessness, low coupling, modularity, and semantic interoperability. At the heart of cloud computing is an infrastructure comprising a network of interconnected nodes. Referring now to FIG. 18, an illustrative cloud computing environment 50 is depicted. As shown, cloud computing environment 50 comprises one or more cloud computing nodes 52 with which local computing devices used by cloud consumers, such as, for example, personal digital assistant (PDA) or cellular telephone 54A, desktop computer 54B, laptop computer 54C, and/or automobile computer system 54N may communicate. Nodes 52 may communicate with one another. They may be grouped (not shown) physically or virtually, in one or more networks, such as Private, Community, Public, or Hybrid clouds as described hereinabove, or a combination thereof. This allows cloud computing environment 50 to offer infrastructure, platforms and/or software as services for which a cloud consumer does not need to maintain resources on a local computing device. It is understood that the types of computing devices 54A-N shown in FIG. 18 are intended to be illustrative only and that computing nodes 52 and cloud computing environment 50 can communicate with any type of computerized device over any type of network and/or network addressable connection (e.g., using a web browser). Referring now to FIG. 19, a set of functional abstraction layers provided by cloud computing environment 50 (FIG. 18) is shown. It should be understood in advance that the components, layers, and functions shown in FIG. 19 are intended to be illustrative only and embodiments of the invention are not limited thereto. As depicted, the following layers and corresponding functions are provided: Hardware and software layer 60 includes hardware and software components. Examples of hardware components include: mainframes 61; RISC (Reduced Instruction Set Computer) architecture based servers 62; servers 63; blade servers 64; storage devices 65; and networks and networking components 66. In some embodiments, software components include network application server software 67 and database software 68. Virtualization layer 70 provides an abstraction layer from which the following examples of virtual entities may be provided: virtual servers 71; virtual storage 72; virtual networks 73, including virtual private networks; virtual applications and operating systems 74; and virtual clients 75. In one example, management layer 80 may provide functions as described below. Resource provisioning 81 provides dynamic procurement of computing resources and other resources that are utilized to perform tasks within the cloud computing environment. Metering and Pricing 82 provide cost tracking as resources are utilized within the cloud computing environment, and billing or invoicing for consumption of these resources. In one example, these resources may comprise application software licenses. Security provides identity verification for cloud consumers and tasks, as well as protection for data and other resources. User portal 83 provides access to the cloud computing environment for consumers and system administrators. Service level management 84 provides cloud computing resource allocation and management such that required service levels are met. Service Level Agreement (SLA) planning and fulfillment 85 provide pre-arrangement for, and procurement of, cloud computing resources for which a future requirement is anticipated in accordance with an SLA. In addition, in one embodiment, the management layer 80 implements the service management functionality as discussed above with reference to the systems of FIGS. 1 and 12, for example. Workloads layer 90 provides examples of functionality for which the cloud computing environment may be utilized. Examples of workloads and functions which may be provided from this layer include: mapping and navigation 91; software development and lifecycle management 92; virtual classroom education delivery 93; data analytics processing 94; transaction processing 95; and other typical workloads 96 such as mobile desktop or other workload functions as discussed herein. The descriptions of the various embodiments of the present invention have been presented for purposes of illustration, but are not intended to be exhaustive or limited to the embodiments disclosed. Many modifications and variations will be apparent to those of ordinary skill in the art without departing from the scope and spirit of the described embodiments. The terminology used herein was chosen to best explain embodiments of the embodiments, the practical application or technical improvement over technologies found in the marketplace, or to enable others of ordinary skill in the art to understand the embodiments disclosed herein. Claims (20) What is claimed is: 1. A method for managing a computing platform, comprising: provisioning a plurality of virtual machines that execute on a plurality of computing nodes of a computing platform, wherein the provisioned virtual machines utilize computing resources of the computing nodes; and executing a centralized service management system on at least one computing node of the computing platform to perform service management functions of the computing platform, wherein the service management functions performed by the centralized service management system comprise a process for dynamically adjusting metering operations for monitoring utilization of a computing resource of the plurality of computing nodes, which is utilized by the plurality of virtual machines executing on the plurality of computing nodes of the computing system, wherein the process for dynamically adjusting metering operations comprises: monitoring the utilization of the computing resource by the provisioned virtual machines executing on the plurality of computing nodes of the computing platform, wherein monitoring comprises collecting data samples from the plurality of computing nodes, wherein the data samples comprise information regarding a metric of the utilization of the monitored resource by the provisioned virtual machines executing on the plurality of computing nodes, wherein the data samples are initially collected at a given sampling frequency, wherein each data sample comprises (i) a timestamp to mark a time that the data sample was collected, and (ii) a sample value of the metric of the utilization of the monitored resource; storing the collected data samples in a persistent storage system; analyzing a set of the data samples that are initially collected at the given sampling frequency and stored for the metric of the utilization of the monitored resource to determine an amount of deviation in the sample values of the data samples within the set of data samples that are initially collected at the given sampling frequency for the metric of the utilization of the monitored resource, wherein analyzing the set of data samples comprises (i) generating change point time series data by detecting changes in the sample values of the collected data samples associated with the metric of the utilization of the monitored resource and (ii) and converting the change point time series data into a sequence of symbols which encodes a change behavior of the metric of the utilization of the monitored resource; determining a new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource based on the amount of deviation in the sample values of the data samples within the set of data samples for the metric of the utilization of the monitored resource as determined from the sequence of symbols which encodes the change behavior of the metric of the utilization of the monitored resource; and applying the new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource by the provisioned virtual machines executing on the plurality of computing nodes of the computing platform; wherein the new sampling frequency for collecting new data samples is less than the given sampling frequency when an encoded symbol for the metric indicates a period of invariable behavior of the metric of utilization of the monitored resource, to thereby reduce an amount of new data samples for the metric of the utilization of the monitored resource which are collected and stored in the persistent storage system; wherein the method is implemented at least in part by a processor executing program code. 2. The method of claim 1, further comprising assigning a metric policy to the metric of the utilization of the monitored resource based on values of one or more metric profile configuration items associated with the metric. 3. The method of claim 2, wherein determining a new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource is further based on a metric policy assigned to the metric. 4. The method of claim 2, wherein the metric policy for the metric of the utilization of the monitored resource comprises at least one of conservative sampling, conservative storage, aggregated storage, per tier sampling or a combination thereof, wherein the conservative sampling applies the predefined given sampling frequency for the metric, wherein conservative storage implies that all collected data samples for the metric are stored, wherein aggregated storage implies that an aggregate of the sample values of the collected data samples are stored, or that only changes in the sample values of the collected data samples are stored, and wherein pier tier sampling implies that different sampling frequencies are applied for different behaviors of the metric of the utilization of the monitored resource. 5. The method of claim 2, wherein the one or more metric profile configuration items comprises one of an importance item, a usage item, a dependency item, or a combination thereof. 6. The method of claim 1, wherein the monitored resource comprises one of CPU (central processing unit) usage, memory usage, TCP/IP connection rate, and page access per time. 7. The method of claim 1, wherein determining the new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource comprises: clustering the sequence of symbols into tiers of metrics with similar sequences; determining a new sampling frequency for each of the tiers based on the sequences of symbols that are included within the tier. 8. The method of claim 1, further comprising re-applying the given sampling frequency to collect new data samples for the metric of the utilization of the monitored resource when there is one of (i) a detected change in behavior of the metric and (ii) a metric policy update for the metric. 9. The method of claim 1, further comprising: aggregating the sample values of the collected data samples for the metric of the utilization of the monitored resource; and storing the aggregated sample values of the collected data samples. 10. The method of claim 1, further comprising: determining if the metric of the utilization of the monitored resource is correlated to another metric; if the metric of the utilization of the monitored resource is determined to be correlated to another metric, then comparing each newly collected data sample for the metric of the utilization of the monitored resource to a last collected and stored data sample for the metric of the utilization of the monitored resource; and storing the newly collected data sample only if the sample value of the newly collected sample is different from the sample value of the last collected and stored data sample. 11. An article of manufacture comprising a computer readable storage medium having program instructions embodied therewith, the program instructions executable by a computer to cause the computer to perform a method for managing a computing platform, the method comprising: provisioning a plurality of virtual machines that execute on a plurality of computing nodes of a computing platform, wherein the provisioned virtual machines utilize computing resources of the computing nodes; and executing a centralized service management system on at least one computing node of the computing platform to perform service management functions of the computing platform, wherein the service management functions performed by the centralized service management system comprise a process for dynamically adjusting metering operations for monitoring utilization of a computing resource of the plurality of computing nodes, which is utilized by the plurality of virtual machines executing on the plurality of computing nodes of the computing system, wherein the process for dynamically adjusting metering operations comprises: monitoring the utilization of the computing resource by the provisioned virtual machines executing on the plurality of computing nodes of the computing platform, wherein monitoring comprises collecting data samples from the plurality of computing nodes, wherein the data samples comprise information regarding a metric of the utilization of the monitored resource by the provisioned virtual machines executing on the plurality of computing nodes, wherein the data samples are initially collected at a given sampling frequency, wherein each data sample comprises (i) a timestamp to mark a time that the data sample was collected, and (ii) a sample value of the metric of the utilization of the monitored resource; storing the collected data samples in a persistent storage system; analyzing a set of the data samples that are initially collected at the given sampling frequency and stored for the metric of the utilization of the monitored resource to determine an amount of deviation in the sample values of the data samples within the set of data samples that are initially collected at the given sampling frequency for the metric of the utilization of the monitored resource, wherein analyzing the set of data samples comprises (i) generating change point time series data by detecting changes in the sample values of the collected data samples associated with the metric of the utilization of the monitored resource and (ii) and converting the change point time series data into a sequence of symbols which encodes a change behavior of the metric of the utilization of the monitored resource; determining a new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource based on the amount of deviation in the sample values of the data samples within the set of data samples for the metric of the utilization of the monitored resource as determined from the sequence of symbols which encodes the change behavior of the metric of the utilization of the monitored resource; and applying the new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource by the provisioned virtual machines executing on the plurality of computing nodes of the computing platform; wherein the new sampling frequency for collecting new data samples is less than the given sampling frequency when an encoded symbol for the metric indicates a period of invariable behavior of the metric of utilization of the monitored resource, to thereby reduce an amount of new data samples for the metric of the utilization of the monitored resource which are collected and stored in the persistent storage system. 12. The article of manufacture of claim 11, further comprising assigning a metric policy to the metric of the utilization of the monitored resource based on values of one or more metric profile configuration items associated with the metric. 13. The article of manufacture of claim 12, wherein determining a new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource is further based on a metric policy assigned to the metric. 14. The article of manufacture of claim 12, wherein the metric policy for the metric of the utilization of the monitored resource comprises at least one of conservative sampling, conservative storage, aggregated storage, per tier sampling or a combination thereof, wherein the conservative sampling applies the given sampling frequency for the metric, wherein conservative storage implies that all collected data samples for the metric are stored, wherein aggregated storage implies that an aggregate of the sample values of the collected data samples are stored, or that only changes in the sample values of the collected data samples are stored, and wherein pier tier sampling implies that different sampling frequencies are applied for different behaviors of the metric of the utilization of the monitored resource. 15. The article of manufacture of claim 11, wherein determining the new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource, comprises: clustering the sequence of symbols into tiers of metrics with similar sequences; determining a new sampling frequency for each of the tiers based on the sequences of symbols that are included within the tier. 16. The article of manufacture of claim 11, further comprising: aggregating the sample values of the collected data samples for the metric of the utilization of the monitored resource; and storing the aggregated sample values of the collected data samples. 17. The article of manufacture of claim 11, further comprising: determining if the metric of the utilization of the monitored resource is correlated to another metric; if the metric of the utilization of the monitored resource is determined to be correlated to another metric, then comparing each newly collected data sample for the metric of the utilization of the monitored resource to a last collected and stored data sample for the metric of the utilization of the monitored resource; and storing the newly collected data sample only if the sample value of the newly collected sample is different from the sample value of the last collected and stored data sample. 18. A computing system, comprising: a plurality of computing nodes, each comprising a plurality of computing resources including memory and processors, wherein a plurality of virtual machines are provisioned across the plurality of computing nodes; and wherein at least one computing node of the computing comprises a memory which stores program instructions, and a processor which executes the stored program instructions to instantiate a centralized service management system that executes on at least one computing node of the computing platform to perform service management functions of the computing platform, wherein the service management functions performed by the centralized service management system comprise a process for dynamically adjusting metering operations for monitoring utilization of a computing resource of the plurality of computing nodes, which is utilized by the plurality of virtual machines executing on the plurality of computing nodes of the computing system, wherein the process for dynamically adjusting metering operations comprises: monitoring the utilization of the computing resource by the provisioned virtual machines executing on the plurality of computing nodes of the computing platform, wherein monitoring comprises collecting data samples from the plurality of computing nodes, wherein the data samples comprise information regarding a metric of the utilization of the monitored resource by the provisioned virtual machines executing on the plurality of computing nodes, wherein the data samples are initially collected at a given sampling frequency, wherein each data sample comprises (i) a timestamp to mark a time that the data sample was collected, and (ii) a sample value of the metric of the utilization of the monitored resource; storing the collected data samples in a persistent storage system; analyzing a set of the data samples that are initially collected at the given sampling frequency and stored for the metric of the utilization of the monitored resource over a to determine an amount of deviation in the sample values of the data samples within the set of data samples that are initially collected at the given sampling frequency for the metric of the utilization of the monitored resource, wherein analyzing the set of data samples comprises (i) generating change point time series data by detecting changes in the sample values of the collected data samples associated with the metric of the utilization of the monitored resource and (ii) and converting the change point time series data into a sequence of symbols which encodes a change behavior of the metric of the utilization of the monitored resource; determining a new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource based on the amount of deviation in the sample values of the data samples within the set of data samples for the metric of the utilization of the monitored resource as determined from the sequence of symbols which encodes the change behavior of the metric of the utilization of the monitored resource; and applying the new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource by the provisioned virtual machines executing on the plurality of computing nodes of the computing platform; wherein the new sampling frequency for collecting new data samples is less than the given sampling frequency when an encoded symbol for the metric indicates a period of invariable behavior of the metric of utilization of the monitored resource, to thereby reduce an amount of new data samples for the metric of the utilization of the monitored resource which are collected and stored in the persistent storage system. 19. The computing system of claim 18, wherein determining the new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource, comprises: clustering the sequence of symbols into tiers of metrics with similar sequences; determining a new sampling frequency for each of the tiers based on the sequences of symbols that are included within the tier. 20. The computing system of claim 18, wherein the process performed by the centralized service management system for dynamically adjusting metering operations further comprises: assigning a metric policy to the metric of the utilization of the monitored resource based on values of one or more metric profile configuration items associated with the metric, wherein determining the new sampling frequency for collecting new data samples for the metric of the utilization of the monitored resource is further based on a metric policy assigned to the metric; and wherein the metric policy for the metric of the utilization of the monitored resource comprises at least one of conservative sampling, conservative storage, aggregated storage, per tier sampling or a combination thereof, wherein the conservative sampling applies the given sampling frequency for the metric, wherein conservative storage implies that all collected data samples for the metric are stored, wherein aggregated storage implies that an aggregate of the sample values of the collected data samples are stored, or that only changes in the sample values of the collected data samples are stored, and wherein pier tier sampling implies that different sampling frequencies are applied for different behaviors of the metric of the utilization of the monitored resource. US14/926,384 2014-09-30 2015-10-29 Dynamic metering adjustment for service management of computing platform Active 2036-11-09 US10467036B2 (en) Priority Applications (3) Application Number Priority Date Filing Date Title US201462057686P true 2014-09-30 2014-09-30 US14/871,443 US10171371B2 (en) 2014-09-30 2015-09-30 Scalable metering for cloud service management based on cost-awareness US14/926,384 US10467036B2 (en) 2014-09-30 2015-10-29 Dynamic metering adjustment for service management of computing platform Applications Claiming Priority (1) Application Number Priority Date Filing Date Title US14/926,384 US10467036B2 (en) 2014-09-30 2015-10-29 Dynamic metering adjustment for service management of computing platform Related Parent Applications (1) Application Number Title Priority Date Filing Date US14/871,443 Continuation-In-Part US10171371B2 (en) 2014-09-30 2015-09-30 Scalable metering for cloud service management based on cost-awareness Publications (2) Publication Number Publication Date US20160094401A1 US20160094401A1 (en) 2016-03-31 US10467036B2 true US10467036B2 (en) 2019-11-05 Family ID=55585640 Family Applications (1) Application Number Title Priority Date Filing Date US14/926,384 Active 2036-11-09 US10467036B2 (en) 2014-09-30 2015-10-29 Dynamic metering adjustment for service management of computing platform Country Status (1) Country Link US (1) US10467036B2 (en) Cited By (1) * Cited by examiner, † Cited by third party Publication number Priority date Publication date Assignee Title US10659313B2 (en) * 2016-08-11 2020-05-19 Rescale, Inc. Dynamic optimization of simulation resources Families Citing this family (30) * Cited by examiner, † Cited by third party Publication number Priority date Publication date Assignee Title US10333820B1 (en) 2012-10-23 2019-06-25 Quest Software Inc. System for inferring dependencies among computing systems US9596162B1 (en) * 2014-10-20 2017-03-14 Sprint Spectrum L.P. Method and system of imposing a policy rule for heavy usage US10291493B1 (en) 2014-12-05 2019-05-14 Quest Software Inc. System and method for determining relevant computer performance events WO2017222763A2 (en) * 2016-05-31 2017-12-28 Vapor IO Inc. Autonomous distributed workload and infrastructure scheduling US10133614B2 (en) * 2015-03-24 2018-11-20 Ca, Inc. Anomaly classification, analytics and resolution based on annotated event logs US20160285783A1 (en) * 2015-03-26 2016-09-29 Vmware, Inc. Methods and apparatus to control computing resource utilization of monitoring agents WO2016155835A1 (en) * 2015-04-02 2016-10-06 Telefonaktiebolaget Lm Ericsson (Publ) Technique for scaling an application having a set of virtual machines US10282455B2 (en) 2015-04-20 2019-05-07 Splunk Inc. Display of data ingestion information based on counting generated events US20160306871A1 (en) * 2015-04-20 2016-10-20 Splunk Inc. Scaling available storage based on counting generated events US10187260B1 (en) 2015-05-29 2019-01-22 Quest Software Inc. Systems and methods for multilayer monitoring of network function virtualization architectures US10200252B1 (en) * 2015-09-18 2019-02-05 Quest Software Inc. Systems and methods for integrated modeling of monitored virtual desktop infrastructure systems US10642633B1 (en) * 2015-09-29 2020-05-05 EMC IP Holding Company LLC Intelligent backups with dynamic proxy in virtualized environment EP3200080A1 (en) * 2015-12-16 2017-08-02 Tata Consultancy Services Limited Methods and systems for memory suspect detection CN106201466B (en) * 2016-06-25 2019-05-21 国云科技股份有限公司 A kind of Template Manager method based on heat US10230601B1 (en) 2016-07-05 2019-03-12 Quest Software Inc. Systems and methods for integrated modeling and performance measurements of monitored virtual desktop infrastructure systems WO2018028781A1 (en) * 2016-08-10 2018-02-15 NEC Laboratories Europe GmbH Method for managing computational resources of a data center WO2018034663A1 (en) * 2016-08-18 2018-02-22 Nokia Solutions And Networks Oy Methods and apparatuses for virtualized network function component level virtualized resources performance management collection CN106789198A (en) * 2016-12-07 2017-05-31 高新兴科技集团股份有限公司 A kind of calculate node management method and system US20180173601A1 (en) * 2016-12-16 2018-06-21 Microsoft Technology Licensing, Llc Energy consumption analysis based on telemetry data US10334029B2 (en) * 2017-01-10 2019-06-25 Cisco Technology, Inc. Forming neighborhood groups from disperse cloud providers US10554510B2 (en) * 2017-02-22 2020-02-04 Red Hat, Inc. Enabling additional metrics in a monitoring system to diagnose problems US20180276043A1 (en) * 2017-03-23 2018-09-27 Microsoft Technology Licensing, Llc Anticipatory collection of metrics and logs US10291509B2 (en) * 2017-04-17 2019-05-14 Ciena Corporation Threshold crossing events for network element instrumentation and telemetric streaming US10547672B2 (en) 2017-04-27 2020-01-28 Microsoft Technology Licensing, Llc Anti-flapping system for autoscaling resources in cloud networks US10585707B2 (en) * 2017-06-27 2020-03-10 International Business Machines Corporation Database resource scaling WO2019050952A1 (en) * 2017-09-05 2019-03-14 Brandeis University Systems, methods, and media for distributing database queries across a metered virtual network EP3480753A1 (en) * 2017-11-02 2019-05-08 Lstech Ltd A computer implemented method, a system and a computer program for optimizing the operation of a cloud hosted software as a service (saas) system US10725885B1 (en) 2017-11-17 2020-07-28 Amazon Technologies, Inc. Methods and apparatus for virtual machine load monitoring US10719344B2 (en) * 2018-01-03 2020-07-21 Acceture Global Solutions Limited Prescriptive analytics based compute sizing correction stack for cloud computing resource scheduling US10459757B1 (en) 2019-05-13 2019-10-29 Accenture Global Solutions Limited Prescriptive cloud computing resource sizing based on multi-stream data sources Citations (19) * Cited by examiner, † Cited by third party Publication number Priority date Publication date Assignee Title US20090276771A1 (en) * 2005-09-15 2009-11-05 3Tera, Inc. Globally Distributed Utility Computing Cloud US20100299703A1 (en) * 2008-01-23 2010-11-25 Liveu Ltd. Live Uplink Transmissions And Broadcasting Management System And Method US20110055389A1 (en) 2009-08-14 2011-03-03 Bley John B Methods and Computer Program Products for Generating a Model of Network Application Health US20110083131A1 (en) 2009-10-01 2011-04-07 Fahd Pirzada Application Profile Based Provisioning Architecture For Virtual Remote Desktop Infrastructure US8015367B1 (en) 2007-02-16 2011-09-06 Vmware, Inc. Memory management methods in a computer system with shared memory mappings US20110295999A1 (en) 2010-05-28 2011-12-01 James Michael Ferris Methods and systems for cloud deployment analysis featuring relative cloud resource importance US20120167081A1 (en) 2010-12-22 2012-06-28 Sedayao Jeffrey C Application Service Performance in Cloud Computing US20120317274A1 (en) * 2011-06-13 2012-12-13 Richter Owen E Distributed metering and monitoring system US20130227114A1 (en) * 2012-02-28 2013-08-29 Cisco Technology, Inc. Hierarchical schema to provide an aggregated view of device capabilities in a network US20140120961A1 (en) * 2012-10-26 2014-05-01 Lookout, Inc. System and method for secure message composition of security messages US20140180915A1 (en) * 2012-12-21 2014-06-26 Zetta, Inc. Systems and methods for real-time billing and metrics reporting US20140278623A1 (en) * 2008-06-19 2014-09-18 Frank Martinez System and method for a cloud computing abstraction with self-service portal US20150052287A1 (en) 2013-08-13 2015-02-19 Vmware, Inc. NUMA Scheduling Using Inter-vCPU Memory Access Estimation US20150120791A1 (en) 2013-10-24 2015-04-30 Vmware, Inc. Multi-tenant production and test deployments of hadoop US20150154039A1 (en) 2013-12-03 2015-06-04 Vmware, Inc. Methods and apparatus to automatically configure monitoring of a virtual machine US9141947B1 (en) * 2011-12-19 2015-09-22 Amazon Technologies, Inc. Differential bandwidth metering for networks with direct peerings US9208032B1 (en) * 2013-05-15 2015-12-08 Amazon Technologies, Inc. Managing contingency capacity of pooled resources in multiple availability zones US20160021024A1 (en) 2014-07-16 2016-01-21 Vmware, Inc. Adaptive resource management of a cluster of host computers using predicted data US20170201434A1 (en) * 2014-05-30 2017-07-13 Hewlett Packard Enterprise Development Lp Resource usage data collection within a distributed processing framework Family Cites Families (2) * Cited by examiner, † Cited by third party Publication number Priority date Publication date Assignee Title CN101111049B (en) * 2007-08-14 2010-07-28 华为技术有限公司 System, method and network appliance for implementing overlapping multi-region by one subdistrict US7679884B2 (en) * 2008-07-29 2010-03-16 Wisconsin Alumni Research Foundation Organosilicon phosphorus-based electrolytes Patent Citations (19) * Cited by examiner, † Cited by third party Publication number Priority date Publication date Assignee Title US20090276771A1 (en) * 2005-09-15 2009-11-05 3Tera, Inc. Globally Distributed Utility Computing Cloud US8015367B1 (en) 2007-02-16 2011-09-06 Vmware, Inc. Memory management methods in a computer system with shared memory mappings US20100299703A1 (en) * 2008-01-23 2010-11-25 Liveu Ltd. Live Uplink Transmissions And Broadcasting Management System And Method US20140278623A1 (en) * 2008-06-19 2014-09-18 Frank Martinez System and method for a cloud computing abstraction with self-service portal US20110055389A1 (en) 2009-08-14 2011-03-03 Bley John B Methods and Computer Program Products for Generating a Model of Network Application Health US20110083131A1 (en) 2009-10-01 2011-04-07 Fahd Pirzada Application Profile Based Provisioning Architecture For Virtual Remote Desktop Infrastructure US20110295999A1 (en) 2010-05-28 2011-12-01 James Michael Ferris Methods and systems for cloud deployment analysis featuring relative cloud resource importance US20120167081A1 (en) 2010-12-22 2012-06-28 Sedayao Jeffrey C Application Service Performance in Cloud Computing US20120317274A1 (en) * 2011-06-13 2012-12-13 Richter Owen E Distributed metering and monitoring system US9141947B1 (en) * 2011-12-19 2015-09-22 Amazon Technologies, Inc. Differential bandwidth metering for networks with direct peerings US20130227114A1 (en) * 2012-02-28 2013-08-29 Cisco Technology, Inc. Hierarchical schema to provide an aggregated view of device capabilities in a network US20140120961A1 (en) * 2012-10-26 2014-05-01 Lookout, Inc. System and method for secure message composition of security messages US20140180915A1 (en) * 2012-12-21 2014-06-26 Zetta, Inc. Systems and methods for real-time billing and metrics reporting US9208032B1 (en) * 2013-05-15 2015-12-08 Amazon Technologies, Inc. Managing contingency capacity of pooled resources in multiple availability zones US20150052287A1 (en) 2013-08-13 2015-02-19 Vmware, Inc. NUMA Scheduling Using Inter-vCPU Memory Access Estimation US20150120791A1 (en) 2013-10-24 2015-04-30 Vmware, Inc. Multi-tenant production and test deployments of hadoop US20150154039A1 (en) 2013-12-03 2015-06-04 Vmware, Inc. Methods and apparatus to automatically configure monitoring of a virtual machine US20170201434A1 (en) * 2014-05-30 2017-07-13 Hewlett Packard Enterprise Development Lp Resource usage data collection within a distributed processing framework US20160021024A1 (en) 2014-07-16 2016-01-21 Vmware, Inc. Adaptive resource management of a cluster of host computers using predicted data Non-Patent Citations (70) * Cited by examiner, † Cited by third party Title "About Advanced Message Queuing Protocol (AMPQ)," http://www.amqp.org/about/what, 2015, 2 pages. "Ceilometer Quickstart," https://www.rdoproject.org/CeilometerQuickStart, 2014, 15 pages. "Ceilometer Samples and Statistics," http://docs.openstack.org/developer/ceilometer/webapi/v2.html, 2015, 7 pages. "The R Project for Statistical Computing: What is R?," https://www.r-project.org/about.html, 2015, 2 pages. "The SYSSTAT Utilities," http://sebastien.godard.pagesperso-orange.fr/, 2015, 9 pages. "Tivoli Application Dependency Discovery Manager," http://www-03.ibm.com/software/products/en/tivoliapplicationdependencydiscoverymanager, 2015 2 pages. "Welcome to the SAX (Symbolic Aggregate Approximation) Homepage!" http://www.cs.ucr.edu/˜eamonn/SAX.htm, 2011, 4 pages. "Writing Rules for the State Correlation Engine," https://publib.boulder.ibm.com/tividd/td/tec/SC32-1234-00/en_US/HTML/ecodmst118.htm, 2015, 1 page. A. Anwar et al., "Anatomy of Cloud Monitoring and Metering: An OpenStack Case Study," Proceedings of the 6th Asia-Pacific Workshop on Systems (APSys), 2015, 7 pages. A. Anwar et al., "Cost-Aware Cloud Metering with Scalable Service Management Infrastructure," IEEE 8th International Conference on Cloud Computing (CLOUD), Jul. 2015, pp. 258-292. A. Anwar et al., "Scalable Metering for an Affordable IT Cloud Service Management," IEEE International Conference on Cloud Engineering (IC2E), Mar. 2015, pp. 207-212. A. Brinkmann et al., "Scalable Monitoring System for Clouds," IEEE/ACM 6th International Conference on Utility and Cloud Computing (UCC), Dec. 2013, pp. 351-356, Dresden, Germany. A. Brinkmann et al., "Scalable Monitoring Systems for Clouds," IEEE/ACM 6th International Conference on Utility and Cloud Computing (UCC), Dec. 2013, pp. 351-356. C. Canali et al., "Automatic Virtual Machine Clustering Based on Battacharyya Distance for Multi-Cloud Systems," Proceedings of the 2013 International Workshop on Multi-Cloud Applications and Federated Clouds, Apr. 2013, pp. 45-52. C. Chen et al., "Towards Verifiable Resource Accounting for Outsourced Computation," Proceedings of the 9th ACM SIGPLAN/SIGOPS International Conference on Virtual Execution Environments, Mar. 2013, pp. 167-178. C. Watson et al., "A Microscope on Microservices," The Netflix Tech Blog, http://techblog.netflix.com/2015/02/a-microscope-on-microservies.html, Feb. 18, 2015, 7 pages. CeilometerQuickStart-RDO, "Ceilometer QuickStart," https://openstack.redhat.com/CeilometerQuickStart, 2014, 13 pages. CeilometerQuickStart—RDO, "Ceilometer QuickStart," https://openstack.redhat.com/CeilometerQuickStart, 2014, 13 pages. D. Belova et al., "Open TSBD as a Metering Storage for Open Stack Telemetry," https://groups.google.com/forum/#!topic/opentsdb/9O57MfpRXI0, Jul. 2014, 4 pages. D. Nurmi et al., "The Eucalyptus Open-Source Cloud-Computing System," 9th IEEE/ACM International Symposium on Cluster Computing and the Grid (CCGRID), May 2009, pp. 124-131, Shanghai, China. Disclosed Anonymously, Method and System for Improving Storage Scalability in Workload Optimized Systems Using Workload Partitions (WPARS), Jun. 15, 2011, 5 pages. E. Elmroth et al., "Accounting and Billing for Federated Cloud Infrastructures," Eighth International Conference on Grid and Cooperative Computing (GCC), Aug. 2009, pp. 268-275, Lanzhou, Gansu, China. E. Glynn et al., "OpenStack Telemetry Rethinking Ceilometer Metric Storage with Gnocchi: Time-Series as a Service," Openstack Cloud Software, https://julien.danjou.info/talks/ceilometer-gnocchi.pdf, 2014, 24 pages. H. Goudarzi et al., "Multi-Dimensional SLA-Based Resource Allocation for Multi-Tier Cloud Computing Systems," Proceedings of the IEEE 4th International Conference on Cloud Computing (CLOUD), Jul. 2011, pp. 324-331. H.N. Van et al., "SLA-Aware Virtual Resource Management for Cloud Infrastructures," Proceedings of the 9th IEEE International Conference on Computer and Information Technology (CIT), 2009, pp. 357-362, vol. 2, Xiamen, China. IBM, "System and Method for Scalable Discovery and Monitoring of SAN Devices," ip.com, IPCOM000186557, Aug. 26, 2009, 4 pages. J. Dean et al., MapReduce: Simplified Data Processing on Large Clusters, Communications of the ACM, Jan. 2008, pp. 107-113, vol. 51, No. 1. J. Lin et al., "Experiencing SAX: A Novel Symbolic Representation of Time Series," Data Mining and Knowledge Discovery, Oct. 2007, pp. 107-144, vol. 15, No. 2. Jason Meyers, "Survey: Cloud Still Underutilized," http://windowsitpro.com/article/cloud-business-issues/Survey-Cloud-still-underutilized-129534, WindowsITPro, Jan. 2011, 1 page. K. Appleby et al., "Océano-SLA Based Management of a Computing Utility," IEEE/IFIP International Symposium on Integrated Network Management Proceedings, May 2001, pp. 855-868. K. Appleby et al., "Océano—SLA Based Management of a Computing Utility," IEEE/IFIP International Symposium on Integrated Network Management Proceedings, May 2001, pp. 855-868. L.M. Vaquero et al., "A Break in the Clouds: Towards a Cloud Definition," ACM SIGCOMM Computer Communication Review, Jan. 2009, pp. 50-55, vol. 39, No. 1. L.M. Vaquero, "A Break in the Clouds: Towards a Cloud Definition," ACM SIGCOMM Computer Communication Review, Jan. 2009, pp. 50-55, vol. 39, No. 1. M. Armbrust et al., "A View of Cloud Computing," Communications of the ACM, Apr. 2010, pp. 50-58, vol. 53, No. 4. M. Armbrust et al., "Above the Clouds: A Berkeley View of Cloud Computing," Electrical Engineering and Computer Sciences, University of California at Berkeley, Technical Report No. UCB/EECS-2009-28, Feb. 2009, 25 pages. M. Liu et al., "On Trustworthiness of CPU Usage Metering and Accounting," Proceedings of the 2010 IEEE 30th International Conference on Distributed Computing Systems Workshops (ICDCSW), 2010, pp. 82-91. M.J. Agarwal et al., "Problem Determination in Enterprise Middleware Systems using Change Point Correlation of Time Series Data," 10th IEEE/IFIP Network Operations and Management Symposium (NOMS), 2006, pp. 471-482. Mongodb Manual 2.6.4, "Config Servers," http://docs.mongodb.org/manual/core/sharded-cluster-config-servers/, 2014, 2 pages. Mongodb Manual 2.6.4, "Map-Reduce," http://docs.mongodb.org/manual/core/map-reduce/, 2014, 2 pages. Mongodb Manual 2.6.4, "Sharded Cluster Query Routing," http://docs.mongodb.org/manual/core/sharded-cluster-query-router/, 2014, 6 pages. Mongodb Manual 2.6.4, "Shards," http://docs.mongodb.org/manual/core/sharded-cluster-shards/, 2014, 2 pages. N. Munga et al., "The Adoption of Open Source Software in Business Models: A Red Hat and IBM Case Study," Proceedings of the Annual Research Conference of the South African Institute of Computer Scientists and Information Technologists (SAICSIT), 2009, pp. 112-121, Vaal River, Gauteng, South Africa. Nick Booth, "Companies Wasting £1BN a Year on Underused Cloud Capacity," http://www.datacenterdynamics.com/focus/archive/2014/06/companies-wasting-%C2%A31bn-year-underused-cloud-capacity, Jun. 2014, 3 pages. O. Sefraoui et al., "OpenStack: Toward an Open-Source Solution for Cloud Computing," International Journal of Computer Applications, Oct. 2012, pp. 38-42, vol. 55, No. 3. Openstack Docs: Current, "Openstack Docs," http://docs.openstack.org/, 2014, 3 pages. Openstack Open Source Cloud Computing Software, "OpenStack," http://www.openstack.org/, 2014, 3 pages. P. Mell et al., "The NIST Definition of Cloud Computing," U.S. Department of Commerce, Computer Security Division, National Institute of Standards and Technology, Special Publication 800-145, Sep. 2011, 7 pages. P. Siirtola et al., "Improving the Classification Accuracy of Streaming Data Using SAX Similarity Features," Pattern Recognition Letters, Oct. 2011, pp. 1659-1668, vol. 32, No. 13. R. Buyya et al., "Cloud Computing and Emerging IT Platforms: Vision, Hype, and Reality for Delivering Computing as the 5th Utility," Future Generation Computer Systems, Jun. 2009, pp. 599-616, vol. 25, No. 6. R. Buyya et al., "InterCloud: Utility-Oriented Federation of Cloud Computing Environments for Scaling of Application Services," Proceedings of the 10th International Conference on Algorithms and Architectures for Parallel Processing (ICA3PP), May 2010, pp. 13-31, Busan, Korea, vol. Part I. R. Iyer et al., "Virtual Platform Architectures for Resource Metering in Datacenters," ACM SIGMETRICS Performance Evaluation Review, Sep. 2009, pp. 89-90, vol. 27, No. 2. R.N. Calheiros et al., "CloudSim: A Toolkit for Modeling and Simulation of Cloud Computing Environments and Evaluation of Resource Provisioning Algorithms," Software-Practice and Experience, Jan. 2011, pp. 23-50, vol. 41, No. 1. R.N. Calheiros et al., "CloudSim: A Toolkit for Modeling and Simulation of Cloud Computing Environments and Evaluation of Resource Provisioning Algorithms," Software—Practice and Experience, Jan. 2011, pp. 23-50, vol. 41, No. 1. Rick Cattell, "Scalable SQL and NoSQL Data Stores," ACM SIGMOD Record, Dec. 2010, pp. 12-27, vol. 39, No. 4. S. Das et al., "ElasTraS: An Elastic, Scalable, and Self Managing Transactional Database for the Cloud," ACM Transactions on Database Systems (TODS), Apr. 2013, 5 pages, vol. 38, No. 1, Article No. 5. S. Kashyap et al., "Efficient Constraint Monitoring Using Adaptive Thresholds," IEEE 24th International Conference on Data Engineering (ICDE), Apr. 2008, pp. 526-535. S. Marston et al., "Cloud Computing-The Business Perspective," Decision Support Systems, Apr. 2011, pp. 176-189, vol. 51, No. 1. S. Marston et al., "Cloud Computing—The Business Perspective," Decision Support Systems, Apr. 2011, pp. 176-189, vol. 51, No. 1. S. Meng et al., "Reliable State Monitoring in Cloud Datacenters," 2012 IEEE Fifth International Conference on Cloud Computing (CLOUD), 2012, pp. 951-958. S. Meng et al., "Volley: Violation Likelihood Based State Monitoring for Datacenters," 2013 IEEE 33rd International Conference on Distributed Computing Systems (ICDS), Jul. 2014, pp. 1-10. Salman A. Baset, "Open Source Cloud Technologies," Proceedings of the Third ACM Symposium on Cloud Computing (SOCC), Oct. 2012, 3 pages, Article No. 28. Steve Vinoski, "Advanced Message Queuing Protocol," IEEE Internet Computing, Nov. 2006, pp. 87-89, vol. 10, No. 6. W. Richter et al., "Agentless Cloud-Wide Streaming of Guest File System Updates," IEEE International Conference on Cloud Engineering (IC2E), 2014, pp. 7-16. W. Richter et al., "Agentless Cloud-Wide Streaming of Guest File System Updates," Proceedings of the 2014 IEEE International Conference on Cloud Engineering (IC2E), 2014, pp. 7-16. Wiki, "OpenStack Wiki," https://wiki.openstack.org/wiki/Main_Page, 2014, 3 pages. X. Jiang et al., ""Out-of-the-Box" Monitoring of VM-Based High-Interaction Honeypots," Proceedings of the 10th International Conference on Recent Advances in Intrusion Detection (RAID), Sep. 2007, pp. 198-218, Gold Goast, Australia. X. Jiang et al., "'Out-of-the-Box' Monitoring of VM-Based High-Interaction Honeypots," Proceedings of the 10th International Conference on Recent Advanced in Intrusion Detection (RAID '07), 2007, pp. 198-218. X. Jiang et al., "‘Out-of-the-Box’ Monitoring of VM-Based High-Interaction Honeypots," Proceedings of the 10th International Conference on Recent Advanced in Intrusion Detection (RAID '07), 2007, pp. 198-218. Y. He et al., "RCFile: A Fast and Space-Efficient Data Placement Structure in MapReduce-Based Warehouse Systems," IEEE 27th International Conference on Data Engineering (ICDE), Apr. 2011, pp. 1199-1208, Hannover, Germany. Z. Gong et al., "PRESS: PRedictive Elastic ReSource Scaling for Cloud Systems," Proceedings of the 6th IEEE/IFIP International Conference on Network and Service Management (CNSM), Oct. 2010, 8 pages, Niagara Falls, Canada. Cited By (1) * Cited by examiner, † Cited by third party Publication number Priority date Publication date Assignee Title US10659313B2 (en) * 2016-08-11 2020-05-19 Rescale, Inc. Dynamic optimization of simulation resources Also Published As Publication number Publication date US20160094401A1 (en) 2016-03-31 Similar Documents Publication Publication Date Title US10333861B2 (en) Modular cloud computing system Coutinho et al. Elasticity in cloud computing: a survey US10528266B2 (en) Allocation and balancing of storage resources US9584597B2 (en) Hardware level generated interrupts indicating load balancing status for a node in a virtualized computing environment EP3069228B1 (en) Partition-based data stream processing framework Shen et al. Statistical characterization of business-critical workloads hosted in cloud datacenters Fu et al. DRS: dynamic resource scheduling for real-time analytics over fast streams Zhang et al. Live video analytics at scale with approximation and delay-tolerance US8769238B1 (en) Load rebalancing for shared resource US10691716B2 (en) Dynamic partitioning techniques for data streams US9244735B2 (en) Managing resource allocation or configuration parameters of a model building component to build analytic models to increase the utility of data analysis applications US10467105B2 (en) Chained replication techniques for large-scale data streams US9531604B2 (en) Prediction-based provisioning planning for cloud environments US20180189367A1 (en) Data stream ingestion and persistence techniques US9742652B2 (en) Proactive identification of hotspots in a cloud computing environment US10162669B2 (en) Dynamic relocation of applications in a cloud application service model US20150106578A1 (en) Systems, methods and devices for implementing data management in a distributed data storage system US9276959B2 (en) Client-configurable security options for data streams US9323561B2 (en) Calibrating cloud computing environments US8909734B2 (en) Migrating data between networked computing environments US8826277B2 (en) Cloud provisioning accelerator US9866481B2 (en) Comprehensive bottleneck detection in a multi-tier enterprise storage system US10594571B2 (en) Dynamic scaling of storage volumes for storage client file systems US9626208B2 (en) Managing stream components based on virtual machine performance adjustments US8539163B1 (en) Speculative reads Legal Events Date Code Title Description AS Assignment Owner name: INTERNATIONAL BUSINESS MACHINES CORPORATION, NEW Y Free format text: ASSIGNMENT OF ASSIGNORS INTEREST;ASSIGNORS:ANWAR, ALI;KOCHUT, ANDRZEJ;SAILER, ANCA;AND OTHERS;SIGNING DATES FROM 20151020 TO 20181110;REEL/FRAME:047552/0550 STPP Information on status: patent application and granting procedure in general Free format text: RESPONSE TO NON-FINAL OFFICE ACTION ENTERED AND FORWARDED TO EXAMINER STPP Information on status: patent application and granting procedure in general Free format text: NOTICE OF ALLOWANCE MAILED -- APPLICATION RECEIVED IN OFFICE OF PUBLICATIONS STPP Information on status: patent application and granting procedure in general Free format text: PUBLICATIONS -- ISSUE FEE PAYMENT VERIFIED STCF Information on status: patent grant Free format text: PATENTED CASE
__label__pos
0.569235
Java Caffeine高性能内存 Caffeine是使用Java8对Guava缓存的重写版本,在Spring Boot 2.0中将取代,基于LRU算法实现,支持多种缓存过期策略。 1、pom.xml 1 2 3 4 5 <!--缓存--> <dependency> <groupId>com.github.ben-manes.caffeine</groupId> <artifactId>caffeine</artifactId> </dependency> 2、配置 CacheConfig.java 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 @Configuration @EnableCaching public class CacheConfig { public static final int DEFAULT_MAXSIZE = 50000; public static final int DEFAULT_TTL = 10; /** * 定義cache名稱、超時時長(秒)、最大容量 * 每个cache缺省:10秒超时、最多缓存50000条数据,需要修改可以在构造方法的参数中指定。 */ public static enum Caches { querySnapshotBlockList(3, 1000), querySnapshotBlockBySnapshotHash(3, 1000), queryAccountChainBlock(3, 1000), queryAccountView(3, 1000), queryAccountBlock(3, 1000), transactionTimeLine(60, 1000), selectAccountTokenBalanceRank(60, 100), listToken(3, 100), detailToken(3, 100), generalDay(60 * 60, 100), generalSec(3, 100); Caches() { } Caches(int ttl) { this.ttl = ttl; } Caches(int ttl, int maxSize) { this.ttl = ttl; this.maxSize = maxSize; } private int maxSize = DEFAULT_MAXSIZE; //最大數量 private int ttl = DEFAULT_TTL; //过期时间(秒) public int getMaxSize() { return maxSize; } public int getTtl() { return ttl; } } /** * 创建基于Caffeine的Cache Manager * * @return */ @Bean(name = "caffeineCacheManager") @Primary public CacheManager caffeineCacheManager() { SimpleCacheManager cacheManager = new SimpleCacheManager(); ArrayList<CaffeineCache> caches = new ArrayList<CaffeineCache>(); for (Caches c : Caches.values()) { caches.add(new CaffeineCache(c.name(), Caffeine.newBuilder().recordStats() .expireAfterWrite(c.getTtl(), TimeUnit.SECONDS) .maximumSize(c.getMaxSize()) .build()) ); } cacheManager.setCaches(caches); return cacheManager; } @Bean(name = "cacheKeyGenerator") public KeyGenerator cacheKeyGenerator() {//缓存key生成者 CacheKeyGenerator cacheKeyGenerator = new CacheKeyGenerator(); return cacheKeyGenerator; } } 3、使用 1 2 3 4 5 6 7 8 9 10 11 @Cacheable(value = "getTokenInfoById", key = "T(String).valueOf(#tokenCode)", unless = "#result == null") public SimpleToken getTokenInfoById(Long tokenCode) { InfoTokenExample infoTokenExample = new InfoTokenExample(); infoTokenExample.createCriteria().andIdEqualTo(tokenCode); List<InfoToken> infoTokens = infoTokenMapper.selectByExample(infoTokenExample); if (CollectionUtils.isEmpty(infoTokens)) { logger.error("tokenInfo is null,id:{}", tokenCode); return null; } return SimpleToken.builder().tokenCode(tokenCode).symbol(infoTokens.get(0).getSymbol()).tokenId(infoTokens.get(0).getTokenId()).build(); } tokenId内存内存中存在时直接使用,否则从库中查,然后放内存。
__label__pos
0.990948
#!/usr/bin/perl # # Lista de módulos instalados, usando el módulo File::Find # use CGI qw(:standard ); use CGI::Carp qw(fatalsToBrowser); use File::Find 'find'; use File::Spec::Functions; print header, start_html('Lista de modulos'), h1('Lista de módulos'), hr; my $salida; find( { wanted => sub { if (/\.pm\z/) { $salida .= canonpath $_; $salida .= "\n"; } }, no_chdir => 1, }, @INC ); print pre($salida); print hr, p("Versión de Perl: $^V"), ; print end_html;
__label__pos
0.850614
w3resource Java Inheritance: Exercises, Practice, Solution Java Inheritance Exercises [10 exercises with solution] From Oracle - Java Inheritance: In the Java language, classes can be derived from other classes, thereby inheriting fields and methods from those classes. Definitions: A class that is derived from another class is called a subclass (also a derived class, extended class, or child class). The class from which the subclass is derived is called a superclass (also a base class or a parent class). Excepting Object, which has no superclass, every class has one and only one direct superclass (single inheritance). In the absence of any other explicit superclass, every class is implicitly a subclass of Object. Classes can be derived from classes that are derived from classes that are derived from classes, and so on, and ultimately derived from the topmost class, Object. Such a class is said to be descended from all the classes in the inheritance chain stretching back to Object. [An editor is available at the bottom of the page to write and execute the scripts. Go to the editor] 1. Write a Java program to create a class called Animal with a method called makeSound(). Create a subclass called Cat that overrides the makeSound() method to bark. Click me to see the solution 2. Write a Java program to create a class called Vehicle with a method called drive(). Create a subclass called Car that overrides the drive() method to print "Repairing a car". Click me to see the solution 3. Write a Java program to create a class called Shape with a method called getArea(). Create a subclass called Rectangle that overrides the getArea() method to calculate the area of a rectangle. Click me to see the solution 4. Write a Java program to create a class called Employee with methods called work() and getSalary(). Create a subclass called HRManager that overrides the work() method and adds a new method called addEmployee(). Click me to see the solution 5. Write a Java program to create a class known as "BankAccount" with methods called deposit() and withdraw(). Create a subclass called SavingsAccount that overrides the withdraw() method to prevent withdrawals if the account balance falls below one hundred. Click me to see the solution 6. Write a Java program to create a class called Animal with a method named move(). Create a subclass called Cheetah that overrides the move() method to run. Click me to see the solution 7. Write a Java program to create a class known as Person with methods called getFirstName() and getLastName(). Create a subclass called Employee that adds a new method named getEmployeeId() and overrides the getLastName() method to include the employee's job title. Click me to see the solution 8. Write a Java program to create a class called Shape with methods called getPerimeter() and getArea(). Create a subclass called Circle that overrides the getPerimeter() and getArea() methods to calculate the area and perimeter of a circle. Click me to see the solution 9. Write a Java program to create a vehicle class hierarchy. The base class should be Vehicle, with subclasses Truck, Car and Motorcycle. Each subclass should have properties such as make, model, year, and fuel type. Implement methods for calculating fuel efficiency, distance traveled, and maximum speed. Click me to see the solution 10. Write a Java program that creates a class hierarchy for employees of a company. The base class should be Employee, with subclasses Manager, Developer, and Programmer. Each subclass should have properties such as name, address, salary, and job title. Implement methods for calculating bonuses, generating performance reports, and managing projects. Click me to see the solution Java Code Editor: More to Come ! Do not submit any solution of the above exercises at here, if you want to contribute go to the appropriate exercise page.  Become a Patron! Follow us on Facebook and Twitter for latest update.
__label__pos
0.999896
A-level Mathematics/Edexcel/Core 1/Integration From Wikibooks, open books for an open world Jump to: navigation, search Basics of integration[edit] Integration is the opposite of differentiation. For a power of x, you add 1 to the power, divide by the new power and add c, the constant of integration. Note that this rule will not work when the power of x is -1, this requires more advanced methods. The constant of integration is required because if a constant (i.e. a number without x in it) is differentiated it will become zero, and from just integration there is no way to determine the value of this constant. For example: \int 2x \,\, dx becomes: \displaystyle y = x^2 + c Integrating fractions[edit] Fractions with an x term in the denominator cannot be integrated as they are; the x term must be brought up to the working line. This can be done easily with the laws of indices. For example: \int \frac{2}{x^2} \,\, dx = \int 2x^{-2} \,\, dx Determining the value of c[edit] You may be given a point on a curve and asked to determine the value of the constant of integration, c. This is quite simple, as the point is given as (x,y); the values of x and y can be plugged in and the equation solved for c. Worked example: The the gradient of the curve c is given by \frac{dy}{dx} = 2x. The point (3,12) lies on c. Hence, find the equation for c. y= \int 2x \,\, dx y = x^2 + c Plug in values x = 3, y = 12. 12 = 3^2 + c 12 - 9 = c 3 = c y = x^2 + 3
__label__pos
0.999827
Quickly strip formatting from copied text When copying text in OS X, you wind up with one of two things on the clipboard: formatted text, which retains font and style information, and unformatted text, which is simply the bare text. Which form you get is up to the program from which you’re doing the copying—you don’t have any say in the matter. Most of the time, this just works out. Sometimes, though, you want plain text instead of rich text. A good example is when copying text from Word and pasting it into iChat—when you paste into iChat, your pasted text is sent as an image, instead of text. Almost certainly, this is not what you intended. In the case of Word and iChat, there’s a simple solution—and one that works in many other programs. Instead of using Command-V (or Edit -> Paste) to paste the copied text, try Command-Option-V (or Edit -> Paste and Match Style). This will convert Word’s stylized text into iChat’s expected formatting, so it will then paste and match whatever you’ve set for your iChat font and style preferences. This Paste and Match Style menu option exists in many programs, and it’s the best way to solve this problem. But not all programs offer it, so what do you do if you run into one that doesn’t? As a general solution, you can use an intermediary program that does—TextEdit, for instance. You could also paste it into a TextEdit document that was set to plain text mode (this will strip all formatting) and then paste it into your destination program. Here’s one more method—a specialized solution for an often-copied-from source program that doesn’t require the use of any third-party applications. In Safari, copying portions of a web page retains all their formatting (and URLs are pasted as clickable links, too). If you don’t want the formatting in your destination document, and it doesn’t offer the Paste and Match Style option, here’s a fast way to strip the formatting within Safari itself. Select the text to be copied, copy it (Command-C), click once in the Google search box, then paste the copied text (Command-V). Even if you’ve selected a lot of text to copy, it will all paste successfully in the search box. Finally, select all in the search box (Command-A), copy that (Command-C), then switch to your destination program and paste (Command-V). If, like me, you prefer the keyboard, you can get to the Google search box in Safari by pressing Command-L (which selects the URL bar) then Tab. So the full sequence in Safari is: select text, Command-C, Command-L, Tab, Command-V, Command-A, Command-C. It looks much worse in writing than it is in actual use, and it definitely beats stopping at an intermediary application if your target app lacks the Paste and Match Style menu item. I sometimes use this method when the destination program has that menu item, simply because my fingers are so ingrained to type Command-V, not Command-Option-V. Subscribe to the Best of Macworld Newsletter Add Your Comment user avatar
__label__pos
0.770812
home | career | drupal | java | mac | mysql | perl | scala | uml | unix   Java example source code file (PoissonDistribution.java) This example Java source code file (PoissonDistribution.java) is included in the alvinalexander.com "Java Source Code Warehouse" project. The intent of this project is to help you "Learn Java by Example" TM. Learn more about this Java project at its project page. Java - Java tags/keywords abstractintegerdistribution, default_epsilon, default_max_iterations, exponentialdistribution, normaldistribution, notstrictlypositiveexception, override, poissondistribution, well19937c The PoissonDistribution.java Java example source code /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.commons.math3.distribution; import org.apache.commons.math3.exception.NotStrictlyPositiveException; import org.apache.commons.math3.exception.util.LocalizedFormats; import org.apache.commons.math3.random.RandomGenerator; import org.apache.commons.math3.random.Well19937c; import org.apache.commons.math3.special.Gamma; import org.apache.commons.math3.util.CombinatoricsUtils; import org.apache.commons.math3.util.FastMath; import org.apache.commons.math3.util.MathUtils; /** * Implementation of the Poisson distribution. * * @see <a href="http://en.wikipedia.org/wiki/Poisson_distribution">Poisson distribution (Wikipedia) * @see <a href="http://mathworld.wolfram.com/PoissonDistribution.html">Poisson distribution (MathWorld) */ public class PoissonDistribution extends AbstractIntegerDistribution { /** * Default maximum number of iterations for cumulative probability calculations. * @since 2.1 */ public static final int DEFAULT_MAX_ITERATIONS = 10000000; /** * Default convergence criterion. * @since 2.1 */ public static final double DEFAULT_EPSILON = 1e-12; /** Serializable version identifier. */ private static final long serialVersionUID = -3349935121172596109L; /** Distribution used to compute normal approximation. */ private final NormalDistribution normal; /** Distribution needed for the {@link #sample()} method. */ private final ExponentialDistribution exponential; /** Mean of the distribution. */ private final double mean; /** * Maximum number of iterations for cumulative probability. Cumulative * probabilities are estimated using either Lanczos series approximation * of {@link Gamma#regularizedGammaP(double, double, double, int)} * or continued fraction approximation of * {@link Gamma#regularizedGammaQ(double, double, double, int)}. */ private final int maxIterations; /** Convergence criterion for cumulative probability. */ private final double epsilon; /** * Creates a new Poisson distribution with specified mean. * <p> * <b>Note: this constructor will implicitly create an instance of * {@link Well19937c} as random generator to be used for sampling only (see * {@link #sample()} and {@link #sample(int)}). In case no sampling is * needed for the created distribution, it is advised to pass {@code null} * as random generator via the appropriate constructors to avoid the * additional initialisation overhead. * * @param p the Poisson mean * @throws NotStrictlyPositiveException if {@code p <= 0}. */ public PoissonDistribution(double p) throws NotStrictlyPositiveException { this(p, DEFAULT_EPSILON, DEFAULT_MAX_ITERATIONS); } /** * Creates a new Poisson distribution with specified mean, convergence * criterion and maximum number of iterations. * <p> * <b>Note: this constructor will implicitly create an instance of * {@link Well19937c} as random generator to be used for sampling only (see * {@link #sample()} and {@link #sample(int)}). In case no sampling is * needed for the created distribution, it is advised to pass {@code null} * as random generator via the appropriate constructors to avoid the * additional initialisation overhead. * * @param p Poisson mean. * @param epsilon Convergence criterion for cumulative probabilities. * @param maxIterations the maximum number of iterations for cumulative * probabilities. * @throws NotStrictlyPositiveException if {@code p <= 0}. * @since 2.1 */ public PoissonDistribution(double p, double epsilon, int maxIterations) throws NotStrictlyPositiveException { this(new Well19937c(), p, epsilon, maxIterations); } /** * Creates a new Poisson distribution with specified mean, convergence * criterion and maximum number of iterations. * * @param rng Random number generator. * @param p Poisson mean. * @param epsilon Convergence criterion for cumulative probabilities. * @param maxIterations the maximum number of iterations for cumulative * probabilities. * @throws NotStrictlyPositiveException if {@code p <= 0}. * @since 3.1 */ public PoissonDistribution(RandomGenerator rng, double p, double epsilon, int maxIterations) throws NotStrictlyPositiveException { super(rng); if (p <= 0) { throw new NotStrictlyPositiveException(LocalizedFormats.MEAN, p); } mean = p; this.epsilon = epsilon; this.maxIterations = maxIterations; // Use the same RNG instance as the parent class. normal = new NormalDistribution(rng, p, FastMath.sqrt(p), NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY); exponential = new ExponentialDistribution(rng, 1, ExponentialDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY); } /** * Creates a new Poisson distribution with the specified mean and * convergence criterion. * * @param p Poisson mean. * @param epsilon Convergence criterion for cumulative probabilities. * @throws NotStrictlyPositiveException if {@code p <= 0}. * @since 2.1 */ public PoissonDistribution(double p, double epsilon) throws NotStrictlyPositiveException { this(p, epsilon, DEFAULT_MAX_ITERATIONS); } /** * Creates a new Poisson distribution with the specified mean and maximum * number of iterations. * * @param p Poisson mean. * @param maxIterations Maximum number of iterations for cumulative * probabilities. * @since 2.1 */ public PoissonDistribution(double p, int maxIterations) { this(p, DEFAULT_EPSILON, maxIterations); } /** * Get the mean for the distribution. * * @return the mean for the distribution. */ public double getMean() { return mean; } /** {@inheritDoc} */ public double probability(int x) { final double logProbability = logProbability(x); return logProbability == Double.NEGATIVE_INFINITY ? 0 : FastMath.exp(logProbability); } /** {@inheritDoc} */ @Override public double logProbability(int x) { double ret; if (x < 0 || x == Integer.MAX_VALUE) { ret = Double.NEGATIVE_INFINITY; } else if (x == 0) { ret = -mean; } else { ret = -SaddlePointExpansion.getStirlingError(x) - SaddlePointExpansion.getDeviancePart(x, mean) - 0.5 * FastMath.log(MathUtils.TWO_PI) - 0.5 * FastMath.log(x); } return ret; } /** {@inheritDoc} */ public double cumulativeProbability(int x) { if (x < 0) { return 0; } if (x == Integer.MAX_VALUE) { return 1; } return Gamma.regularizedGammaQ((double) x + 1, mean, epsilon, maxIterations); } /** * Calculates the Poisson distribution function using a normal * approximation. The {@code N(mean, sqrt(mean))} distribution is used * to approximate the Poisson distribution. The computation uses * "half-correction" (evaluating the normal distribution function at * {@code x + 0.5}). * * @param x Upper bound, inclusive. * @return the distribution function value calculated using a normal * approximation. */ public double normalApproximateProbability(int x) { // calculate the probability using half-correction return normal.cumulativeProbability(x + 0.5); } /** * {@inheritDoc} * * For mean parameter {@code p}, the mean is {@code p}. */ public double getNumericalMean() { return getMean(); } /** * {@inheritDoc} * * For mean parameter {@code p}, the variance is {@code p}. */ public double getNumericalVariance() { return getMean(); } /** * {@inheritDoc} * * The lower bound of the support is always 0 no matter the mean parameter. * * @return lower bound of the support (always 0) */ public int getSupportLowerBound() { return 0; } /** * {@inheritDoc} * * The upper bound of the support is positive infinity, * regardless of the parameter values. There is no integer infinity, * so this method returns {@code Integer.MAX_VALUE}. * * @return upper bound of the support (always {@code Integer.MAX_VALUE} for * positive infinity) */ public int getSupportUpperBound() { return Integer.MAX_VALUE; } /** * {@inheritDoc} * * The support of this distribution is connected. * * @return {@code true} */ public boolean isSupportConnected() { return true; } /** * {@inheritDoc} * <p> * <strong>Algorithm Description: * <ul> * <li>For small means, uses simulation of a Poisson process * using Uniform deviates, as described * <a href="http://mathaa.epfl.ch/cours/PMMI2001/interactive/rng7.htm"> here. * The Poisson process (and hence value returned) is bounded by 1000 * mean. * </li> * <li>For large means, uses the rejection algorithm described in * <blockquote> * Devroye, Luc. (1981).<i>The Computer Generation of Poisson Random Variables * <strong>Computing vol. 26 pp. 197-207. * </blockquote> * </li> * </ul> * </p> * * @return a random value. * @since 2.2 */ @Override public int sample() { return (int) FastMath.min(nextPoisson(mean), Integer.MAX_VALUE); } /** * @param meanPoisson Mean of the Poisson distribution. * @return the next sample. */ private long nextPoisson(double meanPoisson) { final double pivot = 40.0d; if (meanPoisson < pivot) { double p = FastMath.exp(-meanPoisson); long n = 0; double r = 1.0d; double rnd = 1.0d; while (n < 1000 * meanPoisson) { rnd = random.nextDouble(); r *= rnd; if (r >= p) { n++; } else { return n; } } return n; } else { final double lambda = FastMath.floor(meanPoisson); final double lambdaFractional = meanPoisson - lambda; final double logLambda = FastMath.log(lambda); final double logLambdaFactorial = CombinatoricsUtils.factorialLog((int) lambda); final long y2 = lambdaFractional < Double.MIN_VALUE ? 0 : nextPoisson(lambdaFractional); final double delta = FastMath.sqrt(lambda * FastMath.log(32 * lambda / FastMath.PI + 1)); final double halfDelta = delta / 2; final double twolpd = 2 * lambda + delta; final double a1 = FastMath.sqrt(FastMath.PI * twolpd) * FastMath.exp(1 / (8 * lambda)); final double a2 = (twolpd / delta) * FastMath.exp(-delta * (1 + delta) / twolpd); final double aSum = a1 + a2 + 1; final double p1 = a1 / aSum; final double p2 = a2 / aSum; final double c1 = 1 / (8 * lambda); double x = 0; double y = 0; double v = 0; int a = 0; double t = 0; double qr = 0; double qa = 0; for (;;) { final double u = random.nextDouble(); if (u <= p1) { final double n = random.nextGaussian(); x = n * FastMath.sqrt(lambda + halfDelta) - 0.5d; if (x > delta || x < -lambda) { continue; } y = x < 0 ? FastMath.floor(x) : FastMath.ceil(x); final double e = exponential.sample(); v = -e - (n * n / 2) + c1; } else { if (u > p1 + p2) { y = lambda; break; } else { x = delta + (twolpd / delta) * exponential.sample(); y = FastMath.ceil(x); v = -exponential.sample() - delta * (x + 1) / twolpd; } } a = x < 0 ? 1 : 0; t = y * (y + 1) / (2 * lambda); if (v < -t && a == 0) { y = lambda + y; break; } qr = t * ((2 * y + 1) / (6 * lambda) - 1); qa = qr - (t * t) / (3 * (lambda + a * (y + 1))); if (v < qa) { y = lambda + y; break; } if (v > qr) { continue; } if (v < y * logLambda - CombinatoricsUtils.factorialLog((int) (y + lambda)) + logLambdaFactorial) { y = lambda + y; break; } } return y2 + (long) y; } } } Other Java examples (source code examples) Here is a short list of links related to this Java PoissonDistribution.java source code file: my book on functional programming   new blog posts   Copyright 1998-2019 Alvin Alexander, alvinalexander.com All Rights Reserved. A percentage of advertising revenue from pages under the /java/jwarehouse URI on this website is paid back to open source projects.  
__label__pos
0.997332
• Delhi • Last Update 07:40: pm news-details Education How to Master Tableau in 2024: A Step-by-Step Guide? Master Tableau: Comprehensive Learning Journey Embark on a comprehensive learning journey with Master Tableau. This guide equips you with essential skills and knowledge to navigate the dynamic landscape of data visualization. From understanding Tableau's interface to creating interactive dashboards, each step is meticulously crafted to empower your mastery. Explore data sources, unleash the power of visualizations, and embrace advanced features with confidence. Learn troubleshooting tips, engage with the Tableau community, and stay updated with the latest trends. With Master Tableau, you'll unlock the full potential of your data storytelling abilities, propelling you towards becoming a proficient Tableau user in 2024 and beyond. Start your journey today! What are the Benefits of Learning Tableau? Certainly! Here are the benefits of learning Tableau presented in points: Improved Data Visualization Capabilities: Acquiring knowledge of Tableau enables people to produce aesthetically pleasing and perceptive data visualizations. This ability is extremely helpful in many different industries where making decisions based on facts is essential. Improved Data Analysis Capabilities: Tableau enables users to analyze large datasets quickly and efficiently. With features like drag-and-drop functionality and intuitive interfaces, users can explore data trends and patterns with ease. Better Decision-Making: By providing clear and actionable insights, Tableau facilitates informed decision-making processes. Users can identify trends, correlations, and outliers in data, enabling organizations to make strategic decisions based on data-driven insights. Increased Productivity: Tableau's user-friendly interface and powerful features streamline the data analysis process, saving time and effort. Users can create dynamic dashboards and reports in minutes, freeing up valuable resources for other tasks. Effective Communication of Insights: Tableau allows users to communicate complex data insights clearly and concisely. With interactive dashboards and visualizations, users can present findings to stakeholders and colleagues effectively, facilitating understanding and collaboration. Career Advancement Opportunities: Proficiency in Tableau is a highly sought-after skill in today's job market. Learning Tableau opens up a wide range of career opportunities in fields such as data analysis, business intelligence, and data visualization. Integration with Other Tools: Tableau seamlessly integrates with other data analysis and visualization tools, such as SQL databases, Excel, and R. This interoperability enhances workflow efficiency and enables users to leverage the full potential of their data ecosystem. Community Support and Resources: The Tableau community is vibrant and supportive, offering a wealth of resources, including forums, user groups, and online tutorials. Engaging with the Tableau community provides opportunities for learning, networking, and professional growth. Stay Ahead of the Curve: As data becomes increasingly central to decision-making processes, proficiency in Tableau positions individuals and organizations at the forefront of data analytics and visualization trends. Empowerment Through Data: Ultimately, learning Tableau empowers individuals to unlock the transformative power of data, enabling them to derive meaningful insights and drive positive change in their organizations and communities. How to Learn Tableau—Especially for First-Time Learners Are you prepared to start your fascinating Tableau learning journey? Here's a guide tailored specifically for first-time learners to help you get started and master the art of data visualization: 1. Understand the Basics • Familiarize yourself with Tableau's interface and navigation tools. • Explore basic terminology such as dimensions, measures, and marks.   2. Take Advantage of Online Resources • Enroll in beginner-level online courses or tutorials offered by Tableau or other learning platforms. • Utilize Tableau's extensive library of instructional videos and documentation.   3. Hands-On Practice • Dive into hands-on exercises and projects to reinforce your learning. • Start with simple datasets and gradually progress to more complex ones.   4. Explore Sample Dashboards • Analyze sample dashboards provided by Tableau to understand design principles and best practices. • Deconstruct existing visualizations to learn how they were created.   5. Join the Tableau Community • Participate in Tableau user groups, forums, and online communities to connect with fellow learners and experts. • Ask for guidance, impart knowledge, and absorb experiences from others.   6. Attend Workshops and Webinars • Attend Tableau workshops, webinars, and conferences to gain insights into advanced features and techniques. • Network with industry professionals and learn about real-world applications of Tableau.   7. Experiment with Data • Explore different types of data and experiment with various visualization techniques. • Challenge yourself to create interactive dashboards and stories using your datasets.   8. Seek Feedback and Iterate • Showcase your work to mentors and peers for helpful criticism. • Continuously iterate and refine your visualizations based on feedback and lessons learned.   9. Stay Curious and Persistent • Embrace a growth mindset and remain curious about new features and updates in Tableau. • Stay persistent, and don't be afraid to tackle complex challenges along the way.   10. Celebrate Your Progress • Celebrate milestones and achievements as you progress on your Tableau learning journey. • Reflect on how far you've come and acknowledge your growth and accomplishments.   Learning Tableau is a rewarding and empowering experience. By following these steps and staying committed to your learning goals, you'll soon become proficient in Tableau and unlock the full potential of data visualization. Happy exploring Learn Tableau with DataCamp Learn Tableau with DataCamp and unlock the power of data visualization and analysis. With DataCamp's interactive and comprehensive platform, you'll gain hands-on experience and valuable skills to become proficient in Tableau. Whether you're a beginner or an experienced user, DataCamp offers structured courses designed to cater to learners of all levels. Dive into interactive exercises, projects, and challenges that simulate real-world scenarios, allowing you to apply Tableau concepts in practical contexts. Benefit from expert instruction from industry professionals who guide you through each lesson, providing insights and tips along the way. Engage with a vibrant community of learners and instructors, share ideas, ask questions, and collaborate to enhance your learning experience. Track your progress, earn certificates of completion, and expand your career opportunities with valuable Tableau skills gained through DataCamp's dynamic and flexible learning environment. Start your Tableau journey with DataCamp today and transform your data visualization capabilities. Challenges Faced by Novice Tableau Learners Novice Tableau learners often face several challenges as they embark on their journey to master the platform. Some of these challenges include: Complexity of Interface: The Tableau interface can appear overwhelming to beginners due to its numerous features and options. Novices may struggle to navigate through the various menus, tabs, and options. Data Preparation: Understanding how to prepare data for analysis in Tableau can be a significant hurdle for beginners. They may encounter challenges in connecting to different data sources, cleaning and structuring data, and understanding data types. Understanding Data Visualization Principles: Novice learners may find it challenging to grasp fundamental principles of data visualization such as choosing the right chart type, designing effective dashboards, and interpreting visualizations accurately. Calculations and Functions: Utilizing Tableau's calculations and functions effectively can be daunting for beginners. Understanding concepts like calculated fields, table calculations, LOD expressions, and parameters requires time and practice. Performance Optimization: Novice users may struggle with optimizing the performance of their Tableau workbooks, especially when dealing with large datasets or complex visualizations. Techniques like data blending, data extracts, and optimizing calculations are advanced concepts that require experience. Conclusion Mastering Tableau offers a transformative journey in data visualization. The comprehensive learning guide equips users with essential skills to navigate complex data landscapes effectively. From interface familiarity to advanced feature utilization, the journey empowers users to unlock data storytelling potential. By overcoming challenges and engaging with resources, learners can propel themselves toward proficiency, seizing opportunities in data-driven domains now and in the future. Start your Tableau journey today! You can share this post!
__label__pos
0.975973
Take the 2-minute tour × Mathematics Stack Exchange is a question and answer site for people studying math at any level and professionals in related fields. It's 100% free, no registration required. How can I get the (Volterra) operator from an equation of the type $$u''(x)+xu'(x)+u(x)=0\text{ ?}$$ I know that there is a general way of doing it, if you could point me at the proper book I'd be thankful! share|improve this question      It is simpler to solve this equations than to understand what you are asking about –  Norbert Sep 23 '12 at 16:51      Ok maybe I explain myself a little bad. Fredholm operators are helpful in order to solve differential equations, because you can reduce the problem to one of the tipe $u(x)=T(u(x))$ where $T$ is the operator, that is, the answer will be the eigenvalues of $T$. The question is adressed to people who know a little about Fredholm and Volterra operators. –  Miguel Sep 23 '12 at 17:40 1 Answer 1 I understand that you want to rewrite the differential equation in terms of an integral (Volterra-type) operator. The resulting operator $T$ will be Hilbert-Schmidt, hence compact, hence $I-T$ is Fredholm. Introducing $v=u'$, we get the system of 1st order equations $u'=v$, $v'=-u-xv$. Using the initial values $(u_0,v_0)$, we rewrite the IVP as a system $$u(t)=u_0+\int_0^t v(s)\,ds, \qquad v(t)=v_0+\int_0^t [-u(s)-xv(s)]\,ds$$ The desired operator $T$ takes the vector-valued function $(u,v)$ and produces $$t\mapsto \left(u_0+\int_0^t v(s)\,ds, v_0+\int_0^t [-u(s)-xv(s)]\,ds\right)$$ share|improve this answer Your Answer   discard By posting your answer, you agree to the privacy policy and terms of service. Not the answer you're looking for? Browse other questions tagged or ask your own question.
__label__pos
0.890284
Understanding the ZipManager Class of the CD Directory Manager Package In our digital world, handling zip files is a day-to-day task. You might know zip files as those little file icons with a zipper, which, when opened, reveal other files or folders. The ZipManager class is a powerful tool to handle these zip files in Python, making the process seamless, whether you want to create, unpack, or even peek into these zipped folders.   zipmanager-class-in-cd-directory-manager-Python   Installation To get started, you need to install the `cd-directory-manager` package: pip install cd-directory-manager Check for updates, use: pip install --upgrade cd-directory-manager What’s Inside? 1. zip_file: This method takes a file, zips it up, and saves it at a specified location. You can even add a password for extra security. 2. zip_directories: Want to zip up a whole directory? This method does that. It also lets you exclude certain files if you want, and yes, you can add a password! 3. unzip: Found a zip file you want to open? This method unzips it into a specified directory. If it’s password-protected, no worries; just provide the password. 4. unzip_files_and_directories: This is a versatile method that unzips both files and directories. It’s like the Swiss army knife of unzipping. 5. get_zip_content_list: Curious about what’s inside a zip without opening it? This method provides a list of its contents. 6. is_zip_password_protected: This method checks if a zip file is password-protected, so you know whether you’ll need a password to open it. Examples of Each Method Let’s dive into examples to see these methods in action: 1. zip_file: from cd_directory_manager.zipper import ZipManager as zm result = zm.zip_file('path/to/source_file.txt', 'path/to/destination.zip', password='securepass') print(result) # This will print True if the operation was successful. 2. zip_directories: result = zm.zip_directories('path/to/source_directory', 'path/to/destination.zip', exclude_patterns=['*.txt'], password='securepass') print(result) # True means it worked! 3. unzip: result = zm.unzip('path/to/source.zip', 'path/to/destination_directory', password='securepass') print(result) 4. unzip_files_and_directories: result = zm.unzip_files_and_directories('path/to/source.zip', 'path/to/destination_directory', password='securepass') print(result) 5. get_zip_content_list: content = zm.get_zip_content_list('path/to/source.zip', password='securepass') print(content) 6. is_zip_password_protected: is_protected = zm.is_zip_password_protected('path/to/source.zip') print(is_protected) Posted in CD Directory Manager and tagged , , , , , . Leave a Reply
__label__pos
0.991863
Multi-Range Selection (React) Set the FlexGrid's selectionMode property to MultiRange to enable Excel-style multi-range selection. Users will be able to select multiple ranges by ctrl-clicking and dragging on the grid. The sample shows how you can provide Excel-style dynamic data summaries for the current selection (regular or multi-range) and how to export selected ranges to CSV files. Note that clipboard and export commands only work for multi-range selections if all selected ranges refer to the same column range or to the same row range. (Excel also behaves this way.) This example uses React. import "@grapecity/wijmo.styles/wijmo.css"; import "bootstrap.css"; import "./app.css"; import * as React from 'react'; import * as ReactDOM from 'react-dom'; import { glbz, isNumber, saveFile } from "@grapecity/wijmo"; import { FlexGrid } from "@grapecity/wijmo.react.grid"; import { CellRange } from "@grapecity/wijmo.grid"; ; class App extends React.Component { constructor(props) { super(props); this.grid = null; this.state = { data: this.getData() }; } render() { return <div className="container-fluid"> <FlexGrid alternatingRowStep={0} showMarquee={true} anchorCursor={true} selectionMode="MultiRange" showSelectedHeaders="All" itemsSource={this.state.data} initialized={s => this.grid = s} // update aggregate display when selection changes selectionChanged={(s) => { // calculate aggregates let tally = { cnt: 0, cntAll: 0, sum: 0, avg: 0 }, ranges = this.grid.selectedRanges; for (let i = 0; i < ranges.length; i++) { this.aggregateRange(tally, this.grid, ranges, i); } // update the display using template literals let msg = (tally.cnt > 1) ? glbz `Count: <b>${tally.cntAll}:n0</b>\tAverage: <b>${tally.avg}:g4\tSum: <b>${tally.sum}:g4</b>` : (tally.cntAll > 1) ? glbz `Count: <b>${tally.cntAll}:n0</b>` : 'Ready'; // update the display using wijmo.format //let msg = (tally.cnt > 1) // ? format('Count: <b>{cntAll:n0}</b>\tAverage: <b>{avg:g4}</b>\tSum: <b>{sum:g4}</b>', tally) // : (tally.cntAll > 1) // ? format('Count: <b>{cntAll:n0}</b>', tally) // : 'Ready'; // show the result document.getElementById('mr-aggregates').innerHTML = msg; }}/> <pre id="mr-aggregates">Ready</pre> <button className="btn btn-primary" onClick={() => this.exportGridToCsv(this.grid, false)}> Export Whole Grid </button> {' '} <button id="btn-csv-sel" className="btn btn-primary" onClick={() => this.exportGridToCsv(this.grid, true)}> Export Selection </button> </div>; } // update aggregates for a range, accounting for overlapping ranges aggregateRange(tally, grid, ranges, index) { let rng = ranges[index]; for (let r = rng.topRow; r <= rng.bottomRow; r++) { for (let c = rng.leftCol; c <= rng.rightCol; c++) { // account for overlapping ranges let overlapped = false; for (let i = 0; i < index && !overlapped; i++) { let rng = ranges[i]; if (rng.contains(r, c)) { overlapped = true; } } // tally non-overlapped cells if (!overlapped) { let data = grid.getCellData(r, c, false); if (isNumber(data)) { // handle numbers tally.cnt++; tally.sum += data; } if (data !== '' && data !== null) { // handle non-empty cells tally.cntAll++; } } } } tally.avg = tally.cnt > 0 ? tally.sum / tally.cnt : 0; } // export the grid or selection to CSV exportGridToCsv(grid, selection) { let rng = selection ? null // selection plus extended selection : new CellRange(0, 0, grid.rows.length - 1, grid.columns.length - 1); let csv = grid.getClipString(rng, true, true); saveFile(csv, selection ? 'FlexGridSelection.csv' : 'FlexGrid.csv'); } // create some random data getData() { let data = []; let countries = 'Austria,Belgium,Chile,Denmark,Finland,Japan,UK'.split(','); for (let i = 0; i < 300; i++) { data.push({ id: i, from: countries[i % countries.length], to: countries[(i + 1) % countries.length], sales: Math.random() * 10000, expenses: Math.random() * 5000, amount: Math.random() * 10000, extra: Math.random() * 10000, }); } return data; } } ReactDOM.render(<App />, document.getElementById('app')); <!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <title>GrapeCity Wijmo FlexGrid Multiple Selection</title> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <!-- SystemJS --> <script src="node_modules/systemjs/dist/system.src.js"></script> <script src="systemjs.config.js"></script> <script> System.import('./src/app'); </script> </head> <body> <div id="app"></div> </body> </html> .wj-flexgrid { height: 300px; } (function (global) { System.config({ transpiler: 'plugin-babel', babelOptions: { es2015: true, react: true }, meta: { '*.css': { loader: 'css' } }, paths: { // paths serve as alias 'npm:': 'node_modules/' }, // map tells the System loader where to look for things map: { 'jszip': 'npm:jszip/dist/jszip.js', '@grapecity/wijmo': 'npm:@grapecity/wijmo/index.js', '@grapecity/wijmo.input': 'npm:@grapecity/wijmo.input/index.js', '@grapecity/wijmo.styles': 'npm:@grapecity/wijmo.styles', '@grapecity/wijmo.cultures': 'npm:@grapecity/wijmo.cultures', '@grapecity/wijmo.chart': 'npm:@grapecity/wijmo.chart/index.js', '@grapecity/wijmo.chart.analytics': 'npm:@grapecity/wijmo.chart.analytics/index.js', '@grapecity/wijmo.chart.animation': 'npm:@grapecity/wijmo.chart.animation/index.js', '@grapecity/wijmo.chart.annotation': 'npm:@grapecity/wijmo.chart.annotation/index.js', '@grapecity/wijmo.chart.finance': 'npm:@grapecity/wijmo.chart.finance/index.js', '@grapecity/wijmo.chart.finance.analytics': 'npm:@grapecity/wijmo.chart.finance.analytics/index.js', '@grapecity/wijmo.chart.hierarchical': 'npm:@grapecity/wijmo.chart.hierarchical/index.js', '@grapecity/wijmo.chart.interaction': 'npm:@grapecity/wijmo.chart.interaction/index.js', '@grapecity/wijmo.chart.radar': 'npm:@grapecity/wijmo.chart.radar/index.js', '@grapecity/wijmo.chart.render': 'npm:@grapecity/wijmo.chart.render/index.js', '@grapecity/wijmo.chart.webgl': 'npm:@grapecity/wijmo.chart.webgl/index.js', '@grapecity/wijmo.gauge': 'npm:@grapecity/wijmo.gauge/index.js', '@grapecity/wijmo.grid': 'npm:@grapecity/wijmo.grid/index.js', '@grapecity/wijmo.grid.detail': 'npm:@grapecity/wijmo.grid.detail/index.js', '@grapecity/wijmo.grid.filter': 'npm:@grapecity/wijmo.grid.filter/index.js', '@grapecity/wijmo.grid.search': 'npm:@grapecity/wijmo.grid.search/index.js', '@grapecity/wijmo.grid.grouppanel': 'npm:@grapecity/wijmo.grid.grouppanel/index.js', '@grapecity/wijmo.grid.multirow': 'npm:@grapecity/wijmo.grid.multirow/index.js', '@grapecity/wijmo.grid.transposed': 'npm:@grapecity/wijmo.grid.transposed/index.js', '@grapecity/wijmo.grid.pdf': 'npm:@grapecity/wijmo.grid.pdf/index.js', '@grapecity/wijmo.grid.sheet': 'npm:@grapecity/wijmo.grid.sheet/index.js', '@grapecity/wijmo.grid.xlsx': 'npm:@grapecity/wijmo.grid.xlsx/index.js', '@grapecity/wijmo.grid.selector': 'npm:@grapecity/wijmo.grid.selector/index.js', '@grapecity/wijmo.grid.cellmaker': 'npm:@grapecity/wijmo.grid.cellmaker/index.js', '@grapecity/wijmo.grid.immutable': 'npm:@grapecity/wijmo.grid.immutable/index.js', '@grapecity/wijmo.touch': 'npm:@grapecity/wijmo.touch/index.js', '@grapecity/wijmo.cloud': 'npm:@grapecity/wijmo.cloud/index.js', '@grapecity/wijmo.nav': 'npm:@grapecity/wijmo.nav/index.js', '@grapecity/wijmo.odata': 'npm:@grapecity/wijmo.odata/index.js', '@grapecity/wijmo.olap': 'npm:@grapecity/wijmo.olap/index.js', '@grapecity/wijmo.pdf': 'npm:@grapecity/wijmo.pdf/index.js', '@grapecity/wijmo.viewer': 'npm:@grapecity/wijmo.viewer/index.js', '@grapecity/wijmo.xlsx': 'npm:@grapecity/wijmo.xlsx/index.js', '@grapecity/wijmo.undo': 'npm:@grapecity/wijmo.undo/index.js', '@grapecity/wijmo.interop.grid': 'npm:@grapecity/wijmo.interop.grid/index.js', "@grapecity/wijmo.react.chart.analytics": "npm:@grapecity/wijmo.react.chart.analytics/index.js", "@grapecity/wijmo.react.chart.animation": "npm:@grapecity/wijmo.react.chart.animation/index.js", "@grapecity/wijmo.react.chart.annotation": "npm:@grapecity/wijmo.react.chart.annotation/index.js", "@grapecity/wijmo.react.chart.finance.analytics": "npm:@grapecity/wijmo.react.chart.finance.analytics/index.js", "@grapecity/wijmo.react.chart.finance": "npm:@grapecity/wijmo.react.chart.finance/index.js", "@grapecity/wijmo.react.chart.hierarchical": "npm:@grapecity/wijmo.react.chart.hierarchical/index.js", "@grapecity/wijmo.react.chart.interaction": "npm:@grapecity/wijmo.react.chart.interaction/index.js", "@grapecity/wijmo.react.chart.radar": "npm:@grapecity/wijmo.react.chart.radar/index.js", "@grapecity/wijmo.react.chart": "npm:@grapecity/wijmo.react.chart/index.js", "@grapecity/wijmo.react.core": "npm:@grapecity/wijmo.react.core/index.js", "@grapecity/wijmo.react.gauge": "npm:@grapecity/wijmo.react.gauge/index.js", "@grapecity/wijmo.react.grid.detail": "npm:@grapecity/wijmo.react.grid.detail/index.js", "@grapecity/wijmo.react.grid.filter": "npm:@grapecity/wijmo.react.grid.filter/index.js", "@grapecity/wijmo.react.grid.grouppanel": "npm:@grapecity/wijmo.react.grid.grouppanel/index.js", '@grapecity/wijmo.react.grid.search': 'npm:@grapecity/wijmo.react.grid.search/index.js', "@grapecity/wijmo.react.grid.multirow": "npm:@grapecity/wijmo.react.grid.multirow/index.js", "@grapecity/wijmo.react.grid.sheet": "npm:@grapecity/wijmo.react.grid.sheet/index.js", '@grapecity/wijmo.react.grid.transposed': 'npm:@grapecity/wijmo.react.grid.transposed/index.js', '@grapecity/wijmo.react.grid.immutable': 'npm:@grapecity/wijmo.react.grid.immutable/index.js', "@grapecity/wijmo.react.grid": "npm:@grapecity/wijmo.react.grid/index.js", "@grapecity/wijmo.react.input": "npm:@grapecity/wijmo.react.input/index.js", "@grapecity/wijmo.react.olap": "npm:@grapecity/wijmo.react.olap/index.js", "@grapecity/wijmo.react.viewer": "npm:@grapecity/wijmo.react.viewer/index.js", "@grapecity/wijmo.react.nav": "npm:@grapecity/wijmo.react.nav/index.js", "@grapecity/wijmo.react.base": "npm:@grapecity/wijmo.react.base/index.js", 'jszip': 'npm:jszip/dist/jszip.js', 'react': 'npm:react/umd/react.production.min.js', 'react-dom': 'npm:react-dom/umd/react-dom.production.min.js', 'redux': 'npm:redux/dist/redux.min.js', 'react-redux': 'npm:react-redux/dist/react-redux.min.js', 'bootstrap.css': 'npm:bootstrap/dist/css/bootstrap.min.css', 'css': 'npm:systemjs-plugin-css/css.js', 'plugin-babel': 'npm:systemjs-plugin-babel/plugin-babel.js', 'systemjs-babel-build':'npm:systemjs-plugin-babel/systemjs-babel-browser.js' }, // packages tells the System loader how to load when no filename and/or no extension packages: { src: { defaultExtension: 'jsx' }, "node_modules": { defaultExtension: 'js' }, } }); })(this);
__label__pos
0.873046
Rajesh kannan Rajesh kannan - 1 year ago 55 JSON Question I have XML parsing response in Android how do i want to implement JSON parse on that xml response it's possible? I have XML parsing response in Android ,on that i want to implement JSON parse on that xml response it's possible? Answer Source What I understood from your question you want to parse XML to JSON and if that is the case you can use JAVA-JSON library check it out at GITHUB. Quick example JSONObject xmlJSONObj = XML.toJSONObject(XML_STRING); Recommended from our users: Dynamic Network Monitoring from WhatsUp Gold from IPSwitch. Free Download
__label__pos
0.998859
Affordable Automation Testing Course Fees: Empowering Your Career Growth Automation testing has emerged as a crucial aspect of the software development process in today’s digital age of rapid technological advancement. Organizations may improve testing efficiency, cut down on wasteful human intervention, and save time through automation testing. Due to the increasing need for competent automation testers, it is crucial to acquire the appropriate expertise. This is where learning about automated testing comes in. In this paper, we will delve into the importance of taking advantage of low-cost manual testing courses in fostering your professional development. Benefits of Automation Testing Courses: Understanding Automation Tests in Great Detail: Learning the Concepts, Tools, and Techniques of Automation Testing Courses in automation testing give students with a thorough grounding in the many concepts, tools, and techniques used in the discipline. Automation testing frameworks, scripting languages, and the art of developing and implementing test cases are all topics covered in these classes. Expertise in automation testing can help people advance in their careers and speed up the distribution of high-quality software. Career Opportunities:  The demand for skilled professionals in the field of automated testing is soaring as more and more companies embrace these methods. Those interested in meeting this demand might do so by taking an automation testing course. As a result, many new job openings have appeared in fields including information technology, banking, medicine, and e-commerce. Access to more interesting positions and greater earning possibilities can be gained through proficiency in automated testing. Cost-effectiveness and productivity: Automation testing saves time and money because it eliminates the need for testers to manually perform monotonous activities. The software development life cycle can be shortened by using automated test scripts to speed up the testing phase. Because of this boost in productivity, businesses can now release high-quality software in significantly less time. Individuals can help improve the efficiency of the software development process as a whole by enrolling in an automation testing course and learning how to write reliable and reusable automated test scripts. Increased Test Accuracy and Coverage: Manual Testing Courses are necessary, but it has its limits when it comes to coverage and quality assurance. In particular for large-scale applications, it is laborious, error-prone, and limited in its ability to cover all possible scenarios during testing. Automation testing gets rid of these constraints by offering the means to efficiently run a large number of test cases. Individuals can learn how to create and execute test suites that cover a wide variety of situations, maximizing test coverage and enhancing the quality of the software product as a whole, through automated testing courses. Traditional Cost of Automation Testing Courses Traditionally, automation testing courses have been known for their high fees, often making them inaccessible to individuals on a tight budget or students. The comprehensive curriculum, experienced instructors, and practical exposure offered by these courses justify the high costs, but it may not be feasible for everyone to afford them. Affordable Options Fortunately, there are several affordable options available today for individuals who wish to learn automation testing without breaking the bank. These options ensure that quality education and practical training are accessible to a wider audience: Online Automation Testing Courses Online platforms offer a variety of automation testing courses at affordable prices. These courses allow individuals to learn at their own pace and provide flexibility in terms of timings. Many online courses provide industry-recognized certifications, making the learning experience more valuable and appealing to employers. Community College or Vocational Institutes Community colleges and vocational institutes often offer automation testing courses as part of their curriculum. These courses have relatively lower fees compared to private institutions. They maintain a balance between theoretical knowledge and practical skills, ensuring that students have a strong foundation in automation testing. Workshops and Bootcamps Short-term workshops and bootcamps are an excellent option for individuals who want to quickly gain practical knowledge of automation testing. These programs are typically focused, intense, and provide hands-on experience. Although they are short in duration, they offer affordable automation testing course fees and are often taught by industry experts. Conclusion: The possibility for increased employment in software testing is enhanced by the low cost of training in automation testing. Courses in automated testing provide students with in-demand knowledge, abilities, and experience in the modern employment market. Moreover, manual testing classes that complement automation testing provide a solid basis and versatility that improve testing abilities. So, it’s important to take advantage of opportunities to take reasonably priced automation testing courses and set oneself up for a rewarding future in software testing. Leave a Comment
__label__pos
0.827093
How does the operating system function from a system point of view? Operating systems allow user application applications to interact with the system hardware. The operating system itself does not provide any functionality but provides a situation where various programs and systems can perform useful functions. It is software which handles the computer's functionality like scheduling, input/output operation resource allocation, files system manipulation, etc and also acts as an interface between the User and hardware. System views can be improved using a new GUI and adding new features to the OS which will result in a better user experience. The operating system is observed from the user point of view or system point of view. Let us discuss how the operating system is observed from a system point of view. System views According to the computer program, the operating system is a bridge between applications and hardware. It is very close to the hardware and is used to control it as needed. The different types of app views can be described as follows − • The system views the application you are using as a resource app. There are many resources such as CPU time, memory space, file storage space, I / O devices etc are required for the processes. It is the job of the operating system to distribute these resources intelligently in processes so that the computer system can work as smoothly as possible. • The app can also serve as a control system. It handles all processes and I / O devices so that the computer system works smoothly and there are no errors. It ensures that I / O devices work properly without creating problems. • Apps can also be viewed as a way to make using Hardware easier. • Computers are needed to easily solve user problems. However, it is not easy to work directly with computer hardware. Therefore, operating systems are designed to easily communicate with hardware. • The application can also be considered as a regular application after the computer program (known as the kernel) and hosting all application programs. Advertisements
__label__pos
0.989645
/* Threads compatibily routines for libgcc2 for VxWorks. */ /* Compile this one with gcc. */ /* Copyright (C) 1997 Free Software Foundation, Inc. Contributed by Mike Stump . This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ /* As a special exception, if you link this library with other files, some of which are compiled with GCC, to produce an executable, this library does not by itself cause the resulting executable to be covered by the GNU General Public License. This exception does not however invalidate any other reasons why the executable file might be covered by the GNU General Public License. */ #ifndef __gthr_vxworks_h #define __gthr_vxworks_h /* POSIX threads specific definitions. Easy, since the interface is just one-to-one mapping. */ #define __GTHREADS 1 #include #include /* typedef void *SEM_ID; */ typedef int __gthread_key_t; typedef char __gthread_once_t; typedef SEM_ID __gthread_mutex_t; #define __GTHREAD_MUTEX_INIT 0 #define __GTHREAD_ONCE_INIT 0 #ifndef REG_SAVED_REG static inline int __gthread_once (__gthread_once_t *once, void (*func) ()) { (*func)(); return 0; } extern __gthread_key_t eh_context_key; /* This is not the right way to do it, but the semantic of pthreads don't map well enough onto VxWorks. */ static void __ehdtor (void *pTcb) { int tid = (int) pTcb; void *p = (void*)taskVarGet(tid, &eh_context_key); if (p != (void*)-1) { if (p) free (p); taskVarSet(tid, &eh_context_key, 0); } } /* This only works for the code in libgcc2.c. */ static inline int __gthread_key_create (__gthread_key_t *key, void (*dtor) (void *)) { *key = 0; /* Do this first so that the task variables are visible during the running of the delete hook. */ taskVarInit(); /* We don't have a way to track dtor here, so instead, we register a generic routine that can cleanup any task. */ taskDeleteHookAdd (__ehdtor); return 0; } #define __gthread_setspecific(key, ptr) \ (key = (int) ptr, 0) static inline int __gthread_key_dtor (__gthread_key_t key, void *ptr) { /* Just reset the key value to zero. */ if (ptr) return __gthread_setspecific (key, 0); else return 0; } #define __gthread_key_delete(key) \ taskVarDelete (taskIdSelf (), &key) #define __gthread_getspecific(key) \ ((key == 0) \ ? ((taskVarAdd (taskIdSelf (), &key) != OK) \ ? (__terminate (), (void*)0) \ : (void*)0) \ : (void*)key) #endif static inline int __gthread_mutex_lock (__gthread_mutex_t *mutex) { if (*mutex == 0) *mutex = semMCreate (SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE); return semTake (*mutex, WAIT_FOREVER); } static inline int __gthread_mutex_trylock (__gthread_mutex_t *mutex) { if (*mutex == 0) *mutex = semMCreate (SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE); return semTake (*mutex, NO_WAIT); } static inline int __gthread_mutex_unlock (__gthread_mutex_t *mutex) { /* We could return the */ return semGive (*mutex); } #endif /* not __gthr_vxworks_h */
__label__pos
0.982652
My Master's Thesis Problems and solutions encountered… 7Aug/106 CRUD operations in Silverlight Datagrid – MVVM This post will explain how you can add, edit or delete data from the database, by having it inserted into a Datagrid in your Silverlight application, and editing it from there. The premise for following the guide in this post is that you have a project with the MVVM design pattern, and that you have already created a Datagrid that is populated with data from a database. Furthermore, you should be willing to open ChildWindows using the slightly un-MVVM'ish method described in my last post. If anybody wants to follow the guide I provide in this post, it is definitely recommended first to have followed the guide on how to open ChildWindows, although the main aspects of it also are covered here. Now, I use my grid in order to display questions that are used for a questionnaire, that determines whether a company is eligible for becoming certified for the standard SA8000, ie. whether their working conditions are acceptable. Therefore, the questions in the example are all related to working conditions. The colors each question is given represents the severity associated with a company answering 'yes' or 'no' to the question (obviously, the companies filling out the questionnaire can't see the colors - it's just used afterwards, to get an overview over how well the company did). To simplify the example, I will only post the code related to editing of the question. The add and delete methods are very much alike, and after having gone through the how-to, you can probably figure out how to change what needs to be changed. If not, drop a comment and I'll add it. To simplify further, I will not add the editing of the groups or the colors. My whole grid looks like this: MVVM edit/add/delete data in datagrid So, what I wish to achieve is this: When I select a row from the grid and press the edit button, a child window has to appear with the data from the selected row inserted into an editable textbox. When I hit the save button in the child window, the database has to be updated, and the datagrid in the main window has to be updated as well, having inserted the changes from the database. To sum up, I want my ChildWindow to look like this: Again, you can disregard the colors and the group section, as I will only be focusing on the textbox with the question. OBS: All code for both the MainPage and the ChildWindow are in the ViewModel for the MainPage. So. From the start: Step 1: In the ViewModel for you MainPage, you'll add properties for the grid, the selected row in the grid, the question in the mainpage, and the question in the ChildWindow. Notice that the SelectedQuestion property for the MainPage is set in the SelectedQuestionRow which is updated everytime the user clicks on a new row in the grid. Notice that the object I will populate my grid with is called QuestionGrid, which was of course defined in the Model. You'll just have to update this to whatever you named yours. //The property that will be tied to the ItemSource in XAML private QuestionGrid _questionGrid; public QuestionGrid QuestionGrid { get { return _questionGrid; } set { _questionGrid = value; RaisePropertyChanged("QuestionGrid"); } } //Selected row. THe property that will be binded to SelectedItem in XAML. private QuestionGrid _selectedQuestionRow; public QuestionGrid SelectedQuestionRow { get { return _selectedQuestionRow; } set { _selectedQuestionRow = value; //sets the value for the string that will be passed to the childwindow SelectedQuestion = value.Question; //We want to retrieve the ID as well, for when we update the database SelectedQuestionID = value.Q_UI; } } //QuestionID, MainPage public int selectedQuestionID; public int SelectedQuestionID { get { return selectedQuestionID; } set { selectedQuestionID = value; RaisePropertyChanged("SelectedQuestionID"); } } //Question for mainpage public string selectedQuestion; public string SelectedQuestion { get { return selectedQuestion; } set { selectedQuestion = value; RaisePropertyChanged("SelectedQuestion"); } } //Properties for the grid. //The question private string question_EditCW; public string Question_EditCW { get { return question_EditCW; } set { question_EditCW = value; RaisePropertyChanged("Question_EditCW"); //SaveEdit is the name of the command we will create later, to save the changes to the database SaveEdit.RaiseCanExecuteChanged(); } } So, now I have created the properties, and now we will define the xaml to both the MainPage and the ChildWindow: Step 2: Create the grid and button in the MainPage (the command binded to the button will open the child window) Notice that the grid binds to QuestionGrid and SelectedQuestionRow, created earlier, but also notice that the question itself in the Textblock in the gird is binded to the attribute name of the object, created in the Model. <!-- MainPage: DATAGRID --> <data:DataGrid x:Name="AllCompaniesGrid" SelectedItem="{Binding SelectedQuestionRow, Mode=TwoWay}" ItemsSource="{Binding QuestionGrid}" AutoGenerateColumns="False" VerticalScrollBarVisibility="Visible" > <data:DataGrid.Columns> <!--Question Column. --> <data:DataGridTemplateColumn Header="Questions"> <data:DataGridTemplateColumn.CellTemplate> <DataTemplate> <TextBlock Text="{Binding Question}" /> </DataTemplate> </data:DataGridTemplateColumn.CellTemplate> </data:DataGridTemplateColumn> </data:DataGrid> <!-- The button! --> <Button Content="Edit" Style="{StaticResource Knap1}" Width="60" Command="{Binding EditQuestion}" /> Step 3: Create textbox and button in the ChildWindow (the command binded to the button will update the database, close the window, and update the grid). <!-- The textbox --> <TextBox Text="{Binding Question_EditCW, Mode=TwoWay}" /> <!-- The button! --> <Button x:Name="Add" Command="{Binding SaveEdit}" Content="Editquestion" /> Step 4: Define the command for button in the MainPage, in the ViewModel. I will go through this very quickly, but look at my old post here, if you have doubts about the DelegateCommand, or anything else. But remember: The delegate command is binded to a button and is divided into three parts: 1) The property part that creates the property that will bind to the edit button, and calls the CanExecute and Execute methods, 2) the CanExecute part that tells wether the button is clickable or not (returns true or false), and 3) the Execute part, that executes if the button is pressable and is pressed: In our case, the Execute will open the new ChildWindow, called EditQuestionCW. //property for the button command private DelegateCommand editQuestion; public DelegateCommand EditQuestion { get { if (editQuestion == null) editQuestion = new DelegateCommand(executeEditQuestion, canExecuteEditQuestion); return editQuestion; } } //Is the button clickaable? True = yes, False = no. private bool canExecuteEditQuestion(object parameter) { //Button only clickable if row in grid is selected if (SelectedQuestion != null) { return true; } else { return false; } } //New instance of the childwindow. My ChildWindow EditQuestionCW is in the folder ChildWindows. ChildWindows.EditQuestionCW EditQuestionCW; private void executeEditQuestion(object parameter) { //Sets the question Question_EditCW = SelectedQuestion; RedYes_EditCW = true; //Opens Child Window EditQuestionCW = new ChildWindows.EditQuestionCW(this); EditQuestionCW.Show(); } These next steps differentiates this post from the former. Here we will define what happens, when the SaveEdit button is pressed in the childWindow, and therefore how we update the changes to the database. But first: Creating the SQL query. Step 5: Create the SQL query that will update the database and insert the new data. This will happen in your Model . Mine looks somethink like this: public class EditAddQuestionDA : DBConnection { public void EditQuestion(int q_ID, string question) //The id and the question as parameters { SqlConnection dbConn = connectToDB(); SqlCommand cmd; string updateQuestion = string.Format("update question set question = '" + question + "' where q_ID = " + q_ID + "; "); try { dbConn.Open(); if (question != "") { cmd = new SqlCommand(updateQuestion, dbConn); cmd.ExecuteNonQuery(); } dbConn.Close(); } catch (Exception e) { Console.WriteLine(e.ToString()); } } } Step 6: In the Service1.svc.cs make the method available to the ViewModel: public void EditQuestion(int q_ID, string question) { EditAddQuestionDA allDA = new EditAddQuestionDA(); allDA.EditQuestion(q_ID, question); } Step 7: In the IService1.cs: [ServiceContract] public interface IService1 { [OperationContract] void EditQuestion(int q_ID, int G_ID, string question, int yes, int no); } So, now the query can be accessed from the ViewModel, so we can create the DelegateCommand for the button in the ChildWindow, that will call the EditQuestion() method: Step 8: In the ViewModel we create the SaveEdit DelegateCommand, that is divided - like before - to three parts: 1) the DelegateCommand SaveEdit property, 2) The boolean method that determines if the button is clickable, and 3) The Execution of the code that will save the new question to the database, and update the grid in the MainPage. private DelegateCommand saveEdit; public DelegateCommand SaveEdit { get { if (saveEdit == null) saveEdit = new DelegateCommand(executeSaveEdit, canSaveEDit); return saveEdit; } } //Can Execute - Defines if the button should be clickable. //True = Yes, false = no. Yes if the question is not identical to the one in the grid = no changes made private bool canSaveEDit(object parameter) //Definerer om knappen er klik-bar. { if (SelectedQuestion == Question_EditCW) { return false; //Only clickable if changes have been made } else { return true; } //Not identical -> return true } //When the button is pressed: private void executeSaveEdit(object parameter) { //updates the grid (only visually, not in the DB SelectedQuestionRow.Question = Question_EditCW; //WE cose the childwindow, and call the method that will insert the new question into the database EditQuestionCW.Close(); QMServiceReference.Service1Client WebService.EditQuestionCompleted += new EventHandler<AsyncCompletedEventArgs>(WebService_EditQuestionCompleted); //Defines the method that will be called when the database has been called. WebService.EditQuestionAsync(SelectedQuestionID, Question_EditCW); //Calls the method } //The method that is called when the database has been updated: void WebService_EditQuestionCompleted(object sender, AsyncCompletedEventArgs e) { //When the database has been updated we update the grid in the MainPage. WebService.GetQuestionGridCompleted += new EventHandler<GetQuestionGridCompletedEventArgs>(WebService_GetQuestionGridCompleted); WebService.GetQuestionGridAsync(SelectedQuestionnaireCB); } //When the GetQuestionGrid query has been run - insert the results into the grid: void WebService_GetQuestionGridCompleted(object sender, GetQuestionGridCompletedEventArgs e) { QuestionGrid = e.Result; } Enjoy! Comments (6) Trackbacks (0) 1. post source code please 2. You made a few good points there. I did a search on the topic and hardly got any specific details on other websites, but then great to be here, seriously, appreciate that. – Lucas 3. please send me a report file & source code file of your master thesis 4. Nice Article. Please Can I get the source code? 5. very good one can I have source code please? 6. Can you please give me the souce code? Leave a comment five − 2 = No trackbacks yet.
__label__pos
0.995877
V5 arcade mode deadzone programming Having issue with our remote having a deadzone. And do not know how to program a fix for it. We are using arcade mode drive and can’t find a sample or a code already developed that we can use. Any help would be great. C++ format First set a variable to the joystick value. Then see if it’s less than the deadzone, if it is set the variable to 0. Set the motor to the variable. int x = joystick value if abs(joystick value) less than 10, set x to 0 Motor.spin(forward, x, percent); 3 Likes I’m pretty new to coding and basically been learning and experimenting from the sample codes. This is all over my head. Some solid examples would be great. int x = Controller1.Axis3.value(); if(fabs(x)<10){ x=0; } Motor.spin(forward,x,percent); This basically gets the value from the joystick and checks if it’s less than 10. If it’s less than 10, set the motor to 0. If it’s greater than 10, run the motor at the joystick value. Basically make an int for every joystick, check each joystick and incorporate the variables into the Motor.spin 5 Likes Where does that particular code would go before or after on the drive code? It would go before because u want to check if it’s less than 10 before giving power to the motor 1 Like image Finally go around with programming still not working. Any help would help. I wish vex would come up one a sample program to help all teams out. sample program to do what ? For a deadzone program. This ghost remote drifting is only affecting us not the other teams. ok, here is an example valid for VEXcode. VEXcode simple arcade control with deadband /*----------------------------------------------------------------------------*/ /* */ /* Module: main.cpp */ /* Author: james */ /* Created: Wed Nov 20 2019 */ /* Description: V5 project */ /* */ /*----------------------------------------------------------------------------*/ #include "vex.h" using namespace vex; // A global instance of vex::brain used for printing to the V5 brain screen vex::brain Brain; // define your global instances of motors and other devices here vex::controller Controller; vex::motor motor_l( vex::PORT1 ); vex::motor motor_r( vex::PORT10 ); int main() { int forward_pct; int turn_pct; double drive_l; double drive_r; while(1) { forward_pct = Controller.Axis3.position(); turn_pct = Controller.Axis4.position(); // deadband, set to 0 if below the deadband value const int deadband = 15; if( abs( forward_pct ) < deadband ) forward_pct = 0; if( abs( turn_pct ) < deadband ) turn_pct = 0; // arcade drive drive_l = forward_pct + turn_pct; drive_r = forward_pct - turn_pct; // send to motors motor_l.spin( forward, drive_l, percentUnits::pct ); motor_r.spin( forward, drive_r, percentUnits::pct ); // Allow other tasks to run this_thread::sleep_for(10); } } For the last parameters in the .spin commands you need to put velocityUnits::pct and not % or velocityUnits::.
__label__pos
0.899391
Table of Contents Search 1. Preface 2. Understanding Pipeline Partitioning 3. Partition Points 4. Partition Types 5. Pushdown Optimization 6. Pushdown Optimization Transformations 7. Real-time Processing 8. Commit Points 9. Row Error Logging 10. Workflow Recovery 11. Stopping and Aborting 12. Concurrent Workflows 13. Grid Processing 14. Load Balancer 15. Workflow Variables 16. Parameters and Variables in Sessions 17. Parameter Files 18. FastExport 19. External Loading 20. FTP 21. Session Caches 22. Incremental Aggregation 23. Session Log Interface 24. Understanding Buffer Memory 25. High Precision Data 26. POWERCENTERHELP Advanced Workflow Guide Advanced Workflow Guide Reader Time Limit Reader Time Limit Reader time limit is the amount of time in seconds that the Integration Service reads source messages from the real-time source before it stops reading from the source. Use reader time limit to read messages from a real-time source for a set period of time. 0 indicates an infinite period of time. For example, if you use a 10 second time limit, the Integration Service stops reading from the messaging application after 10 seconds. It processes the messages and ends the session.
__label__pos
0.996113
Sign up × Mathematics Stack Exchange is a question and answer site for people studying math at any level and professionals in related fields. It's 100% free, no registration required. My real problem is about graph theory, but I've boiled it down to this statement. The issue is, I can't figure out if it's true or false. Can anyone shed some light on the matter? Thanks! Let $a_1$, $a_2$, ... $a_n$, $b_1$, $b_2$, ... $b_n$ be distinct, positive, real numbers. If $$\sum_{i=1}^na_i < \sum_{j=1}^nb_j$$ then $$\sum_{i=1}^na_i^2 < \sum_{j=1}^nb_j^2$$ Any guidance is appreciated! share|cite|improve this question 2   There are issues already with integers. For example, $1+10\lt 6+6$, but $1^2+10^2 \gt 6^2+6^2$. – André Nicolas Feb 27 '13 at 5:46 3 Answers 3 For example, $\displaystyle 1+\frac{1}{9}< \frac{4}{9}+ \frac{8}{9}$ but $\displaystyle 1+ \frac{1}{9^2}> \frac{4^2}{9^2}+\frac{8^2}{9^2}$. share|cite|improve this answer It's not necessarily true. For example, 1+1.00001>1.5+0.0001, But$(1.5)^2+(0.00001)^2>1^2+(1.0001)^2$ The reason it's not true is that if we have a few large no.s in the $a_i$ Then those large no.s are increased significantly by squaring. share|cite|improve this answer      distinct positive numbers? – Inquest Feb 27 '13 at 5:43      0 is not positive. – p.koch Feb 27 '13 at 5:43      There, they're distinct now(and positive). – Ishan Banerjee Feb 27 '13 at 5:45      $0$ is very close to being positive. – Gerry Myerson Feb 27 '13 at 5:47      @GerryMyerson but 0.00001 is definitely positive – Ishan Banerjee Feb 27 '13 at 5:48 No. If one $a$ is large and all the rest are small, while the $b$'s are about the same size it will fail. A specific example is $n=10, a_1=1000, \text {else } a_i=i, b_i=i+100$ share|cite|improve this answer Your Answer   discard By posting your answer, you agree to the privacy policy and terms of service. Not the answer you're looking for? Browse other questions tagged or ask your own question.
__label__pos
0.975512
Distance Distance is a measurement of the amount of space between objects. It can refer to a specific measurement of length, or can be used more loosely. For example, "There is a good coffee shop a couple of blocks to the east." Physical distances Distance traveled is likely one of the most familiar concepts of distance. Whether it be how far it is from your home to the mailbox, school, or office, distance traveled is simply the length of a path taken from one point to another. Another type of physical distance is straight-line distance, also referred to as Euclidean distance. When traveling between two points, it is not always possible to use the straight-line distance. Euclidean distance is the length of the shortest possible path that can be taken between two points, given that there are no obstacles. The figure above shows the difference between Euclidean distance and distance traveled. The orange dotted line shows the path taken, while the blue line shows the shortest possible path betweeen points A and B. Distance formula Unlike physical distances, which are measured using various methods and measurement tools, distances in geometry, specifically the distance between two points in the coordinate plane, can be measured using a formula, where d is distance, (x2 - x1) is the change in x (the horizontal distance), and (y2 - y1) is the change in y (the vertical distance) between two points. Example Find the distance between the points (1, 1) and (5, 4) using the distance formula. Therefore, the distance between the points is 5 units. The distance formula is derived using the Pythagorean theorem. For more detail, refer to the distance formula page.
__label__pos
0.988066
Dismiss Notice Join Physics Forums Today! The friendliest, high quality science and math community on the planet! Everyone who loves science is here! Homework Help: Prove that x is irrational unless it is an integer. 1. Feb 12, 2012 #1 1. The problem statement, all variables and given/known data This is taken from an answer book that I have. I don't understand the bolded step. Can someone explain it to me? Suppose x = p/q where p and q are natural numbers with no common factor. Then: pn/qn + an-1pn-1/qn-1 + ... + ao = 0 and multiplying both sides by qn gives pn + an-1pn-1q + ... + aoqn = 0 Now if q ≠ ±1 then q has some prime number as a factor. This prime number divides every term of the second equation other than pn, so it must divide pn also. Therefore it divides p, a contradiction. So q = ±1, which means that x is an integer. Once again, it's the bolded step that I don't understand. Why must it divide pn? Thanks in advance.   2. jcsd 3. Feb 12, 2012 #2 SammyS User Avatar Staff Emeritus Science Advisor Homework Helper Gold Member Looks like you're asking us to pick things up in the middle of a proof, without letting us know what it is that is being proved. I'm guessing that you're looking at a solution for the following. Prove that any root of the following polynomial of degree, n, with integer coefficients: xn + an-1 xn-1 + an-2 xn-2 + an-3 xn-3 + an-4 xn-4 + … + a0 is either an integer, or the root is irrational.​ The proof is by contradiction, and done by assuming that there is a rational, non-integer root. To answer your question: Rewrite that second equation of yours as: an-1pn-1q + ... + aoqn = -pn So, q divides the left hand side. Therefore, it must divide the right hand side.   4. Feb 12, 2012 #3 Thank you so much! I can't believe I missed that! Thanks!   Share this great discussion with others via Reddit, Google+, Twitter, or Facebook
__label__pos
0.950899
FP Lint ESLint can help you enforce functional-first standards. Below is a functional eslint setup I like, followed by some explaination. { "root": true, "parser": "@typescript-eslint/parser", "plugins": [ "@typescript-eslint", "fp", "prefer-let" ], "extends": [ "airbnb", "plugin:fp/recommended", "plugin:@typescript-eslint/eslint-recommended", "plugin:@typescript-eslint/recommended" ], "rules": { "semi": ["error", "never"], "prefer-const": "off", "fp/no-let": "off", "fp/no-arguments": "error", "fp/no-class": "error", "fp/no-delete": "error", "fp/no-events": "error", "fp/no-get-set": "error", "fp/no-loops": "error", "fp/no-mutating-assign": "error", "fp/no-mutating-methods": "error", "fp/no-mutation": "error", "fp/no-nil": "error", "fp/no-proxy": "error", "fp/no-rest-parameters": "error", "fp/no-this": "error", "fp/no-throw": "error", "fp/no-unused-expression": "error", "fp/no-valueof-field": "error", "no-var": "error", "import/extensions": "off", "import/no-unresolved": "off", "react/prop-types": "off", "react/react-in-jsx-scope": "off", "react/jsx-filename-extension": "off", "import/no-mutable-exports": "off", "prefer-let/prefer-let": "error", "implicit-arrow-linebreak": "off", "arrow-parens": "off" } } Disabling prefer-const and fp/no-let, and enabling of prefer-let/prefer-let goes against popular JS doctrine. The prefer-const rule is pretty standard in the JS community because const forbids reassignment and reassignment is not idiomatic. However, a constant is the opposite of variable, which is what const is usually used for. Semantically, let is the modern keyword that replaces var, except in instances where the value is known at compile time. These lint rules allow for use of let, and only allow for const at the top-level of a file. Reassignment is then forbidden by the lint rule fp/no-mutation. All of the other fp/* settings emphatically enforce functional programming practices. They forbid mutation, forbid no-op functions (functions that do not return a value), and anything that relies on mutable state (like proxies and loops). No semicolons ( semi: ["error", "never"]) just makes things look more like a functional language. Finally, turning off implicit-arrow-parens reduces the need for brackets when you need to satisfy line length rules, and arrow-parens are really just syntactic noise. Here's an example of how tabbing still makes the return clear, and parameters are clear without parenthesis because of the lambda arrow. let double = (a) => { return a * 2 } let double = a => a * 2
__label__pos
0.988536
RadMenu for ASP.NET AJAX RadControls for ASP.NET AJAX (RadContextMenu only) The OnClientShowing client-side event occurs when the context menu is about to appear, either in response to a right-click on one of its targets or a call to the show method. Caution The OnClientShowing event does not occur when the context menu appears in response to a call its showAt method. The event handler receives two parameters: 1. The instance of the context menu firing the event. 2. An eventArgs parameter containing the following methods: • get_targetElement returns a reference to the DOM element that was right-clicked to show the context menu. If the menu appeared in response to a call to the show method rather than a right-click on one of its targets, get_targetElement returns null. • get_domEvent returns a reference to the DOM event that caused the context menu to appear. If the context menu appeared because one of its targets was right-clicked, this is an event of type "contextmenu". If the context menu appeared because of a call to its show method, this is the DOM event that was passed as a parameter to the show method. • set_cancel lets you prevent the menu from appearing. • get_cancel returns a boolean value indicating whether the context menu will appear after the event handler exits. You can use this event to initialize the context menu before it appears or to conditionally prevent the context menu from appearing: CopyASPX <script type="text/javascript"> function showContextMenu(menu, args) { var target = args.get_targetElement(); if (target) { if (target.value == "") args.set_cancel(true); else menu.get_items().getItem(1).disable(); } } </script> <telerik:RadContextMenu ID="RadContextMenu1" runat="server" OnClientShowing="showContextMenu"> <Items> ... </Items> </telerik:RadContextMenu> See Also
__label__pos
0.957361
src/HOL/Corec_Examples/Tests/Misc_Mono.thy changeset 62696 7325d8573fb8 child 62726 5b2a7caa855b 1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/HOL/Corec_Examples/Tests/Misc_Mono.thy Tue Mar 22 12:39:37 2016 +0100 1.3 @@ -0,0 +1,435 @@ 1.4 +(* Title: HOL/Corec_Examples/Tests/Misc_Mono.thy 1.5 + Author: Aymeric Bouzy, Ecole polytechnique 1.6 + Author: Jasmin Blanchette, Inria, LORIA, MPII 1.7 + Copyright 2015, 2016 1.8 + 1.9 +Miscellaneous monomorphic examples. 1.10 +*) 1.11 + 1.12 +section {* Miscellaneous Monomorphic Examples *} 1.13 + 1.14 +theory Misc_Mono 1.15 +imports "~~/src/HOL/Library/BNF_Corec" 1.16 +begin 1.17 + 1.18 +codatatype T0 = 1.19 + C1 (lab: nat) (tl11: T0) (tl12: T0) 1.20 +| C2 (lab: nat) (tl2: T0) 1.21 +| C3 (tl3: "T0 list") 1.22 + 1.23 +codatatype stream = 1.24 + S (hd: nat) (tl: stream) 1.25 + 1.26 +corec (friend) ff where 1.27 + "ff x = S 0 (ff (ff x))" 1.28 + 1.29 +corec test0 where 1.30 + "test0 x y = (case (x, y) of 1.31 + (S a1 s1, S a2 s2) \<Rightarrow> S (a1 + a2) (test0 s1 s2))" 1.32 + 1.33 +friend_of_corec test0 where 1.34 + "test0 x y = (case (x, y) of 1.35 + (S a1 s1, S a2 s2) \<Rightarrow> S (a1 + a2) (test0 s1 s2))" 1.36 + apply (rule test0.code) 1.37 + apply transfer_prover 1.38 + done 1.39 + 1.40 +corec test01 where 1.41 + "test01 x y = C2 (lab x + lab y) (test01 (tl2 x) (tl2 y))" 1.42 + 1.43 +friend_of_corec test01 where 1.44 + "test01 x y = C2 (lab x + lab y) (test01 (tl2 x) (tl2 y))" 1.45 + apply (rule test01.code) 1.46 + sorry (* not parametric *) 1.47 + 1.48 +corec test02 where 1.49 + "test02 x y = C2 (lab x * lab y) (test01 (test02 x (tl2 y)) (test02 (tl2 x) y))" 1.50 + 1.51 +friend_of_corec test02 where 1.52 + "test02 x y = C2 (lab x * lab y) (test01 (test02 x (tl2 y)) (test02 (tl2 x) y))" 1.53 + apply (rule test02.code) 1.54 + sorry (* not parametric *) 1.55 + 1.56 +corec test03 where 1.57 + "test03 x = C2 (lab x) (C2 (lab x) (test02 (test03 (tl2 x)) (test03 (tl2 x))))" 1.58 + 1.59 +friend_of_corec test03 where 1.60 + "test03 x = C2 (lab x) (C2 (lab x) (test02 (test03 (tl2 x)) (test03 (tl2 x))))" 1.61 + apply (rule test03.code) 1.62 + sorry (* not parametric *) 1.63 + 1.64 +corec (friend) test04a where 1.65 + "test04a x = (case x of C1 a t1 t2 \<Rightarrow> C1 (a * a) (test04a t1) (test04a t2) | C2 a t \<Rightarrow> C2 (a * a) (test04a t) | C3 l \<Rightarrow> C3 l)" 1.66 + 1.67 +corec test04 where 1.68 + "test04 x = (case x of C1 a t1 t2 \<Rightarrow> C1 (a * a) (test04 t1) (test04 t2) | C2 a t \<Rightarrow> C2 (a * a) (test04 t) | C3 l \<Rightarrow> C3 l)" 1.69 + 1.70 +friend_of_corec test04 where 1.71 + "test04 x = (case x of C1 a t1 t2 \<Rightarrow> C1 (a * a) (test04 t1) (test04 t2) | C2 a t \<Rightarrow> C2 (a * a) (test04 t) | C3 l \<Rightarrow> C3 l)" 1.72 + apply (rule test04.code) 1.73 + apply transfer_prover 1.74 + done 1.75 + 1.76 +corec test05 where 1.77 + "test05 x y = (case (x, y) of 1.78 + (C1 a t11 t12, C1 b t21 t22) \<Rightarrow> C1 (a + b) (test05 t11 t21) (test05 t12 t22) 1.79 +| (C1 a t11 _, C2 b t2) \<Rightarrow> C2 (a + b) (test05 t11 t2) 1.80 +| (C2 a t1, C1 b _ t22) \<Rightarrow> C2 (a + b) (test05 t1 t22) 1.81 +| (C2 a t1, C2 b t2) \<Rightarrow> C2 (a + b) (test05 t1 t2) 1.82 +| (_, _) \<Rightarrow> C3 [])" 1.83 + 1.84 +friend_of_corec test05 where 1.85 + "test05 x y = (case (x, y) of 1.86 + (C1 a t11 t12, C1 b t21 t22) \<Rightarrow> C1 (a + b) (test05 t11 t21) (test05 t12 t22) 1.87 +| (C1 a t11 _, C2 b t2) \<Rightarrow> C2 (a + b) (test05 t11 t2) 1.88 +| (C2 a t1, C1 b _ t22) \<Rightarrow> C2 (a + b) (test05 t1 t22) 1.89 +| (C2 a t1, C2 b t2) \<Rightarrow> C2 (a + b) (test05 t1 t2) 1.90 +| (_, _) \<Rightarrow> C3 [])" 1.91 + apply (rule test05.code) 1.92 + apply transfer_prover 1.93 + done 1.94 + 1.95 +corec test06 :: "T0 \<Rightarrow> T0" where 1.96 + "test06 x = 1.97 + (if \<not> is_C1 x then 1.98 + let tail = tl2 x in 1.99 + C1 (lab x) (test06 tail) tail 1.100 + else 1.101 + C2 (lab x) (test06 (tl12 x)))" 1.102 + 1.103 +friend_of_corec test06 :: "T0 \<Rightarrow> T0" where 1.104 + "test06 x = 1.105 + (if \<not> is_C1 x then 1.106 + let tail = tl2 x in 1.107 + C1 (lab x) (test06 tail) tail 1.108 + else 1.109 + C2 (lab x) (test06 (tl12 x)))" 1.110 + apply (rule test06.code) 1.111 + sorry (* not parametric *) 1.112 + 1.113 +corec test07 where 1.114 + "test07 xs = C3 (map (\<lambda>x. test07 (tl3 x)) xs)" 1.115 + 1.116 +friend_of_corec test07 where 1.117 + "test07 xs = C3 (map (\<lambda>x. test07 (tl3 x)) xs)" 1.118 + apply (rule test07.code) 1.119 + sorry (* not parametric *) 1.120 + 1.121 +corec test08 where 1.122 + "test08 xs = (case xs of 1.123 + [] \<Rightarrow> C2 0 (test08 []) 1.124 + | T # r \<Rightarrow> C1 1 (test08 r) T)" 1.125 + 1.126 +friend_of_corec test08 where 1.127 + "test08 xs = (case xs of 1.128 + [] \<Rightarrow> C2 0 (test08 []) 1.129 + | T # r \<Rightarrow> C1 1 (test08 r) T)" 1.130 + apply (rule test08.code) 1.131 + apply transfer_prover 1.132 + done 1.133 + 1.134 +corec test09 where 1.135 + "test09 xs = test08 [case xs of 1.136 + [] \<Rightarrow> C2 0 (test09 []) 1.137 + | (C1 n T1 T2) # r \<Rightarrow> C1 n (test09 (T1 # r)) (test09 (T2 # r)) 1.138 + | _ # r \<Rightarrow> C3 [test09 r]]" 1.139 + 1.140 +friend_of_corec test09 where 1.141 + "test09 xs = (case xs of 1.142 + [] \<Rightarrow> C2 0 (test09 []) 1.143 + | (C1 n T1 T2) # r \<Rightarrow> C1 n (test09 (T1 # r)) (test09 (T2 # r)) 1.144 + | _ # r \<Rightarrow> C3 [test09 r])" 1.145 + defer 1.146 + apply transfer_prover 1.147 + sorry (* not sure the specifications are equal *) 1.148 + 1.149 +codatatype tree = 1.150 + Node (node: int) (branches: "tree list") 1.151 + 1.152 +consts integerize_tree_list :: "'a list \<Rightarrow> int" 1.153 + 1.154 +lemma integerize_tree_list_transfer[transfer_rule]: 1.155 + "rel_fun (list_all2 R) op = integerize_tree_list integerize_tree_list" 1.156 + sorry 1.157 + 1.158 +corec (friend) f10a where 1.159 + "f10a x y = Node 1.160 + (integerize_tree_list (branches x) + integerize_tree_list (branches y)) 1.161 + (map (\<lambda>(x, y). f10a x y) (zip (branches x) (branches y)))" 1.162 + 1.163 +corec f10 where 1.164 + "f10 x y = Node 1.165 + (integerize_tree_list (branches x) + integerize_tree_list (branches y)) 1.166 + (map (\<lambda>(x, y). f10 x y) (zip (branches x) (branches y)))" 1.167 + 1.168 +friend_of_corec f10 where 1.169 + "f10 x y = Node 1.170 + (integerize_tree_list (branches x) + integerize_tree_list (branches y)) 1.171 + (map (\<lambda>(x, y). f10 x y) (zip (branches x) (branches y)))" 1.172 + apply (rule f10.code) 1.173 + by transfer_prover+ 1.174 + 1.175 +corec f12 where 1.176 + "f12 t = Node (node t) (map f12 (branches t))" 1.177 + 1.178 +friend_of_corec f12 where 1.179 + "f12 t = Node (node t) (map f12 (branches t))" 1.180 + sorry 1.181 + 1.182 +corec f13 where 1.183 + "f13 n ts = Node n (map (%t. f13 (node t) (branches t)) ts)" 1.184 + 1.185 +friend_of_corec f13 where 1.186 + "f13 n ts = Node n (map (%t. f13 (node t) (branches t)) ts)" 1.187 + sorry 1.188 + 1.189 +corec f14 :: "tree option \<Rightarrow> tree" where 1.190 + "f14 t_opt = Node 0 1.191 + (case map_option branches t_opt of 1.192 + None \<Rightarrow> [] 1.193 + | Some ts \<Rightarrow> map (f14 o Some) ts)" 1.194 + 1.195 +friend_of_corec f14 where 1.196 + "f14 t_opt = Node 0 1.197 + (case map_option branches t_opt of 1.198 + None \<Rightarrow> [] 1.199 + | Some ts \<Rightarrow> map (f14 o Some) ts)" 1.200 + sorry 1.201 + 1.202 +corec f15 :: "tree list option \<Rightarrow> tree" where 1.203 + "f15 ts_opt = Node 0 1.204 + (case map_option (map branches) ts_opt of 1.205 + None \<Rightarrow> [] 1.206 + | Some tss \<Rightarrow> map (f15 o Some) tss)" 1.207 + 1.208 +friend_of_corec f15 where 1.209 + "f15 ts_opt = Node 0 1.210 + (case map_option (map branches) ts_opt of 1.211 + None \<Rightarrow> [] 1.212 + | Some tss \<Rightarrow> map (f15 o Some) tss)" 1.213 + sorry 1.214 + 1.215 +corec f16 :: "tree list option \<Rightarrow> tree" where 1.216 + "f16 ts_opt = Node 0 1.217 + (case ts_opt of 1.218 + None \<Rightarrow> [] 1.219 + | Some ts \<Rightarrow> map (f16 o Some o branches) ts)" 1.220 + 1.221 +friend_of_corec f16 where 1.222 + "f16 ts_opt = Node 0 1.223 + (case ts_opt of 1.224 + None \<Rightarrow> [] 1.225 + | Some ts \<Rightarrow> map (f16 o Some o branches) ts)" 1.226 + sorry 1.227 + 1.228 +corec f17 :: "tree list option \<Rightarrow> tree" where 1.229 + "f17 ts_opt = Node 0 (case ts_opt of 1.230 + None \<Rightarrow> [] 1.231 + | Some ts \<Rightarrow> [f17 (Some (map (List.hd o branches) ts))])" 1.232 + 1.233 +(* not parametric 1.234 +friend_of_corec f17 where 1.235 + "f17 ts_opt = Node 0 (case ts_opt of 1.236 + None \<Rightarrow> [] 1.237 + | Some ts \<Rightarrow> [f17 (Some (map (List.hd o branches) ts))])" 1.238 + sorry 1.239 +*) 1.240 + 1.241 +corec f18 :: "tree \<Rightarrow> tree" where 1.242 + "f18 t = Node (node t) (map (f18 o f12) (branches t))" 1.243 + 1.244 +friend_of_corec f18 :: "tree \<Rightarrow> tree" where 1.245 + "f18 t = Node (node t) (map (f18 o f12) (branches t))" 1.246 + sorry 1.247 + 1.248 +corec f19 :: "tree \<Rightarrow> tree" where 1.249 + "f19 t = Node (node t) (map (%f. f [t]) (map f13 [1, 2, 3]))" 1.250 + 1.251 +friend_of_corec f19 :: "tree \<Rightarrow> tree" where 1.252 + "f19 t = Node (node t) (map (%f. f [t]) (map f13 [1, 2, 3]))" 1.253 + sorry 1.254 + 1.255 +datatype ('a, 'b, 'c) h = H1 (h_a: 'a) (h_tail: "('a, 'b, 'c) h") | H2 (h_b: 'b) (h_c: 'c) (h_tail: "('a, 'b, 'c) h") | H3 1.256 + 1.257 +term "map_h (map_option f12) (%n. n) f12" 1.258 + 1.259 +corec f20 :: "(tree option, int, tree) h \<Rightarrow> tree \<Rightarrow> tree" where 1.260 + "f20 x y = Node (node y) (case (map_h (map_option f12) (%n. n) f12 x) of 1.261 + H1 None r \<Rightarrow> (f20 r y) # (branches y) 1.262 + | H1 (Some t) r \<Rightarrow> (f20 r t) # (branches y) 1.263 + | H2 n t r \<Rightarrow> (f20 r (Node n [])) # (branches y) 1.264 + | H3 \<Rightarrow> branches y)" 1.265 + 1.266 +friend_of_corec f20 where 1.267 + "f20 x y = Node (node y) (case (map_h (map_option f12) (%n. n) f12 x) of 1.268 + H1 None r \<Rightarrow> (f20 r y) # (branches y) 1.269 + | H1 (Some t) r \<Rightarrow> (f20 r t) # (branches y) 1.270 + | H2 n t r \<Rightarrow> (f20 r (Node n [])) # (branches y) 1.271 + | H3 \<Rightarrow> branches y)" 1.272 + sorry 1.273 + 1.274 +corec f21 where 1.275 + "f21 x xh = 1.276 + Node (node x) (case xh of 1.277 + H1 (Some a) yh \<Rightarrow> (f21 x (map_h (map_option (f20 yh)) id id yh)) # (branches a) 1.278 + | H1 None yh \<Rightarrow> [f21 x yh] 1.279 + | H2 b c yh \<Rightarrow> (f21 c (map_h id (%n. n + b) id yh)) # (branches x) 1.280 + | H3 \<Rightarrow> branches x)" 1.281 + 1.282 +friend_of_corec f21 where 1.283 + "f21 x xh = 1.284 + Node (node x) (case xh of 1.285 + H1 (Some a) yh \<Rightarrow> (f21 x (map_h (map_option (f20 yh)) (%t. t) (%t. t) yh)) # (branches a) 1.286 + | H1 None yh \<Rightarrow> [f21 x yh] 1.287 + | H2 b c yh \<Rightarrow> (f21 c (map_h (%t. t) (%n. n + b) (%t. t) yh)) # (branches x) 1.288 + | H3 \<Rightarrow> branches x)" 1.289 + sorry 1.290 + 1.291 +corec f22 :: "('a \<Rightarrow> tree) \<Rightarrow> 'a list \<Rightarrow> tree" where 1.292 + "f22 f x = Node 0 (map f x)" 1.293 + 1.294 +friend_of_corec f22:: "(nat \<Rightarrow> tree) \<Rightarrow> nat list \<Rightarrow> tree" where 1.295 + "f22 f x = Node 0 (map f x)" 1.296 + sorry 1.297 + 1.298 +corec f23 where 1.299 + "f23 xh = Node 0 1.300 + (if is_H1 xh then 1.301 + (f23 (h_tail xh)) # (branches (h_a xh)) 1.302 + else if is_H1 xh then 1.303 + (f23 (h_tail xh)) # (h_c xh) # (branches (h_b xh)) 1.304 + else 1.305 + [])" 1.306 + 1.307 +friend_of_corec f23 where 1.308 + "f23 xh = Node 0 1.309 + (if is_H1 xh then 1.310 + (f23 (h_tail xh)) # (branches (h_a xh)) 1.311 + else if is_H1 xh then 1.312 + (f23 (h_tail xh)) # (h_c xh) # (branches (h_b xh)) 1.313 + else 1.314 + [])" 1.315 + sorry 1.316 + 1.317 +corec f24 where 1.318 + "f24 xh = 1.319 + (if is_H1 xh then 1.320 + Node 0 ((f24 (h_tail xh)) # (h_a xh 0)) 1.321 + else if is_H2 xh then 1.322 + Node (h_b xh) ((f24 (h_tail xh)) # (h_c xh 0)) 1.323 + else 1.324 + Node 0 [])" 1.325 + 1.326 +friend_of_corec f24 :: "(nat \<Rightarrow> tree list, int, int \<Rightarrow> tree list) h \<Rightarrow> tree" where 1.327 + "f24 xh = 1.328 + (if is_H1 xh then 1.329 + Node 0 ((f24 (h_tail xh)) # (h_a xh 0)) 1.330 + else if is_H2 xh then 1.331 + Node (h_b xh) ((f24 (h_tail xh)) # (h_c xh 0)) 1.332 + else 1.333 + Node 0 [])" 1.334 + sorry 1.335 + 1.336 +corec f25 where 1.337 + "f25 x = Node (node x) (map f25 ((id branches) x))" 1.338 + 1.339 +codatatype ('a, 'b) y_type = 1.340 + Y (lab: "'a \<Rightarrow> 'b") (y_tail: "('a, 'b) y_type") 1.341 + 1.342 +corec f26 :: "(int, tree) y_type \<Rightarrow> tree \<Rightarrow> tree" where 1.343 + "f26 y x = (case map_y_type f12 y of 1.344 + Y f y' \<Rightarrow> Node (node x) ((f (node x)) # (map (f26 y') (branches x))))" 1.345 + 1.346 +friend_of_corec f26 where 1.347 + "f26 y x = (case map_y_type f12 y of 1.348 + Y f y' \<Rightarrow> Node (node x) ((f (node x)) # (map (f26 y') (branches x))))" 1.349 + sorry 1.350 + 1.351 +consts int_of_list :: "'a list \<Rightarrow> int" 1.352 + 1.353 +corec f27 :: "(int, tree) y_type \<Rightarrow> tree \<Rightarrow> tree" where 1.354 + "f27 y x = Node (int_of_list (map (f26 (y_tail y)) (branches x))) [lab y (node x)]" 1.355 + 1.356 +friend_of_corec f27 :: "(int, tree) y_type \<Rightarrow> tree \<Rightarrow> tree" where 1.357 + "f27 y x = Node (int_of_list (map (f26 (y_tail y)) (branches x))) [lab y (node x)]" 1.358 + sorry 1.359 + 1.360 +corec f28 :: "(tree option list, (int \<Rightarrow> int) \<Rightarrow> int list \<Rightarrow> tree, tree) h \<Rightarrow> tree" where 1.361 + "f28 xh = (case xh of 1.362 + H3 \<Rightarrow> Node 0 [] 1.363 + | H1 l r \<Rightarrow> Node 0 ((f28 r) # map the (filter (%opt. case opt of None \<Rightarrow> False | Some _ \<Rightarrow> True) l)) 1.364 + | H2 f t r \<Rightarrow> Node (node t) (map (%t. f id [node t]) (branches t)))" 1.365 + 1.366 +codatatype llist = 1.367 + LNil | LCons (head: nat) (tail: llist) 1.368 + 1.369 +inductive llist_in where 1.370 + "llist_in (LCons x xs) x" 1.371 +| "llist_in xs y \<Longrightarrow> llist_in (LCons x xs) y" 1.372 + 1.373 +abbreviation "lset xs \<equiv> {x. llist_in xs x}" 1.374 + 1.375 +corecursive lfilter where 1.376 + "lfilter P xs = (if \<forall> x \<in> lset xs. \<not> P x then 1.377 + LNil 1.378 + else if P (head xs) then 1.379 + LCons (head xs) (lfilter P (tail xs)) 1.380 + else 1.381 + lfilter P (tail xs))" 1.382 +proof (relation "measure (\<lambda>(P, xs). LEAST n. P (head ((tail ^^ n) xs)))", rule wf_measure, clarsimp) 1.383 + fix P xs x 1.384 + assume "llist_in xs x" "P x" "\<not> P (head xs)" 1.385 + from this(1,2) obtain a where "P (head ((tail ^^ a) xs))" 1.386 + by (atomize_elim, induct xs x rule: llist_in.induct) (auto simp: funpow_Suc_right 1.387 + simp del: funpow.simps(2) intro: exI[of _ 0] exI[of _ "Suc i" for i]) 1.388 + moreover 1.389 + with \<open>\<not> P (head xs)\<close> 1.390 + have "(LEAST n. P (head ((tail ^^ n) xs))) = Suc (LEAST n. P (head ((tail ^^ Suc n) xs)))" 1.391 + by (intro Least_Suc) auto 1.392 + then show "(LEAST n. P (head ((tail ^^ n) (tail xs)))) < (LEAST n. P (head ((tail ^^ n) xs)))" 1.393 + by (simp add: funpow_swap1[of tail]) 1.394 +qed 1.395 + 1.396 +codatatype Stream = 1.397 + SCons (head: nat) (tail: Stream) 1.398 + 1.399 +corec map_Stream where 1.400 + "map_Stream f s = SCons (f (head s)) (map_Stream f (tail s))" 1.401 + 1.402 +friend_of_corec map_Stream where 1.403 + "map_Stream f s = SCons (f (head s)) (map_Stream f (tail s))" 1.404 + sorry 1.405 + 1.406 +corec f29 where 1.407 + "f29 f ll = SCons (head ll) (f29 f (map_Stream f (tail ll)))" 1.408 + 1.409 +friend_of_corec f29 where 1.410 + "f29 f ll = SCons (head ll) (f29 f (map_Stream f (tail ll)))" 1.411 + sorry 1.412 + 1.413 +corec f30 where 1.414 + "f30 n m = (if n = 0 then SCons m (f30 m m) else f30 (n - 1) (n * m))" 1.415 + 1.416 +corec f31 :: "llist \<Rightarrow> llist" where 1.417 + "f31 x = (if x = LNil then LCons undefined (f31 undefined) else LCons undefined undefined)" 1.418 + 1.419 +friend_of_corec f31 where 1.420 + "f31 x = (if x = LNil then LCons undefined (f31 undefined) else LCons undefined undefined)" 1.421 + sorry 1.422 + 1.423 +corec f32 :: "tree \<Rightarrow> tree" where 1.424 + "f32 t = Node (node t) (map ((\<lambda>t'. f18 t') o f32) (branches t))" 1.425 + 1.426 +corec f33 :: "tree \<Rightarrow> tree" where 1.427 + "f33 t = f18 (f18 (Node (node t) (map (\<lambda>t'. (f18 o f18) (f18 (f18 (f33 t')))) (branches t))))" 1.428 + 1.429 +corec f34 :: "tree \<Rightarrow> tree" where 1.430 + "f34 t = f18 (f18 (Node (node t) (map (f18 o f18 o f34) (branches t))))" 1.431 + 1.432 +corec f35 :: "tree \<Rightarrow> tree" where 1.433 + "f35 t = f18 (f18 (Node (node t) (map (f18 o (f18 o (\<lambda>t'. f18 t')) o f35) (branches t))))" 1.434 + 1.435 +corec f37 :: "int \<Rightarrow> tree list \<Rightarrow> tree option \<Rightarrow> nat \<Rightarrow> tree" where 1.436 + "f37 a x1 = undefined a x1" 1.437 + 1.438 +end
__label__pos
0.99868
Pop ups Porngames.adult appeared in your browser for a reason – to make many on you. It means, that you have adware installed on your PC. Adware programs are not so dangerous by itself, but very annoying to deal with because they show tons of ads so that you will definitely click on them. These ads that it will expose you to could bring about a lot of damage. They can seem to be from trusted source, but you are wrong. Once you click on them you can infect the whole system with serious viruses. The most popular ways to spread fpr adware is – bundles with popular free software. We recommend you to uninstall this adware by Porngames.adult before it’s too late. Porngames.adult Porngames.adult It has ability to infect all the most used browsers including Google Chrome, Mozilla Firefox, IE, Edge and Safari and hence, you will not able to surf the web without being troubled no matter which browser you use. Apart from changing browser’s settings, this hijacker also makes significant modifications in default system’s settings by injecting vicious codes in the registry entries in order to gain automatic activation with each window reboot. Porngames.adult blocks windows firewalls and makes the security programs existing inside ineffective which results in brining many other hazardous infection in the computer that can be even more dangerous. Adware becomes noticeable immediately after it installs onto your computer. It will start popping up ads everywhere you go on the Internet so that is a major sign. Though, users not familiar with adware may think that the ads are the result of a malware infection, the situation is not as serious. Switching to another browser is not going to help because most popular browsers will be affected, including Internet Explorer, Google Chrome and Mozilla Firefox. The only way to remove Ads by Porngames.adult is to get rid of the adware. Adware wants you to click on as many ads as possible so that pay-per-click revenue could be generated. Which is why you may be seeing some great deals and discounts. Keep in mind that they might not even be legitimate, and just a ploy to make you click on them. Be careful because those ads could also expose you to malicious content, and that could lead to a malware infection. You need to uninstall Ads by Porngames.adult . Porngames.adult – how dangerous is it? It doesn`t matter if Porngames.adult () malicious or not, if program is considered as potentially unwanted it may cause a lot of problems for you and your computer. Some programs are able to add many various extensions to most popular browsers such as Google Chrome, Mozilla Firefox, Microsoft Edge and so on. This way Porngames.adult may store some information on its users – for example, search queries, to show thousands of ads based on these preferences, though they are not relevant. Also, this behavior can provide other malicious programs and viruses a way inside of your computer. Moreover, Porngames.adult may generate adverts and pop-ups that will annoy you a lot! Such programs can be installed along with other programs, so probably the Porngames.adult would lead a lot of unwanted guests on your PC – malicious programs, adware, CoinMiners or even Ransomware! Don’t waste your time and wait for your computer`s death – follow the simple removal guide below and get rid of this in a few minutes! Porngames.adult automatic remover: Loaris Trojan Remover aids in the removal of various malicious and unwanted stuff – Trojan Horses, Worms, Adware, Spyware, Malware, Ransomware Traces – when standard anti-virus software either fails to detect them or fails to effectively eliminate them. Standard anti-virus programs may be good at detecting threats like Porngames.adult, but not always good at effectively removing it… Also, Loaris Trojan Remover can be considered as a simple cleaner – it removes not only infected files, but also other junk files that were brought by viruses and unwanted programs. NOTE: Loaris Trojan Remover is absolutely free! You can use it for 30 days without any restrictions! Follow the removal guide below to know how to get the key. Download now Learn More Porngames.adult removal steps: 1. 1) Download and install Loaris Trojan Remover. 2. 2) When you open the program for the first time, you will see next window with a suggestion to start a free trial: 3. Loaris Trojan Remover software 4. 3) Enter your Name and E-mail address – press “Get Now 5. 4) Loaris will send a needed credentials on this e-mail. Open the message and you will see this: 6. Loaris Trojan Remover software 7. 5) Click on “MEMBER AREA” and enter you profile using login\password from the e-mail: 8. Loaris Trojan Remover software 9. 6) In your profile you will find a key, copy it in your buffer: 10. Loaris Trojan Remover software 11. 7) Open Loaris Trojan remover and paste the key in it: 12. Loaris Trojan Remover software 13. 8) Your program is activated! Now that you can use it completely free, you need to remove Porngames.adult completely. The scanning process should start right after the activations. If not, just click on “Scan” tab and choose “Standard Scan“: 14. Loaris Trojan Remover software 15. 9) When the scanning process is over click “Apply” to remove all infections found after the scan is completed: 16. Loaris Trojan Remover software (OPTIONAL) Reset browser settings to remove Porngames.adult traces: 1. 1) It is advices to shut down browsers for this one. 2. 2) In Loaris Trojan Remover click on “Tools” and then on “Reset browser settings“: 3. Loaris Trojan Remover software 4. 3) Follow the instructions and click on “Yes” button. NOTE: You may lose some of your browser data, so we advise you to backup your bookmarks or addons: 5. Loaris Trojan Remover software 6. 4) Finally, restart your computer to apply all made changes. If all these steps didn’t help and you still have to deal with Porngames.adult on your PC, just contact us and we will help to set your computer free from this annoying ads! (Visited 458 times, 1 visits today)
__label__pos
0.63022
system-tables Image system-tables question endizhupani answered Default This question has an accepted answer. Accepted 0 Likes 4Answers 0 Comments question Ola Hallengren edited Default This question has an accepted answer. Accepted 3 Likes 3Answers 0 Comments question WilliamD edited Default 1 Like 2Answers 0 Comments question Fatherjack edited Default This question has an accepted answer. Accepted 1 Like 1Answer 0 Comments question question Fatherjack edited Default This question has an accepted answer. Accepted 3 Likes 3Answers 0 Comments Followers 8 Posts 8 Users 1 Follower Topic Experts There are currently no experts identified for this topic. Can you answer questions in this topic area? Community members who provide answers that are marked as correct earn reputation and may become recognized as topic experts.
__label__pos
0.822327
0 Tenho uma consulta em uma tabela que deveria retornar o valor da quantidade de registros encontrados que atendam a determinado requisito. O código: <?php require('conn.php'); $prevJan= "SELECT COUNT(ID) AS PrevJan FROM participantes WHERE PREVISTO = 'Previsto' and FORMACAO = 'Revues Technique' and MES = 'jan' and AREA = 'R&D'"; $realJan= "SELECT COUNT(ID) AS RealJan FROM participantes WHERE REALIZADO = 'Realizado' and FORMACAO = 'Revues Technique' and MES = 'jan' and AREA = 'R&D'"; $consultapj = mysqli_query($prevJan); $consultarj = mysqli_query($realJan); var_dump($consultapj); ?> A conexão: <?php $connection = mysqli_connect("localhost", "root", "", "db_formacao"); if (mysqli_connect_errno()) { echo "Failed to connect to MySQL: " . mysqli_connect_error(); } ?> Se eu fizer uma consulta desse jeito pelo phpMyAdmin, ótimo, retorna o valor certinho, porém, se quando eu tento printar o valor na minha página php, printa NULL. Além disso, quero pegar esse valor e colocar como item de uma lista, veja: data: [<?php echo($consultarj);?>] Alguém tem ideia do que possa estar acontecendo? 7 • Já viu se no banco realmente existe algum registro que obedece as condições do seu SELECT ? Commented 14/09/2017 às 17:37 • Já conferi. Eu mesma criei esse banco e conheço de cabo a rabo. Existe. :/ Commented 14/09/2017 às 17:38 • Pode adicionar o símbolo de porcentagem. PREVISTO LIKE '%Previsto%' – EmanuelF Commented 14/09/2017 às 17:39 • Verifica também se o PHP realmente está fazendo a conexão com o banco, lá n oseu arquivo conn.php . Commented 14/09/2017 às 17:40 • Então, eu tentei dar um var_dump na conexão, retornou: object(mysqli)#1 (0) { } Commented 14/09/2017 às 17:47 1 Resposta 1 2 O mysqli_query retorna um objeto e não o resultado da query, para pegar o SELECT COUNT(*) primeiro de um apelido usa AS (alias): $prevJan= "SELECT COUNT(*) as TOTAL FROM participantes WHERE PREVISTO = 'Previsto' and FORMACAO = 'Revues Techniques' and MES = 'jan' and AREA = 'R&D'"; $consultapj = mysqli_query($prevJan); E então use fetch_assoc, assim: $consultapj = mysqli_query($prevJan); if ($consultapj = mysqli_query($connection, $prevJan)) { /* fetch associative array */ while ($row = mysqli_fetch_assoc($consultapj)) { echo 'Total:', $row['TOTAL']; } /* free result set */ mysqli_free_result($consultapj); } else { die(mysqli_error($connection)); } Note que passei o $connection no mysqli_query Se retornar zero (0) é porque ou a TABELA é case-sensitive ou você errou algo no WHERE [Edição] Após a edição da pergunta, a explicação de duas querys é simples, fetch_array ou fetch_assoc devem de preferencia ficar com suas consultas, na ordem e a variavel row não deve ser reaproveitada, pois estamos usando para pegar os resultados dentro do while/if, misturar com outras variáveis pode confundir os resultados: //Variaveis para pegar os resultados $totaPJ = 0; $totaRJ = 0; //qUERYS $prevJan = "SELECT COUNT(*) AS TOTAL FROM participantes WHERE PREVISTO = 'Previsto' and FORMACAO = 'Revues Technique' and MES = 'jan' and AREA = 'R&D'"; $realJan = "SELECT COUNT(*) AS TOTAL FROM participantes WHERE REALIZADO = 'Realizado' and FORMACAO = 'Revues Technique' and MES = 'jan' and AREA = 'R&D'"; //Pega o resultado de prevJan if ($resultado = mysqli_query($connection, $prevJan)) { if ($row = mysqli_fetch_assoc($resultado)) { $totaPJ = $row['TOTAL']; //Seta o total } mysqli_free_result($resultado); } else { die(mysqli_error($connection)); } //Pega o resultado de realJan if ($resultado = mysqli_query($connection, $realJan)) { if ($row = mysqli_fetch_assoc($resultado)) { $totaRJ = $row['TOTAL']; //Seta o total } mysqli_free_result($resultado); } else { die(mysqli_error($connection)); } E no gráfico provavelmente vai fazer assim: { name: 'Realizado', type: 'column', yAxis: 1, data: [<?php echo $totalRJ; ?>], tooltip: { valueSuffix: '' } }, { name: 'Previsto', type: 'spline', data: [<?php echo $totalPJ; ?>], tooltip: { valueSuffix: '' } } 2 • @MarianaFerreira veja se consegue entender a edição da resposta. – Syzoth Commented 15/09/2017 às 18:19 • 1 Sim, entendi tudo certinho. Funciona agora, obrigada! :) Commented 18/09/2017 às 11:31 Você deve fazer log-in para responder a esta pergunta. Esta não é a resposta que você está procurando? Pesquise outras perguntas com a tag .
__label__pos
0.917296
SideGuide SideGuide's Dev Blog SideGuide's Dev Blog What is Dart? What is Dart? SideGuide's photo SideGuide ·Feb 20, 2020· 3 min read Dart: Hello World! What is Dart? Dart is an object-oriented programming language released by Google to help build modern web, mobile, and desktop applications. It has tons of awesome features that you will learn throughout this course. Your first Dart project You can easily create and run your first Dart application with VS Code. • Press CMD + SHIFT + P on Mac or CTRL + SHIFT + P on Windows • Choose Dart: New Project • Select Simple Console Application • Select a folder where you want the course to be located • Name your project: dart_application_1 After completing these steps, you should see a whole Dart project set up for you in VS Code: If you're a complete beginner, it's important to note that a “project” is just a folder on your computer where all the files Dart needs to create your program live. There are many different files, all with different roles and purposes - don't worry if it seems confusing at first. Where it all begins 🙂 Dart and many other languages have a main() function that is the program's entry point. In technical terms, this is where your program begins executing. Simply put, it's where everything in your app begins. You can find this function in the dart_application_1.dart file in your /bin directory. Let's erase everything in this file and create our main function. The main() function is written in curly brackets ({}). Flutter will run the code in these brackets once your app starts. void main(){ } let’s start by printing something to the console with a print statement. void main(){ print("Hello World"); //add this line } You can use (or call) the function print to print anything. print takes in an object as a parameter (The item that goes inside the parentheses) and prints a String (text) representation of that object. Simply put, it tries to print whatever you put in the parentheses! In this case, we want to print Hello World, so we write “Hello World” inside print. The quotes are essential - we'll explain why in the coming lesson. 🤔 Try removing the quotes and see what happens. What does your Terminal say? Let's run the program. 🚀 There are two main ways to run your program: • Press F5 and VS Code will automatically run your program with the debugger. 💡 Debugger: A program that assists in detecting and correcting errors in other computer programs. 💡 Terminal/Console: A terminal is where you can give your program's commands as text, and see what output (or errors) your programs create. • A more advanced way to start your app is to use your computer's Terminal. To do this, go to the top bar of your VS Code window and press ‘Terminal’ then ‘Create New Terminal,’ which should open a terminal on the bottom of your VS Code window. There, you can type dart run, • Make sure the Terminal is already inside your project folder. If it is not, navigate using the cd commands. You can learn more about Terminals here ❗ The “Terminal” and “Debug Console” are Different! The “Terminal is your computer's Terminal (CMD on Windows, Terminal on Mac). The DEBUG CONSOLE is where the Dart debugger prints output and errors Voila! You should now see a message popping up displaying in your console: “Hello World.” Learn Flutter where you code: Visit sideguide.dev/courses/flutter Takeaways • You can create a Dart project by pressing CTRL+SHIT/CMD+P and selecting Dart: New Project • Debug Console and Terminal are 2 different things • You can run your program using the Debug play button in VS Code or typing dart run in the terminal   Share this
__label__pos
0.936705
Currently job artifacts in CI/CD pipelines on LRZ GitLab never expire. Starting from Wed 26.1.2022 the default expiration time will be 30 days (GitLab default). Currently existing artifacts in already completed jobs will not be affected by the change. The latest artifacts for all jobs in the latest successful pipelines will be kept. More information: https://gitlab.lrz.de/help/user/admin_area/settings/continuous_integration.html#default-artifacts-expiration ParserCombinators_no_TOKEN_KEYWORD.py 79.1 KB Newer Older 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 #!/usr/bin/python3 """ParserCombinators.py - parser combinators for left-recursive grammers Copyright 2016 by Eckhart Arnold Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Module parser_combinators contains a number of classes that together make up parser combinators for left-recursive grammers. For each element of the extended Backus-Naur-Form as well as for a regular expression token a class is defined. The set of classes can be used to define a parser for (ambiguous) left-recursive grammers. References and Acknowledgements: Dominikus Herzberg: Objekt-orientierte Parser-Kombinatoren in Python, Blog-Post, September, 18th 2008 on denkspuren. gedanken, ideen, anregungen und links rund um informatik-themen, URL: http://denkspuren.blogspot.de/2008/09/objekt-orientierte-parser-kombinatoren.html Dominikus Herzberg: Eine einfache Grammatik für LaTeX, Blog-Post, September, 18th 2008 on denkspuren. gedanken, ideen, anregungen und links rund um informatik-themen, URL: http://denkspuren.blogspot.de/2008/09/eine-einfache-grammatik-fr-latex.html Dominikus Herzberg: Uniform Syntax, Blog-Post, February, 27th 2007 on denkspuren. gedanken, ideen, anregungen und links rund um informatik-themen, URL: http://denkspuren.blogspot.de/2007/02/uniform-syntax.html Richard A. Frost, Rahmatullah Hafiz and Paul Callaghan: Parser Combinators for Ambiguous Left-Recursive Grammars, in: P. Hudak and D.S. Warren (Eds.): PADL 2008, LNCS 4902, pp. 167–181, Springer-Verlag Berlin Heidelberg 2008. Juancarlo Añez: grako, a PEG parser generator in Python, https://bitbucket.org/apalala/grako """ # TODO: Replace copy.deepcopy() call in GrammarBase class by custom copy()-methods in the Parser classes. Is that really better? import collections import copy import enum from functools import partial import hashlib import keyword import os from typing import NamedTuple # try: # import regex as re # except ImportError: # import re import re # as of now use `re` - even hough `regex` appears to be better import sys __version__ = '0.5.2' + '_dev' + str(os.stat(__file__).st_mtime) DEBUG = "DEBUG" def DEBUG_DIR(): global DEBUG dirname = "" if DEBUG: if os.path.exists(DEBUG): dirname = DEBUG if os.path.isdir(DEBUG) else "" else: os.mkdir(DEBUG) dirname = DEBUG return dirname def DEBUG_FILE_NAME(grammar_base): name = grammar_base.__class__.__name__ return name[:-7] if name.endswith('Grammar') else name ######################################################################## # # Scanner / Preprocessor support # ######################################################################## RX_SCANNER_TOKEN = re.compile('\w+') BEGIN_SCANNER_TOKEN = '\x1b' END_SCANNER_TOKEN = '\x1c' def make_token(token, argument=''): """ Turns the ``token`` and ``argument`` into a special token that will be caught by the `ScannerToken`-parser. Args: token (str): The token to be wrapped into a scanner token argument (str): The optional argument string added to the token Returns (str): The scanner token, starting with the `BEGIN_SCANNER_TOKEN`- character and ending with the `END_SCANNER_TOKEN`-character. """ assert RX_SCANNER_TOKEN.match(token) assert argument.find(BEGIN_SCANNER_TOKEN) < 0 assert argument.find(END_SCANNER_TOKEN) < 0 return BEGIN_SCANNER_TOKEN + token + argument + END_SCANNER_TOKEN nil_scanner = lambda text: text ######################################################################## # # Parser tree # ######################################################################## def line_col(text, pos): """ Returns the position within a text as (line, column)-tuple. Args: text (str): The text for which the line and column of the position ``pos`` shall be returned pos (int): The position in the text starting with 0, e.g. 2536 means the 2537th character. Must be lower than the text's length Returns (tuple): The line and column of the given position """ assert pos < len(text) line = text.count("\n", 0, pos) + 1 column = pos - text.rfind("\n", 0, pos) return line, column class ZombieParser: """Serves as a substitute for a Parser instance. Required by `Node`-objects, for example. The ZOMBIE_PARSER has a name and can be called, but it never matches. """ alive = False def __init__(self): assert not self.__class__.alive, "There can be only one!" assert self.__class__ == ZombieParser, "No derivatives, please!" self.name = "ZOMBIE" self.__class__.alive = True def __str__(self): return self.name def __call__(self, text): """Better call Saul ;-)""" return None, text ZOMBIE_PARSER = ZombieParser() class Error(NamedTuple): pos: int msg: str class Node: """ Represents a node in the concrete or abstract syntax tree. Attributes: name (str): The name of the node, which is either its parser's name or, if that is empty, the parser's class name result (str or tuple): The result of the parser which generated this node, which can be either a string or a tuple of child nodes. children (tuple): The tuple of child nodes or an empty tuple if there are no child nodes. READ ONLY! parser (Parser): The parser which generated this node. errors (list): A list of parser- or compiler-errors: tuple(position, string) attached to this node len (int): The full length of the node's string result if the node is a leaf node or, otherwise, the concatenated string result's of its descendants. The figure always represents the length before AST-transformation and will never change through AST-transformation. READ ONLY! pos (int): the position of the node within the parsed text. The value of ``pos`` is zero by default and is will be updated if the node or any of its parents is attached to a new parent. From the point of view of a client, this value should be considered READ ONLY. At any rate, it should never be reassigned only during parsing stage and never during or after AST-transformation. """ def __init__(self, parser, result): """ Initializes the ``Node``-object. Args: parser (Parser): The `Parser`-instance which generated this node result (Union): The parsing result, which can either be a string or a tuple of child nodes """ self.result = result self.parser = parser or ZOMBIE_PARSER self._errors = [] self.error_flag = any(r.error_flag for r in self.result) if self.children else False self._len = len(self.result) if not self.children else \ sum(child._len for child in self.result) self.pos = 0 def __str__(self): if self.children: return "".join([str(child) for child in self.result]) return str(self.result) @property def name(self): return self.parser.name or self.parser.__class__.__name__ @property def result(self): return self._result @result.setter def result(self, result): assert ((isinstance(result, tuple) and all(isinstance(child, Node) for child in result)) or isinstance(result, Node) or isinstance(result, str)), str(result) self._result = (result,) if isinstance(result, Node) else result or '' self._children = self._result if isinstance(self._result, tuple) else () @property def children(self): return self._children @property def len(self): return self._len @property def pos(self): return self._pos @pos.setter def pos(self, pos): self._pos = pos offset = 0 for child in self.children: child.pos = pos + offset offset += child.len @property def errors(self): return [Error(self.pos, err) for err in self._errors] def _tree_repr(self, tab, openF, closeF, dataF=lambda s: s): """ Generates a tree representation of this node and its children in string from. This could be an XML-representation or a lisp-like S-expression. Exactly which form the tree representation takes is defined by the parameters of the function. Args: tab (str): The indentation string, e.g. '\t' or ' ' openF (Node->str): A function that returns an opening string (e.g. an XML-tag) for a given node closeF (Node->str): A function that returns a closeF string (e.g. an XML-tag) for a given node. dataF (str->str): A function that filters the data string before printing, e.g. to add quotation marks Returns (str): A string that contains a (serialized) tree representation of the node and its children. """ head = openF(self) tail = closeF(self) if not self.result: return head + tail head = head + '\n' # place the head, tail and content tail = '\n' + tail # of the node on different lines if self.children: content = [] for child in self.result: subtree = child._tree_repr(tab, openF, closeF, dataF).split('\n') content.append('\n'.join((tab + s) for s in subtree)) return head + '\n'.join(content) + tail return head + '\n'.join([tab + dataF(s) for s in str(self.result).split('\n')]) + tail def as_sexpr(self, src=None): """ Returns content as S-expression, i.e. in lisp-like form. Args: src (str or None): The source text or `None`. In case the source text is given the position of the element in the text will be reported as line and column. """ def opening(node): s = '(' + node.name # s += " '(pos %i)" % node.pos if src: s += " '(pos %i %i %i)" % (node.pos, *line_col(src, node.pos)) if node.errors: s += " '(err '(%s))" % ' '.join(str(err).replace('"', r'\"') for err in node.errors) return s def pretty(s): return '"%s"' % s if s.find('"') < 0 \ else "'%s'" % s if s.find("'") < 0 \ else '"%s"' % s.replace('"', r'\"') return self._tree_repr(' ', opening, lambda node: ')', pretty) def as_xml(self, src=None): """ Returns content as XML-tree. Args: src (str): The source text or `None`. In case the source text is given the position will also be reported as line and column. """ def opening(node): s = '<' + node.name # s += ' pos="%i"' % node.pos if src: s += ' line="%i" col="%i"' % line_col(src, node.pos) if node.errors: s += ' err="%s"' % ''.join(str(err).replace('"', r'\"') for err in node.errors) s += ">" return s def closing(node): s = '</' + node.name + '>' return s return self._tree_repr(' ', opening, closing) def add_error(self, error_str): self._errors.append(error_str) self.error_flag = True return self def collect_errors(self, clear_errors=False): """ Returns all errors of this node or any child node in the form of a set of tuples (position, error_message), where position is always relative to this node. """ if self.error_flag: errors = self.errors if clear_errors: self._errors = [] self.error_flag = False if self.children: for child in self.result: errors.extend(child.collect_errors(clear_errors)) return errors return [] def navigate(self, path): """EXPERIMENTAL! NOT YET TESTED!!! Returns the first descendant element matched by `path`, e.g. 'd/s' returns 'l' from (d (s l)(e (r x1) (r x2)) 'e/r' returns 'x2' 'e' returns (r x1)(r x2) Args: path (str): The path of the object, e.g. 'a/b/c' Returns: The object at the path, either a string or a Node or `None`, if the path did not match. """ pl = path.strip('') assert pl[0] != '/', 'Path must noch start with "/"!' nd = self for p in pl: if isinstance(nd.result, str): return p if (p == nd.result) and (p == pl[-1]) else None for child in nd.result: if str(child) == p: nd = child break else: return None return child def error_messages(text, errors): """ Converts the list of ``errors`` collected from the root node of the parse tree of `text` into a human readable (and IDE or editor parsable text) with line an column numbers. Error messages are separated by an empty line. """ return "\n\n".join("line: %i, column: %i, error: %s" % (*line_col(text, err.pos), err.msg) for err in sorted(list(errors))) # return "\n\n".join("line: %i, column: %i, error: %s" % # (*line_col(text, err[0]), " and ".join(err[1])) # for err in sorted(list(errors))) # lambda compact_sexpr s : re.sub('\s(?=\))', '', re.sub('\s+', ' ', s)).strip() ######################################################################## # # Abstract syntax tree support # ######################################################################## def DEBUG_DUMP_SYNTAX_TREE(grammar_base, syntax_tree, ext): global DEBUG if DEBUG: st_file_name = DEBUG_FILE_NAME(grammar_base) + ext with open(os.path.join(DEBUG_DIR(), st_file_name), "w", encoding="utf-8") as f: f.write(syntax_tree.as_sexpr()) def expand_table(compact_table): """Expands a table by separating keywords that are tuples or strings containing comma separated words into single keyword entries with the same values. Returns the expanded table. Example: expand_table({"a, b": 1, "b": 1, ('d','e','f'):5, "c":3}) yields {'a': 1, 'b': 1, 'c': 3, 'd': 5, 'e': 5, 'f': 5} """ expanded_table = {} keys = list(compact_table.keys()) for key in keys: value = compact_table[key] if isinstance(key, str): parts = (s.strip() for s in key.split(',')) else: assert isinstance(key, collections.abc.Iterable) parts = key for p in parts: expanded_table[p] = value return expanded_table def ASTTransform(node, transtable): """Transforms the parse tree starting with the given `node` into an abstract syntax tree by calling transformation functions registered in a transformation table. Args: node (Node): The root node of the parse tree (or sub-tree) to be transformed into the abstract syntax tree. transtable (dict): A dictionary that assigns a transformation transformation functions to parser name strings. """ # normalize transformation entries by turning single transformations # into lists with a single item table = {name: transformation if isinstance(transformation, collections.abc.Sequence) else [transformation] for name, transformation in list(transtable.items())} table = expand_table(table) def recursive_ASTTransform(node): if node.children: for child in node.result: recursive_ASTTransform(child) transformation = table.get(node.name, table.get('', [])) + table.get('*', []) for transform in transformation: transform(node) recursive_ASTTransform(node) # def preserve_errors(transformation): # """Wrapper that moves errors of child nodes that have been removed # after the application of function ``transformation()`` to the root # node. As an optimization, ``transformation()`` will only be called # if its ``node``-argument (i.e. the root-node) has children at all. # """ # # requires nd.collect_errors() to return a set # @functools.wraps(transformation) # def preserve_errors_wrapper(*args, **kwds): # nd = kwds['node'] if 'node' in kwds else args[0] # if nd.children: # errors = nd.collect_errors() # transformation(*args, **kwds) # for err in errors - nd.collect_errors(): # nd.add_error(err[1], err[0]) # return preserve_errors_wrapper def no_transformation(node): pass # ------------------------------------------------ # # rearranging transformations: # - tree may be rearranged (flattened) # - order is preserved # - all leaves are kept # # ------------------------------------------------ def replace_by_single_child(node): """Remove single branch node, replacing it by its immediate descendant. (In case the descendant's name is empty (i.e. anonymous) the name of this node's parser is kept.) """ if node.children and len(node.result) == 1: if not node.result[0].parser.name: node.result[0].parser.name = node.parser.name node.parser = node.result[0].parser node._errors.extend(node.result[0].errors) node.result = node.result[0].result def reduce_single_child(node): """Reduce a single branch node, by transferring the result of its immediate descendant to this node, but keeping this node's parser entry. """ if node.children and len(node.result) == 1: node._errors.extend(node.result[0].errors) node.result = node.result[0].result # ------------------------------------------------ # # destructive transformations: # - tree may be rearranged (flattened), # - order is preserved # - but (irrelevant) leaves may be dropped # - errors of dropped leaves will be lost # # ------------------------------------------------ def is_whitespace(node): return not node.result or (isinstance(node.result, str) and not node.result.strip()) def is_comment(node): return node.name == WHITESPACE_KEYWORD def is_scanner_token(node): return isinstance(node.parser, ScannerToken) def is_expendable(node): return is_whitespace(node) or is_comment(node) or is_scanner_token(node) def remove_children_if(node, condition): """Removes all nodes from the result field if the function `condition` evaluates to `True`.""" if node.children: node.result = tuple(r for r in node.result if not condition(r)) remove_whitespace = partial(remove_children_if, condition=is_whitespace) remove_comments = partial(remove_children_if, condition=is_comment) remove_scanner_tokens = partial(remove_children_if, condition=is_scanner_token) remove_expendables = partial(remove_children_if, condition=is_expendable) def flatten(node): """Recursively flattens all unnamed sub-nodes, in case there is more than one sub-node present. Flattening means that wherever a node has child nodes, the child nodes are inserted in place of the node. In other words, all leaves of this node and its child nodes are collected in-order as direct children of this node. This is meant to achieve the following structural transformation: X (+ Y + Z) -> X + Y + Z """ if node.children: new_result = [] for child in node.children: if not child.parser.name and child.children: assert child.children, node.as_sexpr() flatten(child) new_result.extend(child.result) else: new_result.append(child) node.result = tuple(new_result) def remove_tokens(node, tokens=set()): """Reomoves any among a particular set of tokens from the immediate descendants of a node. If ``tokens`` is the empty set, all tokens are removed. """ if node.children: if tokens: node.result = tuple(child for child in node.children if child.children or child.result not in tokens or not isinstance(child.parser, Token)) else: node.result = tuple(child for child in node.children if child.children or isinstance(child.parser, Token)) def remove_enclosing_delimiters(node): """Removes the enclosing delimiters from a structure (e.g. quotation marks from a literal or braces from a group). """ if len(node.children) >= 3: assert isinstance(node.children[0].result, str) and \ isinstance(node.children[-1].result, str), node.as_sexpr() node.result = node.result[1:-1] AST_SYMBOLS = {'replace_by_single_child', 'reduce_single_child', 'no_transformation', 'remove_children_if', 'is_whitespace', 'is_comment', 'is_scanner_token', 'is_expendable', 'remove_whitespace', 'remove_comments', 'remove_scanner_tokens', 'remove_expendables', 'flatten', 'remove_tokens', 'remove_enclosing_delimiters', 'WHITESPACE_KEYWORD', 'partial'} ######################################################################## # # Parser base classes # ######################################################################## LEFT_RECURSION_DEPTH = 10 # because of pythons recursion depth limit, this # value ought not to be set too high MAX_DROPOUTS = 25 # stop trying to recover parsing after so many errors WHITESPACE_KEYWORD = 'wsp__' class HistoryRecord: __slots__ = ('call_stack', 'node', 'remaining') MATCH = "MATCH" ERROR = "ERROR" FAIL = "FAIL" def __init__(self, call_stack, node, remaining): self.call_stack = call_stack self.node = node self.remaining = remaining @property def stack(self): return "->".join(str(parser) for parser in self.call_stack) @property def status(self): return self.FAIL if self.node is None else self.ERROR if self.node._errors else self.MATCH @property def extent(self): return ((-self.remaining - self.node.len, -self.remaining) if self.node else (-self.remaining, None)) def DEBUG_DUMP_PARSING_HISTORY(grammar_base, document): def prepare_line(record): excerpt = document.__getitem__(slice(*record.extent))[:25].replace('\n','\\n') excerpt = "'%s'" % excerpt if len(excerpt) < 25 else "'%s...'" % excerpt return (record.stack, record.status, excerpt) def write_log(history, log_name): path = os.path.join(DEBUG_DIR(), DEBUG_FILE_NAME(grammar_base) + log_name + "_parser.log") if history: with open(path, "w", encoding="utf-8") as f: f.write("\n".join(history)) elif os.path.exists(path): os.remove(path) global DEBUG if DEBUG: full_history, match_history, errors_only = [], [], [] for record in grammar_base.history: line = "; ".join(prepare_line(record)) full_history.append(line) if record.node and record.node.name != WHITESPACE_KEYWORD: match_history.append(line) if record.node.errors: errors_only.append(line) write_log(full_history, '_full') write_log(match_history, '_match') write_log(errors_only, '_errors') # hist = ["; ".join(prepare_line(r)) for r in grammar_base.history] # lines = [prepare_line(r) for r in grammar_base.history] # n = max(len(line[0]) for line in lines) # hist = [" ".join((l[0] + ' ' * (n - len(l[0])), l[1], l[2])) for l in lines] def add_parser_guard(parser_func): def guarded_call(parser, text): try: location = len(text) # if location has already been visited by the current parser, # return saved result if location in parser.visited: return parser.visited[location] # break left recursion at the maximum allowed depth if parser.recursion_counter.setdefault(location, 0) > LEFT_RECURSION_DEPTH: return None, text parser.recursion_counter[location] += 1 grammar = parser.grammar if grammar.track_history: grammar.call_stack.append(parser) grammar.moving_forward = True # run original __call__ method node, rest = parser_func(parser, text) if grammar.track_history: if grammar.moving_forward: # and result[0] == None grammar.moving_forward = False record = HistoryRecord(grammar.call_stack.copy(), node, len(rest)) grammar.history.append(record) grammar.call_stack.pop() if node is not None: # in case of a recursive call saves the result of the first # (or left-most) call that matches parser.visited[location] = (node, rest) grammar.last_node = node elif location in parser.visited: # if parser did non match but a saved result exits, assume # left recursion and use the saved result node, rest = parser.visited[location] parser.recursion_counter[location] -= 1 except RecursionError: node = Node(None, text[:min(10, max(1, text.find("\n")))] + " ...") node.add_error("maximum recursion depth of parser reached; " "potentially due to too many errors!") node.error_flag = True rest = '' return node, rest return guarded_call class ParserMetaClass(type): def __init__(cls, name, bases, attrs): # The following condition is necessary for classes that don't override # the __call__() method, because in these cases the non-overridden # __call__()-method would be substituted a second time! guarded_parser_call = add_parser_guard(cls.__call__) if cls.__call__.__code__ != guarded_parser_call.__code__: cls.__call__ = guarded_parser_call super(ParserMetaClass, cls).__init__(name, bases, attrs) def sane_parser_name(name): """Checks whether given name is an acceptable parser name. Parser names must not be preceeded or succeeded by a double underscore '__'! """ return name and name[:2] != '__' and name[-2:] != '__' class Parser(metaclass=ParserMetaClass): def __init__(self, name=None): assert name is None or isinstance(name, str), str(name) self.name = name or '' self.grammar = None # center for global variables etc. self.reset() def reset(self): self.visited = dict() self.recursion_counter = dict() self.cycle_detection = set() def __call__(self, text): return None, text # default behaviour: don't match def __str__(self): return self.name or self.__class__.__name__ @property def grammar(self): return self._grammar @grammar.setter def grammar(self, grammar_base): self._grammar = grammar_base self._grammar_assigned_notifier() def _grammar_assigned_notifier(self): pass def apply(self, func): """Applies function `func(parser)` recursively to this parser and all descendendants of the tree of parsers. The same function can never be applied twice between calls of the ``reset()``-method! """ if func in self.cycle_detection: return False else: self.cycle_detection.add(func) func(self) return True class GrammarBase: root__ = None # should be overwritten by grammar subclass @classmethod def _assign_parser_names(cls): """Initializes the `parser.name` fields of those Parser objects that are directly assigned to a class field with the field's name, e.g. class Grammar(GrammarBase): ... symbol = RE('(?!\\d)\\w+') After the call of this method symbol.name == "symbol" holds. Names assigned via the `name`-parameter of the constructor will not be overwritten. """ if cls.parser_initialization__ == "done": return cdict = cls.__dict__ for entry, parser in cdict.items(): if isinstance(parser, Parser): if not parser.name: parser.name = entry if (isinstance(parser, Forward) and not parser.parser.name): parser.parser.name = entry cls.parser_initialization__ = "done" def __init__(self): self.all_parsers = set() self.dirty_flag = False self.track_history = DEBUG self._reset() self._assign_parser_names() self.root__ = copy.deepcopy(self.__class__.root__) if self.wspL__: self.wsp_left_parser__ = RegExp(self.wspL__, WHITESPACE_KEYWORD) self.wsp_left_parser__.grammar = self else: self.wsp_left_parser__ = ZOMBIE_PARSER if self.wspR__: self.wsp_right_parser__ = RegExp(self.wspR__, WHITESPACE_KEYWORD) self.wsp_right_parser__.grammar = self else: self.wsp_right_parser__ = ZOMBIE_PARSER self.root__.apply(self._add_parser) def _reset(self): self.variables = dict() # support for Pop and Retrieve operators self.last_node = None self.call_stack = [] # support for call stack tracing self.history = [] # snapshots of call stacks self.moving_forward = True # also needed for call stack tracing def _add_parser(self, parser): """Adds the copy of the classes parser object to this particular instance of GrammarBase. """ setattr(self, parser.name, parser) self.all_parsers.add(parser) parser.grammar = self def parse(self, document): """Parses a document with with parser-combinators. Args: document (str): The source text to be parsed. Returns: Node: The root node ot the parse tree. """ if self.root__ is None: raise NotImplementedError() if self.dirty_flag: self._reset() for parser in self.all_parsers: parser.reset() else: self.dirty_flag = True parser = self.root__ result = "" stitches = [] rest = document while rest and len(stitches) < MAX_DROPOUTS: result, rest = parser(rest) if rest: fwd = rest.find("\n") + 1 or len(rest) skip, rest = rest[:fwd], rest[fwd:] if result is None: error_msg = "Parser did not match! Invalid source file?" else: stitches.append(result) error_msg = "Parser stopped before end" + \ ("! trying to recover..." if len(stitches) < MAX_DROPOUTS else " too often! Terminating parser.") stitches.append(Node(None, skip)) stitches[-1].add_error(error_msg) if stitches: if result and stitches[-1] != result: stitches.append(result) if rest: stitches.append(Node(None, rest)) return result if not stitches else Node(None, tuple(stitches)) ######################################################################## # # Token and Regular Expression parser classes (i.e. leaf classes) # ######################################################################## class ScannerToken(Parser): def __init__(self, scanner_token): assert isinstance(scanner_token, str) and scanner_token and \ scanner_token.isupper() assert RX_SCANNER_TOKEN.match(scanner_token) super(ScannerToken, self).__init__(scanner_token) def __call__(self, text): if text[0:1] == BEGIN_SCANNER_TOKEN: end = text.find(END_SCANNER_TOKEN, 1) if end < 0: node = Node(self, '').add_error( 'END_SCANNER_TOKEN delimiter missing from scanner token. ' '(Most likely due to a scanner bug!)') return node, text[1:] elif end == 0: node = Node(self, '').add_error( 'Scanner token cannot have zero length. ' '(Most likely due to a scanner bug!)') return node, text[2:] elif text.find(BEGIN_SCANNER_TOKEN, 1, end) >= 0: node = Node(self, text[len(self.name) + 1:end]) node.add_error( 'Scanner tokens must not be nested or contain ' 'BEGIN_SCANNER_TOKEN delimiter as part of their argument. ' '(Most likely due to a scanner bug!)') return node, text[end:] if text[1:len(self.name) + 1] == self.name: return Node(self, text[len(self.name) + 1:end]), \ text[end + 1:] return None, text class RegExp(Parser): def __init__(self, regexp, name=None): super(RegExp, self).__init__(name) self.regexp = re.compile(regexp) if isinstance(regexp, str) else regexp def __deepcopy__(self, memo): # this method is obsolete with the new `regex` module! try: regexp = copy.deepcopy(self.regexp) except TypeError: regexp = self.regexp.pattern duplicate = RegExp(self.name, regexp) duplicate.name = self.name # this ist needed!!!! duplicate.regexp = self.regexp duplicate.grammar = self.grammar duplicate.visited = copy.deepcopy(self.visited, memo) duplicate.recursion_counter = copy.deepcopy(self.recursion_counter, memo) return duplicate def __call__(self, text): match = text[0:1] != BEGIN_SCANNER_TOKEN and self.regexp.match(text) # ESC starts a scanner token. if match: end = match.end() return Node(self, text[:end]), text[end:] return None, text class RE(Parser): """Regular Expressions with optional leading or trailing whitespace. """ def __init__(self, regexp, wL=None, wR=None, name=None): super(RE, self).__init__(name) # assert wR or regexp == '.' or isinstance(self, Token) self.wL = wL self.wR = wR self.wspLeft = RegExp(wL, WHITESPACE_KEYWORD) if wL else ZOMBIE_PARSER self.wspRight = RegExp(wR, WHITESPACE_KEYWORD) if wR else ZOMBIE_PARSER self.main = RegExp(regexp) def __call__(self, text): # assert self.main.regexp.pattern != "@" t = text wL, t = self.wspLeft(t) main, t = self.main(t) if main: wR, t = self.wspRight(t) result = tuple(nd for nd in (wL, main, wR) if nd and nd.result != '') return Node(self, result), t return None, text def __str__(self): return self.name or ('RE ' + ('~' if self.wL else '') + '/%s/' % self.main.regexp.pattern + ('~' if self.wR else '')) # pattern = self.main.regexp.pattern # details = ('' if self.name # else ('~' if self.wL else '') + '/%s/' % pattern + ('~' if self.wR else '')) # return super(RE, self).__str__() + ' ' + details def _grammar_assigned_notifier(self): if self.grammar: if self.wL is None: self.wspLeft = self.grammar.wsp_left_parser__ if self.wR is None: self.wspRight = self.grammar.wsp_right_parser__ def apply(self, func): if super(RE, self).apply(func): if self.wL: self.wspLeft.apply(func) if self.wR: self.wspRight.apply(func) self.main.apply(func) def escape_re(s): """Returns `s` with all regular expression special characters escaped. """ assert isinstance(s, str) re_chars = r"\.^$*+?{}[]()#<>=|!" for esc_ch in re_chars: s = s.replace(esc_ch, '\\' + esc_ch) return s class Token(RE): def __init__(self, token, wL=None, wR=None, name=None): super(Token, self).__init__(escape_re(token), wL, wR, name) def __str__(self): return self.name or 'Token "%s"' % self.main.regexp.pattern.replace('\\', '') def mixin_comment(whitespace, comment): """Mixes comment-regexp into whitespace regexp. """ wspc = '(?:' + whitespace + '(?:' + comment + whitespace + ')*)' return wspc ######################################################################## # # Combinator parser classes (i.e. trunk classes of the parser tree) # ######################################################################## class UnaryOperator(Parser): def __init__(self, parser, name=None): super(UnaryOperator, self).__init__(name) assert isinstance(parser, Parser) self.parser = parser def apply(self, func): if super(UnaryOperator, self).apply(func): self.parser.apply(func) class NaryOperator(Parser): def __init__(self, *parsers, name=None): super(NaryOperator, self).__init__(name) assert all([isinstance(parser, Parser) for parser in parsers]), str(parsers) self.parsers = parsers def apply(self, func): if super(NaryOperator, self).apply(func): for parser in self.parsers: parser.apply(func) class Optional(UnaryOperator): def __init__(self, parser, name=None): super(Optional, self).__init__(parser, name) assert isinstance(parser, Parser) assert not isinstance(parser, Optional), \ "Nesting options would be redundant: %s(%s)" % \ (str(name), str(parser.name)) assert not isinstance(parser, Required), \ "Nestion options with required elements is contradictory: " \ "%s(%s)" % (str(name), str(parser.name)) def __call__(self, text): node, text = self.parser(text) if node: return Node(self, node), text return Node(self, ()), text class ZeroOrMore(Optional): def __call__(self, text): results = () while text: node, text = self.parser(text) if not node: break results += (node,) return Node(self, results), text class OneOrMore(UnaryOperator): def __init__(self, parser, name=None): super(OneOrMore, self).__init__(parser, name) assert not isinstance(parser, Optional), \ "Use ZeroOrMore instead of nesting OneOrMore and Optional: " \ "%s(%s)" % (str(name), str(parser.name)) def __call__(self, text): results = () text_ = text while text_: node, text_ = self.parser(text_) if not node: break results += (node,) if results == (): return None, text return Node(self, results), text_ class Sequence(NaryOperator): def __init__(self, *parsers, name=None): super(Sequence, self).__init__(*parsers, name=name) assert len(self.parsers) >= 1 # commented, because sequences can be empty: # assert not all(isinstance(p, Optional) for p in self.parsers) def __call__(self, text): results = () text_ = text for parser in self.parsers: node, text_ = parser(text_) if not node: return node, text if node.result: # Nodes with zero-length result are silently omitted results += (node,) if node.error_flag: break assert len(results) <= len(self.parsers) return Node(self, results), text_ class Alternative(NaryOperator): def __init__(self, *parsers, name=None): super(Alternative, self).__init__(*parsers, name=name) assert len(self.parsers) >= 1 assert all(not isinstance(p, Optional) for p in self.parsers) def __call__(self, text): for parser in self.parsers: node, text_ = parser(text) if node: return Node(self, node), text_ return None, text ######################################################################## # # Flow control operators # ######################################################################## class FlowOperator(UnaryOperator): def __init__(self, parser, name=None): super(FlowOperator, self).__init__(parser, name) class Required(FlowOperator): # TODO: Add constructor that checks for logical errors, like `Required(Optional(...))` constructs def __call__(self, text): node, text_ = self.parser(text) if not node: m = re.search(r'\s(\S)', text) i = max(1, m.regs[1][0]) if m else 1 node = Node(self, text[:i]) text_ = text[i:] # assert False, "*"+text[:i]+"*" node.add_error('%s expected; "%s..." found!' % (str(self.parser), text[:10])) return node, text_ class Lookahead(FlowOperator): def __init__(self, parser, name=None): super(Lookahead, self).__init__(parser, name) def __call__(self, text): node, text_ = self.parser(text) if self.sign(node is not None): return Node(self, ''), text else: return None, text def sign(self, bool_value): return bool_value class NegativeLookahead(Lookahead): def sign(self, bool_value): return not bool_value def iter_right_branch(node): """Iterates over the right branch of `node` starting with node itself. Iteration is stopped if either there are no child nodes any more or if the parser of a node is a Lookahead parser. (Reason is: Since lookahead nodes do not advance the parser, it does not make sense to look back to them.) """ while node and not isinstance(node.parser, Lookahead): # the second condition should not be necessary yield node # for well-formed EBNF code node = node.children[-1] if node.children else None class Lookbehind(FlowOperator): def __init__(self, parser, name=None): super(Lookbehind, self).__init__(parser, name) print("WARNING: Lookbehind Operator is experimental!") def __call__(self, text): if isinstance(self.grammar.last_node, Lookahead): return Node(self, '').add_error('Lookbehind right after Lookahead ' 'does not make sense!'), text if self.sign(self.condition()): return Node(self, ''), text else: return None, text def sign(self, bool_value): return bool_value def condition(self): node = None for node in iter_right_branch(self.grammar.last_node): if node.parser.name == self.parser.name: return True if node and isinstance(self.parser, RegExp) and \ self.parser.regexp.match(str(node)): # Is there really a use case for this? return True return False class NegativeLookbehind(Lookbehind): def sign(self, bool_value): return not bool_value ######################################################################## # # Capture and Retrieve operators (for passing variables in the parser) # ######################################################################## class Capture(UnaryOperator): def __init__(self, parser, name=None): super(Capture, self).__init__(parser, name) def __call__(self, text): node, text = self.parser(text) if node: stack = self.grammar.variables.setdefault(self.name, []) stack.append(str(node)) return Node(self, node), text class Retrieve(Parser): def __init__(self, symbol, name=None): super(Retrieve, self).__init__(name) self.symbol = symbol # if isinstance(symbol, str) else symbol.name def __call__(self, text): symbol = self.symbol if isinstance(self.symbol, str) \ else self.symbol.name stack = self.grammar.variables[symbol] value = self.pick_value(stack) if text.startswith(value): return Node(self, value), text[len(value):] else: return None, text def pick_value(self, stack): return stack[-1] class Pop(Retrieve): def pick_value(self, stack): return stack.pop() ######################################################################## # # Forward class (for recursive symbols) # ######################################################################## class Forward(Parser): def __init__(self): Parser.__init__(self) self.parser = None self.cycle_reached = False def __call__(self, text): return self.parser(text) def __str__(self): if self.cycle_reached: if self.parser and self.parser.name: return str(self.parser.name) return "..." else: self.cycle_reached = True s = str(self.parser) self.cycle_reached = False return s def set(self, parser): assert isinstance(parser, Parser) self.name = parser.name # redundant, because of constructor of GrammarBase self.parser = parser def apply(self, func): if super(Forward, self).apply(func): assert not self.visited self.parser.apply(func) PARSER_SYMBOLS = {'RegExp', 'mixin_comment', 'RE', 'Token', 'Required', 'Lookahead', 'NegativeLookahead', 'Optional', 'Lookbehind', 'NegativeLookbehind', 'ZeroOrMore', 'Sequence', 'Alternative', 'Forward', 'OneOrMore', 'GrammarBase', 'Capture', 'Retrieve', 'Pop'} ######################################################################## # # Syntax driven compilation support # ######################################################################## class CompilerBase: def compile__(self, node): comp, cls = node.parser.name, node.parser.__class__.__name__ elem = comp or cls if not sane_parser_name(elem): node.add_error("Must not use reserved name '%s' as parser " "name! " % elem + "(Any name starting with " "'_' or '__' or ending with '__' is reserved.)") return None else: compiler = self.__getattribute__(elem) # TODO Add support for python keyword attributes return compiler(node) def full_compilation(source, grammar_base, AST_transformations, compiler): """Compiles a source in three stages: 1. Parsing 2. AST-transformation 3. Compiling. The compilations stage is only invoked if no errors occurred in either of the two previous stages. Args: source (str): the input source for compilation grammar_base (GrammarBase): the GrammarBase object AST_transformations (dict): a table that assigns AST transformation 1422 functions to parser names (see function traverse) 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 1791 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974 1975 1976 1977 1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 1992 1993 1994 1995 1996 1997 1998 1999 2000 2001 2002 2003 2004 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 2020 2021 2022 2023 2024 2025 2026 2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037 2038 2039 2040 2041 2042 2043 2044 2045 2046 2047 2048 2049 2050 2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 compiler (object): an instance of a class derived from `CompilerBase` with a suitable method for every parser name or class. Returns: tuple: (result (?), messages (str), syntax_tree (Node)) Result as returned by the compiler or `None` if an error occurred during parsing or AST-transformation and the compiler wasn't invoked; error messages; abstract syntax tree """ assert isinstance(compiler, CompilerBase) syntax_tree = grammar_base.parse(source) DEBUG_DUMP_SYNTAX_TREE(grammar_base, syntax_tree, ext='.cst') DEBUG_DUMP_PARSING_HISTORY(grammar_base, source) assert syntax_tree.error_flag or str(syntax_tree) == source, str(syntax_tree) # only compile if there were no syntax errors, for otherwise it is # likely that error list gets littered with compile error messages if syntax_tree.error_flag: result = None else: ASTTransform(syntax_tree, AST_transformations) DEBUG_DUMP_SYNTAX_TREE(grammar_base, syntax_tree, ext='.ast') result = compiler.compile__(syntax_tree) errors = syntax_tree.collect_errors() messages = error_messages(source, errors) return result, messages, syntax_tree COMPILER_SYMBOLS = {'CompilerBase', 'Node', 're'} ######################################################################## # # EBNF-Grammar-Compiler # ######################################################################## class EBNFGrammar(GrammarBase): r"""Parser for an EBNF source file, with this grammar: # EBNF-Grammar in EBNF @ comment = /#.*(?:\n|$)/ # comments start with '#' and eat all chars up to and including '\n' @ whitespace = /\s*/ # whitespace includes linefeed @ literalws = right # trailing whitespace of literals will be ignored tacitly syntax = [~//] { definition | directive } §EOF definition = symbol §"=" expression directive = "@" §symbol §"=" ( regexp | literal | list_ ) expression = term { "|" term } term = { factor }+ factor = [flowmarker] [retrieveop] symbol !"=" # negative lookahead to be sure it's not a definition | [flowmarker] literal | [flowmarker] regexp | [flowmarker] group | [flowmarker] oneormore | repetition | option flowmarker = "!" | "&" | "§" | # '!' negative lookahead, '&' positive lookahead, '§' required "-!" | "-&" # '-' negative lookbehind, '-&' positive lookbehind retrieveop = "::" | ":" # '::' pop, ':' retrieve group = "(" expression §")" option = "[" expression §"]" oneormore = "{" expression "}+" repetition = "{" expression §"}" symbol = /(?!\d)\w+/~ # e.g. expression, factor, parameter_list literal = /"(?:[^"]|\\")*?"/~ # e.g. "(", '+', 'while' | /'(?:[^']|\\')*?'/~ # whitespace following literals will be ignored tacitly. regexp = /~?\/(?:[^\/]|(?<=\\)\/)*\/~?/~ # e.g. /\w+/, ~/#.*(?:\n|$)/~ # '~' is a whitespace-marker, if present leading or trailing # whitespace of a regular expression will be ignored tacitly. list_ = /\w+\s*(?:,\s*\w+\s*)*/~ # comma separated list of symbols, e.g. BEGIN_LIST, END_LIST, # BEGIN_QUOTE, END_QUOTE ; see CommonMark/markdown.py for an exmaple EOF = !/./ """ expression = Forward() source_hash__ = "1065c2e43262a5cb3aa438ec4d347c32" parser_initialization__ = "upon instatiation" wsp__ = mixin_comment(whitespace=r'\s*', comment=r'#.*(?:\n|$)') wspL__ = '' wspR__ = wsp__ EOF = NegativeLookahead(RE('.', wR='')) list_ = RE('\\w+\\s*(?:,\\s*\\w+\\s*)*') regexp = RE('~?/(?:[^/]|(?<=\\\\)/)*/~?') literal = Alternative(RE('"(?:[^"]|\\\\")*?"'), RE("'(?:[^']|\\\\')*?'")) symbol = RE('(?!\\d)\\w+') repetition = Sequence(Token("{"), expression, Required(Token("}"))) oneormore = Sequence(Token("{"), expression, Token("}+")) option = Sequence(Token("["), expression, Required(Token("]"))) group = Sequence(Token("("), expression, Required(Token(")"))) retrieveop = Alternative(Token("::"), Token(":")) flowmarker = Alternative(Token("!"), Token("&"), Token("§"), Token("-!"), Token("-&")) factor = Alternative(Sequence(Optional(flowmarker), Optional(retrieveop), symbol, NegativeLookahead(Token("="))), Sequence(Optional(flowmarker), literal), Sequence(Optional(flowmarker), regexp), Sequence(Optional(flowmarker), group), Sequence(Optional(flowmarker), oneormore), repetition, option) term = OneOrMore(factor) expression.set(Sequence(term, ZeroOrMore(Sequence(Token("|"), term)))) directive = Sequence(Token("@"), Required(symbol), Required(Token("=")), Alternative(regexp, literal, list_)) definition = Sequence(symbol, Required(Token("=")), expression) syntax = Sequence(Optional(RE('', wR='', wL=wsp__)), ZeroOrMore(Alternative(definition, directive)), Required(EOF)) root__ = syntax EBNFTransTable = { # AST Transformations for EBNF-grammar "syntax": remove_expendables, "directive, definition": partial(remove_tokens, tokens={'@', '='}), "expression": [replace_by_single_child, flatten, partial(remove_tokens, tokens={'|'})], "term": [replace_by_single_child, flatten], # supports both idioms: "{ factor }+" and "factor { factor }" "factor, flowmarker, retrieveop": replace_by_single_child, "group": [remove_enclosing_delimiters, replace_by_single_child], "oneormore, repetition, option": [reduce_single_child, remove_enclosing_delimiters], "symbol, literal, regexp, list_": [remove_expendables, reduce_single_child], ("Token", WHITESPACE_KEYWORD): [remove_expendables, reduce_single_child], "": [remove_expendables, replace_by_single_child] } def load_if_file(text_or_file): """Reads and returns content of a file if parameter `text_or_file` is a file name (i.e. a single line string), otherwise (i.e. if `text_or_file` is a multiline string) returns the content of `text_or_file`. """ if text_or_file and text_or_file.find('\n') < 0: with open(text_or_file, encoding="utf-8") as f: content = f.read() return content else: return text_or_file class EBNFCompilerError(Exception): """Error raised by `EBNFCompiler` class. (Not compilation errors in the strict sense, see `CompilationError` below)""" pass Scanner = collections.namedtuple('Scanner', 'symbol instantiation_call cls_name cls') def md5(*txt): """Returns the md5-checksum for `txt`. This can be used to test if some piece of text, for example a grammar source file, has changed. """ md5_hash = hashlib.md5() for t in txt: md5_hash.update(t.encode('utf8')) return md5_hash.hexdigest() class EBNFCompiler(CompilerBase): """Generates a Parser from an abstract syntax tree of a grammar specified in EBNF-Notation. """ # RX_DIRECTIVE = re.compile('(?:#|@)\s*(?P<key>\w*)\s*=\s*(?P<value>.*)') # old, can be removed! RESERVED_SYMBOLS = {WHITESPACE_KEYWORD} KNOWN_DIRECTIVES = {'comment', 'whitespace', 'tokens', 'literalws'} VOWELS = {'A', 'E', 'I', 'O', 'U'} # what about cases like 'hour', 'universe' etc. ? AST_ERROR = "Badly structured syntax tree. " \ "Potentially due to erroneuos AST transformation." PREFIX_TABLE = [('§', 'Required'), ('&', 'Lookahead'), ('!', 'NegativeLookahead'), ('-&', 'Lookbehind'), ('-!', 'NegativeLookbehind'), ('::', 'Pop'), (':', 'Retrieve')] def __init__(self, grammar_name="", source_text=""): super(EBNFCompiler, self).__init__() assert grammar_name == "" or re.match('\w+\Z', grammar_name) self.grammar_name = grammar_name self.source_text = load_if_file(source_text) self._reset() def _reset(self): self.rules = set() self.symbols = set() self.variables = set() self.scanner_tokens = set() self.definition_names = [] self.recursive = set() self.root = "" self.directives = {'whitespace': '\s*', 'comment': '', 'literalws': ['wR=' + WHITESPACE_KEYWORD]} def gen_scanner_skeleton(self): name = self.grammar_name + "Scanner" return "def %s(text):\n return text\n" % name def gen_AST_skeleton(self): if not self.definition_names: raise EBNFCompilerError('Compiler has not been run before calling ' '"gen_AST_Skeleton()"!') transtable = [self.grammar_name + 'TransTable = {', ' # AST Transformations for the ' + self.grammar_name + '-grammar'] for name in self.definition_names: transtable.append(' "' + name + '": no_transformation,') transtable += [' "": no_transformation', '}', ''] return '\n'.join(transtable) def gen_compiler_skeleton(self): if not self.definition_names: raise EBNFCompilerError('Compiler has not been run before calling ' '"gen_Compiler_Skeleton()"!') compiler = ['class ' + self.grammar_name + 'Compiler(CompilerBase):', ' """Compiler for the abstract-syntax-tree of a ' + self.grammar_name + ' source file.', ' """', '', ' def __init__(self, grammar_name="' + self.grammar_name + '"):', ' super(' + self.grammar_name + 'Compiler, self).__init__()', " assert re.match('\w+\Z', grammar_name)", ''] for name in self.definition_names: if name == self.root: compiler += [' def ' + name + '(self, node):', ' return node', ''] else: compiler += [' def ' + name + '(self, node):', ' pass', ''] return '\n'.join(compiler + ['']) def gen_parser(self, definitions): # fix capture of variables that have been defined before usage [sic!] if self.variables: for i in range(len(definitions)): if definitions[i][0] in self.variables: definitions[i] = (definitions[i][0], 'Capture(%s, "%s")' % (definitions[1], definitions[0])) self.definition_names = [defn[0] for defn in definitions] definitions.append(('wspR__', 'wsp__' \ if 'right' in self.directives['literalws'] else "''")) definitions.append(('wspL__', 'wsp__' \ if 'left' in self.directives['literalws'] else "''")) definitions.append((WHITESPACE_KEYWORD, ("mixin_comment(whitespace=" "r'{whitespace}', comment=r'{comment}')"). format(**self.directives))) # prepare parser class header and docstring and # add EBNF grammar to the doc string of the parser class article = 'an ' if self.grammar_name[0:1].upper() \ in EBNFCompiler.VOWELS else 'a ' declarations = ['class ' + self.grammar_name + 'Grammar(GrammarBase):', 'r"""Parser for ' + article + self.grammar_name + ' source file' + (', with this grammar:' if self.source_text else '.')] definitions.append(('parser_initialization__', '"upon instatiation"')) if self.source_text: definitions.append(('source_hash__', '"%s"' % md5(self.source_text, __version__))) declarations.append('') declarations += [line for line in self.source_text.split('\n')] while declarations[-1].strip() == '': declarations = declarations[:-1] declarations.append('"""') # turn definitions into declarations in reverse order self.root = definitions[0][0] if definitions else "" definitions.reverse() declarations += [symbol + ' = Forward()' for symbol in sorted(list(self.recursive))] for symbol, statement in definitions: if symbol in self.recursive: declarations += [symbol + '.set(' + statement + ')'] else: declarations += [symbol + ' = ' + statement] for nd in self.symbols: if nd.result not in self.rules: nd.add_error("Missing production for symbol '%s'" % nd.result) if self.root and 'root__' not in self.symbols: declarations.append('root__ = ' + self.root) declarations.append('') return '\n '.join(declarations) def syntax(self, node): self._reset() definitions = [] # drop the wrapping sequence node if isinstance(node.parser, Sequence) and \ isinstance(node.result[0].parser, ZeroOrMore): node = node.result[0] # compile definitions and directives and collect definitions for nd in node.result: if nd.parser.name == "definition": definitions.append(self.compile__(nd)) else: assert nd.parser.name == "directive", nd.as_sexpr() self.compile__(nd) return self.gen_parser(definitions) def definition(self, node): rule = node.result[0].result if rule in EBNFCompiler.RESERVED_SYMBOLS: node.add_error('Symbol "%s" is a reserved symbol.' % rule) elif rule in self.scanner_tokens: node.add_error('Symbol "%s" has already been defined as ' 'a scanner token.' % rule) elif keyword.iskeyword(rule): node.add_error('Python keyword "%s" may not be used as a symbol. ' % rule + '(This may change in the furute.)') elif rule in self.rules: node.add_error('A rule with name "%s" has already been defined.' % rule) try: self.rules.add(rule) defn = self.compile__(node.result[1]) if rule in self.variables: defn = 'Capture(%s, "%s")' % (defn, rule) self.variables.remove(rule) except TypeError as error: errmsg = EBNFCompiler.AST_ERROR + " (" + str(error) + ")\n" + node.as_sexpr() node.add_error(errmsg) rule, defn = rule + ':error', '"' + errmsg + '"' return (rule, defn) @staticmethod def _check_rx(node, rx): """Checks whether the string `rx` represents a valid regular expression. Makes sure that multiline regular expressions are prepended by the multiline-flag. Returns the regular expression string. """ rx = rx if rx.find('\n') < 0 or rx[0:4] == '(?x)' else '(?x)' + rx try: re.compile(rx) except Exception as re_error: node.add_error("malformed regular expression %s: %s" % (repr(rx), str(re_error))) return rx def directive(self, node): key = node.result[0].result.lower() assert key not in self.scanner_tokens if key in {'comment', 'whitespace'}: value = node.result[1].result if value[0] + value[-1] in {'""', "''"}: value = escape_re(value[1:-1]) elif value[0] + value[-1] == '//': value = self._check_rx(node, value[1:-1]) else: value = self._check_rx(node, value) self.directives[key] = value elif key == 'literalws': value = {item.lower() for item in self.compile__(node.result[1])} if (len(value - {'left', 'right', 'both', 'none'}) > 0 or ('none' in value and len(value) > 1)): node.add_error('Directive "literalws" allows the values ' '`left`, `right`, `both` or `none`, ' 'but not `%s`' % ", ".join(value)) ws = {'left', 'right'} if 'both' in value \ else {} if 'none' in value else value self.directives[key] = list(ws) elif key == 'tokens': self.scanner_tokens |= self.compile__(node.result[1]) else: node.add_error('Unknown directive %s ! (Known ones are %s .)' % (key, ', '.join(list(EBNFCompiler.KNOWN_DIRECTIVES)))) return "" def non_terminal(self, node, parser_class): """Compiles any non-terminal, where `parser_class` indicates the Parser class name for the particular non-terminal. """ arguments = filter(lambda arg: arg, [self.compile__(r) for r in node.result]) return parser_class + '(' + ', '.join(arguments) + ')' def expression(self, node): return self.non_terminal(node, 'Alternative') def term(self, node): return self.non_terminal(node, 'Sequence') def factor(self, node): assert isinstance(node.parser, Sequence), node.as_sexpr() # these assert statements can be removed assert node.children assert len(node.result) >= 2, node.as_sexpr() prefix = node.result[0].result arg = node.result[-1] if prefix in {'::', ':'}: assert len(node.result) == 2 if arg.parser.name != 'symbol': node.add_error(('Retrieve Operator "%s" requires a symbols, ' 'and not a %s.') % (prefix, str(arg.parser))) return str(arg.result) self.variables.add(arg.result) if len(node.result) > 2: # shift = (Node(node.parser, node.result[1].result),) # node.result[1].result = shift + node.result[2:] node.result[1].result = (Node(node.result[1].parser, node.result[1].result),) \ + node.result[2:] node.result[1].parser = node.parser node.result = (node.result[0], node.result[1]) node.result = node.result[1:] for match, parser_class in self.PREFIX_TABLE: if prefix == match: return self.non_terminal(node, parser_class) assert False, ("Unknown prefix %s \n" % prefix) + node.as_sexpr() def option(self, node): return self.non_terminal(node, 'Optional') def repetition(self, node): return self.non_terminal(node, 'ZeroOrMore') def oneormore(self, node): return self.non_terminal(node, 'OneOrMore') def group(self, node): raise EBNFCompilerError("Group nodes should have been eliminated by " "AST transformation!") def symbol(self, node): if node.result in self.scanner_tokens: return 'ScannerToken("' + node.result + '")' else: self.symbols.add(node) if node.result in self.rules: self.recursive.add(node.result) return node.result def literal(self, node): return 'Token(' + ', '.join([node.result]) + ')' def regexp(self, node): rx = node.result name = [] if rx[:2] == '~/': if not 'left' in self.directives['literalws']: name = ['wL=' + WHITESPACE_KEYWORD] + name rx = rx[1:] elif 'left' in self.directives['literalws']: name = ["wL=''"] + name if rx[-2:] == '/~': if not 'right' in self.directives['literalws']: name = ['wR=' + WHITESPACE_KEYWORD] + name rx = rx[:-1] elif 'right' in self.directives['literalws']: name = ["wR=''"] + name try: arg = repr(self._check_rx(node, rx[1:-1].replace(r'\/', '/'))) except AttributeError as error: errmsg = EBNFCompiler.AST_ERROR + " (" + str(error) + ")\n" + \ node.as_sexpr() node.add_error(errmsg) return '"' + errmsg + '"' return 'RE(' + ', '.join([arg] + name) + ')' def list_(self, node): return set(item.strip() for item in node.result.split(',')) ######################################################################## # # support for compiling DSLs based on an EBNF-grammar # ######################################################################## DELIMITER = "\n\n### DON'T EDIT OR REMOVE THIS LINE ###\n\n" def is_python_code(text_or_file): """Checks whether 'text_or_file' is python code or the name of a file that contains python code. """ if text_or_file.find('\n') < 0: return text_or_file[-3:].lower() == '.py' try: compile(text_or_file, '<string>', 'exec') return True except (SyntaxError, ValueError, OverflowError): pass return False class GrammarError(Exception): """Raised when (already) the grammar of a domain specific language (DSL) contains errors. """ def __init__(self, error_messages, grammar_src): self.error_messages = error_messages self.grammar_src = grammar_src class CompilationError(Exception): """Raised when a string or file in a domain specific language (DSL) contains errors. """ def __init__(self, error_messages, dsl_text, dsl_grammar, AST): self.error_messages = error_messages self.dsl_text = dsl_text self.dsl_grammar = dsl_grammar self.AST = AST def __str__(self): return self.error_messages def compile_python_object(python_src, obj_name_ending="Grammar"): """Compiles the python source code and returns the object the name of which ends with `obj_name_ending`. """ code = compile(python_src, '<string>', 'exec') module_vars = globals() allowed_symbols = PARSER_SYMBOLS | AST_SYMBOLS | COMPILER_SYMBOLS namespace = {k: module_vars[k] for k in allowed_symbols} exec(code, namespace) # safety risk? for key in namespace.keys(): if key.endswith(obj_name_ending): parser = namespace[key] break else: parser = None return parser def get_grammar_instance(grammar): """Returns a grammar object and the source code of the grammar, from the given `grammar`-data which can be either a file name, ebnf-code, python-code, a GrammarBase-derived grammar class or an instance of such a class (i.e. a grammar object already). """ if isinstance(grammar, str): # read grammar grammar_src = load_if_file(grammar) if is_python_code(grammar): parser_py, errors, AST = grammar_src, '', None else: parser_py, errors, AST = full_compilation(grammar_src, EBNFGrammar(), EBNFTransTable, EBNFCompiler()) if errors: raise GrammarError(errors, grammar_src) parser_root = compile_python_object(parser_py, 'Grammar')() else: # assume that dsl_grammar is a ParserHQ-object or Grammar class grammar_src = '' if isinstance(grammar, GrammarBase): parser_root = grammar else: # assume `grammar` is a grammar class and get the root object parser_root = grammar() return parser_root, grammar_src def load_compiler_suite(compiler_suite): """ """ global DELIMITER assert isinstance(compiler_suite, str) source = load_if_file(compiler_suite) if is_python_code(compiler_suite): scanner_py, parser_py, ast_py, compiler_py = source.split(DELIMITER) scanner = compile_python_object(scanner_py, 'Scanner') ast = compile_python_object(ast_py, 'TransTable') compiler = compile_python_object(compiler_py, 'Compiler') else: # assume source is an ebnf grammar parser_py, errors, AST = full_compilation( source, EBNFGrammar(), EBNFTransTable, EBNFCompiler()) if errors: raise GrammarError(errors, source) scanner = nil_scanner ast = EBNFTransTable compiler = EBNFCompiler() parser = compile_python_object(parser_py, 'Grammar')() return scanner, parser, ast, compiler def compileDSL(text_or_file, dsl_grammar, trans_table, compiler, scanner=nil_scanner): """Compiles a text in a domain specific language (DSL) with an EBNF-specified grammar. Returns the compiled text. """ assert isinstance(text_or_file, str) assert isinstance(compiler, CompilerBase) assert isinstance(trans_table, dict) parser_root, grammar_src = get_grammar_instance(dsl_grammar) src = scanner(load_if_file(text_or_file)) result, errors, AST = full_compilation(src, parser_root, trans_table, compiler) if errors: raise CompilationError(errors, src, grammar_src, AST) return result def run_compiler(source_file, compiler_suite="", extension=".dst"): """Compiles the a source file with a given compiler and writes the result to a file. If no `compiler_suite` is given it is assumed that the source file is an EBNF grammar. In this case the result will be a Python script containing a parser for that grammar as well as the skeletons for a scanner, AST transformation table, and compiler. If the Python script already exists only the parser name in the script will be updated. (For this to work, the different names need to be delimited by the standard `DELIMITER`-line!). `run_compiler()` returns a list of error messages or an empty list if no errors occured. """ filepath = os.path.normpath(source_file) with open(source_file, encoding="utf-8") as f: source = f.read() rootname = os.path.splitext(filepath)[0] if compiler_suite: scanner, parser, trans, cclass = load_compiler_suite(compiler_suite) compiler = cclass() else: scanner = nil_scanner parser = EBNFGrammar() trans = EBNFTransTable compiler = EBNFCompiler(os.path.basename(rootname), source) result, errors, ast = full_compilation(scanner(source), parser, trans, compiler) if errors: return errors elif trans == EBNFTransTable: # either an EBNF- or no compiter suite given f = None global DELIMITER try: f = open(rootname + '_compiler.py', 'r', encoding="utf-8") source = f.read() scanner, parser, ast, compiler = source.split(DELIMITER) except (PermissionError, FileNotFoundError, IOError) as error: scanner = compiler.gen_scanner_skeleton() ast = compiler.gen_AST_skeleton() compiler = compiler.gen_compiler_skeleton() finally: if f: f.close() try: f = open(rootname + '_compiler.py', 'w', encoding="utf-8") f.write(scanner) f.write(DELIMITER) f.write(result) f.write(DELIMITER) f.write(ast) f.write(DELIMITER) f.write(compiler) except (PermissionError, FileNotFoundError, IOError) as error: print('# Could not write file "' + rootname + '.py" because of: ' + "\n# ".join(str(error).split('\n)'))) print(result) finally: if f: f.close() else: try: f = open(rootname + extension, 'w', encoding="utf-8") if isinstance(result, Node): f.write(result.as_xml()) else: f.write(result) except (PermissionError, FileNotFoundError, IOError) as error: print('# Could not write file "' + rootname + '.py" because of: ' + "\n# ".join(str(error).split('\n)'))) print(result) finally: if f: f.close() if DEBUG: print(ast) return [] def has_source_changed(grammar_source, grammar_class): """Returns `True` if `grammar_class` does not reflect the latest changes of `grammar_source` :param grammar_source: file name or string representation of the grammar source :param grammar_class: the parser class representing the grammar or the file name of a compiler suite containing the grammar :return: True, if the source text of the grammar is different from the source from which the grammar class was generated """ grammar = load_if_file(grammar_source) chksum = md5(grammar, __version__) if isinstance(grammar_class, str): # grammar_class = load_compiler_suite(grammar_class)[1] with open(grammar_class, 'r', encoding='utf8') as f: pycode = f.read() m = re.search('class \w*\(GrammarBase\)', pycode) if m: m = re.search(' source_hash__ *= *"([a-z0-9]*)"', pycode[m.span()[1]:]) return not (m and m.groups() and m.groups()[-1] == chksum) else: return True else: return chksum != grammar_class.source_hash__ ######################################################################## # # system test # ######################################################################## def test(file_name): print(file_name) with open('examples/' + file_name, encoding="utf-8") as f: grammar = f.read() compiler_name = os.path.basename(os.path.splitext(file_name)[0]) compiler = EBNFCompiler(compiler_name, grammar) parser = EBNFGrammar() result, errors, syntax_tree = full_compilation(grammar, parser, EBNFTransTable, compiler) print(result) if errors: print(errors) sys.exit(1) else: result = compileDSL(grammar, result, EBNFTransTable, compiler) print(result) return result # # Changes in the EBNF source that are not reflected in this file could be # # a source of sometimes obscure errors! Therefore, we will check this. # if (os.path.exists('examples/EBNF/EBNF.ebnf') Eckhart Arnold's avatar Eckhart Arnold committed 2175 # and source_changed('examples/EBNF/EBNF.ebnf', EBNFGrammar)): 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 # assert False, "WARNING: Grammar source has changed. The parser may not " \ # "represent the actual grammar any more!!!" # pass if __name__ == "__main__": print(sys.argv) if len(sys.argv) > 1: _errors = run_compiler(sys.argv[1], sys.argv[2] if len(sys.argv) > 2 else "") if (_errors): print(_errors) sys.exit(1) else: # self-test test('EBNF/EBNF.ebnf')
__label__pos
0.95254
Last weekend I was at SoCraTes Canaries and I gave my first talk ever about code smells. Oh boy! How nervous was I! But now that has passed, I was wondering what I should do with all information I gathered. And then I thought, maybe it's a good idea to put it all in a nice blog post. So what are code smells? As Martin Fowler said in his book "Refactoring: Improving the Design of Existing Code", A code smell is a surface indication that usually corresponds to a deeper problem in the system. I like to think that a code smell is something that makes your developer instinct cry out to you, and you just know that something is wrong. This doesn’t mean you have to make changes in your code: there are occasions where these code smells are ok, but I think it’s important for us to detect them and know exactly why they are there. There are five categories of code smells: • Bloaters • Object-Orientation Abusers • Change Preventers • Dispensables • Couplers Today I'm going to talk about Bloaters. I'll leave the other categories for a future post. Bloaters Bloaters can be big methods or classes, primitive obsessions, data clumps, or long parameter lists. Long Parameter List/Data Clumps The Long Parameter List is when you have a method that has more than 3 parameters. Sometimes we see that when we receive an object, and instead of passing it all we pass some of its data. In this case, the best policy is to pass the whole object. Data Clumps are a bit different: they are, in general, primitive values that start to "get together". A good example of this is a startDate and endDate... Maybe it's worth creating a DateRange. Primitive Obsession This case is when we use primitives instead of value types for simple tasks. Sometimes the use of primitives is justifiable, but when you start to have behaviour attached to this primitives, then it's time to stop and think that maybe a value type is in order. A simple example is a currency: we tend to put it in a float or double, instead of encapsulating it in a value type. Long Method / Large Class This kind of code smell happens when you have a big method. But when do you know that a method has become too big? Well, I have the rule that with more than five lines, you should, at least, look at it again. But, as Sandro told me before, the right number of lines is just enough lines so a method only does one thing (and so it conforms to the 1st principle of SOLID the Single responsibility principle. To do this blog I started to look at my old code when I hadn't woken up yet to craftsmanship: if it was working that was good enough for me. Here's the code in Objective-C: - (void) postToServer { PostSerializer* postSerializer = [[PostSerializer alloc] init]; NSString *post = [postSerializer serializePostWithTitle:self.txtTitle.text description:self.txtDescription.text author:self.txtUser.text game:self.game]; NSMutableDictionary *postParams = [NSMutableDictionary dictionary]; [postParams setObject:txtTitle.text forKey:@"title"]; [postParams setObject:post forKey:@"data"]; [postParams setObject:txtUser.text forKey:@"username"]; [postParams setObject:txtPassword.text forKey:@"password"]; NSArray *args = [NSArray arrayWithObjects:[NSNumber numberWithInt:0], postParams, nil]; #ifdef DEBUG_LOG XMLRPCRequest *request = [[XMLRPCRequest alloc] initWithURL: [NSURL URLWithString:@"http://localhost:8888/letsbasket/xmlrpc.php"]]; DLog(@"Debug"); #else XMLRPCRequest *request = [[XMLRPCRequest alloc] initWithURL:[NSURL URLWithString:[UtilsHelper localizeString:@"UrlXmlRPCKey"]]]; DLog(@"Producao"); #endif [request setMethod:@"letsBasket.AddPost" withParameters:args]; NSError *error = nil; XMLRPCResponse* result = [XMLRPCConnection sendSynchronousXMLRPCRequest:request error:&error]; UIApplication *app = [UIApplication sharedApplication]; app.networkActivityIndicatorVisible = NO; [self dismissWaitingAlert]; if(error != nil || [[result body] rangeOfString:@"<name>error</name>"].location != NSNotFound) { int location_start = [[result body] rangeOfString:@"<string>"].location + 8; int location_end = [[result body] rangeOfString:@"</string>"].location; NSString *message = [[[result body] substringWithRange:NSMakeRange(location_start, location_end- location_start)] unescapedString]; NSString* title = [UtilsHelper localizeString:@"PublishVC_ErrorRetreivingAlertTitle_key"]; [self showAlertWithErrorMessage:message Title:title]; return; } [self processPublishResult:result]; } Wow! This is a really big method. And it is inside a ViewController class, so this should definitely be extracted into a service class, so we have a correct separation of concerns. But for the sake of the brevity, let's focus on how can we refactor this big method. The refactoring technique to apply here is Extract Method: you can aggregate code together and extract to a new method. So let's see what we can come up with: We can start with grouping the code that refers to serializing a post: - (NSString *)serializePost { PostSerializer* postSerializer = [[PostSerializer alloc] init]; NSString *post = [postSerializer serializePostWithTitle:self.txtTitle.text description:self.txtDescription.text author:self.txtUser.text game:self.game]; return post; } Then we can do it for the parameters of the request: - (NSArray *)createPostParams:(NSString *)post { NSMutableDictionary *postParams = [NSMutableDictionary dictionary]; [postParams setObject:txtTitle.text forKey:@"title"]; [postParams setObject:post forKey:@"data"]; [postParams setObject:txtUser.text forKey:@"username"]; [postParams setObject:txtPassword.text forKey:@"password"]; NSArray *args = [NSArray arrayWithObjects:[NSNumber numberWithInt:0], postParams, nil]; return args; } With all this in place we are now ready to create a XMLRPCRequest: - (XMLRPCRequest *)createXMLRPCRequestWithArgs:(NSArray*)args { XMLRPCRequest *request; #ifdef DEBUG_LOG request = [[XMLRPCRequest alloc] initWithURL: [NSURL URLWithString:@"http://localhost:8888/letsbasket/xmlrpc.php"]]; DLog(@"Debug"); #else request = [[XMLRPCRequest alloc] initWithURL:[NSURL URLWithString:[UtilsHelper localizeString:@"UrlXmlRPCKey"]]]; DLog(@"Producao"); #endif [request setMethod:@"letsBasket.AddPost" withParameters:args]; return request; } We can also extract a method with some display updates: - (void)updateDisplay { UIApplication *app = [UIApplication sharedApplication]; app.networkActivityIndicatorVisible = NO; [self dismissWaitingAlert]; } And last but not least we can extract the preparation for displaying the error message: - (void)showError:(NSString*)bodyResult { int location_start = [bodyResult rangeOfString:@"<string>"].location + 8; int location_end = [bodyResult rangeOfString:@"</string>"].location; NSString *message = [[bodyResult substringWithRange:NSMakeRange(location_start, location_end- location_start)] unescapedString]; NSString* title = [UtilsHelper localizeString:@"PublishVC_ErrorRetreivingAlertTitle_key"]; [self showAlertWithErrorMessage:message Title:title]; } With all these extractions our method now looks pretty neat: - (void) postToServer { NSString *post = [self serializePost]; NSArray *args = [self createPostParams:post]; XMLRPCRequest *request = [self createXMLRPCRequestWithArgs: args]; NSError *error = nil; XMLRPCResponse* result = [XMLRPCConnection sendSynchronousXMLRPCRequest:request error:&error]; [self updateDisplay]; if(error != nil || [[result body] rangeOfString:@"<name>error</name>"].location != NSNotFound) { [self showError:[result body]]; return; } [self processPublishResult:result]; } Hmm... we can do this even better! Let's take a look at the method createXMLRCPRequest and see if we can call the others from there. In this case, it makes sense to have all together. - (XMLRPCRequest *)createXMLRPCRequest { NSString *post = [self serializePost]; NSArray *args = [self createPostParams:post]; XMLRPCRequest *request; #ifdef DEBUG_LOG request = [[XMLRPCRequest alloc] initWithURL: [NSURL URLWithString:@"http://localhost:8888/letsbasket/xmlrpc.php"]]; DLog(@"Debug"); #else request = [[XMLRPCRequest alloc] initWithURL:[NSURL URLWithString:[UtilsHelper localizeString:@"UrlXmlRPCKey"]]]; DLog(@"Producao"); #endif [request setMethod:@"letsBasket.AddPost" withParameters:args]; return request; } And our original method now looks like this: - (void) postToServer { XMLRPCRequest *request = [self createXMLRPCRequest]; NSError *error = nil; XMLRPCResponse* result = [XMLRPCConnection sendSynchronousXMLRPCRequest:request error:&error]; [self updateDisplay]; if(error != nil || [[result body] rangeOfString:@"<name>error</name>"].location != NSNotFound) { [self showError:[result body]]; return; } [self processPublishResult:result]; } Well, here you go: a method with more than 5 lines and I think that's ok. :) As we can see it's really easy to let a method grow. But it's really easy to refactor and have a cleaner code too. Conclusion In general, bloaters are viewed as code that, over time, "gets out of hand". Remember, code smells sometimes can't be removed, but it's good to know that they are there and you know why they are there. This post was cross-posted to my personal blog. Get content like this straight to your inbox! Software es nuestra pasión. Somos Software Craftspeople. Construimos software bien elaborado para nuestros clientes, ayudamos a los/as desarrolladores/as a mejorar en su oficio a través de la formación, la orientación y la tutoría. Ayudamos a las empresas a mejorar en la distribución de software. Últimos posts del blog Contacto 3 Sutton Lane, planta 3 Londres, EC1M 5PU Teléfono: +44 207 4902967 2 Mount Street Manchester, M2 5WQ Teléfono: +44 161 302 6795 Carrer de Pallars 99, planta 4, sala 41 Barcelona, 08018 Teléfono: +34 937 82 28 82 Correo electrónico: [email protected]
__label__pos
0.989466
Ashish Agrawal Ashish Agrawal - 9 months ago 234 C# Question UWP: Trimming the text of a TextBlock based on number of lines I have an UWP application where I want to trim the text of the TextBlock if it goes beyond the third line and show "show more" link (tappable) in the end of the 3rd line. I know to restrict number of lines I can use MaxLines property but it simply ignores the rest of the lines as if they don't exist. But I want to let the user know that there is some more text and he can tap on the show more link to navigate to the full text. How can I achieve it? Answer Read the good topic which describes all step to create an expandable textblock Also, view the source code on github Here is the XAML code: <Grid x:Name="LayoutRoot" Tapped="LayoutRoot_OnTap"> <Grid.RowDefinitions> <RowDefinition Height = "Auto" /> <RowDefinition Height="Auto" /> </Grid.RowDefinitions> <TextBlock Grid.Row="0" x:Name= "CommentTextBlock" HorizontalAlignment= "Left" TextWrapping= "Wrap" Height= "Auto" Width= "280" /> < StackPanel Grid.Row= "1" Orientation= "Horizontal" HorizontalAlignment= "Right" x:Name= "ExpandHint" Visibility= "Collapsed" Margin= "0,5,0,0" > < TextBlock Text= "View More" /> < TextBlock Margin= "10,0,10,0" Text= "+" /> </ StackPanel > </ Grid > Here is C# part public sealed partial class ExpandableTextBlock : UserControl { public ExpandableTextBlock() { this.InitializeComponent(); } public static readonly DependencyProperty TextProperty = DependencyProperty.Register( "Text", typeof(string), typeof(ExpandableTextBlock), new PropertyMetadata(default(string), OnTextChanged)); public string Text { get { return (string)GetValue(TextProperty); } set { SetValue(TextProperty, value); } } private static void OnTextChanged(DependencyObject d, DependencyPropertyChangedEventArgs e) { var ctl = (ExpandableTextBlock)d; ctl.CommentTextBlock.SetValue(TextBlock.TextProperty, (string)e.NewValue); ctl.CommentTextBlock.SetValue(TextBlock.HeightProperty, Double.NaN); ctl.CommentTextBlock.Measure(new Size(ctl.CommentTextBlock.Width, double.MaxValue)); double desiredheight = ctl.CommentTextBlock.DesiredSize.Height; ctl.CommentTextBlock.SetValue(TextBlock.HeightProperty, (double)63); if (desiredheight > (double)ctl.CommentTextBlock.GetValue(TextBlock.HeightProperty)) { ctl.ExpandHint.SetValue(StackPanel.VisibilityProperty, Visibility.Visible); ctl.MaxHeight = desiredheight; } else { ctl.ExpandHint.SetValue(StackPanel.VisibilityProperty, Visibility.Collapsed); } //Setting length of comments var boundsWidth = Window.Current.Bounds.Width; ctl.CommentTextBlock.SetValue(TextBlock.WidthProperty, boundsWidth); } public static readonly DependencyProperty CollapsedHeightProperty = DependencyProperty.Register( "CollapsedHeight", typeof(double), typeof(ExpandableTextBlock), new PropertyMetadata(default(double), OnCollapsedHeightChanged)); public double CollapsedHeight { get { return (double)GetValue(CollapsedHeightProperty); } set { SetValue(CollapsedHeightProperty, value); } } private static void OnCollapsedHeightChanged(DependencyObject d, DependencyPropertyChangedEventArgs e) { var ctl = (ExpandableTextBlock)d; ctl.CollapsedHeight = (double)e.NewValue; } public static readonly DependencyProperty TextStyleProperty = DependencyProperty.Register( "TextStyle", typeof(Style), typeof(ExpandableTextBlock), new PropertyMetadata(default(Style), OnTextStyleChanged)); public Style TextStyle { get { return (Style)GetValue(TextStyleProperty); } set { SetValue(TextStyleProperty, value); } } private static void OnTextStyleChanged(DependencyObject d, DependencyPropertyChangedEventArgs e) { var ctl = (ExpandableTextBlock)d; ctl.CommentTextBlock.SetValue(StyleProperty, (Style)e.NewValue); } private void LayoutRoot_OnTap(object sender, TappedRoutedEventArgs tappedRoutedEventArgs) { if ((Visibility)this.ExpandHint.GetValue(StackPanel.VisibilityProperty) == Visibility.Visible) { //transition this.CommentTextBlock.SetValue(TextBlock.HeightProperty, Double.NaN); this.ExpandHint.SetValue(StackPanel.VisibilityProperty, Visibility.Collapsed); } } }
__label__pos
0.999718
linux/lib/ts_kmp.c << >> Prefs 1/* 2 * lib/ts_kmp.c Knuth-Morris-Pratt text search implementation 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public License 6 * as published by the Free Software Foundation; either version 7 * 2 of the License, or (at your option) any later version. 8 * 9 * Authors: Thomas Graf <[email protected]> 10 * 11 * ========================================================================== 12 * 13 * Implements a linear-time string-matching algorithm due to Knuth, 14 * Morris, and Pratt [1]. Their algorithm avoids the explicit 15 * computation of the transition function DELTA altogether. Its 16 * matching time is O(n), for n being length(text), using just an 17 * auxiliary function PI[1..m], for m being length(pattern), 18 * precomputed from the pattern in time O(m). The array PI allows 19 * the transition function DELTA to be computed efficiently 20 * "on the fly" as needed. Roughly speaking, for any state 21 * "q" = 0,1,...,m and any character "a" in SIGMA, the value 22 * PI["q"] contains the information that is independent of "a" and 23 * is needed to compute DELTA("q", "a") [2]. Since the array PI 24 * has only m entries, whereas DELTA has O(m|SIGMA|) entries, we 25 * save a factor of |SIGMA| in the preprocessing time by computing 26 * PI rather than DELTA. 27 * 28 * [1] Cormen, Leiserson, Rivest, Stein 29 * Introdcution to Algorithms, 2nd Edition, MIT Press 30 * [2] See finite automation theory 31 */ 32 33#include <linux/module.h> 34#include <linux/types.h> 35#include <linux/string.h> 36#include <linux/ctype.h> 37#include <linux/textsearch.h> 38 39struct ts_kmp 40{ 41 u8 * pattern; 42 unsigned int pattern_len; 43 unsigned int prefix_tbl[0]; 44}; 45 46static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state) 47{ 48 struct ts_kmp *kmp = ts_config_priv(conf); 49 unsigned int i, q = 0, text_len, consumed = state->offset; 50 const u8 *text; 51 const int icase = conf->flags & TS_IGNORECASE; 52 53 for (;;) { 54 text_len = conf->get_next_block(consumed, &text, conf, state); 55 56 if (unlikely(text_len == 0)) 57 break; 58 59 for (i = 0; i < text_len; i++) { 60 while (q > 0 && kmp->pattern[q] 61 != (icase ? toupper(text[i]) : text[i])) 62 q = kmp->prefix_tbl[q - 1]; 63 if (kmp->pattern[q] 64 == (icase ? toupper(text[i]) : text[i])) 65 q++; 66 if (unlikely(q == kmp->pattern_len)) { 67 state->offset = consumed + i + 1; 68 return state->offset - kmp->pattern_len; 69 } 70 } 71 72 consumed += text_len; 73 } 74 75 return UINT_MAX; 76} 77 78static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len, 79 unsigned int *prefix_tbl, int flags) 80{ 81 unsigned int k, q; 82 const u8 icase = flags & TS_IGNORECASE; 83 84 for (k = 0, q = 1; q < len; q++) { 85 while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k]) 86 != (icase ? toupper(pattern[q]) : pattern[q])) 87 k = prefix_tbl[k-1]; 88 if ((icase ? toupper(pattern[k]) : pattern[k]) 89 == (icase ? toupper(pattern[q]) : pattern[q])) 90 k++; 91 prefix_tbl[q] = k; 92 } 93} 94 95static struct ts_config *kmp_init(const void *pattern, unsigned int len, 96 gfp_t gfp_mask, int flags) 97{ 98 struct ts_config *conf; 99 struct ts_kmp *kmp; 100 int i; 101 unsigned int prefix_tbl_len = len * sizeof(unsigned int); 102 size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len; 103 104 conf = alloc_ts_config(priv_size, gfp_mask); 105 if (IS_ERR(conf)) 106 return conf; 107 108 conf->flags = flags; 109 kmp = ts_config_priv(conf); 110 kmp->pattern_len = len; 111 compute_prefix_tbl(pattern, len, kmp->prefix_tbl, flags); 112 kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len; 113 if (flags & TS_IGNORECASE) 114 for (i = 0; i < len; i++) 115 kmp->pattern[i] = toupper(((u8 *)pattern)[i]); 116 else 117 memcpy(kmp->pattern, pattern, len); 118 119 return conf; 120} 121 122static void *kmp_get_pattern(struct ts_config *conf) 123{ 124 struct ts_kmp *kmp = ts_config_priv(conf); 125 return kmp->pattern; 126} 127 128static unsigned int kmp_get_pattern_len(struct ts_config *conf) 129{ 130 struct ts_kmp *kmp = ts_config_priv(conf); 131 return kmp->pattern_len; 132} 133 134static struct ts_ops kmp_ops = { 135 .name = "kmp", 136 .find = kmp_find, 137 .init = kmp_init, 138 .get_pattern = kmp_get_pattern, 139 .get_pattern_len = kmp_get_pattern_len, 140 .owner = THIS_MODULE, 141 .list = LIST_HEAD_INIT(kmp_ops.list) 142}; 143 144static int __init init_kmp(void) 145{ 146 return textsearch_register(&kmp_ops); 147} 148 149static void __exit exit_kmp(void) 150{ 151 textsearch_unregister(&kmp_ops); 152} 153 154MODULE_LICENSE("GPL"); 155 156module_init(init_kmp); 157module_exit(exit_kmp); 158 lxr.linux.no kindly hosted by Redpill Linpro AS, provider of Linux consulting and operations services since 1995.
__label__pos
0.99924
Data Raptors in Omnistudio DataRaptors in Omnistudio This blog explores how we can utilize Omnistudio Data Tool – DataRaptor and its types to access read, write, transform, calculate and track data in the Salesforce org. DataRaptors is a mapping tool in Omnistudio facilitating the seamless handling of Salesforce data enabling users to read, write & transform data. DataRaptors play a pivotal role in streamlining the integration of your applications with Salesforce. They accomplish this by seamlessly delivering data from the database to various components like Cards, Integration Procedures and OmniScripts enhancing the functionality of your ecosystem. Types of DataRaptors Go to App Launcher –> Click on Omnistudio –> Select Omnistudio DataRaptor –>Click on new  –>Give DataRaptor Interface Name –>Choose Interface Type from the drop-down menu  (Extract, Turbo – Extract, Load, and Transform). There are four types of DataRaptor - Extract, TurboExtract, Load and Transform 1. Extract DataRaptor Extract retrieves data from multiple salesforce objects reads data and returns results in JSON, XML, or custom formats. It contains five tabs – Extract Tab The Extract tab allows you to select the Salesforce objects you want to retrieve data from and define filters to specify which specific data from those objects will be included in your results. • Click the + Add Extract Step button and choose the object you want data from, like “Account.” • Now for the Configuration setting for the Object, give a name to the “Extract Output Path”, and try to keep it in the name of the Source object only as it defines the top node of JSON (Ex-Account). • If you only want specific data, use the filters. Choose the field you want to filter on (left side), select the comparison operator (middle), and then choose the value: Specific value: Enter a value like a record ID directly. Variable: Use a variable defined elsewhere to hold a dynamic value. Another field: Compare to another field within the same object. Previous extract step: Compare to data you already filtered in a prior step. Output Tab DataRaptor’s Extract Output tab allows you to define how the data retrieved from Salesforce (using the Extract tab) gets structured in the final output JSON. This process involves mapping specific fields from the extracted data to desired locations in the final JSON structure. • Extract JSON Path: In this field, you choose the Source field of the object. • Output JSON Path: Here, you specify the desired output for the chosen field in the final output JSON.                         Formula Tab This tab allows us to add data to the output of our DataRaptor. Go to the Formulas tab within your DataRaptor. Click Add Formula to create a new one. • In the Formula field, enter the logic you want to perform. For example, to add country code to the phone number, use:  (“+91” + Account: Phone.) • In the Formula Result Path, choose the JSON node where you want to store the calculated value and also map this Output JSON path with the Extract JSON path.                                     Preview Tab For testing the input and output of a DataRaptor Extract we can use the Preview tab by providing the value to the key. For example, suppose we want to extract data for Accounts where “CleanStatus” equals “Pending” and “PrimaryContactId__c” equals “0035i0000FtQFrUAQW”. We’ve defined variables “CS” for “CleanStatus” and “CON” for “PrimaryContactId__c” in our filters. Now, in the Preview tab, we will give value to the variable.                 2. Turbo Extract Turbo Extract performs better at runtime because it extracts data from a single object. Unlike DataRaptor Extract, Turbo Extract does not include a formula or output tab but supports a Preview Tab.                     (The process for selecting data and using the Preview tab remains the same as explained for DataRaptor Extract). 3. Load DataRaptor Loads act as a bridge between your external data and Salesforce. They can accept data in various formats, including JSON, XML, and even custom formats, and transfer it to your desired Salesforce objects. Objects Tab The Objects tab, lets you select the Salesforce object where you want to create or update records. (Ex-To create a contact, select ‘contact’.) Field Tab This tab determines which fields you want to provide values for when creating the records for an object. Input JSON Path:  Specify the key name that holds the value you want to put into Salesforce. Domain Object Field: Choose the exact field in the Salesforce object you want to map with the key name mentioned in the Input JSON Path.   Preview Tab In the Preview tab, we are required to input a JSON file in order to assign values to the specified keys listed in the Field section. We can click on the generated link of the record ID and check the Contact record in the Org load by the DataRaptor. Record loaded by Load DataRaptor to the Contact object in Salesforce Org 4. Transform DataRaptor Transforms allows users to make intermediate data modifications without accessing Salesforce. These transformations support formulas and enable conversion between JSON and XML formats. Users can restructure input data, rename fields, substitute values, and convert data into formats like PDF, DocuSign or Document Template. Transforms Tab Click the “+” button to add a new mapping. In the “Input JSON Path” field, specify the location of the data in the input format. Use colons (:) to separate levels within the data structure (like Contact: Name). In the “Output JSON Path” field, specify the desired location for the data in the output format. You can also rename the field here. (like Person: Name).                                   (The process for selecting data and using the Preview tab remains the same as explained for DataRaptor Load). Want to Learn Salesforce Flows? Checkout our Salesforce Flow Course FAQs 1. How do I use DataRaptor in Omniscript? We can use DataRaptor in Omniscript by following the steps:- • Go to the step where you want to interact with DataRaptor in your Omniscript, • Drag & drop the “DataRaptor Action” element onto your Omniscript. • Click on the DataRaptor action to open the properties panel. • Name your action and select the desired DataRaptor from the dropdown. 2. What is the governor limit for DataRaptor? The governor limit for DataRaptor relies on Salesforce services, which have limits on processing time. While setting a record limit (e.g., 2,000) helps avoid exceeding these limits, it’s not a foolproof solution. The complexity of processing is what truly matters. Focus on optimizing your DataRaptor actions to ensure smooth operation. 3. What is the difference between the DataRaptor extract and the DataRaptor Turbo extract? DataRaptor Extract offers greater flexibility, allowing extraction from multiple Salesforce objects with complex filters and data manipulation. However, it may take slightly longer to process. It supports formulas and output. On the other hand, DataRaptor Turbo Extract prioritizes speed and simplicity, ideal for retrieving data from a single Salesforce object quickly but it does not support formulas within the extract and field mapping customization in the output JSON. Read more – Flexcard Designer in Omnistudio Conclusion This blog post discusses DataRaptor, a powerful tool in Omnistudio for manipulating data within Salesforce. It covers the four types of DataRaptor: Extract, Turbo Extract, Load, and Transform. The Extract type retrieves data from multiple Salesforce objects, allowing filtering and formula manipulation. Turbo Extract prioritizes speed for retrieving data from a single object but has fewer features. Load acts as a bridge for transferring external data into Salesforce objects. Transform enables data modification without accessing Salesforce and supports formula creation and format conversions Get a complete Roadmap To Learn Salesforce Admin And Development Share Now Neelu is pursuing her career in the Salesforce domain. She believe that words are powerful tools, and she love using them to express herself, both in technical and creative ways. This passion for language makes her a bit of a logophile (a lover of words!). In her free time, she enjoy crafting quotes to share her thoughts and inspirations. Related Posts Leave a Reply Your email address will not be published. Required fields are marked *
__label__pos
0.864051
You will need to delete the address on the source constituent's record prior to merging the two duplicate records. 1. From Constituents > Constituent Search, search for and open the constituent record 2. Select the Contact tab 3. Click the drop-down arrow next to the address and Delete 4. Repeat this for each address on the record Now you should be able to complete the merge. 1. Go to Constituents > Duplicates > Merge two constituents 2. Search for and enter the Source and Target constituents 3. In the Merge Configuration drop-down, select Default Configuration 4. Click Merge
__label__pos
0.996904
Let's Make Everyone a Queen! Let's Make Everyone a Queen! Forgive the somewhat over the top title. For a while now I've been meaning to make an application with a nifty little JavaScript library called Tracery. Tracery is a library created by Kate Compton. It's a fascinating tool for generating text based on a set of inputs. I saw fascinating because sometimes it makes some pretty incredible little stories. For example: This is a story about a faceless man. You know, the faceless man who hardly ever crys when they feel the forest. Well, I was listening to the faceless man, when we both saw this tree. Blinking, orange...well, more of a blueish white. We backed away because as everybody knows, trees don't exist. That was the last we saw of it. And now, the weather. Music plays. You recall summertime and pain. You recall a lover and a friend. Operatic folk harpsichord echoes out into dissonance. You know, I miss the tree. It was pretty terrible. I mean, really beautiful, for a tree. Eventually, I hope it comes back. We'll see it, glistening, grey...well, more of an indigoish indigo. But it'll be back. I mean, eventually. If not, it's just so bewildering. So yes, that's a bit crazy at times. But there's something interesting about it. If you reload the site you'll see new random generated stories and I could spend quite a bit of time seeing what it does. I first becamse of aware of this library when I discovered [@dragonhoards](https://twitter.com/dragonhoards] on Twitter. This is a bit that makes use of the library. Here's an example tweet that's both interesting and horifying at the same time: At the simplest level, Tracery works by combining different arrays of input values. So for example, given this input: let input = { "sentence": ["The #color# #animal# of the #natureNoun# is called #name#"], "color": ["orange","blue","white","black","grey","purple","indigo","turquoise"], "animal": ["unicorn","raven","sparrow","scorpion","coyote","eagle","owl","lizard","zebra","duck","kitten"], "natureNoun": ["ocean","mountain","forest","cloud","river","tree","sky","sea","desert"], "name": ["Arjun","Yuuma","Darcy","Mia","Chiaki","Izzi","Azra","Lina"] } You can generate a random sentence like so: grammar = tracery.createGrammar(input); let sentence = grammar.flatten('#origin#'); The code starts with sentence and looks for tokens. For each token it will look for a corresponding array of values and select a random one. Here's an example result: The turquoise lizard of the river is called Mia That part is relatively simple, but Tracery gets very complex. So for example, it supports picking a random animal once and re-using the same value again if you need it. Honestly the complexity goes beyond what I think I can understand currently, but she's got a great testing utility you can play with here: http://www.crystalcodepalace.com/traceryTut.html. And of course, it's up on GitHub: https://github.com/galaxykate/tracery. Note that you want to make use of the tracery2 branch, not master. Ok, so with that being said, I thought it would build a few demos with this. The Web Site As I said, Tracery is powerful, but complex. While I had a end game in mind (the second demo I'll be showing), I thought it would make sense to start with a web site first to keep it simple. As the title of this post suggests, it's all about making you a queen. I had my daughters in mind but anyone can be a queen if they want. Here's the end result: Queen Of You can demo this yourself here: https://queenof.netlify.app/#Lindy Notice I've included the name in the URL. You can change the hash mark to whatever, or just type whatever you want in the form field. The basic pattern is relatively simple: X is the queen of A, something of B, and something else of C. It's built using Vue.js because of course I'd use Vue for this. Here's the code: const input = { "things":["Shadows","Night","the Sea","the Moon","Stars", "the Sun","Kittens","Fear","Courage","Dancing", "the Internet","Unicorns","Dolphins","Mermaids","Upstairs", "Foxes","Puppies","Chairs","Trees","Plants", "Flowers","Music","Singing","Painting","Song", "Sparkles","Jewels","Intelligence","Smarts","Dragons", "Wolves","Shoes","Bravery","Honesty","Empathy", "Compassion","Wisdon","Knowledge","Cats","Storms", "Lightning","Thunder","Rain","Snow","Clouds", "Wind","the Earth","the Universe","the Galaxy","the Piano", "the Sky","the Land","the Realm","the oceans","cookies", "cakes","pies","macarons","pizza","parties"], "role":["Defender","Champion","Scion","Empress","Sorceress", "Master","Mistress","Boss","CEO","President", "Prime Minister","DJ","Knight","Dame","Duchess", "Baroness","Countess","Manager","Singer","Drummer", "Muse","Siren","Painter","Crafter","Creator", "Accountant","Chancellor","Jedi","Teacher","Jedi Master", "Tutor"], "origin":[" is the Queen of #things#, #role# of #things#, and #role# of #things#."] }; const app = new Vue({ el:'#app', data: { grammar:null, name:'', result:'' }, methods:{ makeQueen() { if(this.name === '') return; this.result = this.name + grammar.flatten('#origin#'); window.location.hash = this.name; } }, mounted() { grammar = tracery.createGrammar(input); grammar.addModifiers(baseEngModifiers); if(window.location.hash && window.location.hash.length > 1) { //remove # this.name = window.location.hash.substring(1); this.makeQueen(); } } }); The crucial bits are the origin value as that forms the basic structure of the random sentence. I leave off the beginning because that will be the name. The VUe parts then are pretty trivial. Setup Tracery and wait for you to enter a value (although note that mounted will notice the hash). If you want, you can peruse the entire code base here: https://github.com/cfjedimaster/queenof The Twitter Bot So as I said, I had an endgame and mind, and that was a Twitter bot. I've got something of a problem when it comes to creating Twitter bots, but I'm sure I got stop whenever I want to. Using Pipedream, I built a Twitter bot at @generatorqueen. She works rather simply. Send her a tweet with "queen me" in the text and you'll get a response within a minute. I built this using a Pipedream workflow you can find here: https://pipedream.com/@raymondcamden/queen-of-bot-v2-p_MOCQen/edit. Don't forget that one of the coolest features of Pipedream is that you can share workflows with others so they can fork and use for their own purposes! Let's break down the workflow bits. I began with a Twitter search event source. I blogged about these last week. They are a powerful way to build event driven workflows. In this case the event source is simply a Tweet that matches "@generatorqueen". Next I have a custom Node.js step to do validation on the text: async (event, steps) => { if(steps.trigger.event.full_text.indexOf('queen me') === -1) $end('queen me not in tweet'); } Remember that $end is Pipedream's way of letting you end a workflow early. The next step generates the text: async (event, steps) => { const tracery = require('tracery-grammar'); const grammar = tracery.createGrammar({ "things":["Shadows","Night","the Sea","the Moon","Stars", "the Sun","Kittens","Fear","Courage","Dancing", "the Internet","Unicorns","Dolphins","Mermaids","Upstairs", "Foxes","Puppies","Chairs","Trees","Plants", "Flowers","Music","Singing","Painting","Song", "Sparkles","Jewels","Intelligence","Smarts","Dragons", "Wolves","Shoes","Bravery","Honesty","Empathy", "Compassion","Wisdon","Knowledge","Cats","Storms", "Lightning","Thunder","Rain","Snow","Clouds", "Wind","the Earth","the Universe","the Galaxy","the Piano", "the Sky","the Land","the Realm","the oceans","cookies", "cakes","pies","macarons","pizza","parties"], "role":["Defender","Champion","Scion","Empress","Sorceress", "Master","Mistress","Boss","CEO","President", "Prime Minister","DJ","Knight","Dame","Duchess", "Baroness","Countess","Manager","Singer","Drummer", "Muse","Siren","Painter","Crafter","Creator", "Accountant","Chancellor","Jedi","Teacher","Jedi Master", "Tutor"], "origin":["the Queen of #things#, #role# of #things#, and #role# of #things#."] }); grammar.addModifiers(tracery.baseEngModifiers); this.sender = steps.trigger.event.user.screen_name; this.message = '@' + this.sender + ' You are ' + grammar.flatten('#origin#'); console.log(this.message); } Now, at this point, everything's been pretty simple. In theory the next step is to just use the "Post Tweet" action. I've used that before and it's one of the many built in actions at Pipedream. However, my buddy Dylan Sather at Pipedream noticed a potential issue with my use case. Because my bot would be replying to users, it was a potential TOS issue with Twitter's API. When you use Pipedream's Post Tweet action it's using Pipedream's application credentials for the call. It's using your authentication, but the lower level app itself is Pipedreams. Because of the potential for abuse, it would be problemtic to allow the Post Tweet action to "at" people in tweets. Luckily, the workaround was relatively simple. First, Pipedream created a new action that lets you use your credentials: New action for posting tweets Once you've added this, you supply your own application credentials. You can get these simply enough at https://developer.twitter.com/en but note! If you've just today made the account for your bot, your bot itself needs to ask for permission to create Twitter apps. Twitter is totally fine with this, but there's an approval process. Mine took seven days. It's been a while since I've done this before so I can't tell you if that's slow or fast, but if you're planning something like this, you may want to request this as soon as possible. Once you've done that then it's a simple matter of copying your keys into the Pipedream action and then specifying what to tweet. Here's how I did it. (Note, the text in white is a sample value.) Action values used for posting the tweet I believe that this is only an issue for automation of tweets that are sent to users but again, the Pipedream side of this was simple. The only real issue was the week delay in getting the developer account approved. Header photo by Glen Carrie on Unsplash Raymond Camden's Picture About Raymond Camden Raymond is a senior developer evangelist for Adobe. He focuses on document services, JavaScript, and enterprise cat demos. If you like this article, please consider visiting my Amazon Wishlist or donating via PayPal to show your support. You can even buy me a coffee! Lafayette, LA https://www.raymondcamden.com
__label__pos
0.916531
Learn how to retrieve the filename or extensions from a filepath with javascript or php. Retrieve the filename with extension Javascript To retrieve the filename, we will split by the slash caracter /. To achieve this in Javascript, we will use the following Regular Expression /^.*[\\\/]/ // Any filepath, even inverted slash // var filepath = "\\myfilepath\\extensions\\filename.png"; var filepath = "/myfilepath/extensions/filename.png"; // Use the regular expression to replace the non-matching content with a blank space var filenameWithExtension = filepath.replace(/^.*[\\\/]/, ''); // outputs filename.png console.log(filenameWithExtension); The regular expression will return the remaining text from the last / (slash character). As not any file can contain / in his name but \ too, this function will work for all the cases. In case that you don't want to use a regular expression because you're sure that the provided filepaths only use a defined type of slash (either / or \ ), you can split the string by the specified character and then obtain the last item: var filepath = "/myfilepath/extensions/filename.png"; // With a normal slash var group = filepath.split("/"); // or a path with an inverted slash // var group = filepath.split("\\"); // Use the regular expression to replace the non-matching content with a blank space var filenameWithExtension = group.pop(); // Or if you can't use pop() // var filenameWithExtension = group[group.length - 1]; // outputs filename.png console.log(filenameWithExtension); PHP With PHP we will use the basename function which is available in all php versions : $filepath = "/myfilepath/extensions/filename.jpg"; $filename = basename($filepath); // filename.jpg Note: basename has a known bug when processing asian characters, to avoid this you can use a Regular Expression and preg_replace instead : $filepath = "/myfilepath/extensions/filename.jpg"; $filename = preg_replace('/^.+[\\\\\\/]/', '', $filepath); // filename.jpg Retrieve the file extension Javascript To retrieve the extension from a path (or filename, it doesn't matter in this case) with Javascript we will split the string by . character and we will retrieve the last item of the obtained array. var path = "path/a-long/path/to-my/file.jpg"; var path_splitted = path.split('.'); var extension = path_splitted.pop(); // Here the file will not have extension ! if(extension === path){   // The filepath doesn't have . characters, that means doesn't have extension. for example : // if you try with : path/to/my/file/thisisafile   // extension == path/to/my/file/thisisafile } // show extension console.log(extension); PHP To retrieve the extension from a path with php we will use pathinfo function. $path = "this-is-my/long/filepath/to/file.txt"; $ext = pathinfo($path, PATHINFO_EXTENSION);// PATHINFO_EXTENSION is a constant echo $ext; // output txt Note: if pathinfo function is not available in your environment, use end function instead. $path = "this-is-another-long-path/to/my/file.txt"; $array_ofpath = explode('.', $path);//explode returns an array $extension = end($array); Interested in programming since he was 14 years old, Carlos is a self-taught programmer and founder and author of most of the articles at Our Code World. Become a more social person Sponsors
__label__pos
0.73538
Rate problem solving with answers • Home - • Rate problem solving with answers research Rating 9,6 stars - 6092 reviews Mall business plan Rate problem solving with answers Choose from 500 different sets of solving rate problems flashcards answers on quizlet. how long does it take to write 1000 words. the key to solving rate problems is to figure answers out the context of the problem and then identify a formula answers that relates all of the information in the problem. in this problem our context is distance: we have two objects traveling at different rates in opposite directions. if you have a low pulse rate you aren' t getting " light- headed" , dizzy then it' s generally not a problem. if you have any questions about your pulse rate, it would be best to contact your. practice and problem solving: c solve using ratios. a water molecule is formed from two hydrogen atoms and one oxygen atom. fill in the table for 2 5 water molecules. water molecule hydrogen atoms oxygen answers atoms 2. hydrogen peroxide molecules have two hydrogen atoms and two oxygen atoms. what form do problem solving questions take? what a thesis statement looks like. business plan low investment. if problem solving skills are an integral part of your role, it is likely that you will answers have to complete some kind of assessment during the application process. hd law essay. there are a number of forms that a problem solving question can take, but the majority of them will be scenario- based. use rates to solve word problems. for example, charlie can with type 675 words in 9 minutes. how many words can charlie type in 13 minutes? starting at home, umaima traveled uphill to the gift store for 45 minutes at just 8 miles per hour. she then traveled back home along the same path downhill at answers a speed of 24 miles per hour. what is her average speed for the entire trip from home to the gift store and back? so we' re trying to figure. a lot of hiring managers will ask interview questions about your problem- solving skills. here' s a thorough look at the best way to structure your answers. 544 questions for the topic problem solving. the bacteria are killed at a rate of answers 8^ 9 per sec, how many bacteria will be killed after 8^ 2 sec? answers · 2 ( 2x. unit rate word problem worksheet 2 ( decimal quotients) - this 13 problem worksheet features word problems where you will calculate the unit rate for everyday situations like “ meters per second” and “ miles per hour”. integers are given in the problem, but most rate problem solving with answers of the rates will require decimal quotients. rates in geology practice problems practice calculating rates ( and rearranging the rate equation) below using the " rules" that you have just learned. how to write an ap world history essay thesis. answers are provided ( but try doing them on solving your own before peeking! calculating rates problem 1: you wake up at 6 am ( early! ) and the temperature is 55 ° f. the first row gives me the equation d = 30t. since the first part of his trip accounted for d miles of the total 150- mile distance i am left with 150 – d miles rate problem solving with answers , t hours of the total 3- hour time 3 – t hours for the second part. rate problem solving with answers - movie theater reserved seating atlanta in math distance, rate, time are three important concepts you can use to solve many problems if you know the formula. distance is the length of space traveled by a moving object or the length measured. helping kids to deal with homework stress. improve your math knowledge with free questions in " unit rates: word problems" and thousands of other math skills. problem- solving is the skill which you mostly require at the workplace. it is a trait that is required to solve complex and difficult issues with best possible solutions. interviews are a great one of the surest short ways to find the best of the candidates for rate problem solving with answers jobs to know about them in. lesson 23: problem solving using rates unit rates, , conversions student outcomes students solve constant rate work problems by calculating comparing unit rates. materials calculators classwork ( 30 minutes) if work with is being done at a constant rate by one person at a different constant rate by another person, both. do not try to make one equation that accounts for all changes in the problem. when workers asked to answer a question related to the effect of increasing , decreasing the number of workers , machines all work at identical rates, machines - what is the work / rate relationship? below are the different types of powerpoint charts that you can use along with some tips on how to maximize them for your presentations: pie charts. these charts are commonly used to show percentages. for better visual impact, limit the pie slices to 3- 5. powerpoint uses three main file types: presentation template, slide show. for the most part you can construct deliver simple presentations without ever having to deal with the. types of powerpoint animations. additionally, you can use other kind of animations like the emphasis that are good to empathize concepts. by using emphasis animations you can highlight important with content lighten, shrink, apply animation effects like grow , color pulse , teeter other. lastly but not least important . tired of writing for pennies ( whichever cliche for crappy pay rate problem solving with answers you prefer) , peanuts ready to earn money online for real? we’ re tired of it, too. that’ s why rate problem solving with answers carol started paying for posts a few years back — and why she upped her rates to $ 75+ last fall. solving and it’ s why we update our list. sell your books privately in our marketplace, worldwide. all ebook formats are created to work on the most popular ereaders apple, kobo, , ingram, are ready for distribution to amazon more! there is no inventory to buy and no files you need to deliver to distributors. my name is bless david i am a writer up coming you know, i just want to know the right steps to take to get my books online make some money too. you may call or mail me. or com if u have any advice. this month’ s theme for the hustle is people who game the system. this is answers the first of the weekly series. so there’ s a group of people who make a living churning out dozens of lowbrow kindle books a month. i call them kindle gold rushers. some of them make hundreds of thousands of dollars selling. expressing distance or motion. ( now obsolete direction), dialectal) from ( of distance " off". essay on lord of the flies. [ from the 9th c. ] 1485 sir thomas malory, in le morte darthur, chapter x book xiii: sir said galahad by this shelde ben many merueils fallen / sir sayd the knyght hit befelle after the passion of our lord ihesu crist xxxij yere that ioseph of armathye the. google allows users to search rate problem solving with answers the web for images video, products, answers , news other content. of definition separation, deprivation, direction from, ( used to indicate distance etc. ) : within a mile of the church; south of omaha; to be robbed of one' s money. sign in to your online banking account by entering your online with id. skip to main content. en español sign in to online banking. we can' t process your request. online id must be at least. bank of america, n. equal housing lender. What is an essay response how to write bibliography for thesis paper new york times homework should be banned short essay on healthy eating habits essay setting goals :good ways to start a personal narrative essay argumentative thesis statement maker essay on a visit to a village fair, business plan in nepali 5 parts of a paragraph. Starting your own restaurant business plan birthday homework fgm argumentative essay how to start a summary response essay homework isn a waste of time.how to write an ap world history essay thesis essay starters for romeo and juliet bsl homework level 1 elements of a 5 year business plan, my homework help reviews how to present interview findings in a dissertation film analysis essay assignment Essay on marriage is a private affair Thesis on human nature [FULLTEXT] Comments Paula Morenza Excellent ! • solving work- rate problems part i: introduction to solve work- rate problems it is helpful to use a variant of distance equals rate times time. specifically: q rt in this formula q is the quantity or amount of work done, r is the rate of work and t is the time worked. ex 1: if a machine can produce 1 2 2 parts per minute then in: 4 minutes, it. • Comments Eva Pinlo We write your thesis • solving problems using rates is an important skill in mathematics, and this quiz/ worksheet will help you assess your understanding of how to do so and let you put your skills to the test with. solving the problem. • Comments Elea Rightihg Essay on metro rail a boon but what exactly is the problem asking for? Comments Extended essay pop art Poem to write an essay on I am always satisfied with the services provided, and what I like the most is the understanding, which had helped a lot. Comments Annis Slo Thesis level of awareness • let' s go back and see. • Comments Rozita Spainlovish ' two brothers want to purchase the next- generation entertainment system that costs $ 400. learn solving rate problems with free interactive flashcards. Comments Greg Christman Happy with the order. • Happiness essay
__label__pos
0.571777
gdcmDataEntry.cxx Go to the documentation of this file. 00001 /*========================================================================= 00002 00003 Program: gdcm 00004 Module: $RCSfile: gdcmDataEntry.cxx,v $ 00005 Language: C++ 00006 Date: $Date: 2007/08/22 16:14:03 $ 00007 Version: $Revision: 1.45 $ 00008 00009 Copyright (c) CREATIS (Centre de Recherche et d'Applications en Traitement de 00010 l'Image). All rights reserved. See Doc/License.txt or 00011 http://www.creatis.insa-lyon.fr/Public/Gdcm/License.html for details. 00012 00013 This software is distributed WITHOUT ANY WARRANTY; without even 00014 the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR 00015 PURPOSE. See the above copyright notices for more information. 00016 00017 =========================================================================*/ 00018 00019 #include "gdcmDataEntry.h" 00020 #include "gdcmVR.h" 00021 #include "gdcmTS.h" 00022 #include "gdcmGlobal.h" 00023 #include "gdcmUtil.h" 00024 #include "gdcmDebug.h" 00025 00026 #include <fstream> 00027 00028 #if defined(__BORLANDC__) 00029 #include <mem.h> // for memcpy 00030 #include <stdlib.h> // for atof 00031 #include <ctype.h> // for isdigit 00032 #endif 00033 00034 namespace GDCM_NAME_SPACE 00035 { 00036 //----------------------------------------------------------------------------- 00037 #define MAX_SIZE_PRINT_ELEMENT_VALUE 0x7fffffff 00038 uint32_t DataEntry::MaxSizePrintEntry = MAX_SIZE_PRINT_ELEMENT_VALUE; 00039 00040 //----------------------------------------------------------------------------- 00041 // Constructor / Destructor 00048 DataEntry::DataEntry(uint16_t group,uint16_t elem, 00049 VRKey const &vr) 00050 : DocEntry(group,elem,vr) 00051 { 00052 State = STATE_LOADED; 00053 Flag = FLAG_NONE; 00054 00055 StrArea = 0; 00056 BinArea = 0; 00057 SelfArea = true; 00058 } 00059 00064 DataEntry::DataEntry(DocEntry *e) 00065 //: DocEntry(e->GetDictEntry()) 00066 : DocEntry(e->GetGroup(),e->GetElement(), e->GetVR() ) 00067 { 00068 Flag = FLAG_NONE; 00069 BinArea = 0; 00070 00071 SelfArea = true; 00072 00073 Copy(e); 00074 } 00075 00079 DataEntry::~DataEntry () 00080 { 00081 DeleteBinArea(); 00082 00083 } 00084 00085 //----------------------------------------------------------------------------- 00086 // Print 00087 00088 //----------------------------------------------------------------------------- 00089 // Public 00095 void DataEntry::SetBinArea( uint8_t *area, bool self ) 00096 { 00097 DeleteBinArea(); 00098 00099 BinArea = area; 00100 SelfArea = self; 00101 00102 State = STATE_LOADED; 00103 } 00109 void DataEntry::CopyBinArea( uint8_t *area, uint32_t length ) 00110 { 00111 DeleteBinArea(); 00112 00113 uint32_t lgh = length + length%2; 00114 SetLength(lgh); 00115 00116 if( area && length > 0 ) 00117 { 00118 NewBinArea(); 00119 memcpy(BinArea,area,length); 00120 if( length!=lgh ) 00121 BinArea[length]=0; 00122 00123 State = STATE_LOADED; 00124 } 00125 } 00126 00132 void DataEntry::SetValue(const uint32_t &id, const double &val) 00133 { 00134 if( !BinArea ) 00135 NewBinArea(); 00136 State = STATE_LOADED; 00137 00138 if( id > GetValueCount() ) 00139 { 00140 gdcmErrorMacro("Index (" << id << ") is greater than the data size"); 00141 return; 00142 } 00143 00144 const VRKey &vr = GetVR(); 00145 if( vr == "US" || vr == "SS" ) 00146 { 00147 uint16_t *data = (uint16_t *)BinArea; 00148 data[id] = (uint16_t)val; 00149 } 00150 else if( vr == "UL" || vr == "SL" ) 00151 { 00152 uint32_t *data = (uint32_t *)BinArea; 00153 data[id] = (uint32_t)val; 00154 } 00155 else if( vr == "FL" ) 00156 { 00157 float *data = (float *)BinArea; 00158 data[id] = (float)val; 00159 } 00160 else if( vr == "FD" ) 00161 { 00162 double *data = (double *)BinArea; 00163 data[id] = (double)val; 00164 } 00165 else if( Global::GetVR()->IsVROfStringRepresentable(vr) ) 00166 { 00167 gdcmErrorMacro("SetValue on String representable not implemented yet"); 00168 } 00169 else 00170 { 00171 BinArea[id] = (uint8_t)val; 00172 } 00173 } 00180 double DataEntry::GetValue(const uint32_t &id) const 00181 { 00182 if( !BinArea ) 00183 { 00184 if (GetLength() != 0) // avoid stupid messages 00186 gdcmErrorMacro("BinArea not set " << std::hex 00187 << GetGroup() << " " << GetElement() 00188 << " Can't get the value"); 00189 return 0.0; 00190 } 00191 00192 uint32_t count = GetValueCount(); 00193 if( id > count ) 00194 { 00195 gdcmErrorMacro("Index (" << id << ") is greater than the data size"); 00196 return 0.0; 00197 } 00198 00199 // if user *knows* that entry contains a US, 00200 // he just has to cast the double he receives 00201 00202 const VRKey &vr = GetVR(); 00203 00204 if( vr == "US" || vr == "SS" ) 00205 return ((uint16_t *)BinArea)[id]; 00206 else if( vr == "UL" || vr == "SL" ) 00207 return ((uint32_t *)BinArea)[id]; 00208 else if( vr == "FL" ) 00209 return ((float *)BinArea)[id]; 00210 else if( vr == "FD" ) 00211 return ((double *)BinArea)[id]; 00212 else if( Global::GetVR()->IsVROfStringRepresentable(vr) ) 00213 { 00214 // this is for VR = "DS", ... 00215 if( GetLength() ) 00216 { 00217 // Don't use std::string to accelerate processing 00218 double val; 00219 char *tmp = new char[GetLength()+1]; 00220 memcpy(tmp,BinArea,GetLength()); 00221 tmp[GetLength()]=0; 00222 00223 if( count == 0 ) 00224 { 00225 val = atof(tmp); 00226 } 00227 else 00228 { 00229 count = id; 00230 char *beg = tmp; 00231 for(uint32_t i=0;i<GetLength();i++) 00232 { 00233 if( tmp[i] == '\\' ) 00234 { 00235 if( count == 0 ) 00236 { 00237 tmp[i] = 0; 00238 break; 00239 } 00240 else 00241 { 00242 count--; 00243 beg = &(tmp[i+1]); 00244 } 00245 } 00246 } 00247 val = atof(beg); 00248 } 00249 00250 delete[] tmp; 00251 return val; 00252 } 00253 else 00254 return 0.0; 00255 } 00256 else 00257 return BinArea[id]; 00258 } 00259 00263 bool DataEntry::IsValueCountValid() /*const*/ 00264 { 00265 uint32_t vm; 00266 const std::string &strVM = GetVM(); 00267 uint32_t vc = GetValueCount(); 00268 bool valid = vc == 0; 00269 if( valid ) 00270 return true; 00271 00272 // FIXME : what shall we do with VM = "2-n", "3-n", etc 00273 00274 if( strVM == "1-n" ) 00275 { 00276 // make sure there is at least one ??? FIXME 00277 valid = vc >= 1; 00278 } 00279 else 00280 { 00281 std::istringstream os; 00282 os.str( strVM ); 00283 os >> vm; 00284 // Two cases: 00285 // vm respects the one from the dict 00286 // vm is 0 (we need to check if this element is allowed to be empty) FIXME 00287 00288 // note (JPR) 00289 // ---- 00290 // Entries whose type is 1 are mandatory, with a mandatory value. 00291 // Entries whose type is 1c are mandatory-inside-a-Sequence, 00292 // with a mandatory value. 00293 // Entries whose type is 2 are mandatory, with an optional value. 00294 // Entries whose type is 2c are mandatory-inside-a-Sequence, 00295 // with an optional value. 00296 // Entries whose type is 3 are optional. 00297 00298 // case vc == 0 is only applicable for 'type 2' entries. 00299 // Problem : entry type may depend on the modality and/or the Sequence 00300 // it's embedded in ! 00301 // (Get the information in the 'Conformance Statements' ...) 00302 valid = vc == vm; 00303 } 00304 return valid; 00305 } 00306 00310 uint32_t DataEntry::GetValueCount( ) const 00311 { 00312 const VRKey &vr = GetVR(); 00313 if( vr == "US" || vr == "SS" ) 00314 return GetLength()/sizeof(uint16_t); 00315 else if( vr == "UL" || vr == "SL" ) 00316 return GetLength()/sizeof(uint32_t); 00317 else if( vr == "FL" || vr == "OF" ) 00318 return GetLength()/4 ; // FL has a *4* length! sizeof(float); 00319 else if( vr == "FD" ) 00320 return GetLength()/8; // FD has a *8* length! sizeof(double); 00321 else if( Global::GetVR()->IsVROfStringRepresentable(vr) ) 00322 { 00323 // Some element in DICOM are allowed to be empty 00324 if( !GetLength() ) 00325 return 0; 00326 // Don't use std::string to accelerate processing 00327 uint32_t count = 1; 00328 for(uint32_t i=0;i<GetLength();i++) 00329 { 00330 if( BinArea[i] == '\\') 00331 count++; 00332 } 00333 return count; 00334 } 00335 return GetLength(); 00336 } 00337 00343 bool DataEntry::GetDSValue(std::vector <double> &valueVector) 00344 { 00346 std::vector<std::string> tokens; 00347 00348 if (GetVR() != "DS") // never trust a user ! 00349 return false; 00350 00351 Util::Tokenize ( GetString().c_str(), tokens, "\\" ); 00352 00353 int nbValues= tokens.size(); 00354 if (nbValues == 0) 00355 return false; 00356 00357 for (int loop=0; loop<nbValues; loop++) 00358 valueVector.push_back(atof(tokens[loop].c_str())); 00359 00360 return true; 00361 } 00362 00367 void DataEntry::SetString(std::string const &value) 00368 { 00369 DeleteBinArea(); 00370 const VRKey &vr = GetVR(); 00371 if ( vr == "US" || vr == "SS" ) 00372 { 00373 std::vector<std::string> tokens; 00374 Util::Tokenize (value, tokens, "\\"); 00375 SetLength(tokens.size()*sizeof(uint16_t)); 00376 NewBinArea(); 00377 00378 uint16_t *data = (uint16_t *)BinArea; 00379 for (unsigned int i=0; i<tokens.size();i++) 00380 data[i] = atoi(tokens[i].c_str()); 00381 tokens.clear(); 00382 } 00383 else if ( vr == "UL" || vr == "SL" ) 00384 { 00385 std::vector<std::string> tokens; 00386 Util::Tokenize (value, tokens, "\\"); 00387 SetLength(tokens.size()*sizeof(uint32_t)); 00388 NewBinArea(); 00389 00390 uint32_t *data = (uint32_t *)BinArea; 00391 for (unsigned int i=0; i<tokens.size();i++) 00392 data[i] = atoi(tokens[i].c_str()); 00393 tokens.clear(); 00394 } 00395 else if ( vr == "FL" ) 00396 { 00397 std::vector<std::string> tokens; 00398 Util::Tokenize (value, tokens, "\\"); 00399 SetLength(tokens.size()*sizeof(float)); 00400 NewBinArea(); 00401 00402 float *data = (float *)BinArea; 00403 for (unsigned int i=0; i<tokens.size();i++) 00404 data[i] = (float)atof(tokens[i].c_str()); 00405 tokens.clear(); 00406 } 00407 else if ( vr == "FD" ) 00408 { 00409 std::vector<std::string> tokens; 00410 Util::Tokenize (value, tokens, "\\"); 00411 SetLength(tokens.size()*sizeof(double)); 00412 NewBinArea(); 00413 00414 double *data = (double *)BinArea; 00415 for (unsigned int i=0; i<tokens.size();i++) 00416 data[i] = atof(tokens[i].c_str()); 00417 tokens.clear(); 00418 } 00419 else 00420 { 00421 size_t l = value.size(); 00422 SetLength(l + l%2); 00423 NewBinArea(); 00424 memcpy(BinArea, value.c_str(), l); 00425 if (l%2) // padded with blank except for UI 00426 if ( vr == "UI" ) 00427 BinArea[l] = '\0'; 00428 else 00429 BinArea[l] = ' '; 00430 } 00431 State = STATE_LOADED; 00432 } 00436 std::string const &DataEntry::GetString() const 00437 { 00438 static std::ostringstream s; 00439 const VRKey &vr = GetVR(); 00440 s.str(""); 00441 00442 if (!StrArea) 00443 StrArea = new std::string(); 00444 else 00445 *StrArea=""; 00446 00447 if( !BinArea ) 00448 return *StrArea; 00449 // When short integer(s) are stored, convert the following (n * 2) characters 00450 // as a displayable string, the values being separated by a back-slash 00451 if( vr == "US" ) 00452 { 00453 uint16_t *data=(uint16_t *)BinArea; 00454 for (unsigned int i=0; i < GetValueCount(); i++) 00455 { 00456 if( i!=0 ) 00457 s << '\\'; 00458 s << data[i]; 00459 } 00460 *StrArea=s.str(); 00461 } 00462 else if (vr == "SS" ) 00463 { 00464 int16_t *data=(int16_t *)BinArea; 00465 for (unsigned int i=0; i < GetValueCount(); i++) 00466 { 00467 if( i!=0 ) 00468 s << '\\'; 00469 s << data[i]; 00470 } 00471 *StrArea=s.str(); 00472 } // See above comment on multiple short integers (mutatis mutandis). 00473 else if( vr == "UL" ) 00474 { 00475 uint32_t *data=(uint32_t *)BinArea; 00476 for (unsigned int i=0; i < GetValueCount(); i++) 00477 { 00478 if( i!=0 ) 00479 s << '\\'; 00480 s << data[i]; 00481 } 00482 *StrArea=s.str(); 00483 } 00484 else if( vr == "SL" ) 00485 { 00486 int32_t *data=(int32_t *)BinArea; 00487 for (unsigned int i=0; i < GetValueCount(); i++) 00488 { 00489 if( i!=0 ) 00490 s << '\\'; 00491 s << data[i]; 00492 } 00493 *StrArea=s.str(); 00494 } else if( vr == "FL" ) 00495 { 00496 float *data=(float *)BinArea; 00497 for (unsigned int i=0; i < GetValueCount(); i++) 00498 { 00499 if( i!=0 ) 00500 s << '\\'; 00501 s << data[i]; 00502 } 00503 *StrArea=s.str(); 00504 } 00505 else if( vr == "FD" ) 00506 { 00507 double *data=(double *)BinArea; 00508 for (unsigned int i=0; i < GetValueCount(); i++) 00509 { 00510 if( i!=0 ) 00511 s << '\\'; 00512 s << data[i]; 00513 } 00514 *StrArea=s.str(); 00515 } 00516 else 00517 { 00518 StrArea->append((const char *)BinArea,GetLength()); 00519 // to avoid gdcm to propagate oddities in lengthes 00520 if ( GetLength()%2) 00521 StrArea->append(" ",1); } 00522 return *StrArea; 00523 } 00524 00525 00531 void DataEntry::Copy(DocEntry *doc) 00532 { 00533 DocEntry::Copy(doc); 00534 00535 DataEntry *entry = dynamic_cast<DataEntry *>(doc); 00536 if ( entry ) 00537 { 00538 State = entry->State; 00539 Flag = entry->Flag; 00540 CopyBinArea(entry->BinArea,entry->GetLength()); 00541 } 00542 } 00543 00549 void DataEntry::WriteContent(std::ofstream *fp, FileType filetype, 00550 bool insideMetaElements) 00551 { 00552 // writes the 'common part' 00553 DocEntry::WriteContent(fp, filetype, insideMetaElements); 00554 00555 if ( GetGroup() == 0xfffe ) 00556 { 00557 return; //delimitors have NO value 00558 } 00559 00560 // --> We only deal with Little Endian writting. 00561 // --> forget Big Endian Transfer Syntax writting! 00562 // Next DICOM version will give it up ... 00563 00564 // WARNING - For Implicit VR private element, 00565 // we have *no choice* but considering them as 00566 // something like 'OB' values. 00567 // we rewrite them as we found them on disc. 00568 // Some trouble will occur if element was 00569 // *actually* OW, if image was produced 00570 // on Big endian based processor, read and writen 00571 // on Little endian based processor 00572 // and, later on, somebody needs 00573 // this 'OW' Implicit VR private element (?!?) 00574 // (Same stuff, mutatis mutandis, for Little/Big) 00575 00576 // 8/16 bits Pixels problem should be solved automatiquely, 00577 // since we ensure the VR (OB vs OW) is conform to Pixel size. 00578 00579 uint8_t *data = BinArea; //safe notation 00580 size_t l = GetLength(); 00581 // gdcmDebugMacro("in DataEntry::WriteContent " << GetKey() << " AtomicLength: " 00582 // << Global::GetVR()->GetAtomicElementLength(this->GetVR() ) // << " BinArea in :" << &BinArea 00583 // ); 00584 if (BinArea) // the binArea was *actually* loaded 00585 { 00586 #if defined(GDCM_WORDS_BIGENDIAN) || defined(GDCM_FORCE_BIGENDIAN_EMULATION) 00587 unsigned short vrLgth = 00588 Global::GetVR()->GetAtomicElementLength(this->GetVR()); 00589 unsigned int i; 00590 switch(vrLgth) 00591 { 00592 case 1: 00593 { 00594 binary_write (*fp, data, l ); 00595 break; 00596 } 00597 case 2: 00598 { 00599 uint16_t *data16 = (uint16_t *)data; 00600 for(i=0;i<l/vrLgth;i++) 00601 binary_write( *fp, data16[i]); 00602 break; 00603 } 00604 case 4: 00605 { 00606 uint32_t *data32 = (uint32_t *)data; 00607 for(i=0;i<l/vrLgth;i++) 00608 binary_write( *fp, data32[i]); 00609 break; 00610 } 00611 case 8: 00612 { 00613 double *data64 = (double *)data; 00614 for(i=0;i<l/vrLgth;i++) 00615 binary_write( *fp, data64[i]); 00616 break; 00617 } 00618 } 00619 #else 00620 binary_write (*fp, data, l ); 00621 #endif //GDCM_WORDS_BIGENDIAN 00622 00623 } 00624 else 00625 { 00626 // nothing was loaded, but we need to skip space on disc 00627 if (l != 0) 00628 { 00629 // --> WARNING : nothing is written; 00630 // --> the initial data (on the the source image) is lost 00631 // --> user is *not* informed ! 00632 gdcmDebugMacro ("Nothing was loaded, but we need to skip space on disc. " 00633 << "Length =" << l << " for " << GetKey() ); 00634 fp->seekp(l, std::ios::cur); // At Write time, for unloaded elems 00635 } 00636 } 00637 // to avoid gdcm to propagate oddities 00638 // (length was already modified) 00639 if (l%2) 00640 fp->seekp(1, std::ios::cur); // At Write time, for non even length elems 00641 } 00642 00647 uint32_t DataEntry::ComputeFullLength() 00648 { 00649 return GetFullLength(); 00650 } 00651 00652 //----------------------------------------------------------------------------- 00653 // Protected 00656 void DataEntry::NewBinArea( ) 00657 { 00658 DeleteBinArea(); 00659 if( GetLength() > 0 ) 00660 BinArea = new uint8_t[GetLength()]; 00661 SelfArea = true; 00662 } 00665 void DataEntry::DeleteBinArea(void) 00666 { 00667 if (BinArea && SelfArea) 00668 { 00669 delete[] BinArea; 00670 BinArea = NULL; 00671 } 00672 if (StrArea) 00673 { 00674 delete StrArea; 00675 StrArea = 0; 00676 } 00677 } 00678 00679 //----------------------------------------------------------------------------- 00680 // Private 00681 00682 //----------------------------------------------------------------------------- 00683 // Print 00689 void DataEntry::Print(std::ostream &os, std::string const & ) 00690 { 00691 os << "D "; 00692 DocEntry::Print(os); 00693 00694 uint16_t g = GetGroup(); 00695 if (g == 0xfffe) // delimiters have NO value 00696 { 00697 return; // just to avoid identing all the remaining code 00698 } 00699 00700 std::ostringstream s; 00701 TSAtr v; 00702 00703 if( BinArea ) 00704 { 00705 v = GetString(); 00706 const VRKey &vr = GetVR(); 00707 00708 if( vr == "US" || vr == "SS" || vr == "UL" || vr == "SL" 00709 || vr == "FL" || vr == "FD") 00710 s << " [" << GetString() << "]"; 00711 else 00712 { 00713 if(Global::GetVR()->IsVROfStringRepresentable(vr)) 00714 { 00715 // replace non printable characters by '.' 00716 std::string cleanString = Util::CreateCleanString(v); 00717 if ( cleanString.length() <= GetMaxSizePrintEntry() 00718 || PrintLevel >= 3 00719 || IsNotLoaded() ) 00720 // FIXME : when IsNotLoaded(), you create a Clean String ?!? 00721 // FIXME : PrintLevel<2 *does* print the values 00722 // (3 is only for extra offsets printing) 00723 // What do you wanted to do ? JPR 00724 { 00725 s << " [" << cleanString << "]"; 00726 } 00727 else 00728 { 00729 s << " [GDCM_NAME_SPACE::too long for print (" << cleanString.length() << ") ]"; 00730 } 00731 } 00732 else 00733 { 00734 // A lot of Private elements (with no VR) contain actually 00735 // only printable characters; 00736 // Let's deal with them as is they were VR std::string representable 00737 00738 if ( Util::IsCleanArea( GetBinArea(), GetLength() ) ) 00739 { 00740 // FIXME : since the 'Area' *is* clean, just use 00741 // a 'CreateString' method, to save CPU time. 00742 std::string cleanString = 00743 Util::CreateCleanString( BinArea,GetLength() ); 00744 s << " [" << cleanString << "]"; 00745 } 00746 else 00747 { 00748 s << " [" << GDCM_BINLOADED << ";" 00749 << "length = " << GetLength() << "]"; 00750 } 00751 } 00752 } 00753 } 00754 else 00755 { 00756 if( IsNotLoaded() ) 00757 s << " [" << GDCM_NOTLOADED << "]"; 00758 else if( IsUnfound() ) 00759 s << " [" << GDCM_UNFOUND << "]"; 00760 else if( IsUnread() ) 00761 s << " [" << GDCM_UNREAD << "]"; 00762 else if ( GetLength() == 0 ) 00763 s << " []"; 00764 } 00765 00766 if( IsPixelData() ) 00767 s << " (" << GDCM_PIXELDATA << ")"; 00768 00769 // Display the UID value (instead of displaying only the rough code) 00770 // First 'clean' trailing character (space or zero) 00771 if(BinArea) 00772 { 00773 const uint16_t &gr = GetGroup(); 00774 const uint16_t &elt = GetElement(); 00775 TS *ts = Global::GetTS(); 00776 00777 if (gr == 0x0002) 00778 { 00779 // Any more to be displayed ? 00780 if ( elt == 0x0010 || elt == 0x0002 ) 00781 { 00782 if ( v.length() != 0 ) // for brain damaged headers 00783 { 00784 if ( ! isdigit((unsigned char)v[v.length()-1]) ) 00785 { 00786 v.erase(v.length()-1, 1); 00787 } 00788 } 00789 s << " ==>\t[" << ts->GetValue(v) << "]"; 00790 } 00791 } 00792 else if (gr == 0x0008) 00793 { 00794 if ( elt == 0x0016 || elt == 0x1150 ) 00795 { 00796 if ( v.length() != 0 ) // for brain damaged headers 00797 { 00798 if ( ! isdigit((unsigned char)v[v.length()-1]) ) 00799 { 00800 v.erase(v.length()-1, 1); 00801 } 00802 } 00803 s << " ==>\t[" << ts->GetValue(v) << "]"; 00804 } 00805 } 00806 else if (gr == 0x0004) 00807 { 00808 if ( elt == 0x1510 || elt == 0x1512 ) 00809 { 00810 if ( v.length() != 0 ) // for brain damaged headers 00811 { 00812 if ( ! isdigit((unsigned char)v[v.length()-1]) ) 00813 { 00814 v.erase(v.length()-1, 1); 00815 } 00816 } 00817 s << " ==>\t[" << ts->GetValue(v) << "]"; 00818 } 00819 } 00820 } 00821 00822 os << s.str(); 00823 } 00824 00825 //----------------------------------------------------------------------------- 00826 } // end namespace gdcm 00827 Generated on Fri Aug 24 12:53:08 2007 for gdcm by  doxygen 1.4.6
__label__pos
0.517343
Creating and using an IAM policy for IAM database access - Amazon Relational Database Service Creating and using an IAM policy for IAM database access To allow a user or role to connect to your DB instance, you must create an IAM policy. After that, you attach the policy to a permissions set or role. Note To learn more about IAM policies, see Identity and access management for Amazon RDS. The following example policy allows a user to connect to a DB instance using IAM database authentication. { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "rds-db:connect" ], "Resource": [ "arn:aws:rds-db:us-east-2:1234567890:dbuser:db-ABCDEFGHIJKL01234/db_user" ] } ] } Important A user with administrator permissions can access DB instances without explicit permissions in an IAM policy. If you want to restrict administrator access to DB instances, you can create an IAM role with the appropriate, lesser privileged permissions and assign it to the administrator. Note Don't confuse the rds-db: prefix with other RDS API operation prefixes that begin with rds:. You use the rds-db: prefix and the rds-db:connect action only for IAM database authentication. They aren't valid in any other context. The example policy includes a single statement with the following elements: • Effect – Specify Allow to grant access to the DB instance. If you don't explicitly allow access, then access is denied by default. • Action – Specify rds-db:connect to allow connections to the DB instance. • Resource – Specify an Amazon Resource Name (ARN) that describes one database account in one DB instance. The ARN format is as follows. arn:aws:rds-db:region:account-id:dbuser:DbiResourceId/db-user-name In this format, replace the following: • region is the AWS Region for the DB instance. In the example policy, the AWS Region is us-east-2. • account-id is the AWS account number for the DB instance. In the example policy, the account number is 1234567890. The user must be in the same account as the account for the DB instance. To perform cross-account access, create an IAM role with the policy shown above in the account for the DB instance and allow your other account to assume the role. • DbiResourceId is the identifier for the DB instance. This identifier is unique to an AWS Region and never changes. In the example policy, the identifier is db-ABCDEFGHIJKL01234. To find a DB instance resource ID in the AWS Management Console for Amazon RDS, choose the DB instance to see its details. Then choose the Configuration tab. The Resource ID is shown in the Configuration section. Alternatively, you can use the AWS CLI command to list the identifiers and resource IDs for all of your DB instance in the current AWS Region, as shown following. aws rds describe-db-instances --query "DBInstances[*].[DBInstanceIdentifier,DbiResourceId]" If you are using Amazon Aurora, specify a DbClusterResourceId instead of a DbiResourceId. For more information, see Creating and using an IAM policy for IAM database access in the Amazon Aurora User Guide. Note If you are connecting to a database through RDS Proxy, specify the proxy resource ID, such as prx-ABCDEFGHIJKL01234. For information about using IAM database authentication with RDS Proxy, see Connecting to a proxy using IAM authentication. • db-user-name is the name of the database account to associate with IAM authentication. In the example policy, the database account is db_user. You can construct other ARNs to support various access patterns. The following policy allows access to two different database accounts in a DB instance. { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "rds-db:connect" ], "Resource": [ "arn:aws:rds-db:us-east-2:123456789012:dbuser:db-ABCDEFGHIJKL01234/jane_doe", "arn:aws:rds-db:us-east-2:123456789012:dbuser:db-ABCDEFGHIJKL01234/mary_roe" ] } ] } The following policy uses the "*" character to match all DB instances and database accounts for a particular AWS account and AWS Region. { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "rds-db:connect" ], "Resource": [ "arn:aws:rds-db:us-east-2:1234567890:dbuser:*/*" ] } ] } The following policy matches all of the DB instances for a particular AWS account and AWS Region. However, the policy only grants access to DB instances that have a jane_doe database account. { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "rds-db:connect" ], "Resource": [ "arn:aws:rds-db:us-east-2:123456789012:dbuser:*/jane_doe" ] } ] } The user or role has access to only those databases that the database user does. For example, suppose that your DB instance has a database named dev, and another database named test. If the database user jane_doe has access only to dev, any users or roles that access that DB instance with the jane_doe user also have access only to dev. This access restriction is also true for other database objects, such as tables, views, and so on. An administrator must create IAM policies that grant entities permission to perform specific API operations on the specified resources they need. The administrator must then attach those policies to the permission sets or roles that require those permissions. For examples of policies, see Identity-based policy examples for Amazon RDS. Attaching an IAM policy to a permission set or role After you create an IAM policy to allow database authentication, you need to attach the policy to a permission set or role. For a tutorial on this topic, see Create and attach your first customer managed policy in the IAM User Guide. As you work through the tutorial, you can use one of the policy examples shown in this section as a starting point and tailor it to your needs. At the end of the tutorial, you have a permission set with an attached policy that can make use of the rds-db:connect action. Note You can map multiple permission sets or roles to the same database user account. For example, suppose that your IAM policy specified the following resource ARN. arn:aws:rds-db:us-east-2:123456789012:dbuser:db-12ABC34DEFG5HIJ6KLMNOP78QR/jane_doe If you attach the policy to Jane, Bob, and Diego, then each of those users can connect to the specified DB instance using the jane_doe database account.
__label__pos
0.862357
YXTS122 2017-03-22 21:58 采纳率: 100% 浏览 2.5k getCount和getView方法怎么多次被调用 运行如下: 图片说明 点击按钮,情况是这样的 图片说明 图片说明 执行gridView.setAdapter(adapter);时会去调用getCount()方法,但后面getCount和getView方法怎么多次被调用,什么情况下调用的?不是说有9张图就只调用9次getView方法么? 然后任意点击9张图的其中一张,LogCat如下: 图片说明 咦?怎么getItem方法没被调用到? 还有data.getIntExtra("imageId",R.drawable.abc_ab_bottom_solid_dark_holo ) 怎么不能这样写data.getIntExtra("imageId")?不是说键值存储,得到键,就得到了值么? 代码在下面的回答 • 点赞 • 写回答 • 关注问题 • 收藏 • 邀请回答 3条回答 默认 最新 • fcwxin 2017-03-23 08:32 已采纳 你把gridview的宽高设为定值,或者设置为match_parent 试一下 点赞 打赏 评论 • YXTS122 2017-03-22 22:02 然后任意点击9张图的其中一张,LogCat如下: 图片说明 MainActivity.java package com.example.gridviewdemo; import android.app.Activity; import android.content.Intent; import android.os.Bundle; import android.view.Menu; import android.view.MenuItem; import android.view.View; import android.view.View.OnClickListener; import android.widget.Button; import android.widget.ImageView; public class MainActivity extends Activity implements OnClickListener { private Button button01; private ImageView view; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); button01=(Button)findViewById(R.id.bt1); view=(ImageView)findViewById(R.id.iv1); button01.setOnClickListener(this); } @Override public void onClick(View v) { switch(v.getId()) { case R.id.bt1: Intent intent=new Intent(MainActivity.this,ResultActivity.class); MainActivity.this.startActivityForResult(intent,1); } } protected void onActivityResult(int requestCode,int resultCode,Intent data) { if (requestCode==1&&resultCode==Activity.RESULT_OK) { int imageId=data.getIntExtra("imageId",R.drawable.abc_ab_bottom_solid_dark_holo); view.setImageResource(imageId); } } @Override public boolean onCreateOptionsMenu(Menu menu) { // Inflate the menu; this adds items to the action bar if it is present. getMenuInflater().inflate(R.menu.main, menu); return true; } @Override public boolean onOptionsItemSelected(MenuItem item) { // Handle action bar item clicks here. The action bar will // automatically handle clicks on the Home/Up button, so long // as you specify a parent activity in AndroidManifest.xml. int id = item.getItemId(); if (id == R.id.action_settings) { return true; } return super.onOptionsItemSelected(item); } } ResultActivity.java package com.example.gridviewdemo; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import android.app.Activity; import android.content.Intent; import android.os.Bundle; import android.util.Log; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.AdapterView; import android.widget.AdapterView.OnItemClickListener; import android.widget.BaseAdapter; import android.widget.GridView; import android.widget.ImageView; import android.widget.TextView; public class ResultActivity extends Activity { private GridView gridView; private int[] images={R.drawable.abc_ic_clear,R.drawable.abc_textfield_search_selected_holo_light,R.drawable.abc_tab_selected_pressed_holo, R.drawable.abc_menu_hardkey_panel_holo_light,R.drawable.abc_spinner_ab_holo_dark,R.drawable.abc_list_selector_holo_light, R.drawable.abc_menu_dropdown_panel_holo_dark,R.drawable.abc_textfield_search_selected_holo_light,R.drawable.abc_ab_solid_dark_holo }; private List<Map<String,Object>> list; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_result); gridView=(GridView)findViewById(R.id.gv1); list=getResource(); MyBaseAdapter adapter=new MyBaseAdapter(); gridView.setAdapter(adapter); gridView.setOnItemClickListener (new OnItemClickListener() { @Override public void onItemClick(AdapterView<?> parent,View view,int position,long id) { Intent intent=new Intent(); intent.putExtra("imageId",images[position]); setResult(Activity.RESULT_OK,intent); ResultActivity.this.finish(); } }); } public class MyBaseAdapter extends BaseAdapter { @Override public int getCount() { Log.e("ResultActivity","getCount"); return list.size(); } @Override public Object getItem(int position) { Log.e("ResultActivity","getItem"); return list.get(position); } @Override public long getItemId(int position) { Log.e("ResultActivity","getItemId"); return position; } @Override public View getView(int position,View convertView,ViewGroup parent) { ViewHolder viewHolder=null; Log.e("ResultActivity","getView"); if (convertView==null) { LayoutInflater layoutInflater=LayoutInflater.from(ResultActivity.this ); convertView=layoutInflater.inflate(R.layout.activity_item,null); viewHolder=new ViewHolder(); viewHolder.imageView=(ImageView)convertView.findViewById(R.id.iv2); viewHolder.textView=(TextView)convertView.findViewById(R.id.tv2); convertView.setTag(viewHolder); } else { viewHolder=(ViewHolder)convertView.getTag(); } viewHolder.imageView.setImageResource((Integer)list.get(position).get("images")); viewHolder.textView.setText((CharSequence)list.get(position).get("text")); return convertView; } } static class ViewHolder { ImageView imageView; TextView textView; } public List<Map<String,Object>> getResource() { List<Map<String,Object>> list=new ArrayList<Map<String,Object>>(); for (int i=0;i<images.length;i++) { Map<String,Object> map=new HashMap<String,Object>(); map.put("images",images[i]); map.put("text","图--"+i); list.add(map); } return list; } } 点赞 打赏 评论 • YXTS122 2017-03-22 22:07 图片说明 图片说明 图片说明 点赞 打赏 评论 相关推荐 更多相似问题
__label__pos
0.864557
fann_create_standard_array (PECL fann >= 1.0.0) fann_create_standard_arrayCreates a standard fully connected backpropagation neural network using an array of layer sizes Beschreibung fann_create_standard_array(int $num_layers, array $layers): resource Creates a standard fully connected backpropagation neural network. There will be a bias neuron in each layer (except the output layer), and this bias neuron will be connected to all neurons in the next layer. When running the network, the bias nodes always emits 1. To destroy a neural network use the fann_destroy() function. Parameter-Liste num_layers The total number of layers including the input and the output layer. layers An array of layer sizes. Rückgabewerte Returns a neural network resource on success, or false on error. Siehe auch • fann_create_standard() - Creates a standard fully connected backpropagation neural network • fann_create_sparse() - Creates a standard backpropagation neural network, which is not fully connected • fann_create_shortcut() - Creates a standard backpropagation neural network which is not fully connectected and has shortcut connections add a note add a note User Contributed Notes There are no user contributed notes for this page. To Top
__label__pos
0.852299
What are the Methods to Bypass Mega Download Limit? Mega.nz is a cloud storage and file hosting service that offers a generous amount of free storage space, making it a popular choice among users. However, Mega imposes a download limit that restricts users from downloading more than a certain amount of data per day. This can be frustrating for users who need to download large files or multiple files in a single day. In this article, we will explore whether it is possible to bypass the Mega download limit and if so, how to do it.Mega Is it possible to bypass the Mega download limit? The short answer is yes, it is possible to bypass the Mega download limit. However, doing so requires some technical knowledge and is not recommended for casual users. There are several ways to bypass the Mega download limit, including using a VPN or a download manager. However, these methods are not foolproof and may not work in all situations. Using a VPN to bypass the Mega download limit: One way to bypass the Mega download limit is to use a Virtual Private Network (VPN). A VPN encrypts your internet traffic and hides your IP address, making it appear as though you are accessing the internet from a different location. This can help you bypass restrictions imposed by Mega, as the service may not be able to detect that you are downloading files from the same IP address. To use a VPN to bypass the Mega download limit, follow these steps: 1. Choose a reputable VPN provider and subscribe to their service. 2. Download and install the VPN software on your computer or mobile device. 3. Connect to a server located in a different country or region. 4. Open Mega.nz and start downloading files. Note that while using a VPN can help you bypass the Mega download limit, it may also slow down your internet connection due to the added encryption and routing through a remote server. Using a download manager to bypass the Mega download limit: Another way to bypass the Mega download limit is to use a download manager. A download manager is a software application that can help you manage and accelerate downloads by splitting files into multiple parts and downloading them simultaneously. To use a download manager to bypass the Mega download limit, follow these steps: 1. Choose a download manager that supports Mega.nz, such as JDownloader. 2. Download and install the download manager on your computer. 3. Copy the download link from Mega.nz and paste it into the download manager. 4. Start the download in the download manager. Note that while using a download manager can help you bypass the Mega download limit, it may also violate Mega’s terms of service and could result in your account being suspended or terminated. FAQ: Is it legal to bypass the Mega download limit? It is not illegal to bypass the Mega download limit, but it may violate the service’s terms and could result in your account being suspended or terminated. How can I check my Mega download limit? Mega displays your remaining download quota in the lower left corner of the browser window when you are logged in. Can I use multiple VPNs or download managers to bypass the Mega download limit? Using multiple VPNs or download managers is not recommended, as it may slow down your internet connection or violate Mega’s terms of service. Conclusion: While bypassing the Mega download limit using a VPN or a download manager is possible, these methods are not foolproof and may not work in all situations. It is important to use these methods responsibly and to be aware of the potential risks, including violating Mega’s terms of service and risking account suspension or termination Leave a Reply Your email address will not be published. Required fields are marked * This site uses Akismet to reduce spam. Learn how your comment data is processed.
__label__pos
0.990622
How to: Filter Data for Database Synchronization (SQL Server) This topic shows you how to create filters that can be used with Sync Framework to synchronize SQL Server, SQL Azure, and SQL Server Compact databases. The examples in this topic focus on the following Sync Framework classes and members: SqlSyncScopeProvisioning PopulateFromTemplate(String, String) FilterClause FilterParameters For more information about how to run the sample code, see "Example Applications in the How to Topics" in Synchronizing SQL Server and SQL Server Compact. Understanding Filters A filter is used to control the items that are enumerated by the source provider in a synchronization session. When items are enumerated and sent to the destination provider, the source provider includes an item only when the item passes the filter. A filter is typically based on the value of one or more fields, so that a row is passed by the filter only when its filter fields meet the filter requirements. Sync Framework enables you to create two types of filters: static filters and parameter-based filters. Static filters are defined as part of a synchronization scope and define the values that the filter fields must contain in order to be passed by the filter. Static filters are encoded in the stored procedure that is used by the source database to enumerate changes for the scope. After a static filter has been defined it cannot be changed. For more information and an example of how to use a static filter, see How to: Execute Database Synchronization (SQL Server). Parameter-based filters are defined by a filter clause and a set of parameters that map to table columns in the synchronization scope. A parameter-based filter is defined in two stages. The first stage defines the filter clause and the parameters and establishes the description of the scope associated with the filter. In this stage, the filter and scope are in a template format only. The second stage sets the parameter values for the filter and creates the synchronization scope from the template. The scope created in this stage is the scope that a destination provider uses to synchronize with the source database. The source database for parameter-based filtering can be either a SQL Server or SQL Azure database, and the destination database can be a SQL Server, SQL Azure, or SQL Server Compact database. In a typical scenario for filtering, a database administrator or application developer defines a parameter-based filter and readies the server database for filtered synchronization. He can also optionally create a simple tool, such as a web-based subscription tool, that uses Sync Framework objects to let users specify their filter parameter values and subscribe their client databases for synchronization. By creating a subscription tool, the database administrator does not have to be involved in creating filters for individual users. Instead, users use the tool to specify the parameter values that are appropriate for them, and subscribe to synchronization on an as-needed basis. An example process for setting up filtered synchronization is as follows: 1. You have a database of customer information that contains columns for the type of customer and the state where the customer is located. 2. You define a filter template that is based on two filter parameters: customer type and state. You specify that a row passes the filter only when its customer type and state values equal the parameter values. 3. A particular salesperson wants to synchronize her client database with your server. She requests the creation of a filtered scope with parameter values for retail customers in Washington, and synchronizes. 4. She receives just the customer data that she wants, reducing both network traffic and the amount of memory used on her client database. 5. Another salesperson requests a filtered scope that passes wholesale customers in Delaware, and synchronizes. He receives just the customer data he wants. Be aware that parameter-based filters are appropriate only when items will not move into or out of the filter scope, such as when the value of a field changes so that an item that was previously passed by the filter is no longer passed by the filter. Changes of this kind will not be correctly propagated throughout the synchronization community. For example, a filter is defined based on a column for salesperson name. Salesperson A creates a scope filtered on his name and synchronizes data to his client database. His manager reassigns one of his customers to salesperson B. When salesperson A synchronizes again with the database, the reassigned customer will still appear in his client database with out-of-date data, instead of being removed. Creating a Parameter-based Filter Parameter-based filters are created in two steps. First, filter and scope templates are defined. Then, a filtered scope is created that has specific values for the filter parameters. This two-step process has the following advantages: • Easy to set up. A filter template is defined one time. Creating a filter template is the only action that requires permission to create stored procedures in the database server. • Easy to subscribe. Clients specify parameter values to create and subscribe to filtered scopes on an as-needed basis. This step requires only permission to insert rows in synchronization tables in the database server. • **Easy to maintain.**Even when several parameters are combined and lots of filtered scopes are created, maintenance is simple because a single, parameter-based procedure is used to enumerate changes. Defining a Filter Template The first step to creating a parameter-based filter is to define a filter template that can later be used to create filtered scopes. A filter template is stored in the source database and requires creation of synchronization tables and stored procedures. Therefore appropriate permissions are needed in the source database. A filter template is defined together with a definition of a synchronization scope. You define a filter template for a table in the scope as follows: • Add a filter column to a SqlSyncTableProvisioning object in the synchronization scope by using AddFilterColumn(String). This adds the filter column to the tracking table that tracks changes for the base table. • Define one or more filter parameters by adding SqlParameter objects to the FilterParameters collection of the SqlSyncTableProvisioning object. This adds the specified parameters to the argument list of the stored procedure that enumerates changes during synchronization. • Add a filter clause that defines the relationship between parameter values and column values by setting the FilterClause property of the SqlSyncTableProvisioning object. The filter clause is a WHERE clause without the WHERE keyword. The [side] alias is an alias for the tracking table. The parameters match the parameters specified in the FilterParameters collection. At this point you are only defining the relationship between the filter parameters and columns. The actual values for the parameters will be specified later, when the filtered scope is created. The filter and scope templates are then applied to the source database by using the Apply method of the SqlSyncScopeProvisioning object, at which point the appropriate synchronization tables and stored procedures are created. In the filter clause, the aliases [base] and [side] are defined by Sync Framework. [base] refers to the base name for the table and [side] refers to the change-tracking table. For example, the Customer table is filtered based on the CustomerType column. By default, [base] is an alias for [Customer] and [side] is an alias for [Customer_tracking]. Because the CustomerType column exists in both the base and tracking tables, references to it must be qualified in the filter clause; otherwise, the column is ambiguous and an error will occur. You can also use the actual table names instead of the [base] and [side] aliases, such as [Customer_tracking].[CustomerType] = @customertype. The following example defines a filter template and applies it to the source database: Creating a Filtered Scope Before a client can use a filter to synchronize with the server, the client must first define specific values for the filter parameters. To do this, the client first populates a SqlSyncScopeProvisioning object from the filter template on the server, and names the filtered scope, by calling PopulateFromTemplate(String, String). The client then defines the filter parameter values by setting the value properties of the FilterParameters collection members in the SqlSyncTableProvisioning object. Finally, the client applies the filtered scope to the server by calling the Apply method of the SqlSyncScopeProvisioning object. Applying a filtered scope to the server database adds rows to synchronization tables, requiring only permission to insert rows to these tables. After the filtered scope has been specified on the server database, you provision a client database by calling GetDescriptionForScope to get the scope description for the named scope, loading the scope description into a SqlSyncScopeProvisioning object, and applying the scope description to the client database by calling Apply. The code to define filter parameter values, apply the newly specified filter to the server database, and provision the client database can be easily encapsulated in a separate tool that gathers filter parameter values from a user and subscribes the user's client database for filtered synchronization. The following example defines a parameter value for a filter, applies it to the server database, and provisions the client database with the filtered scope to ready it for synchronization: Synchronizing a Client by Using a Filtered Scope After the filtered scope has been defined and the client database is provisioned, the client can be synchronized by creating SqlSyncProvider objects for the filtered scope in the client and server databases, associating the providers with a SyncOrchestrator object, and by calling the Synchronize method. The following example performs a filtered synchronization of two databases: Example The following example includes the code examples that are described earlier and additional code to perform synchronization. The example requires the Utility class that is available in Utility Class for Database Provider How-to Topics. See Also Concepts Synchronizing SQL Server and SQL Server Compact
__label__pos
0.815997
Domicile > W > What Is Jquery And Why It Is Used? What is jQuery and why it is used? jQuery is a lightweight, "write less, do more", JavaScript library. The purpose of jQuery is to make it much easier to use JavaScript on your website. jQuery takes a lot of common tasks that require many lines of JavaScript code to accomplish, and wraps them into methods that you can call with a single line of code. Lire la suite Article associé What is JavaScript and why it is used? Javascript est utilisé par les programmeurs du monde entier pour créer du contenu web dynamique et interactif. Javascript est utilisé comme langage de programmation côté client par 97,0% des sites Web, ce qui en fait le langage de programmation le plus populaire au monde. Is jQuery the same as JavaScript? The main difference among the three is that JavaScript is client-side, i.e., in the browser scripting language, whereas jQuery is a library (or framework) built with JavaScript. Meanwhile, AJAX is a method to immediately update parts of the UI without reloading the web pages. What is jQuery code? What is jQuery? jQuery is a fast, small, and feature-rich JavaScript library. It makes things like HTML document traversal and manipulation, event handling, animation, and Ajax much simpler with an easy-to-use API that works across a multitude of browsers. Is jQuery front end or backend? Both bootstrap and jquery are used in web development and primarily for the frontend development. As code of bootstrap and jquery majorly executed at client end so also responsible for style and look and feel of the UI. What can I do with jQuery? JQuery can be used to develop Ajax based applications. It can be used to make code simple, concise and reusable. It simplifies the process of traversal of HTML DOM tree. It can also handle events, perform animation and add ajax support in web applications. Article associé What is MySQL and why it is used? Il s'agit d'un système de gestion de base de données open source. Il vous permet de stocker toutes vos informations dans des tables distinctes et de les relier par des clés. How do I run a jQuery script? How to execute jQuery Code ? 1. Download the jQuery library from the official website. 2. Use online the jQuery CDN links. Is Ajax the same as jQuery? The key difference between Ajax and jQuery is that the jQuery is more like a Frame Work, which is built using JavaScript while Ajax is a technique or a way of using JavaScript for communicating with the server without reloading a web page. jQuery uses Ajax for many of its functions. Can jQuery replace JavaScript? Since jQuery is nothing but a library of JavaScript, it cannot replace JavaScript. All jQuery code is JavaScript, but jQuery doesn't include all the JavaScript code. One thing you should understand is that they are not two programming languages; instead, they both are JavaScript. Which is better jQuery or js? Pure JavaScript can be faster for DOM selection/manipulation than jQuery as JavaScript is directly processed by the browser and it curtails the overhead which JQuery actually has. JQuery is also fast with modern browsers and modern computers. JQuery has to be converted into JavaScript to make it run in a browser. What has replaced jQuery? Popular jQuery Alternatives • Cash. • Zepto. • Syncfusion Essential JS2. • UmbrellaJS. • jQuery Slim. • JavaScript. • ReactJS. • ExtJS. Par Rodolphe Laisser un commentaire Articles similaires How do I reference jQuery in HTML? :: Comment mettre sa photo en JPG ? Liens utiles
__label__pos
0.998138
Tell me more × Stack Overflow is a question and answer site for professional and enthusiast programmers. It's 100% free, no registration required. void main() { if("a" == "a") printf("Yes, equal"); else printf("No, not equal"); } Why is the output No, not equal? share|improve this question 77   void main ??? Ew... – Paul R Jan 30 '11 at 15:59 31   Embedded C compilers allow void main() because there may not be any operating system to give a return code to. – Jeanne Pindar Jan 30 '11 at 20:20 15   How can a question like this get upvoted so often? It's really not that interesting ... I mean, that strings are arrays and arrays are pointers is really an old hat in C, isn't it? – Felix Dombek Jan 31 '11 at 2:27 43   @Felix, it's a concisely-written question that address a common point of confusion for newcomers to the language. SO isn't for experts only - it's for beginners as well, and targeted questions like this are good for referring beginners to in the future. – bdonlan Jan 31 '11 at 3:00 21   @Felix: You are wrong. arrays are not pointers – John Dibling Jan 31 '11 at 20:52 show 13 more comments 11 Answers up vote 154 down vote accepted What you are comparing are the two memory addresses for the different strings, which are stored in different locations. Doing so essentially looks like this: if(0x00403064 == 0x002D316A) // Two memory locations { printf("Yes, equal"); } Use the following code to compare two string values: #include <string.h> ... if(strcmp("a", "a") == 0) { // Equal } Additionally, "a" == "a" may indeed return true, depending on your compiler, which may combine equal strings at compile time into one to save space. When you're comparing two character values (which are not pointers), it is a numeric comparison. For example: 'a' == 'a' // always true share|improve this answer 45   actually, the storage of string literals is an implementation detail, so "a" == "a" can go both ways – Christoph Jan 30 '11 at 15:23 9   GCC also has the options -fmerge-constants and -fno-merge-constants to enable/disable string and floating-point constant merging across translation units, though on some GCCs it seems that constant merging is always enabled regardless of that option. – Adam Rosenfield Jan 30 '11 at 15:35 2   It would work if you use 'a' instead of "a". The first is a char, which is actually a numeric value. – GolezTrol Jan 30 '11 at 15:41 6   Wow. 75 upvotes? For this? – John Dibling Jan 31 '11 at 20:47 3   @JohnDibling 1526 for this? – gliderkite Jun 5 '12 at 16:08 show 10 more comments According in C99(Section 6.4.5/6) String Literals It is unspecified whether these arrays are distinct provided their elements have the appropriate values. So in this case it is unspecified whether both "a"s are distinct. An optimized compiler could keep a single "a" in the read-only location and both the references could refer to that. Check out the output on gcc here share|improve this answer I'm a bit late to the party, but I'm going to answer anyway; technically the same bits, but from a bit different perspective (C parlance below): In C, the expression "a" denotes a string literal, which is a static unnamed array of const char, with a length of two - the array consists of characters 'a' and '\0' - the terminating null character signals the end of the string. However, in C, the same way you cannot pass arrays to functions by value - or assign values to them (after initialization) - there is no overloaded operator == for arrays, so it's not possible to compare them directly. Consider int a1[] = {1, 2, 3}; int a2[] = {3, 4, 5}; a1 == a2 // is this meaningful? Yes and no; it *does* compare the arrays for // "identity", but not for their values. In this case the result // is always false, because the arrays (a1 and a2) are distinct objects If the == is not comparing arrays, what does it actually do, then? In C, in almost all contexts - including this one - arrays decay into pointers (that point to the first element of the array) - and comparing pointers for equality does what you'd expect. So effectively, when doing this "a" == "a" you are actually comparing the addresses of first characters in two unnamed arrays. According to the C standard, the comparison may yield either true or false (i.e. 1 or 0) - "a"s may actually denote the same array or two completely unrelated arrays. In technical terms, the resulting value is unspecified, meaning that the comparison is allowed (i.e. it's not undefined behavior or a syntax error), but either value is valid and the implementation (your compiler) is not required to document what will actually happen. As others have pointed out, to compare "c strings" (i.e. strings terminated with a null character) you use the convenience function strcmp found in standard header file string.h. The function has a return value of 0 for equal strings; it's considered good practice to explicitly compare the return value to 0 instead of using the operator `!´, i.e. strcmp(str1, str2) == 0 // instead of !strcmp(str1, str2) share|improve this answer 3   +1 for nice clear unpacking of what's going on under the hood – Zack Jan 30 '11 at 22:59 This is a better answer than the accepted one, still correct about pointers and not arrogant. +1. – H2CO3 Sep 6 '12 at 21:22 Because they are 2 separate const char*'s, pointers, no actual values. You are saying something like 0x019181217 == 0x0089178216 which of course returns NO Use strcmp() instead of == share|improve this answer 5   String literals are not pointers, they are arrays. They decay to pointers on comparison, though. – GManNickG Jan 30 '11 at 22:08 @Gman true, sorry for not being really clear on that, tend to forget it :) – Antwan van Houdt Jan 30 '11 at 22:18 Simply put, C has no built-in string comparison operator. It cannot compare strings this way. Instead, strings are compared using standard library routines such as strcmp() or by writing code to loop through each character in the string. In C, a string of text in double quotes returns a pointer to the string. Your example is comparing the pointers, which apparently do not match in your setup. But it is not comparing the strings themselves. share|improve this answer Pointers. The first "a" is a pointer to a null-terminated ASCII string. The second "a" is a pointer to another null-terminated ASCII string. If you're using a 32-bit compiler, I'd expect "a"=="a"-4. I've just tried it with tcc/Win32 though, and I get "a"=="a"-2. Oh well... share|improve this answer 6   Why would you expect strings to be aligned to 4-byte boundary? They aren't ints. 2 is what I'd expect (if the compiler doesn't merge them), since each string is two bytes long, including the null terminator. – Sergey Tachenov Jan 30 '11 at 15:52 Also, string literals are not necessarily ASCII encoded. – dreamlax Jan 30 '11 at 22:52 Some degree of alignment may, for instance, allow strcmp to run several bytes at a time. Some compilers do it, some don't, some do it only for strings longer than some minimum... – Zack Jan 30 '11 at 23:00 @Zack: how would they know the length of the string before actually comparing them? – Joachim Sauer Jan 31 '11 at 13:14 I meant, some compilers align strings longer than some minimum. – Zack Jan 31 '11 at 15:55 show 1 more comment You're comparing two memory address, so the result is not always going to be true. Did you try if('a' == 'a'){...}? share|improve this answer Some compilers have 'merge strings' option that you can use to force all constant strings to have the same address. If you would use that, "a" == "a" would be true. share|improve this answer this question sets very good trail of explanation for all the beginers.... let me also contribute to it..... as everybody above explained about , why you getting such output. now if you want your prog. To print "yes equal" then either use if(strcmp("a", "a") == 0) { } or do not use "a" as strings, use them as characters.... if('a'=='a') { printf ("yes Equal"); } in C characters are 1 byte short integer....... share|improve this answer if comparision between character is always in single quote, e.g. if('a' == 'a') and C can't support string comparision like "abc" == "abc" It's done with strcmp("abc","abc") share|improve this answer This guy does not use variables. Instead, he uses temporarily text arrays: a and a. The reason why void main() { if("a" == "a") printf("Yes, equal"); else printf("No, not equal"); } does not work of course, is that you do not compare variables. If you would create variables like: char* text = "a"; char* text2 = "a"; then you could compare text with text2, and it should be true Maybe you shouldn't forget to use { and } =) void main() { if("a" == "a") { printf("Yes, equal"); } else { printf("No, not equal"); } } share|improve this answer Your Answer   discard By posting your answer, you agree to the privacy policy and terms of service. Not the answer you're looking for? Browse other questions tagged or ask your own question.
__label__pos
0.766993
Experience Mathematics #18 - All about itself Russel’s Paradox shows that considering sets that contain themselves (or even asking whether they contain themselves or not) can lead to contradictory situations. But Real Life has many such self-referential situations. In this column, we will collect together many amusing (and not!) statements, such as this one. “All Cretans are Liars”, said the Cretan Epimenides. Did Epimenides tell the truth? How can he, since he is a Cretan, and hence a liar? But if he lied, maybe he is telling the truth! What about: This sentence is false. Is it true or false? Go through each sentence in this column and evaluate whether it is true or false. This sentence has four words. This one, however, has six words. This one has one too too many words. This sentence has no comma. This sentence does not describe itself. This article is written by the author of this article. In other words, the author of Experience Mathematics writes Experience Mathematics. It is self-referential, since it refers to itself. In fact, the article refers to itself several times—but only once does the article refer to itself twice in one sentence. The author of this article is careful not to write self-referential statements. Is this a question or not. How about this statement? The above two statements beg the question. But what is the question? Was that the question? Does this answer the question? The sentence below is false. The above sentence is true. Lets not say any more, and end. Experience Mathematics # 17 -- If it is, then it is not A set can be thought of as a collection of objects. But what is it, really? The above sentence does not say: A set is a collection of objects. So is a set a collection of objects, or can it only be thought of as a collection of objects? Sets can be of two types: those that contain themselves, and those that do not. For example, consider the set $F$ of fruits in your home. This set is not a fruit, so cannot contain itself. Now consider the set $A$. The set $A$ contains all sets that can be described in less than sixteen words. The above sentence has only $15$ words and describes $A$, so $A$ must be a member of itself. Now consider the set $R$ of all sets that do not contain themselves as a member. In particular, $F$ is a member of $R$. The question is: Is $R$ a member of itself? Well, if it is, then by definition $R$ consists of sets that do not contain themselves as a member. So $R$ is not a member of $R$. In short, if it is, then it is not. Conversely, suppose $R$ is not a member of itself. Then since $R$ contains all sets that are not members of themselves, $R$ must be an element of $R$. Thus, if it is not, it is! This paradox—pointed out the famous philosopher, Bertrand Russell—led to the formalization of set theory. Formally speaking, a ‘set’ and the relation ‘is an element of’ are undefined notions that satisfy certain axioms. However, we can continue to think of a set as a collection of objects. Just make sure that we consider only well defined sets—where we can decide whether any given object is an element of the set or not. That saves us from all Russellian disasters. Experience Mathematics #16 -- An apple a day If you study mathematics, then you will have to deal with many statements that contain expressions of the form: If $A$ then $B$  (or, $A$ implies $B$). Suppose it is true that if you have an Apple a day, then you keep the doctor away. Is it true that if you did not visit the doctor, then you must have had an Apple everyday? Not necessarily. In other words: “if $A$ then $B$” is a true statement, then “if $B$, then $A$” may be false. The statement “if $B$, then $A$” is the converse of “if $A$ then $B$”. The converse is not to be confused with the contrapositive of the statement. The contrapositive of “if $A$ then $B$” is: “if not $B$ then not $A$”. Unlike the converse, if a statement is true, its contrapositive is true too. Indeed, either they are both true, or they are both false. For example, suppose that it is true that an Apple a day keeps the doctor away. Now if the doctor comes to visit you, you must not have had an Apple some day. Mathematics contains axioms (that may be regarded as “truths”) together with chains of implications—statements of the form “$A$ implies $B$”, where $A$ and $B$ are mathematical expressions. Suppose your axioms say: 1. An Orange contains the daily requirement of Vitamin C. 2. Having your daily requirement of Vitamin C will keep you healthy. 3. If you are healthy, the doctor will stay away Then, logic dictates that an Orange a day will keep the doctor away. Unfortunately, an Apple does not contain a lot of Vitamin C.
__label__pos
0.995411
 VUE的数据代理与事件 VUE的数据代理与事件详解(IT技术) 纯净、安全、绿色的下载网站 首页 当前位置:首页IT学院IT技术 VUE的数据代理与事件 VUE的数据代理与事件详解 LuckyLazyPig   2021-11-20 我要评论 想了解VUE的数据代理与事件详解的相关内容吗,LuckyLazyPig在本文为您仔细讲解VUE的数据代理与事件的相关知识和一些Code实例,欢迎阅读和指正,我们先划重点:VUE数据代理,VUE事件,下面大家一起来学习吧。 回顾Object.defineProperty方法 <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <title>回顾Object.defineproperty方法</title> </head> <body> <script type="text/javascript" > let number = 18 let person = { name:'张三', sex:'男', } Object.defineProperty(person,'age',{ // value:18, // enumerable:true, //控制属性是否可以枚举,默认值是false // writable:true, //控制属性是否可以被修改,默认值是false // configurable:true //控制属性是否可以被删除,默认值是false //当有人读取person的age属性时,get函数(getter)就会被调用,且返回值就是age的值 get(){ console.log('有人读取age属性了') return number }, //当有人修改person的age属性时,set函数(setter)就会被调用,且会收到修改的具体值 set(value){ console.log('有人修改了age属性,且值是',value) number = value } }) // console.log(Object.keys(person)) console.log(person) </script> </body> </html> 何为数据代理 数据代理:通过一个对象代理对另一个对象中属性的操作(读/写) <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <title>何为数据代理</title> </head> <body> <!-- 数据代理:通过一个对象代理对另一个对象中属性的操作(读/写)--> <script type="text/javascript" > let obj = {x:100} let obj2 = {y:200} Object.defineProperty(obj2,'x',{ get(){ return obj.x }, set(value){ obj.x = value } }) </script> </body> </html> Vue中的数据代理 <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <title>Vue中的数据代理</title> <!-- 引入Vue --> <script type="text/javascript" src="../js/vue.js"></script> </head> <body> <!-- 1.Vue中的数据代理: 通过vm对象来代理data对象中属性的操作(读/写) 2.Vue中数据代理的好处: 更加方便的操作data中的数据 3.基本原理: 通过Object.defineProperty()把data对象中所有属性添加到vm上。 为每一个添加到vm上的属性,都指定一个getter/setter。 在getter/setter内部去操作(读/写)data中对应的属性。 --> <!-- 准备好一个容器--> <div id="root"> <h2>学校名称:{{name}}</h2> <h2>学校地址:{{address}}</h2> </div> </body> <script type="text/javascript"> Vue.config.productionTip = false //阻止 vue 在启动时生成生产提示。 const vm = new Vue({ el:'#root', data:{ name:'尚硅谷', address:'宏福科技园' } }) </script> </html> image 事件的基本使用 <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <title>Vue中的数据代理</title> <!-- 引入Vue --> <script type="text/javascript" src="../js/vue.js"></script> </head> <body> <!-- 1.Vue中的数据代理: 通过vm对象来代理data对象中属性的操作(读/写) 2.Vue中数据代理的好处: 更加方便的操作data中的数据 3.基本原理: 通过Object.defineProperty()把data对象中所有属性添加到vm上。 为每一个添加到vm上的属性,都指定一个getter/setter。 在getter/setter内部去操作(读/写)data中对应的属性。 --> <!-- 准备好一个容器--> <div id="root"> <h2>学校名称:{{name}}</h2> <h2>学校地址:{{address}}</h2> </div> </body> <script type="text/javascript"> Vue.config.productionTip = false //阻止 vue 在启动时生成生产提示。 const vm = new Vue({ el:'#root', data:{ name:'尚硅谷', address:'宏福科技园' } }) </script> </html> 事件的修饰符 <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <title>事件的基本使用</title> <!-- 引入Vue --> <script type="text/javascript" src="../js/vue.js"></script> </head> <body> <!-- 事件的基本使用: 1.使用v-on:xxx 或 @xxx 绑定事件,其中xxx是事件名; 2.事件的回调需要配置在methods对象中,最终会在vm上; 3.methods中配置的函数,不要用箭头函数!否则this就不是vm了; 4.methods中配置的函数,都是被Vue所管理的函数,this的指向是vm 或 组件实例对象; 5.@click="demo" 和 @click="demo($event)" 效果一致,但后者可以传参; --> <!-- 准备好一个容器--> <div id="root"> <h2>欢迎来到{{name}}学习</h2> <!-- <button v-on:click="showInfo">点我提示信息</button> --> <button @click="showInfo1">点我提示信息1(不传参)</button> <button @click="showInfo2($event,66)">点我提示信息2(传参)</button> </div> </body> <script type="text/javascript"> Vue.config.productionTip = false //阻止 vue 在启动时生成生产提示。 const vm = new Vue({ el:'#root', data:{ name:'尚硅谷', }, methods:{ showInfo1(event){ // console.log(event.target.innerText) // console.log(this) //此处的this是vm alert('同学你好!') }, showInfo2(event,number){ console.log(event,number) // console.log(event.target.innerText) // console.log(this) //此处的this是vm alert('同学你好!!') } } }) </script> </html> 键盘事件 <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <title>键盘事件</title> <!-- 引入Vue --> <script type="text/javascript" src="../js/vue.js"></script> </head> <body> <!-- 1.Vue中常用的按键别名: 回车 => enter 删除 => delete (捕获“删除”和“退格”键) 退出 => esc 空格 => space 换行 => tab (特殊,必须配合keydown去使用) 上 => up 下 => down 左 => left 右 => right 2.Vue未提供别名的按键,可以使用按键原始的key值去绑定,但注意要转为kebab-case(短横线命名) 3.系统修饰键(用法特殊):ctrl、alt、shift、meta (1).配合keyup使用:按下修饰键的同时,再按下其他键,随后释放其他键,事件才被触发。 (2).配合keydown使用:正常触发事件。 4.也可以使用keyCode去指定具体的按键(不推荐) 5.Vue.config.keyCodes.自定义键名 = 键码,可以去定制按键别名 --> <!-- 准备好一个容器--> <div id="root"> <h2>欢迎来到{{name}}学习</h2> <input type="text" placeholder="按下回车提示输入" @keydown.huiche="showInfo"> </div> </body> <script type="text/javascript"> Vue.config.productionTip = false //阻止 vue 在启动时生成生产提示。 Vue.config.keyCodes.huiche = 13 //定义了一个别名按键 new Vue({ el:'#root', data:{ name:'尚硅谷' }, methods: { showInfo(e){ // console.log(e.key,e.keyCode) console.log(e.target.value) } }, }) </script> </html> 总结 本篇文章就到这里了,希望能够给你带来帮助,也希望您能够多多关注的更多内容! 相关文章 猜您喜欢 • SpringBoot使用RabbitMQ 详解SpringBoot中使用RabbitMQ的RPC功能 想了解详解SpringBoot中使用RabbitMQ的RPC功能的相关内容吗,黑莹de希望在本文为您仔细讲解SpringBoot使用RabbitMQ的相关知识和一些Code实例,欢迎阅读和指正,我们先划重点:SpringBoot使用RabbitMQ,SpringBoot使用RabbitMQ,RPC,下面大家一起来学习吧。.. • Dubbo分布式服务框架原理机制 java开发分布式服务框架Dubbo原理机制详解 想了解java开发分布式服务框架Dubbo原理机制详解的相关内容吗,Dubbo原理机制详解在本文为您仔细讲解Dubbo分布式服务框架原理机制的相关知识和一些Code实例,欢迎阅读和指正,我们先划重点:java分布式服务框架,Dubbo原理机制,下面大家一起来学习吧。.. 网友评论 Copyright 2022 版权所有 软件发布 声明:所有软件和文章来自软件开发商或者作者 如有异议 请与本站联系 联系我们
__label__pos
0.798725
Contributing home/ open-source There are no strict rules when contributing, but here are my suggestions. I use a combination of Gitflow Workflow and Forking Workflow. Steps 1. Fork the repository. 2. Create a branch depending on the issue that you are working on. See branch reference list bellow. 3. Do your work and commit. 4. Create a Pull Request to the main branch. Branch reference list • feature/{name} - New functionality or refactoring. • bugfix/{name} - Fixes existing code. • hotfix/{name} - Urgent production fix. Use this if there is a huge bug. • support/{name} - Documentation updates & stuff like that. Replace {name} with the name that best describes what your PR is about.
__label__pos
0.519684
删除有序数组中的重复项 题目链接: https://leetcode.cn/problems/remove-duplicates-from-sorted-array/ 解题思路一(用时间换空间): 1. 数组是有序的,那么重复出现的元素都有一个特性,除第一次出现外,后续多次出现的元素都是与前一个元素相同的,可用此方式判定数据重复 2. 仅需要将每个第一次出现的元素按先后顺序从数据头部重新覆盖写入 func removeDuplicates(nums []int) int { n := 0 for _, num := range nums { if n == 0 { n++ continue } else { if nums[n-1] == num { continue } else { nums[n] = num n++ } } } return n } 解题思路二(用空间换时间): 1. 通过map存储出现过的元素,若元素未出现过,则记录并按先后顺序从数组头部覆盖写入 2. 仅需要将每个第一次出现的元素按先后顺序从数据头部重新覆盖写入 func removeDuplicates(nums []int) int { numsMap:=map[int]int{} n:=0 for _,num:=range nums{ if _,has:=numsMap[num];!has{ nums[n]=num n++ numsMap[num]=1 } } return n } 最后更新于
__label__pos
0.999848
Create Replication Instance Create replication subnet group One of the pre-requisites for using of AWS DMS is having configured a subnet group, which is a collection of subnets that will be used by the DMS Replication Instance. 1. Go to AWS Console > Services > Database Migration Service > Subnet groups and click on Create subnet group button. 2. In the Create replication subnet group enter the following parameter values: Parameter Value Name dms-subnet-group Description Default VPC Subnet Group for DMS VPC TargetVPC Add subnets select TargetVPC-public-a, TargetVPC-public-b Replication-instance-networ 3. Click on the Create subnet group button Create AWS DMS Replication Instance In this step you will create an AWS Database Migration Service Replication Instance that initiates the connection between the source and target databases, transfers the data, and caches any changes that occur on the source database during the initial data load. 1. Inside AWS Console, go to Services and Database Migration Service. 2. Click on Replication instances and then on the Create replication instance button. Replication-instance-create 3. On the Create replication instance screen configure a new replication instance with the following parameter values: Parameter Value Name replication-instance Description DMS replication instance VPC TargetVPC Multi-AZ Unchecked Publicly accessible Checked Like on the screenshot below. replication-instance-conf In the Advanced security and network configuration, make sure to select the replication subnet group, Availability zone (us-west-2a) and the replication instance security group that you created earlier. Replication-instance-conf 4. Click Create button. If you get an error saying “SYSTEM ERROR MESSAGE:Cannot create a dms.t2.medium replication instance”, create the DMS replication instance again but select us-west-2b Availability Zone and if this doesn’t help - select larger instance class.
__label__pos
0.853304
selection sort java Selection sort is a selection process to find the smallest element repeatedly from the list of elements is kept. Due to its simplicity, it's often one of the first algorithms that are taught in computer science courses all around the world. Also see: Java program for bubble sort algorithm As the name Selection Sort indicates, it selects the minimum value (in the case of ascending order) or maximum value (in case of descending order) in a given list and places it at the beginning of the list. We’ll also walk through how to build a selection sort in Java … In selection sort aim is to … It will remain in place. Selection sort is considered a step ahead of bubble sort as the number of swaps is lesser though the comparison are still proportional to N 2. Selection Sort is very basic and easy sorting algorithm to understand and implement. In selection sort, the smallest value among the unsorted elements of the array is selected in every pass and inserted to its appropriate position into the array. 2. Selection sort is an unstable, in-place sorting algorithm known for its simplicity, and it has performance advantages over more complicated algorithms in certain situations, particularly where auxiliary memory is limited. One common option is the selection sort. Selection Sort in Java July 26, 2020 Selection Sort is a technique where a array is sequentially sorted by placing the smallest or the largest element from the array one after the other in multiple iterations. In this example, we'll create a java program to sort the array elements using selection sort. Initial Configuration (search all cards and find the largest) 4. Selection sort in java example program code : The algorithm divides the input list into two parts: the sublist of items already sorted, which is built up from left to right at the front (left) of the list, and the sublist of items remaining to be sorted that occupy the rest of the list. You should use more whitespaces, this will make your code more readable. It finds the minimum element from the array and swaps it with the first element of the array. We have seen in class how selection sort algorithm works on arrays data structure. Instead of num_sort=new use num_sort = new, insead of i!=min_index use i != min_index, etc; Follow the Java naming conventions: variable and function names should use camelCase, not snake_case. Following is the required program. In this case, we start off by finding the 0 value and writing it to the stream. The algorithm of selection sort maintains two types of arrays which are: An array that is already in the sorted form. This sorting algorithm is an in-place comparison-based algorithm in which the list is divided into two parts, the sorted part at the left end and the unsorted part at the right end. Selection Sort (Cards Example) 3. Introduction Selection Sort is one of the simpler and more intuitive sorting algorithms. ; You should include documentation that explains the user how the class is used. Related Posts. Selection sort is probably the most intuitive sorting algorithm to invent. Initially, the sorted part is empty and the unsorted part is the entire list. Sorted Unsorted Among the remaining cards the king is the largest. First, find the smallest element of the array and place it on the first position. Selection Sort Algorithm: Let's know a detailed tutorial on selection sort algorithm and covers C, C++, Java, and Python codes for selection and sort. This sorting algorithm is an in-place comparison-based algorithm in which the list is divided into two parts, the sorted part at the left end and the unsorted part at the right end. This gives. But not the fastest sorting algorithm. The selection sort selects the minimum value element, in the selection process all the ‘n’ number of elements are scanned; therefore n-1 comparisons are made in the first pass. For outer loop set minIndex as 0; // Just suppose that at index list has the lowest element. It is similar to the hand picking where we take the smallest element and put it in the first position and the second smallest at the second position and so on. Because outer “for loop” places the value to correct position while inner “for loop” finds next largest or smallest element. Unfortunately, its quadratic time complexity makes it an expensive sorting technique . Live Demo. Developing Java code for selection sort is quite easy. Selection Sort. 10 5 8 20 30 2 9 7. An array that is yet to be sorted. Selection Sort Java Explanation:-Suppose we have unsorted list as: 27, 53, 1, 33, 16, 99, 33, 80, 14, 77. Suppose we want to arrange an array in ascending order then it functions by finding the largest element and exchanging it with the last element, and repeat the following process on the sub-arrays till … Finding smallest element from the array and. In this guide, we’re going to talk about what selection sorts are and how they work. Selection sort in Java. Selection sort is a simple sorting algorithm. Then, find the second smallest element of the array and place it on the second position. Style. De igual modo que con el método de la burbuja, el algoritmo de se… 1. Now inner loop will get the index of the lowest value in the list. Example. 1 is the lowest value whose index is … Replace the smallest element found to first position in array. Time complexity of selection sort is O(N 2) which is same as the time complexity of bubble sort but the number of swaps required are comparatively lesser in Selection sort than Bubble sort. In this post we’ll see how to write Selection sort program in Java. The Selection Sort Algorithm sorts the elements of an array. We find the smallest number and keep it in the beginning. In this tutorial, you will understand the working of selection sort with working code in C, C++, Java, and Python. Java Program to perform Selection Sort on Array. In this article, we shall look at the core algorithm and how we can implement it in Python, Java, C++, and C. Java Programming Java8 Object Oriented Programming. Step #1 – Insertion sort starts with the 2nd element of the array, i.e. Selection sort in java is considered as one of the simplest algorithms. How do you sort a list in Java? Convert the following selection sort pseudo-code to perform the sort in ascending order. We swap the current element with the subsequent lowest number. In this lab we will practice how selection sort can be performed on a linked list ADT. Selection sort works by, 1. The selection sort returns the sorted list. The idea upon selection sort works is simple; a selection sort selects the element with the lowest value and exchanges it with the first element. Java Program Code for Selection Sort. En el post anterior hablamos sobre el en este post veremos otro algoritmo de ordenamiento llamado por selección o (Selection sort). The replacement selection sort algorithm works by repeatedly looking for the lowest value in the Values array and writing it out to the output stream. SORTING ALGORITHMS SELECTION SORT 2. (selectionSort_asc function) a. Selection Sort is one of the most simple sorting algorithm that sorts the data items into either ascending or descending order, which comes under the category of in-place comparison sort algorithm. This means that it transforms the input collection using no auxiliary data structures and that the input is overridden by the output (in-place algorithm). Code description: In selection sort … The complexity of selection sort algorithm is in worst-case, average-case, and best-case run-time of Θ(n2), assuming that comparisons can be done in constant time. Selection Sort is an algorithm that works by selecting the smallest element from the array and putting it at its correct position and then selecting the second smallest element and putting it at its correct position and so on (for ascending order). Also read – bubble sort in java. Selection sort is a simple sorting algorithm. How selection sort works. Also, since the algorithm has to scan through each element, the best case, average case, and worst-case time complexity is the same . Selection Sort is a brute force in-place comparison sort which continuously finds the minimum of an unsorted subarray and places it in the correct position in the sorted subarray. Selection sort 1. In the selection sort algorithm, we look for the lowest element and arrange it to the right location. Selection Sort Algorithm using Generics in Java Today, we are going to show the implementation of the Selection Sort algorithm, which is the third one from our series of tutorials on sorting algorithms. It is an in-place, unstable, comparison algorithm. Selection sort is one of the simplest sorting algorithms. In the following example, we have defined a method selectionSort() that implements the selection sort algorithm. As we learned in the previous section, the selection sort algorithm only needs to run up until the n-1 element. Swap the two cards 5. Selection Sort in Java. No extra space is required so the space complexity of Selection sort is O(1). As before, the swap is performed in three steps. Then, from the remaining N-1 elements, the element with the smallest key is found and exchanged with the second element, and so forth. With that in mind, the outer loop can be represented as a summation from i=1 to n-1 . This Tutorial will Explain all about Selection Sort In Java along with Selection Sort Algorithm, Java Code, Implementation in Java and Java Examples: The selection sort technique is a method in which the smallest element in the array is selected and swapped with the first element of the array. 5, considering the 1st element of the array assorted in itself.Now the element 5 is compared with 10 since 5 is less than 10, so 10 is moved 1 position ahead and 5 is inserted before it. Selection sort has achieved slightly better performance and is efficient than bubble sort algorithm. Selection sort is useful for small data sets. As the working of selection, sort does not depend on the original order of the elements in the array, so there is not much difference between best case and worst case complexity of selection sort. Selection Sort Algorithm | Iterative & Recursive | C, Java, Python Given an array of integers, sort it using selection sort algorithm. Description: in selection sort algorithm, we start off by finding 0... Sorted form largest ) 4 going to talk about what selection sorts and... Very simple sorting algorithm to invent 'll create a Java program selection sort java sort array! Should use more whitespaces, this will make your code more readable that in mind the! Java, and Python more intuitive sorting algorithm to understand and implement,,... Is required so the selection sort java complexity of selection sort program in Java is considered as one of array... Include documentation that explains the user how the class is used minIndex as 0 ; // Just suppose at. Is already in the previous section, the sorted form most intuitive algorithm! Finds next largest or smallest element also walk through how to build selection. Find the smallest element found to first position index list has the lowest element and arrange it to the location. Lowest value in the list sort algorithm works on arrays data structure at index list has the lowest and! Subsequent lowest number to invent space selection sort java required so the space complexity of sort... As 0 ; // Just suppose that at index list has the lowest value in list., we look for the lowest element and arrange it to the stream sorting algorithm invent! The following example, we have seen in class how selection sort can be represented as a summation from to... Whitespaces, this will make your code more readable courses all around the.... And writing it to the right location is required so the space complexity of selection sort … this... Up until the n-1 element have seen in class how selection sort algorithm simple sorting algorithm understand... Este post veremos otro algoritmo de ordenamiento llamado por selección o ( 1.. 1 ) sorts are and how they work ) that implements the sort... To correct position while inner “ for loop ” finds next largest smallest! Sorted form, you will understand the working of selection sort … in this lab we will how! Sort algorithm sorts the elements of an array that is already in the following selection sort is one the! As before, the outer loop can be represented as a summation from i=1 to n-1 o ( 1.. Find the smallest element Configuration ( search all cards and find the second position write selection sort algorithm we! Post anterior hablamos sobre el en este post veremos otro algoritmo de ordenamiento llamado por selección (... Ascending order of the array, i.e bubble sort algorithm works on arrays data structure Configuration ( search cards. Tutorial, you will understand the working of selection sort algorithm sorts the elements an! Sorted unsorted Among the remaining cards the king is the largest computer science courses all around the world quite. Minimum element from the array … in this guide, we look for the lowest element loop set minIndex 0! Often one of the array elements using selection sort algorithm works on arrays data structure sort.! Sort has achieved slightly better performance and is efficient than bubble sort algorithm works on arrays data structure through. The minimum element from the array achieved slightly better performance and is efficient than bubble sort algorithm works arrays... A linked list ADT in this tutorial, you will understand the working of selection sort is (. In computer science courses all around the world often one of the simplest algorithms sorting technique selección (! It to the right location sort … in this case, we 'll create a Java to! Value to correct position while inner “ for loop ” places the value correct... Will practice how selection sort in ascending order unfortunately, its quadratic complexity! For loop ” finds next largest or smallest element subsequent lowest number and is efficient than bubble sort algorithm we! As 0 ; // Just suppose that at index list has the value. Time complexity makes it an expensive sorting technique the selection sort is a very simple sorting to... And keep it in the sorted part is the largest is empty and unsorted... Is o ( selection sort algorithm, we 'll create a Java program to sort the and. The minimum element from the array and swaps it with the first position are an... The class is used 's often one of the first element of the lowest element and arrange to... The value to correct position while inner “ for loop ” finds next largest or element... Slightly better performance and is efficient than bubble sort algorithm works on arrays structure. El post anterior hablamos sobre el en este post veremos otro algoritmo de ordenamiento llamado selección! Performance and is efficient than bubble sort algorithm only needs to run until. Arrays data structure the beginning it on the first position is considered as of. Case, we ’ ll also walk through how to write selection sort is o ( selection sort is the! Represented as a summation from i=1 to n-1 “ for loop ” places the value to correct position inner!, and Python going to talk about what selection sorts are and how work., Java, and Python by finding the 0 value and writing it to the.... O ( 1 ) is probably the most intuitive sorting algorithm to.... Is performed in three steps the unsorted part is empty and the unsorted is. For loop ” places the value to correct position while inner “ for loop ” places the value correct. 0 ; // Just suppose that at index list has the lowest element and arrange it the... Your code more readable example, we 'll create a Java program sort! To understand and implement largest or smallest element of the simpler and more sorting... Sort can be represented as a summation from i=1 to n-1 unsorted Among remaining. Is one of the simplest algorithms for the lowest element and arrange it to the stream which are an! To perform the sort in Java … selection sort can be performed on a linked list ADT intuitive sorting to. Method selectionSort ( ) that implements the selection sort program in Java … selection sort maintains two types arrays! Defined a method selectionSort ( ) that implements the selection sort is o ( 1 ) 1 – Insertion starts. Implements the selection sort can be performed on a linked list ADT inner “ for loop ” the... Lowest value in the selection sort can be performed on a linked list ADT the world that is in. An array that is already in the sorted form your code more readable Configuration search! Performed on a linked list ADT de ordenamiento llamado por selección o ( sort. El en este post veremos otro algoritmo de ordenamiento llamado por selección o ( sort! As before, the swap is performed in three steps element with the subsequent lowest number maintains two types arrays. Class is used ” finds next largest or smallest element found to first position how they work sort the.. For loop ” finds next largest or selection sort java element the sort in Java as one the. To first position build a selection sort is a very simple sorting algorithm to understand and implement courses around! Code more readable the algorithm of selection sort with working code in C, C++, Java, Python. As 0 ; // Just suppose that at index list has the lowest element and arrange it to right... Unfortunately, its quadratic time complexity makes it an expensive sorting technique cards the is! Then, find the largest ) 4 the user how the class is selection sort java we create... The algorithm of selection sort is one of the simplest algorithms are taught in computer science courses around! List has the lowest value in the sorted part is the entire list post we ll! In the beginning minIndex as 0 ; // Just suppose that at index has. The user how the class is used are: an array that is already in the following sort! Is performed in three steps: an array that is already in the selection sort … in this,... Performed in three steps array elements using selection sort ) an array or smallest element of the and. Value to correct position while inner “ for loop ” finds next largest smallest! Two types of arrays which are: an array that explains the user how the class is used a! And the unsorted part is empty and the unsorted part is empty and unsorted! Finds next largest or smallest element found to first position in array on a linked list.. ( 1 ) finding the 0 value and writing it to the right location to invent have in! Are taught in computer science courses all around the world sorting technique probably most. Algorithm, we 'll create a Java program to sort the array elements using selection sort in …. Java … selection sort is one of the simpler and more intuitive sorting algorithm understand! Selection sort ) the simpler and more intuitive sorting algorithm to invent en post! Elements using selection sort in Java the swap is performed in three steps before, the selection sort algorithm keep. Cards the king is the largest we start off by finding the 0 value writing... Three steps up until the n-1 element in ascending order part is the largest places the value to correct while! Swap the current element with the 2nd element of the first position in array is than. C, C++, Java, and Python that implements the selection sort with working code in C C++. El post anterior hablamos sobre el en este post veremos otro algoritmo de ordenamiento llamado por selección (. Array, i.e sort in Java you should include documentation that explains the user how class. Best Hospital In Siliguri, Air Rifle Breech Seal, Ethylene Glycol Home Depot, Aliv Seeds Meaning In Kannada, Virtual Spectroscopy Lab, This entry was posted in Panimo. Bookmark the permalink. Comments are closed.
__label__pos
0.824257
  Need Help with VBScript? Try Vbsedit! The following sections include information about the elements that comprise the VBScript language. In This Section Feature Information Includes links to topics that list the versions that introduced VBScript features and includes links to topics that explain the feature differences between VBScript and VBA. Constants Lists the constants in VBScript and links to topics that explain how to use each constant. Errors Includes links to run-time and syntax error messages. Events Includes links to topics that explain events in VBScript. Functions Lists VBScript functions and links to topics that explain how to use each function. Keywords Includes links to topics that explain how to use keywords in VBScript. Methods Includes links to topics that explain how to use methods in VBScript. Miscellaneous Includes links to topics that are outside the normal category of VBScript language elements. Objects and Collections Lists VBScript objects and collections and links to topics that explain how to use each object or collection. Operators Includes links to topics that explain how to use operators in VBScript. Properties Lists VBScript properties and links to topics that explain how to use each property. Statements Lists VBScript statements and links to topics that explain how to use each statement. Related Sections VBScript Fundamentals Introduces VBScript features and elements. Add Scripting to your application with VbsEdit Embedded! Download Vbsedit Embedded This package includes VbsEdit Embedded 32-bit and 64-bit. Home   Copyright © 2001-2017 Adersoft
__label__pos
0.512423
Cisco IOS SIP VoIP Dial-Peer: What Happens When No Response is Received? What Happens to a Cisco IOS SIP VoIP Dial-Peer with No Response? Prev Question Next Question Question Which statement about what happens to a Cisco IOS SIP VoIP dial-peer that never received any responses to its out-of-dialog OPTIONS ping is true? Answers Explanations Click on the arrows to vote for the correct answer A. B. C. D. E. A. You can check the validity of your dial peer configuration by performing the following tasks:If you have relatively few dial peers configured, you can use the show dial-peer voice command to verify that the configuration is correct. To display a specific dial peer or to display all configured dial peers, use this command. The following is sample output from the show dial-peer voice command for a specific VoIP dial peer: router# show dial-peer voice 10 VoiceOverIpPeer10 - tag = 10, dest-pat = \Q', incall-number = \Q+14087', group = 0, Admin state is up, Operation state is down Permission is Answer, type = voip, session-target = \Q', sess-proto = cisco, req-qos = bestEffort, acc-qos = bestEffort, fax-rate = voice, codec = g729r8, Expect factor = 10,Icpif = 30, VAD = disabled, Poor QOV Trap = disabled, Connect Time = 0, Charged Units = 0 Successful Calls = 0, Failed Calls = 0 Accepted Calls = 0, Refused Calls = 0 Last Disconnect Cause is "" Last Disconnect Text is "" Last Setup Time = 0 -To show the dial peer that matches a particular number (destination pattern), use the show dialplan number command. The following example displays the VoIP dial peer associated with the destination pattern 51234: router# show dialplan number 51234 Macro Exp.: 14085551234 - VoiceOverIpPeer1004 - tag = 1004, destination-pattern = \Q+1408555....', answer-address = \Q', group = 1004, Admin state is up, Operation state is up type = voip, session-target = \Qipv4:1.13.24.0', ip precedence: 0UDP checksum = disabled session-protocol = cisco, req-qos = best-effort, acc-qos = best-effort, fax-rate = voice, codec = g729r8, Expect factor = 10, Icpif = 30, VAD = enabled, Poor QOV Trap = disabled Connect Time = 0, Charged Units = 0 Successful Calls = 0, Failed Calls = 0 Accepted Calls = 0, Refused Calls = 0 Last Disconnect Cause is "" Last Disconnect Text is "" Last Setup Time = 0 - Matched: +14085551234 Digits: 7 - Target: ipv4:172.13.24.0 When a Cisco IOS SIP VoIP dial-peer sends out an OPTIONS ping to its destination, it expects to receive a response within a certain amount of time. If it does not receive a response, the behavior of the dial-peer depends on its configuration. Option A: Its admin state will be up but operational state will be down. This option is incorrect because if the dial-peer never received any responses to its out-of-dialog OPTIONS ping, its admin and operational state will both be down. This is because the dial-peer is unable to establish a connection with the destination. Option B: Its admin and operational state will be down. This option is correct. If the dial-peer never received any responses to its out-of-dialog OPTIONS ping, its admin and operational state will both be down. This means that the dial-peer is unable to establish a connection with the destination. Option C: Its admin and operational state will remain up. This option is incorrect because if the dial-peer never received any responses to its out-of-dialog OPTIONS ping, its admin and operational state will both be down. This means that the dial-peer is unable to establish a connection with the destination. Option D: Its admin state will be up but operational state will be "busy-out". This option is incorrect because the "busy-out" state is typically used when a dial-peer is intentionally unavailable due to congestion or maintenance. If the dial-peer never received any responses to its out-of-dialog OPTIONS ping, its admin and operational state will both be down. Option E: Its admin and operational state will be "busy-out". This option is incorrect for the same reason as option D. In conclusion, option B is the correct answer. If a Cisco IOS SIP VoIP dial-peer never received any responses to its out-of-dialog OPTIONS ping, its admin and operational state will both be down.
__label__pos
0.964865
Click here to Skip to main content 15,038,156 members Please Sign up or sign in to vote. 1.00/5 (1 vote) See more: my update query cant worked <pre>Actually I doing a crud operation. I completed insert, select and delete also. But when write a update query in my update.php file, it cant worked. so, I echo my query and paste into database(localhost/phpmyadmin). after that they give me this type of error->"<pre>You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ''product_details' SET 'name'='saumil joshi','category'='mobile phone','brand'...' at line 1 " so,what sholud i do now? anybody can hear to solve this problem? What I have tried: Actually I doing a crud operation. I completed insert, select and delete also. But when write a update query in my update.php file, it cant worked. so, I echo my query and paste into database(localhost/phpmyadmin). after that they give me this type of error->"<pre>You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ''product_details' SET 'name'='saumil joshi','category'='mobile phone','brand'...' at line 1 " so,what sholud i do now? anybody can hear to solve this problem? Posted Updated 2-Jun-21 22:25pm Comments Patrice T 3-Jun-21 4:50am     And you plan to show the code ? 1 solution We can't help you: we have no access to the code you are using, and that's going to be pretty significant to the problem. So start by looking at the line of code that is executing when that error is thrown: use the PHP Debugger[^] to find out exactly what the command you are sending to the DB. More often than not, this is caused by either a simple mistyping in your code, or by SQL injection prone code. Never concatenate strings to build a SQL command. It leaves you wide open to accidental or deliberate SQL Injection attack which can destroy your entire database. Always use Parameterized queries instead. When you concatenate strings, you cause problems because SQL receives commands like: SQL SELECT * FROM MyTable WHERE StreetAddress = 'Baker's Wood' The quote the user added terminates the string as far as SQL is concerned and you get problems. But it could be worse. If I come along and type this instead: "x';DROP TABLE MyTable;--" Then SQL receives a very different command: SQL SELECT * FROM MyTable WHERE StreetAddress = 'x';DROP TABLE MyTable;--' Which SQL sees as three separate commands: SQL SELECT * FROM MyTable WHERE StreetAddress = 'x'; A perfectly valid SELECT SQL DROP TABLE MyTable; A perfectly valid "delete the table" command SQL --' And everything else is a comment. So it does: selects any matching rows, deletes the table from the DB, and ignores anything else. So ALWAYS use parameterized queries! Or be prepared to restore your DB from backup frequently. You do take backups regularly, don't you?     This content, along with any associated source code and files, is licensed under The Code Project Open License (CPOL) CodeProject, 20 Bay Street, 11th Floor Toronto, Ontario, Canada M5J 2N8 +1 (416) 849-8900
__label__pos
0.660345
Beefy Boxes and Bandwidth Generously Provided by pair Networks go ahead... be a heretic   PerlMonks   Re: OT: Perl CBT? by woolfy (Hermit) on Aug 15, 2003 at 23:55 UTC ( #284281=note: print w/ replies, xml ) Need Help?? in reply to OT: Perl CBT? in thread Where and how to start learning Perl I've been making CBT for quite a while and I still look back with some fondness at that time. But anyway... of course it's a piece of cake to build good CBT with HTML and Perl (throw in some MySQL). I just wished I had Perl and HTML (and MySQL) 16 years ago... It would have been so much easier. But please don't make the same mistake as CBT-developers of all times made. It's not the tools with or the media on which the CBT is made. It's always about the content. And of course the way you present the content is important, but not as important as the content itself. Good written text, presented in small enough parts so the reader won't get frustrated. Good questions, good answers, good record keeping, low reaction times. Good CBT can be made with MS-DOS batch files, bad CBT can be made with the most modern and best content management systems (which tend to be horribly slow). Many many books have been written and, as it seems, seldomly read. As to your suggestion to write CBT which contains something for judging Perl scripts: that's closer to Artificial Intelligence than to CBT. Debuggers can do a lot nowadays, but not checking for something to have a desired outcome. I've seen the prices paid for CBT these days and I think they're ridiculously low. No fun or honour in there any more. I guess I'll stick to writing and consulting and maybe some more programming. There are a lot of reasons why there are so small amounts of good CBT and why CBT is not used much more. The same with good websites. Lots of people think it just comes in for free, and don't want to pay for it. Update: corrected spelling mistakes (again)(2x) and added a sentence at the end. Comment on Re: OT: Perl CBT? Log In? Username: Password: What's my password? Create A New User Node Status? node history Node Type: note [id://284281] help Chatterbox? and the web crawler heard nothing... How do I use this? | Other CB clients Other Users? Others exploiting the Monastery: (7) As of 2015-11-25 14:32 GMT Sections? Information? Find Nodes? Leftovers? Voting Booth? What would be the most significant thing to happen if a rope (or wire) tied the Earth and the Moon together? Results (677 votes), past polls
__label__pos
0.739102
Answers Solutions by everydaycalculation.com Everydaycalculation.com » Answers » A% of what number is B 30 percent of what number is 35? 35 is 30% of 116.67 Reference: Percentage Calculator Working out percentages 1. We have, 30% × x = 35 2. or, 30/100 × x = 35 3. Multiplying both sides by 100 and dividing both sides by 30, we have x = 35 × 100/30 4. x = 116.67 If you are using a calculator, simply enter 35×100÷30, which will give you the answer. More percentage problems: Find another is % of Use percentage calculator on our all-in-one calculator app: Download for Android, Download for iOS © everydaycalculation.com
__label__pos
0.983386
How to read input from a textbox and store the data as first element in 3D array? Please help!! Recommended Answers Please your code! Jump to Post All 4 Replies Please your code! double[,,] phs = new double[14,0,0]; TextBox168.Text = Convert.ToString(p1); phs[0,0,0] = p1; phs[1, 0, 0] = Convert.ToDouble(TextBox1.Text); phs[2, 0, 0] = Convert.ToDouble(TextBox2.Text); phs[3, 0, 0] = Convert.ToDouble(TextBox3.Text); phs[4, 0, 0] = Convert.ToDouble(TextBox4.Text); phs[5, 0, 0] = Convert.ToDouble(TextBox5.Text); phs[6, 0, 0] = Convert.ToDouble(TextBox6.Text); phs[7, 0, 0] = Convert.ToDouble(TextBox7.Text); phs[8, 0, 0] = Convert.ToDouble(TextBox8.Text); phs[9, 0, 0] = Convert.ToDouble(TextBox9.Text); phs[10, 0, 0] = Convert.ToDouble(TextBox10.Text); phs[11, 0, 0] = Convert.ToDouble(TextBox11.Text); phs[12, 0, 0] = Convert.ToDouble(TextBox12.Text); phs[13, 0, 0] = Convert.ToDouble(TextBox13.Text); double[,,] phs = new double[14,0,0]; Could you please tell me what your intention is when creating a 3D array, of doubles where the second and third dimension are zero? And why you are assigning a string to the first item of the first dimension phs[0,0,0] instead of a double? Well I don't think your code is VB.NET, I think that's a C#. Be a part of the DaniWeb community We're a friendly, industry-focused community of 1.21 million developers, IT pros, digital marketers, and technology enthusiasts learning and sharing knowledge.
__label__pos
0.999173
IP address facts for kids Kids Encyclopedia Facts An IP address is a label which is used to identify one or more devices on a computer network, such as the internet. It is comparable to a postal address. An IP address is a long number written in binary. Since such numbers are difficult to communicate, IP addresses are usually written as a set of numbers in a given order. Devices using IP addresses use the internet protocol to communicate. The Internet Assigned Numbers Authority assigns IP addresses to regional internet registries (RIRs). The RIRs assign them to Internet Service Providers. Internet Service Providers then assign IP addresses to their customers. Very often, people have a router or gateway at home, to which they connect computers, printers, and other devices. These routers or gateways are often configured to assign IP "local" IP addresses to the devices that are connected. Each address has two parts: One that specifies the computer or group of computers, and another which specifies the network. A device can have more than one IP address. Certain types of IP addresses are used to address a group of devices, while others are used to address only one device. Certain types of addresses are unique, others can be re-used. A number of IP addresses are used for special purposes, for example to obtain an IP address automatically. An IP address is converted to physical or Media Access Control Address using the Address Resolution Protocol (ARP). If an IP address is your phone number, then your MAC address is your name. You may change your phone number, but your name will not change. What an IP address looks like An IP address is a long binary number, made of ones and zeros. An IPv4 address is 32 binary digits (or bits) long. An IPv6 is 128 bits long, allowing many more IP addresses to be used. IP addresses are usually written in human-readable form, where 8 bits are grouped into one octet. IPv4 addresses are usually written as a group of four numbers. Each number can take a value from 0 to 255. IPv6 addresses are written as a group of eight hexadecimal numbers. Many Ipv6 addresses contain many zeroes. There are special rules which say that in certain cases, these zeroes do not need to be written. Public and private addresses Certain IP addresses can be assigned freely on the local area network. Since they are not unique, they are not routed on the internet. The addresses which can be freely assigned are called private IP addresses, the ones which are unique are called public. To be routed, a private address needs to be translated into a public one. This process of translating between private and public addresses is called network address translation, or NAT. Routers and firewalls often also perform this task. Reaching one or more devices There are three different types of addresses: • Unicast addresses: The address is assigned to one specific device. This is the most common case, most addresses are unicast addresses. • Broadcast addresses: address all computers on the same network. There are certain cases where this is useful, for example to obtain a new address automatically. The sender sends the data once, and the devices used for routing the data make copies, as needed. • Multicast addresses: This case is similar to the broadcast case above: Some devices are interested in receiving certain data, and the network copies the data as needed. The big difference to the broadcast case above is that all devices connected to the broadcast network see the data sent using broadcast. With multicast, devices need to subscribe to see a given content. The devices on the same network that are not subscribed will not see the content. Obtaining a new IP address There are different ways of getting a new IP address. One of them is called Bootstrap Protocol (usually shortened to BOOTP). The device that needs a new address, does not know what network it is in, so it uses an IP address of all zeroes (0.0.0.0) which it sends as a broadcast to the current network, on a special port. In addition, it sends the MAC address of the network card, plus a 4 byte random number. The BOOTP server will send a reply, also as broadcast, addressed to a different port. The reply will contain the mac address of the client, the random number, and the IP address of the client. When the client receives the data, it will set the address specied. If the BOOTP server is configured that way, it will also send the IP address and hostname of the BOOTP Server, the name and path to a file which should be loaded to boot the client (using TFTP) or the name of a directory, which the client should mount using NFS. DHCP extends BOOTP, and allows to send more information, souch the address of a time server, or information which is useful for routing. IP addresses obtained automatically can be dynamic or static. Static addressing means the same machine will always get the same IP address. With dynamic addresses, a device will get the next address which is not used. Dynamic addresses which are used need to be reviewed form time to time. If they are not renewed, they can be used for other devices. IP Version 4 With IPv4, each address consists of four 8-digit binary numbers, called octets. An IPv4 address is 32 bits in total. The biggest number one can make with 8 regular digits is 99,999,999, but the biggest number one can make with 8 binary digits is 255 (11111111 in binary), so each octet can be any number from 0 to 255. An IPv4 address could look something like this: 198.51.100.137 Each octet is converted to its decimal form and separated by a period. Also, there are special meanings associated with two different ending numbers. In general, a last number of 0 stands for the network (called base address), and a last number of 255 stands for all hosts on that network (called broadcast address). Computers that are on the same local network share 3 of the 4 numbers. A computer can be on more than one network. It can also have several names. Public/Private addresses The problem with IPv4 is that it only allows for 4.3 billion addresses, and we've almost used them all. To delay this, Network Address Translation (NAT) was created. Network Address Translation has a network share one public IP address and give every computer on the network a private IP address. Everyone living in the same house uses the same address, but mail can be meant for multiple different people living in the house. Special IP addresses There are some IP addresses that are reserved for special purposes. For example, the address 127.0.0.1 is called the Loopback Address and will "loop back" any packets sent to this address back to the computer that sent them, like sending mail to yourself. Although this may not seem useful, it is used to test servers. 127.0.0.0/8 block Starting address Ending address Number of addresses 10.0.0.0/8 10.0.0.0 10.255.255.255 16,777,216 172.16.0.0/12 172.16.0.0 172.31.255.255 1,048,576 192.168.0.0/16 192.168.0.0 192.168.255.255 65,536 IPv4 subnetting To make a network work faster, it is split up into subnets. To do this, an IP address contains a network ID, subnet ID, and a host ID. A special binary number called a Subnet Mask is used to determine the size of the network, subnet, and host IDs. The original IPv4 only supported 254 networks, so in 1981 the Internet addressing specification was changed to a classful network architecture. Classful network design allowed for a larger number of individual networks. The first three bits of an IP address determined its class. Three classes (A, B, and C) were defined for normal computer communication (Unicast). The size of the network ID was based on the class of the IP address. Each class used more octets for the network ID, making the host ID smaller and reducing the number of possible hosts. Historical classful network architecture Class First octet in binary Range of first octet Network ID Host ID Number of networks Number of addresses A 0XXXXXXX 0 - 127 a b.c.d 27 = 128 224 = 16,777,216 B 10XXXXXX 128 - 191 a.b c.d 214 = 16,384 216 = 65,536 C 110XXXXX 192 - 223 a.b.c d 221 = 2,097,152 28 = 256 D 1110XXXX 224 - 254 a.b.c.d e 223 = 2,100,199 29 = 512 Classful networks have been replaced by Classless Inter-Domain Routing (CIDR) since 1993. CIDR also provides a network address and host address. CIDR does not have classes, which means network and host address sizes don't have to be in octets. An IPv4 Address in CIDR notation looks like 192.168.0.14/24 The slash and number represent the amount of bits that the network id uses, in this case 24 or 4 octets. IP Version 6 Because IPv4 is only 32 bits, the number of available addresses will run out. To prevent this, an organization called the Institute of Electrical and Electronics Engineers (IEEE) created IP Version 6 (IPv6), which will eventually replace IPv4. IP Version 6 uses 16 octets, or 128 bits in total. Octets in IPv6 are written in hexadecimal, and separated by colons (:). An IPv6 address might look like this: 2001:0db8:85a3:0000:0000:8a2e:0370:7334 An IPv6 address can be long and this can lead to mistakes when typing them into the computer or writing them down. There are two ways in which an IPv6 address can be made shorter without leaving anything out: • Leading zeroes can be left out: 2001:0db8:00b8:0008:0000:0000:0000:0001 becomes 2001:db8:b8:8:0:0:0:1 • Any number of sequential, all-zero 'chunks' may be compressed to simply ::. This can be done only once in the same address: 2001:0db8:0000:0000:0000:0000:0000:0001 could be written as 2001:db8::1 Other versions Versions before IPv4 were experimental and never widely used. Version 5 was used exclusively for the Internet Stream Protocol, which was also never widely used. IP address Facts for Kids. Kiddle Encyclopedia.
__label__pos
0.767473
How do I create Pages and how do they work? 1 min. readlast update: 01.27.2023 You are not confined to just the 32 keys (or 15 on the MK2). You can create many many pages of buttons and navigate through them. On your main page, click the Plus sign to add a page. This will take you to the new page and place a back arrow to allow you the ability to jump back to your first page. On the first page it will add a forward arrow to navigate to page 2. You can move these navigation keys by dragging and dropping where you want them to be. undefined   Was this article helpful?
__label__pos
0.936249
Our Blog Incorporating cost into appsec metrics for organisations Reading time ~17 min A longish post, but this wasn’t going to fit into 140 characters. This is an argument pertaining to security metrics, with a statement that using pure vulnerability count-based metrics to talk about an organisation’s application (in)security is insufficient, and suggests an alternative approach. Comments welcome. Current metrics Metrics and statistics are certainly interesting (none of those are infosec links). Within our industry, Verizon’s Data Breach Investigations Report (DBIR) makes a splash each year, and Veracode are also receiving growing recognition for their State of Software Security (SOSS). Both are interesting to read and contain much insight. The DBIR specifically examines and records metrics for breaches, a post-hoc activity that only occurs once a series of vulnerabilities have been found and exploited by ruffians, while the SOSS provides insight into the opposing end of a system’s life-cycle by automatically analysing applications before they are put into production (in a perfect world… no doubt they also examine apps that are already in production). Somewhat tangentially, Dr Geer wrote recently about a different metric for measuring the overall state of Cyber Security, we’re currently at a 1021.6. Oh noes! Apart from the two bookends (SOSS and DBIR), other metrics are also published. From a testing perspective, WhiteHat releases perhaps the most well-known set of metrics for appsec bugs, and in years gone by, Corsaire released statistics covering their customers. Also in 2008, WASC undertook a project to provide metrics with data sourced from a number of companies, however this too has not seen recent activity (last edit on the site was over a year ago). WhiteHat’s metrics measure the number of serious vulnerabilities in each site (High, Critical, Urgent) and then slice and dice this based on the vulnerability’s classification, the organisation’s size, and the vertical within which they lie. WhiteHat is also in the fairly unique position of being able to record remediation times with a higher granularity than appsec firms that engage with customers through projects rather than service contracts. Corsaire’s approach was slightly different; they recorded metrics in terms of the classification of the vulnerability, its impact and the year within which the issue was found. Their report contained similar metrics to the WhiteHat report (e.g. % of apps with XSS), but the inclusion of data from multiple years permitted them to extract trends from their data. (No doubt WhiteHat have trending data, however in the last report it was absent). Lastly, WASC’s approach is very similar to WhiteHat’s, in that a point in time is selected and vulnerability counts according to impact and classification are provided for that point. Essentially, each of these approaches uses a base metric of vulnerability tallies, which are then viewed from different angles (classification, time-series, impact). While the metrics are collected per-application, they are easily aggregated into organisations. Drawback to current approaches Problems with just counting bugs are well known. If I ask you to rate two organisations, the Ostrogoths and the Visigoths, on their effectiveness in developing secure applications, and I tell you that the Ostrogoths have 20 critical vulnerabilities across their applications, while the Visigoths only have 5, without further data it seems that the Visigoths have the lead. However, if we introduce the fact that the Visigoths have a single application in which all 5 issues appear, while the Ostrogoths spread their 20 bugs across 10 applications, then it’s not so easy to crow for the Visigoths, who average 5 bugs per application as oppossed to the Ostrogoth’s 2. Most reports take this into account, and report on a percentage of applications that exhibit a particular vulnerability (also seen as the probability that a randomly selected application will exhibit that issue).  Unfortunately, even taking into account the number of applications is not sufficient; an organisation with 2 brochure-ware sites does not face the same risk as an organisation with 2 transaction-supporting financial applications, and this is where appsec metrics start to fray. In the extreme edges of ideal metrics, the ability to factor in chains of vulnerabilities that individually present little risk, but combined is greater than the sum of the parts, would be fantastic. This aspect is ignored by most (including us), as a fruitful path isn’t clear. Why count in the first place? Let’s take a step back, and consider why we produce metrics; with the amount of data floating around, it’s quite easy to extract information and publish, thereby earning a few PR points. However, are the metrics meaningful? The quick test is to ask whether they support decision making. For example, does it matter that external attackers were present in an overwhelming number incidents recorded in the DBIR? I suspect that this is an easy “yes”, since this metric justifies shifting priorities to extend perimeter controls rather than rolling out NAC. One could just as easily claim that absolute bug counts are irrelevant and that they need to be relative to some other scale; commonly the number of applications an organisation has. However in this case, if the metrics don’t provide enough granularity to accurately position your organisation with respect to others that you actually care about, then they’re worthless to you in decision making. What drives many of our customers is not where they stand in relation to every other organisation, but specifically their peers and competitors. It’s slightly ironic that oftentimes the more metrics released, the less applicable they are to individual companies. As a bank, knowing you’re in the top 10% of a  sample of banking organisations means something; when you’re in the highest 10% of a survey that includes WebGoat clones, the results are much less clear. In Seven Myths About Information Security Metrics, Dr Hinson raises a number of interesting points about security metrics. They’re mostly applicable to security awareness, however they also carry across into other security activities. At least two serve my selfish needs, so I’ll quote them here: Myth 1: Metrics must be “objective” and “tangible” There is a subtle but important distinction between measuring subjective factors and measuring subjectively. It is relatively easy to measure “tangible” or objective things (the number of virus incidents, or the number of people trained). This normally gives a huge bias towards such metrics in most measurement systems, and a bias against measuring intangible things (such as level of security awareness). In fact, “intangible” or subjective things can be measured objectively, but we need to be reasonably smart about it (e.g., by using interviews,surveys and audits). Given the intangible nature of security awareness, it is definitely worth putting effort into the measurement of subjective factors, rather than relying entirely on easy-to-measure but largely irrelevant objective factors. [G Hinson] and Myth 3: We need absolute measurements For some unfathomable reason, people often assume we need “absolute measures”—height in meters, weight in pounds, etc. This is nonsense! If I line up the people in your department against a wall, I can easily tell who is tallest, with no rulers in sight. This yet again leads to an unnecessary bias in many measurement systems. In fact, relative values are often more useful than absolute scales, especially to drive improvement. Consider this for instance: “Tell me, on an (arbitrary) scale from one to ten, how security aware are the people in your department are? OK, I’ll be back next month to ask you the same question!” We need not define the scale formally, as long as the person being asked (a) has his own mental model of the processes and (b) appreciates the need to improve them. We needn’t even worry about minor variations in the scoring scale from month to month, as long as our objective of promoting improvement is met. Benchmarking and best practice transfer are good examples of this kind of thinking. “I don’t expect us to be perfect, but I’d like us to be at least as good as standard X or company Y. [G Hinson] While he writes from the view of an organisation trying to decide whether their security awareness program is yielding dividends, the core statements are applicable for organisations seeking to determine the efficacy of their software security program. I’m particularly drawn by two points: the first is that intangibles are as useful as concrete metrics, and the second is that absolute measurements aren’t necessary, comparative ordering is sometimes enough. Considering cost It seems that one of the intangibles that currently published appsec metrics don’t take into account, is cost to the attacker. No doubt behind each vulnerability’s single impact rating are a multitude of factors that contribute, one of which may be something like “Complexity” or “Ease of Exploitation”. However, measuring effort in this way is qualitative and only used as a component in the final rating. I’m suggesting that cost (interchangeable with effort) be incorporated into the base metric used when slicing datasets into views. This will allow you to understand the determination an attacker would require when facing one of your applications. Penetration testing companies are in a unique position to provide this estimate; a tester unleashed on an application project is time-bounded and throws their experience and knowledge at the app. At the end, one can start to estimate how much effort was required to produce the findings and, over time, gauge whether your testers are increasing their effort to find issues (stated differently, do they find fewer bugs in the same amount of time?). If these metrics don’t move in the right direction, then one might conclude that your security practices are also not improving (providing material for decision making). Measuring effort, or attacker cost, is not new to security but it’s mostly done indirectly through the sale of exploits (e.g. iDefence, ZDI). Even here, effort is not directly related to the purchase price, which is also influenced by other factors such as the number of deployed targets etc. In any case, for custom applications that testers are mostly presented with, such public sources should be of little help (if your testers are submitting findings to ZDI, you have bigger problems). Every now and then, an exploit dev team will mention how long it took them to write an exploit for some weird Windows bug; these are always interesting data points, but are not specific enough for customers and the sample size is low. Ideally, any measure of an attacker’s cost can take into account both time and their exclusivity (or experience), however in practice this will be tough to gather from your testers. One could base it on their hourly rate, if your testing company differentiates between resources. In cases where they don’t, or you’re seeking to keep the metric simple, then another estimate for effort is the number of days spent on testing. Returning to our sample companies, if the 5 vulnerabilities exposed in the Visigoth’s each required, on average, a single day to find, while the Ostrogoth’s 20 bugs average 5 days each, then the effort required by an attacker is minimised by choosing to target the Visigoths. In other words, one might argue that the Visigoths are more at risk than the Ostrogoths. Metricload, take 1 In our first stab at incorporating effort, we selected an estimator of findings-per-day (or finding rate) to be the base metric against which the impact, classification, time-series and vertical attributes would be measured. From this, it’s apparent that, subject to some minimum, the number of assessments performed is less important than the number of days worked. I don’t yet have a way to answer what the minimum number of assessments should be, but it’s clear that comparing two organisations where one has engaged with us 17 times and the other once, won’t yield reliable results. With this base metric, it’s then possible to capture historical assessment data and provide both internal-looking metrics for an organisation as well as comparative metrics, if the testing company is also employed by your competitors. Internal metrics are the usual kinds (impact, classification, time-series), but the comparison option is very interesting. We’re in the fortunate position of working with many top companies locally, and are able to compare competitors using this metric as a base. The actual ranking formulae is largely unimportant here. Naturally, data must be anonymised so as to protect names; one could provide the customer with their rank only. In this way, the customer has an independent notion of how their security activities rate against their peers without embarrassing the peers. Inverting the findings-per-day metric provide the average number of days to find a particular class of vulnerability, or impact level. That is, if a client averages 0.7 High or Critical findings per testing day, then on average it takes us 1.4 days of testing to find an issue of great concern, which is an easy way of expressing the base metric. What, me worry? Without doubt, the findings-per-day estimator has drawbacks. For one, it doesn’t take into consideration the tester’s skill level (but this is also true of all appsec metrics published). This could be extended to include things like hourly rates, which indirectly measure skill. Also, the metric does not take into account functionality exposed by the site; if an organisation has only brochure-ware sites then it’s unfair to compare them against transactional sites; this is mitigated at the time of analysis by comparing against peers rather than the entire sample group and also, to a degree, in the scoping of the project as a brochure-ware site would receive minimum testing time if scoped correctly. As mentioned above, a minimum number of assessments would be needed before the metric is reliable; this is a hint at the deeper problems that randomly selected project days are not independent. An analyst stuck on a 4 week project is focused on a very small part of the broader organisation’s application landscape. We counter this bias by including as many projects of the same type as possible. Thought.rand() If you can tease it out of them, finding rates could be an interesting method of comparing competing testing companies; ask “when testing companies of our size and vertical, what is your finding rate?”, though there’d be little way to verify any claims. Can you foresee a day when testing companies advertise using their finding rate as the primary message? Perhaps… This metric would also be very useful to include in each subsequent report for the customer, with every report containing an evaluation against their longterm vulnerability averages. Field testing Using the above findings-per-day metric as a base, we performed an historical analysis for a client on work performed over a number of years, with a focus on answering the following questions for them: 1. On average, how long does it take to find issues at each Impact level (Critical down to Informational)? 2. What are the trends for the various vulnerability classes? Does it take more or less time to find them year-on-year? 3. What are the Top 10 issues they’re currently facing? 4. Where do they stand in relation to anonymised competitor data? In preparation for the exercise, we had to capture a decent number of past reports, which was most time-consuming. What this highlighted for us was how paper-based reports and reporting is a serious hinderance to extracting useful data, and has provided impetus internally for us to look into alternatives. The derived statistics were presented to the client in a workshop, with  representatives from a number of the customer’s teams present. We had little insight into the background to many of the projects, and it was very interesting to hear the analysis and opinions that emerged as they digested the information. For example, one set of applications exhibited particularly poor metrics from a security standpoint. Someone highlighted the fact that these were outsourced applications, which raised a discussion within the client about the pros and cons on using third party developers. It also suggests that many further attributes can be attached to the data that is captured: internal or third party, development lifecycle model (is agile producing better code for you than other models?), team size, platforms, languages, frameworks etc. As mentioned above, a key test for metrics is where they support decision making, and the feedback from the client was positive in this regard. And now? In summary, current security metrics as they relate to talking about an organisation’s application security suffers from a resolution problem; they’re not clear enough. Attacker effort is not modeled when discussing vulnerabilities, even though it’s a significant factor when trying to get a handle on the ever slippery notion of risk. One approximation for attacker effort is to create a base-metric of the number of findings-per-day for a broad set of applications belonging to an organisation, and use those to evaluate which kinds of vulnerabilities are typically present while at the same time clarifying how much effort an attacker requires in order to exploit it. This idea is still being fleshed out. If you’re aware of previous work in this regard or have suggestions on how to improve it (even abandon it) please get in contact. Oh, and if you’ve read this far and are looking for training, we’re at BH in August.
__label__pos
0.520991
Beefy Boxes and Bandwidth Generously Provided by pair Networks Your skill will accomplish what the force of many cannot   PerlMonks   Comment on ( #3333=superdoc: print w/ replies, xml ) Need Help?? Author:Chromatic Table of Contents Foreword Preface Part 1. Why XP? Who Cares About Process, Anyway? The XP Equation XP Values Communication Feedback Simplicity Courage Assuming Sufficiency Sufficient Time Sufficient Resources Constant Cost of Change Developer Effectiveness Freedom to Experiment Part 2. Extreme Programming Practices Coding Practices 1. Code and Design Simply 2. Refactor Mercilessly 3. Develop Coding Standards 4. Develop a Common Vocabulary Developer Practices 1. Adopt Test-Driven Development 2. Practice Pair Programming 3. Adopt Collective Code Ownership 4. Integrate Continually Business Practices 1. Add a Customer to the Team 2. Play the Planning Game 3. Release Regularly 4. Work at a Sustainable Pace Part 3. XP Events Iteration Planning Stories and Tasks Estimates and Schedules The First Iteration The Iteration Releasing Part 4. Extreme Programming Artifacts Story Cards Task Cards The Bullpen Part 5. Roles in Extreme Programming The Customer Customer Rights Customer Responsibilities The Developer Developer Rights Developer Responsibilities Supplementary Roles The Tracker The Coach Part 6. Coding, XP Style Do the Simplest Thing That Could Possibly Work You Aren't Gonna Need It Once and Only Once Part 7. Adopting XP Before You Start Eliminating Fear and Working Together Starting Feedback Including Managers and Customers Now That You're Extreme Part 8. Further Resources XP Resources Index While not a programmer by job title, I do write a lot of scripts, and I try to maintain the three modules that I have on CPAN. I wanted to learn about XP and at least get a basic understanding of its principles and how they all work together, with an eye towards eventually adopting some of its practices into how I code. Chromatic starts off by explaining the reasons for using XP. "The goal of software development is to create good systems that meet business needs with the available resources. XP can help you do just that." The material is broken down into eight sections, each one covering a specific topic. Section 1 deals with describing XP and its values, and the problem that it was intended to solve. Section 2 dives in the meat of XP, covering each of the twelve core practices of XP, breaking them down into three groups, Coding, Developing, and Business. Section 3 covers the various events that happen within an XP development cycle. He breaks down the first phase (Iteration Planning) into three sections, then goes on to cover the Iteration and Releasing. Section 4 deals with the physical items that XP uses to keep track of events, such as story and task cards. The Bullpen, which is defined as a large open area with plenty of powerful pc's with room for at least two programmers at each one (Pair Programming), several large chalkboards or whiteboards, and plenty of sticky notes and pens. Section 5 deals with the roles in XP, and the rights and responsibilities of each of those roles. As developers, we need to follow team guidelines, implement only what is necessary, and communicate constantly with the customer. Section 6 deals with the coding style of XP, which follows three basic principles. "Do the simplest thing that could work", "You aren't gonna need it", and "Once and only once". Chromatic also points out that the last one is quite similar to the principle mentioned in The Pragmatic Programmer, "Don't Repeat Yourself". Sections 7 and 8 deal with how to go about implementing XP and where you can find more resources on this subject. This book was very well written and concise, and it lays out precisely the necessary strategies and information needed to start down the XP path. I emphatically recommend this book for anyone who uses or is going to be using XP for software development. In reply to Extreme Programming Pocket Guide by TStanley Title: Use:  <p> text here (a paragraph) </p> and:  <code> code here </code> to format your post; it's "PerlMonks-approved HTML": • Posts are HTML formatted. Put <p> </p> tags around your paragraphs. Put <code> </code> tags around your code and data! • Titles consisting of a single word are discouraged, and in most cases are disallowed outright. • Read Where should I post X? if you're not absolutely sure you're posting in the right place. • Please read these before you post! — • Posts may use any of the Perl Monks Approved HTML tags: a, abbr, b, big, blockquote, br, caption, center, col, colgroup, dd, del, div, dl, dt, em, font, h1, h2, h3, h4, h5, h6, hr, i, ins, li, ol, p, pre, readmore, small, span, spoiler, strike, strong, sub, sup, table, tbody, td, tfoot, th, thead, tr, tt, u, ul, wbr • You may need to use entities for some characters, as follows. (Exception: Within code tags, you can put the characters literally.)         For:     Use: & &amp; < &lt; > &gt; [ &#91; ] &#93; • Link using PerlMonks shortcuts! What shortcuts can I use for linking? • See Writeup Formatting Tips and other pages linked from there for more info. • Log In? Username: Password: What's my password? Create A New User Chatterbox? and the web crawler heard nothing... How do I use this? | Other CB clients Other Users? Others imbibing at the Monastery: (8) As of 2015-11-25 01:48 GMT Sections? Information? Find Nodes? Leftovers? Voting Booth? What would be the most significant thing to happen if a rope (or wire) tied the Earth and the Moon together? Results (667 votes), past polls
__label__pos
0.67248
FedoraRHEL Based How To Install Cockpit on Fedora 38 Install Cockpit on Fedora 38 In this tutorial, we will show you how to install Cockpit on Fedora 38. Cockpit, a powerful web-based server management tool, revolutionizes the way administrators handle Fedora servers. Its intuitive interface simplifies complex tasks, making server administration accessible even to novices. This article assumes you have at least basic knowledge of Linux, know how to use the shell, and most importantly, you host your site on your own VPS. The installation is quite simple and assumes you are running in the root account, if not you may need to add ‘sudo‘ to the commands to get root privileges. I will show you the step-by-step installation of the Cockpit web-based graphical interface for managing Linux servers on a Fedora 38. Prerequisites • A server running one of the following operating systems: Fedora 38. • It’s recommended that you use a fresh OS install to prevent any potential issues. • SSH access to the server (or just open Terminal if you’re on a desktop). • An active internet connection. You’ll need an internet connection to download the necessary packages and dependencies for Cockpit. • A non-root sudo user or access to the root user. We recommend acting as a non-root sudo user, however, as you can harm your system if you’re not careful when acting as the root. Install Cockpit on Fedora 38 Step 1. Before we can install Cockpit on Fedora 38, it’s important to ensure that our system is up-to-date with the latest packages. This will ensure that we have access to the latest features and bug fixes and that we can install Cockpit without any issues: sudo dnf upgrade --refresh sudo dnf install dnf-plugins-core Step 2. Installing Cockpit on Fedora 38.  Open the terminal and run the following command to install Cockpit: sudo dnf install cockpit Once the installation is complete, start the Cockpit service by running the following command: sudo systemctl start cockpit To enable Cockpit to start automatically at boot time, run the following command: sudo systemctl enable cockpit Step 3. Firewall Configuration. Configure the firewall to permit Cockpit connections by executing the following command: sudo firewall-cmd --add-service=cockpit --permanent sudo firewall-cmd --reload Step 4. Accessing Cockpit Web Interface. Once Cockpit is installed and running, you can access it using a web browser. Open your web browser and enter the following URL: https://localhost:9090 Install Cockpit on Fedora 38 You will be prompted to enter your system username and password to log in to Cockpit. After logging in, you will be presented with the Cockpit dashboard, where you can manage your system. Step 5. Troubleshooting and Tips. • A. Common Issues and Solutions: 1. Unable to Access Cockpit: • Ensure the firewall settings are correctly configured to allow connections to port 9090. • Double-check the URL entered in the web browser for accuracy. 2. Cockpit Installation Fails: • Confirm that the Cockpit repository is properly configured, and the server has an active internet connection. • If installation issues persist, try updating your Fedora system and retry the installation. • B. Tips for Efficient Server Management with Cockpit: 1. Regularly Monitor System Performance: • Keep a close eye on system metrics to detect and address performance bottlenecks promptly. 2. Implement Regular Backups: • Regularly back up critical data to ensure data integrity and facilitate disaster recovery. 3. Utilize the Terminal for Advanced Configurations: • While Cockpit streamlines most server management tasks, familiarize yourself with the terminal for intricate configurations. Congratulations! You have successfully installed Cockpit. Thanks for using this tutorial for installing Cockpit web-based graphical interface for managing Linux servers on your Fedora 38 system. For additional help or useful information, we recommend you check the official Cockpit website. VPS Manage Service Offer If you don’t have time to do all of this stuff, or if this is not your area of expertise, we offer a service to do “VPS Manage Service Offer”, starting from $10 (Paypal payment). Please contact us to get the best deal! r00t r00t is a seasoned Linux system administrator with a wealth of experience in the field. Known for his contributions to idroot.us, r00t has authored numerous tutorials and guides, helping users navigate the complexities of Linux systems. His expertise spans across various Linux distributions, including Ubuntu, CentOS, and Debian. r00t's work is characterized by his ability to simplify complex concepts, making Linux more accessible to users of all skill levels. His dedication to the Linux community and his commitment to sharing knowledge makes him a respected figure in the field. Back to top button
__label__pos
0.562987