51Degrees Device Detection Java 4.4
Device detection services for 51Degrees Pipeline
MetadataOnPrem.java
The device detection data file contains metadata that can provide additional information about the various records in the data model. This example shows how to access this data and display the available values.
To help navigate the data, it's useful to have an understanding of the types of records that are present:
• Component - A record relating to a major aspect of the entity making a web request. There are currently 4 components: Hardware, Software Platform (OS), Browser and Crawler.
• Profile - A record containing the details for a specific instance of a component. An example of a hardware profile would be the profile for the iPhone 13. An example of a platform profile would be Android 12.1.0.
• Property - Each property will have a specific value (or values) for each profile. An example of a hardware property is 'IsMobile'. An example of a browser property is 'BrowserName'.
The example will output each component in turn, with a list of the properties associated with each component. Some of the possible values for each property are also displayed. There are too many profiles to display, so we just list the number of profiles for each component.
Finally, the evidence keys that are accepted by device detection are listed. These are the keys that, when added to the evidence collection in flow data, could have some impact on the result returned by device detection.
This example is available in full on GitHub.
This example requires a local data file. The free 'Lite' data file can be acquired by pulling the git submodules under this repository (run `git submodule update --recursive`) or from the device-detection-data GitHub repository.
The Lite data file is only used for illustration, and has limited accuracy and capabilities. Find out about the more capable data files that are available on our pricing page.
/*
* This Original Work is copyright of 51 Degrees Mobile Experts Limited.
* Copyright 2022 51 Degrees Mobile Experts Limited, Davidson House,
* Forbury Square, Reading, Berkshire, United Kingdom RG1 3EU.
*
* This Original Work is licensed under the European Union Public Licence
* (EUPL) v.1.2 and is subject to its terms as set out below.
*
* If a copy of the EUPL was not distributed with this file, You can obtain
* one at https://opensource.org/licenses/EUPL-1.2.
*
* The 'Compatible Licences' set out in the Appendix to the EUPL (as may be
* amended by the European Commission) shall be deemed incompatible for
* the purposes of the Work and the provisions of the compatibility
* clause in Article 5 of the EUPL shall not apply.
*
* If using the Work as, or as part of, a network application, by
* including the attribution notice(s) required under Article 5 of the EUPL
* in the end user terms of the application under an appropriate heading,
* such notice(s) shall fulfill the requirements of that article.
*/
package fiftyone.devicedetection.examples.console;
import fiftyone.devicedetection.examples.shared.DataFileHelper;
import fiftyone.devicedetection.hash.engine.onpremise.flowelements.DeviceDetectionHashEngine;
import fiftyone.devicedetection.hash.engine.onpremise.flowelements.DeviceDetectionHashEngineBuilder;
import fiftyone.pipeline.core.data.EvidenceKeyFilterWhitelist;
import fiftyone.pipeline.engines.Constants;
import fiftyone.pipeline.engines.fiftyone.data.ComponentMetaData;
import fiftyone.pipeline.engines.fiftyone.data.ProfileMetaData;
import fiftyone.pipeline.engines.fiftyone.data.ValueMetaData;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import static fiftyone.common.testhelpers.LogbackHelper.configureLogback;
import static fiftyone.pipeline.util.FileFinder.getFilePath;
public class MetadataOnPrem {
private static final Logger logger = LoggerFactory.getLogger(MetadataOnPrem.class);
/* In this example, by default, the 51degrees "Lite" file needs to be somewhere in the project
space, or you may specify another file as a command line parameter.
Note that the Lite data file is only used for illustration, and has limited accuracy and
capabilities. Find out about the Enterprise data file here: https://51degrees.com/pricing */
public static String LITE_V_4_1_HASH = "51Degrees-LiteV4.1.hash";
public static void main(String[] args) throws Exception {
configureLogback(getFilePath("logback.xml"));
String dataFile = args.length > 0 ? args[0] : LITE_V_4_1_HASH;
run(dataFile, System.out);
}
public static void run(String dataFile, OutputStream output) throws Exception {
logger.info("Running MetadataOnPrem example");
String dataFileLocation;
try {
dataFileLocation = getFilePath(dataFile).getAbsolutePath();
} catch (Exception e) {
DataFileHelper.cantFindDataFile(dataFile);
throw e;
}
// Build a new on-premise Hash engine with the low memory performance profile.
// Note that there is no need to construct a complete pipeline in order to access
// the meta-data.
// If you already have a pipeline and just want to get a reference to the engine
// then you can use `DeviceDetectionHashEngine engine = pipeline.getElement(DeviceDetectionHashEngine.class);`
try (DeviceDetectionHashEngine ddEngine =
new DeviceDetectionHashEngineBuilder(LoggerFactory.getILoggerFactory())
// We use the low memory profile as its performance is sufficient for this
// example. See the documentation for more detail on this and other
// configuration options:
// http://51degrees.com/documentation/_device_detection__features__performance_options.html
// http://51degrees.com/documentation/_features__automatic_datafile_updates.html
.setPerformanceProfile(Constants.PerformanceProfiles.LowMemory)
// inhibit auto-update of the data file for this test
.setAutoUpdate(false)
.setDataFileSystemWatcher(false)
.setDataUpdateOnStartup(false)
.build(dataFileLocation, false)){
PrintWriter writer = new PrintWriter(output);
logger.info("Listing Components");
outputComponents(ddEngine, writer);
writer.println();
writer.flush();
logger.info("Listing Profile Details");
outputProfileDetails(ddEngine, writer);
writer.println();
writer.flush();
logger.info("Listing Evidence Key Details");
outputEvidenceKeyDetails(ddEngine, writer);
writer.println();
writer.flush();
DataFileHelper.logDataFileInfo(ddEngine);
}
}
private static void outputEvidenceKeyDetails(DeviceDetectionHashEngine ddEngine,
PrintWriter output){
output.println();
if (ddEngine.getEvidenceKeyFilter() instanceof EvidenceKeyFilterWhitelist) {
// If the evidence key filter extends EvidenceKeyFilterWhitelist then we can
// display a list of accepted keys.
EvidenceKeyFilterWhitelist filter = (EvidenceKeyFilterWhitelist) ddEngine.getEvidenceKeyFilter();
output.println("Accepted evidence keys:");
for (Map.Entry<String, Integer> entry : filter.getWhitelist().entrySet()){
output.println("\t" + entry.getKey());
}
} else {
output.format("The evidence key filter has type " +
"%s. As this does not extend " +
"EvidenceKeyFilterWhitelist, a list of accepted values cannot be " +
"displayed. As an alternative, you can pass evidence keys to " +
"filter.include(string) to see if a particular key will be included " +
"or not.\n", ddEngine.getEvidenceKeyFilter().getClass().getName());
output.println("For example, header.user-agent " +
(ddEngine.getEvidenceKeyFilter().include("header.user-agent") ?
"is " : "is not ") + "accepted.");
}
}
private static void outputProfileDetails(DeviceDetectionHashEngine ddEngine,
PrintWriter output) {
// Group the profiles by component and then output the number of profiles
// for each component.
Map<String, List<ProfileMetaData>> groups =
StreamSupport.stream(ddEngine.getProfiles().spliterator(), false)
.collect(Collectors.groupingBy(p -> p.getComponent().getName()));
groups.forEach((k,v)->output.format("%s Profiles: %d\n", k , v.size()));
}
// Output the component name as well as a list of all the associated properties.
// If we're outputting to console then we also add some formatting to make it
// more readable.
private static void outputComponents(DeviceDetectionHashEngine ddEngine, PrintWriter output){
ddEngine.getComponents().forEach(c -> {
output.println("Component - "+ c.getName());
outputProperties(c, output);
});
}
private static void outputProperties(ComponentMetaData component, PrintWriter output) {
if (component.getProperties().iterator().hasNext() == false) {
output.println(" ... no properties");
return;
}
component.getProperties()
.forEach(property-> {
// Output some details about the property.
// If we're outputting to console then we also add some formatting to make it
// more readable.
output.format(" Property - %s [Category: %s] (%s)\n " +
"Description: %s\n",
property.getName(),
property.getCategory(),
property.getType().getName(),
property.getDescription());
// Next, output a list of the possible values this property can have.
// Most properties in the Device Metrics category do not have defined
// values so exclude them.
if (property.getCategory().equals("Device Metrics")==false) {
StringBuilder values = new StringBuilder(" Possible " +
"values: ");
Spliterator<ValueMetaData> spliterator2 =
property.getValues().spliterator();
StreamSupport.stream(spliterator2, false)
.limit(20)
.forEach(value -> {
// add value
values.append(truncateToNl(value.getName()));
// add description if exists
String d = value.getDescription();
if (Objects.nonNull(d) && d.isEmpty() == false) {
values.append("(")
.append(d)
.append(")");
}
values.append(",");
});
if (spliterator2.estimateSize() > 20) {
values.append(" + more ...");
}
output.println(values);
}
});
}
// Truncate value if it contains newline (esp for the JavaScript property)
private static String truncateToNl(String text) {
String[] lines = text.split("\n");
Optional<String> result = Arrays.stream(lines).filter(s -> !s.isEmpty()).findFirst();
return result.orElse("[empty]") + (lines.length > 1 ? "..." : "");
}
}
Some people said it felt like a typical/knowledge-based problem, but I enjoyed it (since I don't remember ever seeing it before).
solution
Bipartite matching. Runs in $O(N^2M^2)$, which passes.
First, reduce the problem to counting how many $2 \times 1$ dominoes can be formed. This follows from observations such as: if a $2 \times 1$ domino can be formed for free, forming it never hurts; and since forming one removes one white and one black cell, it doesn't affect anything else.
As for how many $2 \times 1$ dominoes can be formed: that is bipartite matching. The editorial has a nice figure, so take a look there.
implementation
#include <iostream>
#include <vector>
#include <limits>
#include <functional>
#define repeat(i,n) for (int i = 0; (i) < (n); ++(i))
using namespace std;
template <typename T, typename X> auto vectors(T a, X x) { return vector<T>(x, a); }
template <typename T, typename X, typename Y, typename... Zs> auto vectors(T a, X x, Y y, Zs... zs) { auto cont = vectors(a, y, zs...); return vector<decltype(cont)>(x, cont); }
struct edge_t { int to, cap, rev; };
int maximum_flow_destructive(int s, int t, vector<vector<edge_t> > & g) { // ford fulkerson, O(EF)
int n = g.size();
vector<bool> used(n);
function<int (int, int)> dfs = [&](int i, int f) {
if (i == t) return f;
used[i] = true;
for (edge_t & e : g[i]) {
if (used[e.to] or e.cap <= 0) continue;
int nf = dfs(e.to, min(f, e.cap));
if (nf > 0) {
e.cap -= nf;
g[e.to][e.rev].cap += nf;
return nf;
}
}
return 0;
};
int result = 0;
while (true) {
used.clear(); used.resize(n);
int f = dfs(s, numeric_limits<int>::max());
if (f == 0) break;
result += f;
}
return result;
}
void add_edge(vector<vector<edge_t> > & g, int from, int to, int cap) {
g[from].push_back((edge_t) { to, cap, int(g[ to].size() ) });
g[ to].push_back((edge_t) { from, 0, int(g[from].size() - 1) });
}
const int dy[4] = { -1, 1, 0, 0 };
const int dx[4] = { 0, 0, 1, -1 };
int main() {
// input
int h, w; cin >> h >> w;
vector<vector<bool> > f = vectors(false, h, w);
repeat (y,h) repeat (x,w) {
char c; cin >> c;
f[y][x] = c != '.';
}
// compute
auto is_on_field = [&](int y, int x) { return 0 <= y and y < h and 0 <= x and x < w; };
vector<vector<edge_t> > g(h * w + 2);
auto index = [&](int y, int x) { return y * w + x; };
const int src = h * w;
const int dst = h * w + 1;
int white = 0, black = 0;
repeat (y,h) repeat (x,w) {
if (not f[y][x]) continue;
if (y % 2 == x % 2) {
white += 1;
add_edge(g, src, index(y, x), 1);
repeat (i,4) {
int ny = y + dy[i];
int nx = x + dx[i];
if (not is_on_field(ny, nx)) continue;
if (not f[ny][nx]) continue;
add_edge(g, index(y, x), index(ny, nx), 1);
}
} else {
black += 1;
add_edge(g, index(y, x), dst, 1);
}
}
int flow = maximum_flow_destructive(src, dst, g);
int ans = 0;
ans += flow * 100;
ans += (min(white, black) - flow) * 10;
ans += (max(white, black) - (min(white, black) - flow) - flow);
// output
cout << ans << endl;
return 0;
}
Vuex Source Code Analysis
染陌同学 | 2017-11-14
Mobile development · vuex · Vue.js
Preface
Because I'm very interested in Vue.js, and my day-to-day work uses the Vue.js stack, I spent some time over the past few months studying the Vue.js source code, and summarized and wrote up what I learned.
The original article lives at: https://github.com/answershuto/learnVue
While studying, I added Chinese annotations to the Vue source (https://github.com/answershuto/learnVue/tree/master/vue-src) as well as the Vuex source (https://github.com/answershuto/learnVue/tree/master/vuex-src), in the hope that they help others who want to read the source.
My understanding may be off in places; feel free to open an issue to point things out, so we can learn and improve together.
Vuex
When building complex applications with Vue.js, we often run into multiple components sharing the same state, or multiple components updating the same state. With a small codebase, we can maintain and modify data through inter-component communication, or pass and modify data through an event bus. But as the application grows, the code becomes hard to maintain: prop-drilling data down from parent components through deeply nested layers is fragile, and the event bus becomes tangled as components and code multiply, making the flow of data hard to trace.
So why not separate the data layer from the component layer? Pull the data into a single global Store, and make the component layer thinner, dedicated to displaying and operating on data. All data changes must go through the global Store, forming a one-way data flow that makes state changes "predictable".
Vuex is a state-management library designed specifically for the Vue.js framework. It borrows the basic ideas of Flux and Redux, extracting shared data into a global singleton, and leverages Vue.js's reactivity system for efficient state management and updates. Precisely because Vuex uses Vue.js's internal "reactivity mechanism", it is a framework designed for, and tightly coupled to, Vue.js (the upside is simplicity and efficiency; the downside is that it can only be used with Vue.js). For usage details and the API, see the Vuex official site.
First take a look at this Vuex data-flow diagram; anyone familiar with Vuex should recognize it.
Vuex implements a one-way data flow: a global State holds the data, and all modifications to the State must go through Mutations. Mutations also expose a subscriber pattern so external plugins can be notified of State updates. All asynchronous work must go through Actions (a common case is calling a backend API to fetch and update data); Actions cannot modify the State directly either, and must commit Mutations to change it. Finally, the view re-renders based on State changes. Vuex relies on Vue's internal two-way data binding and needs to new a Vue instance to make the state "reactive", which is why Vuex is a state-management library designed specifically for Vue.js.
Installation
Anyone who has used Vuex knows that installing it is very simple: provide a store, then execute the two lines of code below, and Vuex is wired in.
Vue.use(Vuex);
/* put the store into the options used when creating the Vue instance */
new Vue({
el: '#app',
store
});
So here's the question: how does Vuex inject the store into the Vue instance?
Vue.js provides the Vue.use method for installing plugins; internally it calls the plugin's install method (when the plugin is an object) to perform the installation.
Let's look at Vuex's install implementation.
/* the install method exposed to the outside, called by Vue.use to install the plugin */
export function install (_Vue) {
if (Vue) {
/* avoid duplicate installation (Vue.use also checks internally whether the same plugin is installed twice) */
if (process.env.NODE_ENV !== 'production') {
console.error(
'[vuex] already installed. Vue.use(Vuex) should be called only once.'
)
}
return
}
/* save Vue; also used to detect duplicate installation */
Vue = _Vue
/* mix vuexInit into Vue's beforeCreate hook (Vue 2.0) or its _init method (Vue 1.0) */
applyMixin(Vue)
}
This install code does two things: it prevents Vuex from being installed twice, and it executes applyMixin, whose purpose is to run the vuexInit method that initializes Vuex. Vuex handles Vue 1.0 and 2.0 differently: for Vue 1.0, Vuex puts the vuexInit method into Vue's _init method, while for Vue 2.0 it mixes vuexInit into Vue's beforeCreate hook. Let's look at the vuexInit code.
/* Vuex's init hook, added into every Vue instance's hook list */
function vuexInit () {
const options = this.$options
// store injection
if (options.store) {
/* the presence of a store actually marks the Root node: execute store directly (when it is a function) or use store as-is (when it is not) */
this.$store = typeof options.store === 'function'
? options.store()
: options.store
} else if (options.parent && options.parent.$store) {
/* child components get $store directly from their parent, which guarantees that all components share the single global store */
this.$store = options.parent.$store
}
}
vuexInit tries to read store from options. If the current component is the root component (the Root node), options contains store, and it is assigned directly to $store. If the current component is not the root, it gets the parent component's $store reference through options.parent. This way, every component holds the same Store instance at the same memory address, and we can happily access the global Store from any component via this.$store, as the sketch below shows.
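For example, a minimal sketch (a hypothetical component, not from the original article) of a component reading that shared store:
// any component, anywhere in the tree
export default {
  computed: {
    count () {
      // this.$store was injected by vuexInit
      return this.$store.state.count
    }
  }
}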
So, what is a Store instance?
Store
The store we pass to the root component is a Store instance, constructed with the Store class that Vuex provides.
export default new Vuex.Store({
strict: true,
modules: {
moduleA,
moduleB
}
});
Let's look at the implementation of Store, starting with the constructor.
constructor (options = {}) {
// Auto install if it is not done yet and `window` has `Vue`.
// To allow users to avoid auto-installation in some cases,
// this code should be placed here. See #731
/*
In a browser environment, if the plugin has not been installed yet (!Vue checks whether it is), it installs automatically.
This allows users to avoid auto-installation in certain cases.
*/
if (!Vue && typeof window !== 'undefined' && window.Vue) {
install(window.Vue)
}
if (process.env.NODE_ENV !== 'production') {
assert(Vue, `must call Vue.use(Vuex) before creating a store instance.`)
assert(typeof Promise !== 'undefined', `vuex requires a Promise polyfill in this browser.`)
assert(this instanceof Store, `Store must be called with the new operator.`)
}
const {
/* an array of plugin functions applied to the store. Each plugin receives the store as its only argument, and can subscribe to mutations (for external data persistence, logging, or debugging) or commit mutations (for internal data, e.g. websockets or observers) */
plugins = [],
/* puts the Vuex store into strict mode: in strict mode, any modification of Vuex state outside mutation handlers throws an error */
strict = false
} = options
/* take state out of options; if state is a function, execute it, so we end up with an object */
let {
state = {}
} = options
if (typeof state === 'function') {
state = state()
}
// store internal state
/* used in strict mode to tell whether state was modified through a mutation */
this._committing = false
/* holds the actions */
this._actions = Object.create(null)
/* holds the mutations */
this._mutations = Object.create(null)
/* holds the getters */
this._wrappedGetters = Object.create(null)
/* the module collector */
this._modules = new ModuleCollection(options)
/* modules stored by namespace */
this._modulesNamespaceMap = Object.create(null)
/* holds the subscribers */
this._subscribers = []
/* a Vue instance used to implement watch */
this._watcherVM = new Vue()
// bind commit and dispatch to self
/* bind the this used by dispatch and commit to the store object itself; otherwise this.dispatch inside a component would have this pointing at the component's vm */
const store = this
const { dispatch, commit } = this
/* bind this (the Store instance itself) for dispatch and commit */
this.dispatch = function boundDispatch (type, payload) {
return dispatch.call(store, type, payload)
}
this.commit = function boundCommit (type, payload, options) {
return commit.call(store, type, payload, options)
}
// strict mode
/* strict mode (puts the Vuex store into strict mode: any state modification outside mutation handlers throws an error) */
this.strict = strict
// init root module.
// this also recursively registers all sub-modules
// and collects all module getters inside this._wrappedGetters
/* initialize the root module; this also recursively registers all sub-modules and collects all module getters into _wrappedGetters. this._modules.root is the Module object held only by the root module */
installModule(this, state, [], this._modules.root)
// initialize the store vm, which is responsible for the reactivity
// (also registers _wrappedGetters as computed properties)
/* reset the store through a vm: new a Vue instance and use Vue's internal reactivity to register state and computed */
resetStoreVM(this, state)
// apply plugins
/* apply the plugins */
plugins.forEach(plugin => plugin(this))
/* devtool plugin */
if (Vue.config.devtools) {
devtoolPlugin(this)
}
}
Besides initializing some internal variables, the Store constructor mainly executes installModule (initializing the modules) and resetStoreVM (making the store "reactive" through a vm).
installModule
installModule's main job is to register the module's mutations, actions, and getters under the module's namespace (if any), and to recursively install all child modules.
/* initialize a module */
function installModule (store, rootState, path, module, hot) {
/* whether this is the root module */
const isRoot = !path.length
/* get the module's namespace */
const namespace = store._modules.getNamespace(path)
// register in namespace map
/* if there is a namespace, register the module in _modulesNamespaceMap */
if (module.namespaced) {
store._modulesNamespaceMap[namespace] = module
}
// set state
if (!isRoot && !hot) {
/* get the parent state */
const parentState = getNestedState(rootState, path.slice(0, -1))
/* the module's name */
const moduleName = path[path.length - 1]
store._withCommit(() => {
/* make the child module's state reactive */
Vue.set(parentState, moduleName, module.state)
})
}
const local = module.context = makeLocalContext(store, namespace, path)
/* register each mutation */
module.forEachMutation((mutation, key) => {
const namespacedType = namespace + key
registerMutation(store, namespacedType, mutation, local)
})
/* register each action */
module.forEachAction((action, key) => {
const namespacedType = namespace + key
registerAction(store, namespacedType, action, local)
})
/* register each getter */
module.forEachGetter((getter, key) => {
const namespacedType = namespace + key
registerGetter(store, namespacedType, getter, local)
})
/* recursively install child modules */
module.forEachChild((child, key) => {
installModule(store, rootState, path.concat(key), child, hot)
})
}
resetStoreVM
Before discussing resetStoreVM, let's look at a small demo.
let globalData = {
d: 'hello world'
};
new Vue({
data () {
return {
$$state: {
globalData
}
}
}
});
/* modify */
setTimeout(() => {
globalData.d = 'hi~';
}, 1000);
Vue.prototype.globalData = globalData;
/* in any template */
<div>{{globalData.d}}</div>
The code above has a global globalData that is passed into a Vue instance's data; the variable is then rendered in an arbitrary Vue template. Because globalData has also been put on Vue's prototype, it can be accessed directly as this.globalData, i.e. {{globalData.d}} in the template. One second later, the setTimeout modifies globalData.d, and we see the globalData.d in the template change. This, in essence, is how Vuex relies on Vue's core to make its data "reactive".
Readers unfamiliar with Vue.js reactivity can read my other article on the reactivity principle to learn how Vue.js implements two-way data binding.
Now on to the code.
/* reset the store through a vm: new a Vue instance and use Vue's internal reactivity to register state and computed */
function resetStoreVM (store, state, hot) {
/* keep the previous vm object */
const oldVm = store._vm
// bind store public getters
store.getters = {}
const wrappedGetters = store._wrappedGetters
const computed = {}
/* use Object.defineProperty to give every getter a get method, so that reading this.$store.getters.test actually reads store._vm.test, i.e. a computed property of the Vue instance */
forEachValue(wrappedGetters, (fn, key) => {
// use computed to leverage its lazy-caching mechanism
computed[key] = () => fn(store)
Object.defineProperty(store.getters, key, {
get: () => store._vm[key],
enumerable: true // for local getters
})
})
// use a Vue instance to store the state tree
// suppress warnings just in case the user has added
// some funky global mixins
const silent = Vue.config.silent
/* Vue.config.silent is temporarily set to true so that no warnings are emitted while the Vue instance is created */
Vue.config.silent = true
/* new a Vue instance here, using Vue's internal reactivity to register state and computed */
store._vm = new Vue({
data: {
$$state: state
},
computed
})
Vue.config.silent = silent
// enable strict mode for new vm
/* enable strict mode, guaranteeing the store can only be modified through mutations */
if (store.strict) {
enableStrictMode(store)
}
if (oldVm) {
/* drop the old vm's reference to state and destroy the old Vue instance */
if (hot) {
// dispatch changes in all subscribed watchers
// to force getter re-evaluation for hot reloading.
store._withCommit(() => {
oldVm._data.$$state = null
})
}
Vue.nextTick(() => oldVm.$destroy())
}
}
resetStoreVM first iterates over wrappedGetters, binding a get method to every getter with Object.defineProperty, so that accessing this.$store.getters.test from a component is equivalent to accessing store._vm.test.
forEachValue(wrappedGetters, (fn, key) => {
// use computed to leverage its lazy-caching mechanism
computed[key] = () => fn(store)
Object.defineProperty(store.getters, key, {
get: () => store._vm[key],
enumerable: true // for local getters
})
})
Vuex then news a Vue instance to make the data "reactive", using the two-way binding that Vue.js provides internally to keep the store's data and the view in sync.
store._vm = new Vue({
data: {
$$state: state
},
computed
})
At this point, accessing store._vm.test accesses the property on that Vue instance.
Once these two steps are done, we can access the test property on the vm through this.$store.getters.test.
Strict mode
The Store constructor's options include a strict parameter that controls whether Vuex runs in strict mode. In strict mode, all modifications of state must happen through mutations; otherwise an error is thrown.
/* enable strict mode */
function enableStrictMode (store) {
store._vm.$watch(function () { return this._data.$$state }, () => {
if (process.env.NODE_ENV !== 'production') {
/* check the store's _committing value; if it is not true, the modification was not made through a mutation */
assert(store._committing, `Do not mutate vuex store state outside mutation handlers.`)
}
}, { deep: true, sync: true })
}
First, in strict mode, Vuex uses the vm's $watch method to observe $$state, i.e. the Store's state, entering the callback whenever it is modified. The callback contains a single statement: an assert that checks store._committing; when store._committing is false, the assertion fires and throws an exception.
Looking at the Store's commit method, the statement that executes mutations looks like this:
this._withCommit(() => {
entry.forEach(function commitIterator (handler) {
handler(payload)
})
})
Now let's look at the implementation of _withCommit.
_withCommit (fn) {
/* when _withCommit is used to modify state, the store's _committing is set to true; the assertion checks this value internally, so in strict mode values in the store may only be changed through mutations and never modified directly */
const committing = this._committing
this._committing = true
fn()
this._committing = committing
}
So when we modify state data through commit (a mutation), _committing is set to true before the mutation method is called; the mutation function then modifies the data in state, and when the $watch callback asserts on _committing it does not throw (because _committing is true at that point). But when we modify state data directly, the $watch callback runs its assertion while _committing is false, and an exception is thrown. That is how Vuex's strict mode is implemented.
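As a quick illustration (my own sketch, assuming a store with an increment mutation and a count field):
// commits pass the assertion: _committing is true while the handler runs
store.commit('increment')
// a direct write triggers the $watch callback with _committing === false,
// so in strict mode the assertion throws
store.state.count = 42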
Next, let's look at some of the APIs the Store provides.
commit (mutation)
/* the commit method that invokes mutations */
commit (_type, _payload, _options) {
// check object-style commit
/* validate the arguments */
const {
type,
payload,
options
} = unifyObjectStyle(_type, _payload, _options)
const mutation = { type, payload }
/* take the mutation handlers registered for this type */
const entry = this._mutations[type]
if (!entry) {
if (process.env.NODE_ENV !== 'production') {
console.error(`[vuex] unknown mutation type: ${type}`)
}
return
}
/* execute all the handlers registered for the mutation */
this._withCommit(() => {
entry.forEach(function commitIterator (handler) {
handler(payload)
})
})
/* notify all subscribers */
this._subscribers.forEach(sub => sub(mutation, this.state))
if (
process.env.NODE_ENV !== 'production' &&
options && options.silent
) {
console.warn(
`[vuex] mutation type: ${type}. Silent option has been removed. ` +
'Use the filter functionality in the vue-devtools'
)
}
}
The commit method finds and calls all the mutation handlers registered in _mutations for the given type, so when namespaces are not used, commit triggers the matching mutation methods in every module. After all the mutations have run, it calls every subscriber in _subscribers. Let's look at what _subscribers is.
The Store exposes a subscribe method for registering a subscriber function, which is pushed into the Store instance's _subscribers; subscribe also returns a function that removes that subscriber from _subscribers.
/* register a subscriber function; returns a function that unsubscribes it */
subscribe (fn) {
const subs = this._subscribers
if (subs.indexOf(fn) < 0) {
subs.push(fn)
}
return () => {
const i = subs.indexOf(fn)
if (i > -1) {
subs.splice(i, 1)
}
}
}
After commit finishes, the subscribers in _subscribers are called. This subscriber pattern gives the outside world a way to observe state changes: when state changes through a mutation, those changes can be effectively captured.
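For example, a hypothetical persistence plugin (my sketch, not from the article) can use subscribe to capture every committed change:
const persistPlugin = store => {
  // runs after every mutation, with the mutation descriptor and the new state
  store.subscribe((mutation, state) => {
    localStorage.setItem('vuex-state', JSON.stringify(state))
  })
}
// passed via the plugins option: new Vuex.Store({ plugins: [persistPlugin], ... })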
dispatch (action)
Let's look at the implementation of dispatch.
/* the dispatch method that invokes actions */
dispatch (_type, _payload) {
// check object-style dispatch
const {
type,
payload
} = unifyObjectStyle(_type, _payload)
/* take the actions registered for this type out of _actions */
const entry = this._actions[type]
if (!entry) {
if (process.env.NODE_ENV !== 'production') {
console.error(`[vuex] unknown action type: ${type}`)
}
return
}
/* if there are several, wrap them in a new combined Promise; if there is only one, return the first directly */
return entry.length > 1
? Promise.all(entry.map(handler => handler(payload)))
: entry[0](payload)
}
And here is what registerAction does:
/* register each action */
function registerAction (store, type, handler, local) {
/* take the actions registered for this type */
const entry = store._actions[type] || (store._actions[type] = [])
entry.push(function wrappedActionHandler (payload, cb) {
let res = handler.call(store, {
dispatch: local.dispatch,
commit: local.commit,
getters: local.getters,
state: local.state,
rootGetters: store.getters,
rootState: store.state
}, payload, cb)
/* check whether the result is a Promise */
if (!isPromise(res)) {
/* if it is not a Promise object, wrap it into one */
res = Promise.resolve(res)
}
if (store._devtoolHook) {
/* when the devtool plugin is present, emit vuex errors to devtool */
return res.catch(err => {
store._devtoolHook.emit('vuex:error', err)
throw err
})
} else {
return res
}
})
}
Because registerAction wraps every action pushed into _actions (wrappedActionHandler), inside an action we can get state, commit, and the rest from the handler's first argument. Afterwards, the result res is checked: if it is not a Promise, it is wrapped into one. When dispatching, the actions are taken out of _actions; if there is only one it is returned directly, otherwise they are combined with Promise.all and returned.
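A usage sketch (hypothetical action and API helper, not from the article) of what this wrapping means for callers:
const store = new Vuex.Store({
  state: { user: null },
  mutations: {
    setUser (state, user) { state.user = user }
  },
  actions: {
    // the first argument is the wrapped context built by registerAction
    fetchUser ({ commit }, id) {
      // api.getUser is a placeholder for some async call returning a Promise
      return api.getUser(id).then(user => commit('setUser', user))
    }
  }
})
// dispatch always returns a Promise, even if the action body did not
store.dispatch('fetchUser', 1).then(() => { /* state.user is now set */ })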
watch
/* watch a getter function */
watch (getter, cb, options) {
if (process.env.NODE_ENV !== 'production') {
assert(typeof getter === 'function', `store.watch only accepts a function.`)
}
return this._watcherVM.$watch(() => getter(this.state, this.getters), cb, options)
}
Anyone familiar with Vue should recognize the watch method. The design here is quite clever: _watcherVM is a Vue instance, so watch can directly reuse Vue's internal watch feature to observe changes in a data getter.
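A usage sketch (hypothetical state shape):
const unwatch = store.watch(
  // the getter receives (state, getters), as seen above
  (state, getters) => state.cart.items.length,
  (newLen, oldLen) => console.log('cart size:', oldLen, '->', newLen)
)
// stop observing later, exactly like vm.$watch
unwatch()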
registerModule
/* register a dynamic module; when business code is loaded asynchronously, dynamic modules can be registered through this interface */
registerModule (path, rawModule) {
/* 转化称Array */
if (typeof path === 'string') path = [path]
if (process.env.NODE_ENV !== 'production') {
assert(Array.isArray(path), `module path must be a string or an Array.`)
assert(path.length > 0, 'cannot register the root module by using registerModule.')
}
/* register */
this._modules.register(path, rawModule)
/* initialize the module */
installModule(this, this.state, path, this._modules.get(path))
// reset store to update getters...
/* reset the store through a vm: new a Vue instance and use Vue's internal reactivity to register state and computed */
resetStoreVM(this, this.state)
}
registerModule registers a dynamic module, i.e. it is the interface to use when a module is registered after the store has been created. Internally it really just performs the installModule and resetStoreVM steps covered above, so I won't repeat them here.
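A usage sketch (a hypothetical module loaded on demand):
store.registerModule('chat', {
  state: { messages: [] },
  mutations: {
    addMessage (state, msg) { state.messages.push(msg) }
  }
})
// the new module's state is reactive under store.state.chat
store.commit('addMessage', 'hello')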
unregisterModule
/* unregister a dynamic module */
unregisterModule (path) {
/* 转化称Array */
if (typeof path === 'string') path = [path]
if (process.env.NODE_ENV !== 'production') {
assert(Array.isArray(path), `module path must be a string or an Array.`)
}
/* unregister */
this._modules.unregister(path)
this._withCommit(() => {
/* get the parent state */
const parentState = getNestedState(this.state, path.slice(0, -1))
/* delete from the parent */
Vue.delete(parentState, path[path.length - 1])
})
/* reset the store */
resetStore(this)
}
Correspondingly, unregisterModule, the counterpart of registerModule, unregisters a dynamic module. It first deletes the module from the state, then uses resetStore to reset the store.
resetStore
/* reset the store */
function resetStore (store, hot) {
store._actions = Object.create(null)
store._mutations = Object.create(null)
store._wrappedGetters = Object.create(null)
store._modulesNamespaceMap = Object.create(null)
const state = store.state
// init all modules
installModule(store, state, [], store._modules.root, true)
// reset vm
resetStoreVM(store, state, hot)
}
resetStore simply reinitializes the store's _actions and the other maps, then reruns installModule and resetStoreVM to initialize the modules and make everything "reactive" again via Vue, exactly as in the constructor.
Plugins
Vue provides a very handy plugin, Vue.js devtools.
/* grab the devtool plugin from the window object's __VUE_DEVTOOLS_GLOBAL_HOOK__ */
const devtoolHook =
typeof window !== 'undefined' &&
window.__VUE_DEVTOOLS_GLOBAL_HOOK__
export default function devtoolPlugin (store) {
if (!devtoolHook) return
/* the devtool plugin instance is stored on the store's _devtoolHook */
store._devtoolHook = devtoolHook
/* emit vuex's init event and pass the store reference to the devtool plugin, so the plugin obtains the store instance */
devtoolHook.emit('vuex:init', store)
/* listen for the travel-to-state event */
devtoolHook.on('vuex:travel-to-state', targetState => {
/* replace the state */
store.replaceState(targetState)
})
/* subscribe to store changes */
store.subscribe((mutation, state) => {
devtoolHook.emit('vuex:mutation', mutation, state)
})
}
If the plugin is installed, it exposes a __VUE_DEVTOOLS_GLOBAL_HOOK__ on the window object. On initialization, devtoolHook emits a "vuex:init" event to notify the plugin, passing the store reference so the plugin gets hold of the store instance; it then listens for the "vuex:travel-to-state" event via on to replace the state. Finally, the Store's subscribe method adds a subscriber that is notified after commit runs a mutation, emitting the "vuex:mutation" event.
Conclusion
Vuex is an excellent library: its codebase is small and its structure is clear, which makes it ideal for studying the internals. This recent series of source-code readings has taught me a great deal, and I hope this article helps others who want to explore how Vuex works under the hood.
Find Cube Root of a Number in C, C++
Leave a Comment
Write a C, C++ program to find the cube root of a number.
In my last post, I explained how to calculate the cube of a number. In this post we'll solve a very interesting problem: finding the cube root of a number.
Let's say we are given the number 125; the cube root of 125 is 5. Similarly, the cube root of 27 is 3.
Find Cube Root of a Number in C++
#include <iostream>
#include <cmath>
using namespace std;
int main() {
int n;
float result;
cout << "Enter number \n";
cin >> n;
result = pow(n, 1.0/3.0); // may be slightly inexact even for perfect cubes
cout << "Cube root of the number is " << result;
return 0;
}
Find Cube Root of a Number in C
#include <stdio.h>
#include <math.h>
int main(void) {
int n;
float result;
printf ("Enter number \n");
scanf("%d", &n);
result = pow(n, 1.0/3.0); /* may be slightly inexact even for perfect cubes */
/* %f is the format specifier for a float */
printf("Cube root of the number is %f ", result);
return 0;
}
Output :
Enter number : 125
Cube root of the number is 5.000000
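One caveat with this approach: pow with a fractional exponent returns NaN for negative bases, and the result can be slightly off the exact root due to floating-point rounding. If your compiler supports C99, cbrt from math.h handles negative inputs directly — a small sketch:
#include <stdio.h>
#include <math.h>
int main(void) {
    /* cbrt (C99) accepts negative inputs, unlike pow(n, 1.0/3.0) */
    printf("%f\n", cbrt(-27.0)); /* prints -3.000000 */
    return 0;
}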
src/share/classes/sun/util/locale/provider/AuxLocaleProviderAdapter.java
rev 6663 : imported patch 8008576
26 package sun.util.locale.provider;
27
28 import java.text.spi.BreakIteratorProvider;
29 import java.text.spi.CollatorProvider;
30 import java.text.spi.DateFormatProvider;
31 import java.text.spi.DateFormatSymbolsProvider;
32 import java.text.spi.DecimalFormatSymbolsProvider;
33 import java.text.spi.NumberFormatProvider;
34 import java.util.Arrays;
35 import java.util.HashSet;
36 import java.util.Locale;
37 import java.util.Set;
38 import java.util.concurrent.ConcurrentHashMap;
39 import java.util.concurrent.ConcurrentMap;
40 import java.util.spi.CalendarDataProvider;
41 import java.util.spi.CalendarNameProvider;
42 import java.util.spi.CurrencyNameProvider;
43 import java.util.spi.LocaleNameProvider;
44 import java.util.spi.LocaleServiceProvider;
45 import java.util.spi.TimeZoneNameProvider;
46
47 /**
48 * An abstract parent class for the
49 * HostLocaleProviderAdapter/SPILocaleProviderAdapter.
50 *
51 * @author Naoto Sato
52 * @author Masayoshi Okutsu
53 */
54 public abstract class AuxLocaleProviderAdapter extends LocaleProviderAdapter {
55 /**
56 * SPI implementations map
57 */
58 private ConcurrentMap<Class<? extends LocaleServiceProvider>, LocaleServiceProvider> providersMap =
59 new ConcurrentHashMap<>();
60
61 /**
62 * Getter method for Locale Service Providers
63 */
64 @Override
65 public <P extends LocaleServiceProvider> P getLocaleServiceProvider(Class<P> c) {
123 @Override
124 public LocaleNameProvider getLocaleNameProvider() {
125 return getLocaleServiceProvider(LocaleNameProvider.class);
126 }
127
128 @Override
129 public TimeZoneNameProvider getTimeZoneNameProvider() {
130 return getLocaleServiceProvider(TimeZoneNameProvider.class);
131 }
132
133 @Override
134 public CalendarDataProvider getCalendarDataProvider() {
135 return getLocaleServiceProvider(CalendarDataProvider.class);
136 }
137
138 @Override
139 public CalendarNameProvider getCalendarNameProvider() {
140 return getLocaleServiceProvider(CalendarNameProvider.class);
141 }
142
143 @Override
144 public LocaleResources getLocaleResources(Locale locale) {
145 return null;
146 }
147
148 private static Locale[] availableLocales = null;
149
150 @Override
151 public Locale[] getAvailableLocales() {
152 if (availableLocales == null) {
153 Set<Locale> avail = new HashSet<>();
154 for (Class<? extends LocaleServiceProvider> c :
155 LocaleServiceProviderPool.spiClasses) {
156 LocaleServiceProvider lsp = getLocaleServiceProvider(c);
157 if (lsp != null) {
158 avail.addAll(Arrays.asList(lsp.getAvailableLocales()));
159 }
160 }
161 availableLocales = avail.toArray(new Locale[0]);
162 }
26 package sun.util.locale.provider;
27
28 import java.text.spi.BreakIteratorProvider;
29 import java.text.spi.CollatorProvider;
30 import java.text.spi.DateFormatProvider;
31 import java.text.spi.DateFormatSymbolsProvider;
32 import java.text.spi.DecimalFormatSymbolsProvider;
33 import java.text.spi.NumberFormatProvider;
34 import java.util.Arrays;
35 import java.util.HashSet;
36 import java.util.Locale;
37 import java.util.Set;
38 import java.util.concurrent.ConcurrentHashMap;
39 import java.util.concurrent.ConcurrentMap;
40 import java.util.spi.CalendarDataProvider;
41 import java.util.spi.CalendarNameProvider;
42 import java.util.spi.CurrencyNameProvider;
43 import java.util.spi.LocaleNameProvider;
44 import java.util.spi.LocaleServiceProvider;
45 import java.util.spi.TimeZoneNameProvider;
46 import sun.util.spi.CalendarProvider;
47
48 /**
49 * An abstract parent class for the
50 * HostLocaleProviderAdapter/SPILocaleProviderAdapter.
51 *
52 * @author Naoto Sato
53 * @author Masayoshi Okutsu
54 */
55 public abstract class AuxLocaleProviderAdapter extends LocaleProviderAdapter {
56 /**
57 * SPI implementations map
58 */
59 private ConcurrentMap<Class<? extends LocaleServiceProvider>, LocaleServiceProvider> providersMap =
60 new ConcurrentHashMap<>();
61
62 /**
63 * Getter method for Locale Service Providers
64 */
65 @Override
66 public <P extends LocaleServiceProvider> P getLocaleServiceProvider(Class<P> c) {
124 @Override
125 public LocaleNameProvider getLocaleNameProvider() {
126 return getLocaleServiceProvider(LocaleNameProvider.class);
127 }
128
129 @Override
130 public TimeZoneNameProvider getTimeZoneNameProvider() {
131 return getLocaleServiceProvider(TimeZoneNameProvider.class);
132 }
133
134 @Override
135 public CalendarDataProvider getCalendarDataProvider() {
136 return getLocaleServiceProvider(CalendarDataProvider.class);
137 }
138
139 @Override
140 public CalendarNameProvider getCalendarNameProvider() {
141 return getLocaleServiceProvider(CalendarNameProvider.class);
142 }
143
144 /**
145 * Getter methods for sun.util.spi.* providers
146 */
147 @Override
148 public CalendarProvider getCalendarProvider() {
149 return getLocaleServiceProvider(CalendarProvider.class);
150 }
151
152 @Override
153 public LocaleResources getLocaleResources(Locale locale) {
154 return null;
155 }
156
157 private static Locale[] availableLocales = null;
158
159 @Override
160 public Locale[] getAvailableLocales() {
161 if (availableLocales == null) {
162 Set<Locale> avail = new HashSet<>();
163 for (Class<? extends LocaleServiceProvider> c :
164 LocaleServiceProviderPool.spiClasses) {
165 LocaleServiceProvider lsp = getLocaleServiceProvider(c);
166 if (lsp != null) {
167 avail.addAll(Arrays.asList(lsp.getAvailableLocales()));
168 }
169 }
170 availableLocales = avail.toArray(new Locale[0]);
171 }
How do I check to see if a column exists in a SqlDataReader object? In my data access layer, I have created a method that builds the same object for multiple stored procedure calls. One of the stored procedures has an additional column that is not used by the other stored procedures. I want to modify the method to accommodate every scenario.
My application is written in C#.
possible duplicate of Detecting if an IDataReader contains a certain field before iteration – nawfal Dec 12 '13 at 13:05
18 Answers
In the accepted answer, using Exceptions for control logic is considered bad practice and has performance costs.
Looping through the fields can have a small performance hit if you use it a lot, so you may want to consider caching the results (a cached variant follows the code below).
The more appropriate way to do this is:
public static class DataRecordExtensions
{
public static bool HasColumn(this IDataRecord dr, string columnName)
{
for (int i=0; i < dr.FieldCount; i++)
{
if (dr.GetName(i).Equals(columnName, StringComparison.InvariantCultureIgnoreCase))
return true;
}
return false;
}
}
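As noted above, if you check many columns or run this in a tight loop, you may want to cache the results; one possible (untested) variant builds the set of names once per reader:
using System;
using System.Collections.Generic;
using System.Data;
public static class DataRecordCacheExtensions
{
    // Build the name set once, then reuse it for any number of lookups.
    public static HashSet<string> GetColumnNames(this IDataRecord dr)
    {
        var names = new HashSet<string>(StringComparer.InvariantCultureIgnoreCase);
        for (int i = 0; i < dr.FieldCount; i++)
            names.Add(dr.GetName(i));
        return names;
    }
}
// usage:
// var cols = reader.GetColumnNames();
// while (reader.Read()) { if (cols.Contains("MyColumn")) { ... } }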
Thank you for a solution that makes a lot of sense, I could not bring myself to beleive the catching a exception is the best way to find if a column does exist. – Rihan Meij May 20 '09 at 15:30
Same here - I was like eh? catch the exception? – Andrew Harry Jul 21 '09 at 4:26
Great solution! Just so you know, there is an extra ) at the end of the if statement in the above code. – Stuck Apr 5 '11 at 8:27
It's much better to use this boolean function:
r.GetSchemaTable().Columns.Contains(field)
One call - no exceptions. It might throw exceptions internally, but I don't think so.
NOTE: In the comments below, we figured this out... the correct code is actually this:
public static bool HasColumn(DbDataReader Reader, string ColumnName) {
foreach (DataRow row in Reader.GetSchemaTable().Rows) {
if (row["ColumnName"].ToString() == ColumnName)
return true;
} //Still here? Column not found.
return false;
}
@Jasmine: I spoke too soon! Your code checks for a column in the schema table, not your result set. You need to compare "field" (assuming "field" is the column name) to the value of each row's "ColumnName" field. Break when you find it, return false if you don't. – Steve J Jun 15 '09 at 21:46
@Steve J: When would the resultset NOT have a column in the GetSchemaTable? – Bless Yahu Aug 7 '09 at 16:13
@Jasmine @Steve So does this method work at all? – bzlm Apr 8 '10 at 6:39
To anyone else confused, THIS DOES NOT WORK. See The answer below about retrieving the ColumnName row from the schema table and using it. – Jason Jackson Dec 17 '12 at 17:30
Yes, this DOES NOT WORK. Who upvoted it so many times??? It would've saved me a lot of debugging time later if this answer wasn't here! – c00000fd Apr 15 '13 at 10:11
I think your best bet is to call GetOrdinal("columnName") on your DataReader up front, and catch an IndexOutOfRangeException in case the column isn't present.
In fact, let's make an extension method:
public static bool HasColumn(this IDataRecord r, string columnName)
{
try
{
return r.GetOrdinal(columnName) >= 0;
}
catch (IndexOutOfRangeException)
{
return false;
}
}
Edit
Ok, this post is starting to garner a few down-votes lately, and I can't delete it because it's the accepted answer, so I'm going to update it and (I hope) try to justify the use of exception handling as control flow.
The other way of achieving this, as posted by Chad Grant, is to loop through each field in the DataReader and do a case-insensitive comparison for the field name you're looking for. This will work really well, and truthfully will probably perform better than my method above. Certainly I would never use the method above inside a loop where performance was an issue.
I can think of one situation in which the try/GetOrdinal/catch method will work where the loop doesn't. It is, however, a completely hypothetical situation right now so it's a very flimsy justification. Regardless, bear with me and see what you think.
Imagine a database that allowed you to "alias" columns within a table. Imagine that I could define a table with a column called "EmployeeName" but also give it an alias of "EmpName", and doing a select for either name would return the data in that column. With me so far?
Now imagine that there's an ADO.NET provider for that database, and they've coded up an IDataReader implementation for it which takes column aliases into account.
Now, dr.GetName(i) (as used in Chad's answer) can only return a single string, so it has to return only one of the "aliases" on a column. However, GetOrdinal("EmpName") could use the internal implementation of this provider's fields to check each column's alias for the name you're looking for.
In this hypothetical "aliased columns" situation, the try/GetOrdinal/catch method would be the only way to be sure that you're checking for every variation of a column's name in the resultset.
Flimsy? Sure. But worth a thought. Honestly I'd much rather an "official" HasColumn method on IDataRecord.
I was going to suggest the same thing, GetOrdinal is great because the lookup is case insensitive, if it fails, it does a case sensitive look up. – kd7 Dec 17 '08 at 0:16
using exceptions for control logic? no no no – Chad Grant May 1 '09 at 23:42
There is one small thing that everyone overlooks when I originally posted this question...I asked the question on 12/8/08 and Matt posted his answer on 12/17/08. Everyone made a stink about catching an exception for control logic but did not provide a solid alternative solution until 5/1/09. That is why it was originally marked as the answer. I am still using this solution today. – Michael Kniskern Dec 14 '10 at 15:32
This will have a performance hit only if the column was not there. The other methods described will have a performance hit, and a larger performance hit, every single time. While it is generally bad practice to avoid using exception handling for flow of control, this solution should not be ruled out without first considering if it works in your case. – Nick May 5 '11 at 15:22
+1. I'm ok with "Don't use exception for control logic" as a broad design rule. It does not mean "avoid it at all cost". The answer is a very well documented workaround, and as @Nick says, the performance hit (if any..) only occurs when the column does not exists. – Larry Nov 4 '13 at 13:26
Here is a working sample for Jasmin's idea:
var cols = r.GetSchemaTable().Rows.Cast<DataRow>().Select
(row => row["ColumnName"] as string).ToList();
if (cols.Contains("the column name"))
{
}
THIS ONE DID WORK – Marin Mar 30 '12 at 15:54
I wrote this for Visual Basic users:
Protected Function HasColumnAndValue(ByRef reader As IDataReader, ByVal columnName As String) As Boolean
For i As Integer = 0 To reader.FieldCount - 1
If reader.GetName(i).Equals(columnName) Then
Return Not IsDBNull(reader(columnName))
End If
Next
Return False
End Function
I think this is more powerful, and the usage is:
If HasColumnAndValue(reader, "ID_USER") Then
Me.UserID = reader.GetDecimal(reader.GetOrdinal("ID_USER")).ToString()
End If
If you read the question, Michael asked about DataReader, not DataRecord folks. Get your objects right.
Using a r.GetSchemaTable().Columns.Contains(field) on a DataRecord does work, but it returns BS columns (see screenshot below.)
To see if a data column exists AND contains data in a DataReader, use the following extensions:
public static class DataReaderExtensions
{
/// <summary>
/// Checks if a column's value is DBNull
/// </summary>
/// <param name="dataReader">The data reader</param>
/// <param name="columnName">The column name</param>
/// <returns>A bool indicating if the column's value is DBNull</returns>
public static bool IsDBNull(this IDataReader dataReader, string columnName)
{
return dataReader[columnName] == DBNull.Value;
}
/// <summary>
/// Checks if a column exists in a data reader
/// </summary>
/// <param name="dataReader">The data reader</param>
/// <param name="columnName">The column name</param>
/// <returns>A bool indicating the column exists</returns>
public static bool ContainsColumn(this IDataReader dataReader, string columnName)
{
/// See: http://stackoverflow.com/questions/373230/check-for-column-name-in-a-sqldatareader-object/7248381#7248381
try
{
return dataReader.GetOrdinal(columnName) >= 0;
}
catch (IndexOutOfRangeException)
{
return false;
}
}
}
Usage:
public static bool CanCreate(SqlDataReader dataReader)
{
return dataReader.ContainsColumn("RoleTemplateId")
&& !dataReader.IsDBNull("RoleTemplateId");
}
Calling r.GetSchemaTable().Columns on a DataReader returns BS columns:
[Screenshot: the columns returned by GetSchemaTable on a DataReader]
Please explain down vote – Levitikon Sep 27 '12 at 20:14
see comments under Matts answer – nawfal Aug 8 '13 at 4:49
What do you mean by DataRecord does work, but it returns BS columns? You mean it runs (and gives wrong results)? – nawfal Dec 12 '13 at 14:01
@nawfal See the screenshot in this answer. It does work in that .GetSchemaTable().Columns is a method off both DataReader and DataRecord. But the results of the method on a DataReader are irrelevant to the actual columns. – Levitikon Dec 13 '13 at 11:59
Levi, I know that, and thanks for responding. does work is not a good word imo to state that. It gives the meaning, it works correctly. I was confused. I'll recommend and edit to: it doesn't work or it gives incorrect results.. Not nitpicking, was just concerned.. – nawfal Dec 13 '13 at 12:01
This code corrects the issues that Levitikon had with their code: (adapted from: [1]: http://msdn.microsoft.com/en-us/library/system.data.datatablereader.getschematable.aspx)
public List<string> GetColumnNames(SqlDataReader r)
{
List<string> ColumnNames = new List<string>();
DataTable schemaTable = r.GetSchemaTable();
DataRow row = schemaTable.Rows[0];
foreach (DataColumn col in schemaTable.Columns)
{
if (col.ColumnName == "ColumnName")
{
ColumnNames.Add(row[col.Ordinal].ToString());
break;
}
}
return ColumnNames;
}
The reason you get all of those useless column names, and not the names of the columns in your table, is that you are reading the schema table's own column names (i.e., the columns of the schema table itself).
NOTE: this seems to only return the name of the first column...
EDIT: corrected code that returns the name of all columns, but you cannot use a SqlDataReader to do it
public List<string> ExecuteColumnNamesReader(string command, List<SqlParameter> Params)
{
List<string> ColumnNames = new List<string>();
SqlDataAdapter da = new SqlDataAdapter();
string connection = ""; // your sql connection string
SqlCommand sqlComm = new SqlCommand(command, connection);
foreach (SqlParameter p in Params) { sqlComm.Parameters.Add(p); }
da.SelectCommand = sqlComm;
DataTable dt = new DataTable();
da.Fill(dt);
DataRow row = dt.Rows[0];
for (int ordinal = 0; ordinal < dt.Columns.Count; ordinal++)
{
string column_name = dt.Columns[ordinal].ColumnName;
ColumnNames.Add(column_name);
}
return ColumnNames; // you can then call .Contains("name") on the returned collection
}
Or in one line return r.GetSchemaTable().Rows.Cast<DataRow>().Select(x => (string)x["ColumnName"]).ToList(); :) – nawfal Dec 12 '13 at 14:03
Hashtable ht = new Hashtable();
Hashtable CreateColumHash(SqlDataReader dr)
{
ht = new Hashtable();
for (int i = 0; i < dr.FieldCount; i++)
{
ht.Add(dr.GetName(i), dr.GetName(i));
}
return ht;
}
bool ValidateColumn(string ColumnName)
{
return ht.Contains(ColumnName);
}
Never seen a weirder use of HashTable :) Today you have HashSet<T> for the same purpose :) – nawfal Nov 8 '13 at 17:46
Good one, it can be one-lined using LINQ. – Larry Dec 9 '13 at 8:07
In one line, use this after your DataReader retrieval:
var fieldNames = Enumerable.Range(0, dr.FieldCount).Select(i => dr.GetName(i)).ToArray();
Then,
if (fieldNames.Contains("myField"))
{
var myFieldValue = dr["myField"];
...
I couldn't get GetSchemaTable to work either, until I found this way.
Basically I do this:
Dim myView As DataView = dr.GetSchemaTable().DefaultView
myView.RowFilter = "ColumnName = 'ColumnToBeChecked'"
If myView.Count > 0 AndAlso dr.GetOrdinal("ColumnToBeChecked") <> -1 Then
obj.ColumnToBeChecked = ColumnFromDb(dr, "ColumnToBeChecked")
End If
public static bool DataViewColumnExists(DataView dv, string columnName)
{
return DataTableColumnExists(dv.Table, columnName);
}
public static bool DataTableColumnExists(DataTable dt, string columnName)
{
string DebugTrace = "Utils::DataTableColumnExists(" + dt.ToString() + ")";
try
{
return dt.Columns.Contains(columnName);
}
catch (Exception ex)
{
throw new MyExceptionHandler(ex, DebugTrace);
}
}
Columns.Contains is case-insensitive btw.
To keep your code robust and clean, use a single extension function, like this:
Public Module Extensions
<Extension()>
Public Function HasColumn(r As SqlDataReader, columnName As String) As Boolean
Return If(String.IsNullOrEmpty(columnName) OrElse r.FieldCount = 0, False, Enumerable.Range(0, r.FieldCount).Select(Function(i) r.GetName(i)).Contains(columnName, StringComparer.OrdinalIgnoreCase))
End Function
End Module
Here is a one-liner LINQ version of the accepted answer:
Enumerable.Range(0, reader.FieldCount).Any(i => reader.GetName(i) == "COLUMN_NAME_GOES_HERE")
You can also call GetSchemaTable() on your DataReader if you want the list of columns and you don't want to have to get an exception...
There is some debate as to whether this works: stackoverflow.com/questions/373230/… – bzlm Apr 8 '10 at 6:40
Well, it's always worked for me. – Dave Markle Apr 8 '10 at 9:21
It's a matter of DataReader versus DataRecord. GetSchemaTable is a method of both, but when used in DataReader its gets schema junk. – Levitikon Sep 13 '11 at 18:27
@Levitikon what is a DataRecord? I cannot see such a type with GetSchemaTable method. – nawfal Dec 12 '13 at 14:00
These answers are already posted here. Just Linq-ing a bit:
bool b = reader.GetSchemaTable().Rows
.Cast<DataRow>()
.Select(x => (string)x["ColumnName"])
.Contains(colName, StringComparer.OrdinalIgnoreCase);
//or
bool b = Enumerable.Range(0, reader.FieldCount)
.Select(reader.GetName)
.Contains(colName, StringComparer.OrdinalIgnoreCase);
The second one is cleaner, and much, much faster. Even if you don't run GetSchemaTable every single time in the first approach, the lookup is going to be very slow.
Here is the solution from Jasmine in one line (one more, though simple!):
reader.GetSchemaTable().Select("ColumnName='MyCol'").Length > 0;
In your particular situation (all procedures have the same columns except one, which has one additional column), it will be better and faster to check the reader.FieldCount property to distinguish between them.
const int NormalColCount=.....
if(reader.FieldCount > NormalColCount)
{
// Do something special
}
I know it is an old post, but I decided to answer to help others in the same situation. You can also (for performance reasons) combine this solution with the iterating solution above.
Please name the solution you are referring to. Which two solutions should be mixed? – Pablo Karlsson 2 days ago
How about
if (dr.GetSchemaTable().Columns.Contains("accounttype"))
do something
else
do something
It probably would not be as efficient in a loop
Julia Evans
Day 36: On programming without malloc
in favorite, hackerschool, kernel, rust
So right now I'm working on writing a kernel in Rust. My current goal is to press keys on the keyboard and have them echoed to the screen. This is going okay! I anticipate being able to type by the end of the week.
One thing that's interesting is that my expectations around what programs should be able to do is really different right now. Normally I write Python or other high-level languages, so my programs don't run too quickly, but have tons of resources available to them (the Internet, a standard library, memory allocation, garbage collection, ...).
Writing operating systems is totally different. This is kind of obvious, but actually doing it is really fascinating. My OS literally can't allocate memory, and there's no standard way to print (I have to write to the VGA buffer manually). I can still write loops, though, and in general writing Rust doesn't feel too unfamiliar. But I expect my code to run super fast, because it has no excuse not to :). Right now I definitely don't have timers or anything, so I'm looping 80,000,000 times to sleep.
A few things that I can't do that I'm used to being able to do:
• allocate memory
• print (I can sort of do this)
• sleep
• run other processes (there are no other programs)
• read from stdin (I don't have a keyboard driver yet. There is no stdin.)
• open files (there are no files)
• list files (there are no files)
(thanks to Lea for "there are no files" =D)
The only real problem with not having malloc is that all the memory I use has to either be
• in the program at compile time, or
• allocated on the stack
This is less difficult than I expected it to be! We'll see how it continues. It does mean that I use a lot of global variables, and it's given me an appreciation for why there is so much use of global variables in the Linux kernel -- if just need 1 struct, it makes so much more sense to just have 1 global struct than to keep mallocing and freeing it all the time.
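To make that concrete, here's a minimal sketch of the pattern (my own, in present-day Rust syntax rather than the 2013 dialect below):

// When you only ever need one of something, a static beats malloc.
struct Cursor {
    x: u16,
    y: u16,
}

// One instance, baked into the binary at compile time -- nothing to allocate or free.
static mut CURSOR: Cursor = Cursor { x: 0, y: 0 };

unsafe fn advance_cursor() {
    CURSOR.x += 1; // no heap involved
}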
Here's an example of some code I have in the kernel! main() prints all the ASCII characters in a loop.
pub unsafe fn putchar(x: u16, y: u16, c: u8) {
let idx : uint = (y * VGA_WIDTH * 2 + x * 2) as uint;
// 0xb8000 is the VGA buffer
*((0xb8000 + idx) as *mut u16) = make_vgaentry(c, Black, Yellow);
}
fn make_vgaentry(c: u8, fg: Color, bg: Color) -> u16 {
// VGA entries are 2 bytes. The first byte is the character,
// the second is the colour
let color = fg as u16 | (bg as u16 << 4);
return c as u16 | (color << 8);
}
pub unsafe fn main() {
let mut i: u32 = 0;
let mut c: u8 = 65; // 'A'
let N: u32 = 80000000; // big enough number so that it goes slowly
loop {
i += 1;
if (i % N == 0) {
c += 1;
putchar(2, 4, c);
}
}
}
Note for pedants: I actually do have a malloc function because my Rust standard library needs to link against it, but it's defined like this:

malloc: jmp $

That's assembly-speak for "loop forever". If I get around to implementing malloc it will be the Most Exciting Thing.
std::experimental::filesystem::temp_directory_path
From cppreference.com
< cpp | experimental | fs
Technical specifications
Filesystem library (filesystem TS)
Library fundamentals (library fundamentals TS)
Library fundamentals 2 (library fundamentals 2 TS)
Extensions for parallelism (parallelism TS)
Extensions for concurrency (concurrency TS)
Concepts (concepts TS)
Ranges (ranges TS)
Special mathematical functions (special math TR)
Defined in header <experimental/filesystem>
path temp_directory_path();
path temp_directory_path( error_code& ec );
(1) (filesystem TS)
Returns the directory location suitable for temporary files.
Parameters
(none)
Return value
A directory suitable for temporary files. The path is guaranteed to exist and to be a directory. The overload that takes an error_code& argument returns an empty path on error.
Exceptions
The overload that does not take an error_code& parameter throws filesystem_error on underlying OS API errors, constructed with the path to be returned as the first argument and the OS error code as the error code argument. std::bad_alloc may be thrown if memory allocation fails. The overload taking an error_code& parameter sets it to the OS API error code if an OS API call fails, and executes ec.clear() if no errors occur. This overload is declared noexcept.
Notes
On POSIX systems, the path may be the one specified in the environment variables TMPDIR, TMP, TEMP, TEMPDIR, and, if none of them are specified, the path "/tmp" is returned.
On Windows systems, the path is typically the one returned by GetTempPath.
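For completeness, a minimal sketch of the non-throwing overload (my addition, separate from the page's own example below):

#include <iostream>
#include <system_error>
#include <experimental/filesystem>
namespace fs = std::experimental::filesystem;
int main()
{
    std::error_code ec;
    fs::path p = fs::temp_directory_path(ec); // returns an empty path on error
    if (ec)
        std::cout << "error: " << ec.message() << '\n';
    else
        std::cout << p << '\n';
}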
Example
#include <iostream>
#include <experimental/filesystem>
namespace fs = std::experimental::filesystem;
int main()
{
std::cout << "Temp directory is " << fs::temp_directory_path() << '\n';
}
Possible output:
Temp directory is "C:\Windows\TEMP\"
See also
std::tmpfile: creates and opens a temporary, auto-removing file (function)
Google Keep vs. Google Tasks
Google has developed many apps to help you make the most of your phone, and two of those are Google Keep and Google Tasks. Both may seem to have the same features, but they differ in many ways. How do they compare? This article covers exactly that: Google Keep vs. Google Tasks.
What’s the difference?
Google Keep
Google Keep Web
Google Keep is mostly used to add and save notes. The platform is valuable to anyone who wants to write down thoughts, something memorable, or notes to use later. The app is available on mobile devices, and you can access it in a browser on desktop.
One of the best things about Google Keep is its simplicity and accessibility. It works with your Google account and uses your allocated storage to save notes. But, because of its nature, it does not offer a wide range of note-taking tools.
Compared to other apps, Google Keep offers the fewest options because you don't get rich editing tools. You can expect to:
1. Add text notes
2. Add list notes
3. Set reminders
4. Add collaborators
5. Change a note’s background colour
6. Insert or take pictures
7. Handwrite notes
8. Record voice
9. Delete notes
10. Convert notes to Google Docs file
11. Pin notes to the top
12. Create and attach labels to notes (like tags)
Google Tasks
How Google Tasks Works
Google Tasks, on the other hand, helps you set and organise tasks. Keep offers a couple of the options that the Tasks app has, but Tasks is more advanced at organising tasks.
Related: Google Reminders vs. Tasks
Like Keep, Tasks is also super easy to use. It is available on Android and iOS devices; there is no standalone web version, but other Google apps integrate the tool. The app comes with a few options:
1. Add a list (work, home etc)
2. Add tasks to lists
3. Set reminders
4. Add task details
5. Complete tasks
Reminder options
Google Keep Reminder Options
The first part of this Google Keep vs. Google Tasks comparison is the reminder tool. With Google Keep, you can set one reminder per note at a time. The app shows a few quick times to choose from, but you can also select your own date and time, or even a location if you want to be reminded at a certain place. You see the options below while setting up a reminder:
1. date
2. time
3. repeat options (daily, weekly, monthly, yearly, custom)
4. place
The app offers three default reminding options (morning, afternoon, and evening), and you can change the timing for each to adjust the app according to your style.
Related: Google Keep vs. OneNote (2021)
On the other hand, Google Tasks shows a calendar view with time and repeat options below the calendar. It does not offer a place option like Keep does.
Widgets
Google Tasks List Widget
Starting with Keep, you get two widgets on Android:
1. The note list widget shows your notes right on the home screen. When you add this widget, it asks you to choose your account (if you use multiple) and a notes type (all notes, pinned notes, reminders, or a label). Depending on your selection, you will see those notes.
2. The quick capture widget is a bar for quickly adding a list, audio, handwriting, or camera note.
Google Tasks, on the contrary, also comes with two widgets:
1. The task list widget shows your most recent tasks. It also lets you mark a task as complete and change the task list.
2. The icon widget is simply an icon to quickly open the app from the home screen.
Integration options
Google Keep and Tasks Integration in Google Calendar
Both Google Keep and Tasks can be opened from Google Calendar and Gmail websites. As discussed with the Keep app, you also have an option to convert notes to Google Docs if you need rich editing options.
On Android, once you set up the Tasks app, it automatically integrates with Google Calendar to show your tasks on specific days.
Which app should you use?
The answer depends on your purpose. As mentioned, Google Keep is for taking notes, so if you need an app that can do that, go with Google Keep. The app offers simple reminder options which can help from time to time.
If you need an app to add tasks and get notifications about those tasks, then Google Tasks is the app you need in your digital world.
Conclusion
Google Keep and Tasks are similar-looking applications, but they have been designed for quite different purposes. Both are Google products made to aid our day-to-day lives.
Anyway, this article covered the essential tools of both Google Keep and Google Tasks. I hope it helped you understand the two and choose the right tool. Please let me know in the comments below if you have any questions.
How are polynomials multiplied? For example, (5x-1)(6x-3)
14 Answers
caledon | High School Teacher | (Level 3) Senior Educator
Not all polynomials are the same, but when it comes to multiplication, what they have in common is this;
Each term in a bracket must be multiplied by each term in the other bracket.
This means that the first number, 5x, needs to be multiplied by 6x AND by -3 (remember to keep the signs). Likewise, -1 needs to be multiplied by both 6x and -3.
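In symbols, this is just the distributive property applied twice:
`(a + b)(c + d) = ac + ad + bc + bd`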
The example is a product of two two-term polynomials (binomials). For these, you can use the FOIL method.
FOIL stands for First, Outer, Inner, Last.
(5x - 1) (6x -3)
First = the first number in each bracket. 5x times 6x.
Outer = the "outside" of the brackets. 5x times -3.
Inner = the "inside" of the brackets. -1 times 6x.
Last = the last number in each bracket. -1 times -3
`5x times 6x = 30x^2`
`5x times -3 = -15x` and `6x times -1 = -6x`, so `-15x + -6x = -21x`
`-1 times -3 = 3`
combining these results, we get;
`30x^2 - 21x +3`
Wiggin42 | Student, Undergraduate | (Level 2) Valedictorian
But lets say you don't have neat binomials. Perhaps you have to multiply a trinomial by a binomial.
You will have to (1) use the distributive property with each individual term in your binomial and then (2) add all like terms.
`(x^3 + 2x^2 + x) (2x^2 + 3)`
`2x^5 + 4x^4 + 2x^3`
`3x^3 + 6x^2 + 3x`
`2x^5 + 4x^4 + 5x^3 + 6x^2 + 3x`
maria-vivanco | Student, Grade 11 | (Level 1) Valedictorian
Polynomials can be multiplied in different ways. The way that I have found easier is the box method. The picture I have linked shows what it looks like. Basically you make a box (in your case you would make a 2 by 2 box). You put the first part of the polynomial on one side and the other part on the other side.
(5x-1)(6x-3)
5x-1 would go on one side.
6x-3 would go on the other. This method helps you visually see what is being multiplied and how it all goes together during the FOIL process.
Basically you would multiply everything
5x would multiply with 6x = 30x^2
-1 x 6x = -6x
-3 x 5x = -15x
-1 x -3 = 3
combine like terms and you would get 30x^2 - 21x + 3
(look at the picture attached; it's easier to understand)
For any of the polynomials, this would work. This also works for factoring polynomials.
acompanioninthetardis | Student, Undergraduate | (Level 1) Valedictorian
first number multiplied by the other first number
(5x-1)(6x-3): 5x*6x
then first multiplied by second
(5x-1)(6x-3): 5x*(-3)
then second multiplied by first
(5x-1)(6x-3): -1*(6x)
then second multiplied by second
(5x-1)(6x-3): -1*(-3)
then simplify and place together
30x^2 - 15x - 6x + 3
simplify again
30x^2 - 21x + 3
crystaltu001 | Student, Grade 10 | (Level 1) Valedictorian
(5x-1)(6x-3)
You distribute the numbers
5x(6x - 3) + -1(6x - 3)
30x^2 - 15x -6x + 3
Combine like terms
The answer is 30x^2 -21x +3
Chantelm | Student, Grade 10 | (Level 1) Salutatorian
You need to F.O.I.L (first, outer, inner, last)
(5x-1)(6x-3)
5x*6x then -1*6x then 5x*-3 then -1*-3
30x^2-6x-15x+3
30x^2-21x+3
Wiggin42 | Student, Undergraduate | (Level 2) Valedictorian
(5x-1)(6x-3)
First Outer Inner Last
5x times 6x = 30x^2 (first)
5x times -3 = -15x (outer)
-1 times 6x = -6x (inner)
-1 times -3 = 3 (last)
30x^2 -15x -6x + 3 (putting it all together)
30x^2 - 21x +3 (Simplify)
parama9000 | Student, Grade 11 | (Level 1) Valedictorian
My suggestion would be to simply split the second or the first bracket, whichever you prefer, so that you can get a clearer picture of what you need to multiply, and then do the multiplication.
Yojana_Thapa | Student, Grade 10 | (Level 1) Valedictorian
The way I multiply polynomials is by using the box method.
`(5x-1)(6x-3)`
`5x * 6x = 30x^2`
`5x * -3 = -15x`
`-1 * 6x = -6x`
`-1 * -3 = 3`
`30x^2-15x-6x+3 = 30x^2 -21x+3`
fashionableb1 | Student, Grade 10 | (Level 1) Salutatorian
To multiply a monomial and a polynomial, use the distributive property and the properties of exponents. Use the FOIL method for this one.
Subject: [RFC] memory tiering: use small chunk size and more tiers
We need some way to override the system default memory tiers. For
the example system as follows,
type abstract distance
---- -----------------
HBM 300
DRAM 1000
CXL_MEM 5000
PMEM 5100
Given the memory tier chunk size is 100, the default memory tiers
could be,
tier abstract distance types
range
---- ----------------- -----
3 300-400 HBM
10 1000-1100 DRAM
50 5000-5100 CXL_MEM
51 5100-5200 PMEM
If we want to group CXL MEM and PMEM into one tier, we have 2 choices.
1) Override the abstract distance of CXL_MEM or PMEM. For example, if
we change the abstract distance of PMEM to 5050, the memory tiers
become,
tier abstract distance types
range
---- ----------------- -----
3 300-400 HBM
10 1000-1100 DRAM
50 5000-5100 CXL_MEM, PMEM
2) Override the memory tier chunk size. For example, if we change the
memory tier chunk size to 200, the memory tiers become,
tier abstract distance types
range
---- ----------------- -----
1 200-400 HBM
5 1000-1200 DRAM
25 5000-5200 CXL_MEM, PMEM
But after some thought, I think choice 2) may not be good. The
problem is that even if 2 abstract distances are almost the same, they
may be put in 2 tiers if they sit on different sides of a tier
boundary. For example, suppose the abstract distance of CXL_MEM is
4990, while the abstract distance of PMEM is 5010. Although the
difference between the abstract distances is only 20, CXL_MEM and
PMEM will be put in different tiers if the tier chunk size is 50,
100, 200, 250, 500, .... This makes choice 2) hard to use: it may
become tricky to find an appropriate tier chunk size that satisfies
all requirements.
So I suggest abandoning choice 2) and using choice 1) only. This
makes the overall design and user space interface simpler and easier
to use. The overall design of the abstract distance could be:
1. Use decimal for abstract distance and its chunk size. This makes
them more user friendly.
2. Make the tier chunk size as small as possible. For example, 10.
This will put different memory types in one memory tier only if their
performance is almost same by default. And we will not provide the
interface to override the chunk size.
3. Make the abstract distance of normal DRAM large enough. For
example, 1000, then 100 tiers can be defined below DRAM, this is
more than enough in practice.
4. If we want to override the default memory tiers, just override the
abstract distances of some memory types with a per memory type
interface.
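As an illustration only (this helper is mine, not part of the patch), the
mapping from abstract distance to tier ID under these choices reduces to a
plain decimal division:

/* e.g. adistance 5010 -> tier 501 when MEMTIER_CHUNK_SIZE == 10 */
static inline int adistance_to_tier_id(int adistance)
{
	return rounddown(adistance, MEMTIER_CHUNK_SIZE) / MEMTIER_CHUNK_SIZE;
}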
This patch is to apply the design choices above in the existing code.
Signed-off-by: "Huang, Ying" <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Alistair Popple <[email protected]>
Cc: Bharata B Rao <[email protected]>
Cc: Dan Williams <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Davidlohr Bueso <[email protected]>
Cc: Hesham Almatary <[email protected]>
Cc: Jagdish Gediya <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Jonathan Cameron <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Tim Chen <[email protected]>
Cc: Wei Xu <[email protected]>
Cc: Yang Shi <[email protected]>
---
include/linux/memory-tiers.h | 7 +++----
mm/memory-tiers.c | 7 +++----
2 files changed, 6 insertions(+), 8 deletions(-)
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 965009aa01d7..2e39d9a6c8ce 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -7,17 +7,16 @@
#include <linux/kref.h>
#include <linux/mmzone.h>
/*
- * Each tier cover a abstrace distance chunk size of 128
+ * Each tier cover a abstrace distance chunk size of 10
*/
-#define MEMTIER_CHUNK_BITS 7
-#define MEMTIER_CHUNK_SIZE (1 << MEMTIER_CHUNK_BITS)
+#define MEMTIER_CHUNK_SIZE 10
/*
* Smaller abstract distance values imply faster (higher) memory tiers. Offset
* the DRAM adistance so that we can accommodate devices with a slightly lower
* adistance value (slightly faster) than default DRAM adistance to be part of
* the same memory tier.
*/
-#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+#define MEMTIER_ADISTANCE_DRAM ((100 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE / 2))
#define MEMTIER_HOTPLUG_PRIO 100
struct memory_tier;
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index fa8c9d07f9ce..e03011428fa5 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -165,11 +165,10 @@ static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memty
bool found_slot = false;
struct memory_tier *memtier, *new_memtier;
int adistance = memtype->adistance;
- unsigned int memtier_adistance_chunk_size = MEMTIER_CHUNK_SIZE;
lockdep_assert_held_once(&memory_tier_lock);
- adistance = round_down(adistance, memtier_adistance_chunk_size);
+ adistance = rounddown(adistance, MEMTIER_CHUNK_SIZE);
/*
* If the memtype is already part of a memory tier,
* just return that.
@@ -204,7 +203,7 @@ static struct memory_tier *find_create_memory_tier(struct memory_dev_type *memty
else
list_add_tail(&new_memtier->list, &memory_tiers);
- new_memtier->dev.id = adistance >> MEMTIER_CHUNK_BITS;
+ new_memtier->dev.id = adistance / MEMTIER_CHUNK_SIZE;
new_memtier->dev.bus = &memory_tier_subsys;
new_memtier->dev.release = memory_tier_device_release;
new_memtier->dev.groups = memtier_dev_groups;
@@ -641,7 +640,7 @@ static int __init memory_tier_init(void)
#endif
mutex_lock(&memory_tier_lock);
/*
- * For now we can have 4 faster memory tiers with smaller adistance
+ * For now we can have 100 faster memory tiers with smaller adistance
* than default DRAM tier.
*/
default_dram_type = alloc_memory_type(MEMTIER_ADISTANCE_DRAM);
--
2.35.1
package opus
type control = [
  | generic_control
  | `Set_gain of int
  | `Get_gain of int Stdlib.ref
]
type t
val check_packet : Ogg.Stream.packet -> bool
val create : ?samplerate:int -> Ogg.Stream.packet -> Ogg.Stream.packet -> t
Create a decoder with given samplerate and number of channels.
val comments : t -> string * (string * string) list
val channels : t -> int
val apply_control : control -> t -> unit
val decode_float : ?decode_fec:bool -> t -> Ogg.Stream.stream -> float array array -> int -> int -> int
val decode_float_ba : ?decode_fec:bool -> t -> Ogg.Stream.stream -> (float, Stdlib.Bigarray.float32_elt, Stdlib.Bigarray.c_layout) Stdlib.Bigarray.Array1.t array -> int -> int -> int
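A minimal usage sketch based only on the signatures above. The module path (assumed here to be Opus.Decoder) and the surrounding Ogg demuxing are assumptions, not shown on this page:

(* Build a decoder from the two Opus header packets, then decode one
   batch of float samples from the Ogg stream. *)
let decode_once header_packet comment_packet stream =
  let dec = Opus.Decoder.create ~samplerate:48000 header_packet comment_packet in
  let channels = Opus.Decoder.channels dec in
  (* one buffer of 960 samples (20 ms at 48 kHz) per channel *)
  let buf = Array.init channels (fun _ -> Array.make 960 0.) in
  let decoded = Opus.Decoder.decode_float dec stream buf 0 960 in
  (buf, decoded)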
Boa crashes on Windoze 98
Discussion in 'Python' started by Anand Pillai, Oct 13, 2003.
Anand Pillai (Guest) wrote:
I am trying to run the latest Boa on Windows 98 2nd edition,
using Python 2.2. The program starts up and immediately Windows
gives its stupid error box.
"This program has performed an illegal operation
and will be shut down... blah blah blah..."
It is a pity that I am using a stupid Windows 98 machine
but my woes are for real apart from that. I can run a lot of python
code including IDLE, PythonWin, PyPE and the wxPython examples,
but Boa just refuses to work. For the time being I am stuck to it
since my Linux box refuses to boot up. :0(
The Boa version is 0.2.3, Python is 2.3 and wxPython is 2.4.1.2.
I also tried it for Python 2.2.3 with the rest of the configuration
remaining the same, again to no avail...
Here is the traceback. Not of any real use I guess, since the stupid
traceback does not even list the module which caused the fault.
PYTHON caused an invalid page fault in
module <unknown> at 0000:00000001.
Registers:
EAX=00000001 CS=017f EIP=00000001 EFLGS=00010202
EBX=00000000 SS=0187 ESP=0062ee7c EBP=025498f0
ECX=00000018 DS=0187 ESI=02340070 FS=5b27
EDX=7fd18c45 ES=0187 EDI=00750ce0 GS=0000
Bytes at CS:EIP:
00 00 00 00 00 00 00 16 00 6d 05 65 04 70 00 65
-Anand
Anand Pillai, Oct 13, 2003 (#1)
Dutrieux Christophe, Oct 13, 2003 (#2)
JanC (Guest) wrote:
(Anand Pillai) wrote:
> I am trying to run the latest Boa on Windows 98 2nd edition,
> using python 2.2. The program starts up and immediately windows
> gives it's stupid error box.
Try the wxPython version that does _not_ support Unicode.
Also don't forget to use the resource handle monitor (RSRCMTR.EXE).
--
JanC
"Be strict when sending and tolerant when receiving."
RFC 1958 - Architectural Principles of the Internet - section 3.9
JanC, Oct 14, 2003 (#3)
Sending files from a folder by e-mail with PowerShell (drag & drop)
Here is a small PowerShell script that sends the files present in a Windows folder by e-mail to a predefined address and then moves them into a subfolder.
With it you can, for example, automatically send PDF files (or other files) to a RECHNUNGSEINGANG@ (incoming invoices) or ARCHIV@ (archive) mail address.
## Send e-mails with attachments for the files found in a folder
# Start it from Task Scheduler with
# powershell -ExecutionPolicy Unrestricted -file "G:\Meine Ablage\Systeme\Powershell\Belegdrop-email.ps1"
# Parameters & settings
$email_server = "smtp.mailserver.de"
$email_receiver = "[email protected]" # if this is empty, e-mail is disabled
$email_sender = "[email protected]"
$email_user = "[email protected]"
$pw = ConvertTo-SecureString -String "p_passwort" -AsPlainText -Force
$cred = New-Object System.Management.Automation.PSCredential $email_user, $pw # or (Get-Credential)
# determine the individual files
$email_subject = "Rechnung: "
$folder1_path = "G:\Meine Ablage\Rechnungseingang-Drop"
$p_filedone = "G:\Meine Ablage\Rechnungseingang-Drop\send-done"
# read the files from the incoming-invoice folder
$hf_files = (Get-ChildItem $folder1_path -File).Name
foreach ($item in $hf_files)
{
    $hf_filename = $folder1_path + "\" + $item
    $hf_subject = $email_subject + $item
    write-host $item
    $error.Clear()
    # send the e-mail with the attachment
    send-mailmessage -from $email_sender -to $email_receiver -subject $hf_subject -Body "Rechnung im Anhang" -Attachments $hf_filename -dno onSuccess,onFailure -smtpServer $email_server -UseSsl -Port 587 -credential $cred
    if ($error) {
        write-host $item $error
    }
    else {
        Move-Item $hf_filename -Destination $p_filedone
    }
    $error.clear()
}
# determine the individual files
$email_subject = "manuelle Ausgangsrechnung: "
$folder1_path = "G:\Meine Ablage\Rechnungsausgang-manuell-Drop"
$p_filedone = "G:\Meine Ablage\Rechnungsausgang-manuell-Drop\send-done"
# read the files from the manual outgoing-invoice folder
$hf_files = (Get-ChildItem $folder1_path -File).Name
foreach ($item in $hf_files)
{
    $hf_filename = $folder1_path + "\" + $item
    $hf_subject = $email_subject + $item
    write-host $item
    $error.Clear()
    # send the e-mail with the attachment
    send-mailmessage -from $email_sender -to $email_receiver -subject $hf_subject -Body "Rechnung im Anhang" -Attachments $hf_filename -dno onSuccess,onFailure -smtpServer $email_server -UseSsl -Port 587 -credential $cred
    if ($error) {
        write-host $item $error
    }
    else {
        Move-Item $hf_filename -Destination $p_filedone
    }
    $error.clear()
}
exit
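Since the two blocks differ only in the subject prefix and the folder, they could be collapsed into a single helper function. This is a sketch of mine, not part of the original script; it reuses the $email_* and $cred variables defined above:

function Send-FolderFiles($subjectPrefix, $folderPath) {
    $doneFolder = Join-Path $folderPath "send-done"
    foreach ($item in (Get-ChildItem $folderPath -File).Name) {
        $filePath = Join-Path $folderPath $item
        $error.Clear()
        # send the e-mail with the attachment
        send-mailmessage -from $email_sender -to $email_receiver -subject ($subjectPrefix + $item) -Body "Rechnung im Anhang" -Attachments $filePath -smtpServer $email_server -UseSsl -Port 587 -credential $cred
        if ($error) { write-host $item $error }
        else { Move-Item $filePath -Destination $doneFolder }
    }
}

Send-FolderFiles "Rechnung: " "G:\Meine Ablage\Rechnungseingang-Drop"
Send-FolderFiles "manuelle Ausgangsrechnung: " "G:\Meine Ablage\Rechnungsausgang-manuell-Drop"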
The slightly tricky part is how to start the PowerShell task afterwards in the Windows Task Scheduler. For me it currently works like this:
start C:\Windows\SysWOW64\cmd.exe as the task action
with the parameter: /c "G:\Meine Ablage\Systeme\Powershell\Belegdrop.bat"
The batch file then contains:
c:\windows\system32\WindowsPowerShell\v1.0\powershell.exe -ExecutionPolicy Unrestricted -file "G:\Meine Ablage\Systeme\Powershell\Belegdrop-email-senden.ps1"
exit /b 0
LOJ 3089 / Luogu P5319: "[BJOI2019] 奥术神杖 (Arcane Staff)"
Problem link: LOJ #3089
Problem summary:
There is a master string of length \(n\); some of its positions are already fixed, while the others may be filled arbitrarily.
You are also given \(m\) small strings; the \(i\)-th one is \(S_i\), all of its positions are fixed, and its value is \(V_i\).
Every occurrence of a small string in the master string contributes one copy of \(V_i\) to the answer multiset.
The final answer is the geometric mean of this multiset; the geometric mean of the empty set is defined to be \(1\).
Find a valid master string (filling valid characters into the free positions) that maximizes the answer.
\(1\le n,s\le 1501\), \(1\le V_i\le \max V=10^9\), where \(\displaystyle s=\sum_{i=1}^{m}|S_i|\).
Solution:
Suppose the multiset has size \(c\) and its \(i\)-th element is \(w_i\); then \(\displaystyle\mathrm{Ans}=\sqrt[c]{\prod_{i=1}^{c}w_i}\).
Taking logarithms on both sides gives \(\displaystyle\ln\mathrm{Ans}=\frac{1}{c}\sum_{i=1}^{c}\ln w_i\), which turns this into the classic 0/1 fractional programming problem.
Binary-search on the answer: the right-hand side is greater than \(\mathrm{mid}\) exactly when
\(\begin{aligned}\frac{1}{c}\sum_{i=1}^{c}\ln w_i&>\mathrm{mid}\\\sum_{i=1}^{c}\ln w_i&>c\cdot\mathrm{mid}\\\sum_{i=1}^{c}(\ln w_i-\mathrm{mid})&>0\end{aligned}\)
So build the Aho-Corasick automaton of the small strings, and after each binary-search step run a DP over the automaton to decide whether the inequality can be satisfied.
In the DP, the weight of each small string is set to \(\ln V_i-\mathrm{mid}\); remember to record the best transition points so the construction can be output.
The code follows; the complexity is \(\mathcal{O}(s\Sigma(\log\max V-\log\epsilon))\).
#include <cstdio>
#include <cmath>
typedef double f64;
const int MN = 1505, Sig = 10;
const f64 eps = 1e-6, inf = 1e99;
int N, M;
char T[MN];
char str[MN];
int ch[MN][Sig], fail[MN], sum[MN], cnt;
f64 val[MN];
inline void Insert(char *s, f64 v) {
int now = 0;
for (; *s; ++s) {
if (!ch[now][*s & 15]) ch[now][*s & 15] = ++cnt;
now = ch[now][*s & 15];
} ++sum[now], val[now] += v;
}
int que[MN], l, r;
void BuildAC() {
fail[0] = -1;
que[l = r = 1] = 0;
while (l <= r) {
int u = que[l++];
for (int i = 0; i < Sig; ++i) {
if (ch[u][i]) {
int x = fail[u];
while (~x && !ch[x][i]) x = fail[x];
if (~x) fail[ch[u][i]] = ch[x][i];
que[++r] = ch[u][i];
}
else if (~fail[u]) ch[u][i] = ch[fail[u]][i];
}
}
for (int i = 2; i <= r; ++i)
sum[que[i]] += sum[fail[que[i]]],
val[que[i]] += val[fail[que[i]]];
}
f64 f[MN][MN];
int g[MN][MN][2];
char AT[MN];
inline f64 DP(f64 V) {
for (int j = 0; j <= cnt; ++j) val[j] -= sum[j] * V;
for (int i = 0; i <= N; ++i)
for (int j = 0; j <= cnt; ++j)
f[i][j] = -inf;
f[0][0] = 0;
for (int i = 0; i < N; ++i) {
for (int j = 0; j <= cnt; ++j) {
if (f[i][j] == -inf) continue;
if (T[i] == '.') {
for (int k = 0; k < Sig; ++k) {
int _j = ch[j][k];
if (f[i + 1][_j] < f[i][j] + val[_j])
f[i + 1][_j] = f[i][j] + val[_j],
g[i + 1][_j][0] = j,
g[i + 1][_j][1] = k;
}
}
else {
int _j = ch[j][T[i] & 15];
if (f[i + 1][_j] < f[i][j] + val[_j])
f[i + 1][_j] = f[i][j] + val[_j],
g[i + 1][_j][0] = j,
g[i + 1][_j][1] = T[i] & 15;
}
}
}
for (int j = 0; j <= cnt; ++j) val[j] += sum[j] * V;
int ans = 0;
for (int j = 1; j <= cnt; ++j)
if (f[N][j] > f[N][ans]) ans = j;
for (int i = N, j = ans; i >= 1; --i)
AT[i - 1] = g[i][j][1] | 48,
j = g[i][j][0];
return f[N][ans];
}
int main() {
scanf("%d%d", &N, &M);
scanf("%s", T);
for (int i = 1; i <= M; ++i) {
f64 v;
scanf("%s%lf", str, &v);
Insert(str, log(v));
}
BuildAC();
f64 l = 0, r = log(1e9 + 5), mid, ans = 0;
while (r - l > eps) {
mid = (l + r) / 2;
if (DP(mid) > 0) ans = mid, l = mid;
else r = mid;
}
DP(ans);
printf("%s\n", AT);
return 0;
}
Let $X$ be a compact Kahler manifold of complex dimension $n$. The Aubin--Calabi--Yau theorem says that if we fix a smooth form $\rho$ in the Chern class $c_1(X)$, then every Kahler class on $X$ contains a unique Kahler metric $\omega$ whose Ricci-form is $\rho$. Alternatively, one may fix a volume form $dV$ on $X$, then the theorem gives the existence of a unique metric $\omega$ in each Kahler class whose volume form is a constant multiple of $dV$, or $dV_\omega = c dV$ where $c > 0$ is a constant:
Indeed, if we have $\rho$, let $dV = dV_\omega$ for any Kahler metric $\omega$ whose Ricci-form is $\rho$. If we have $dV$, consider the smooth hermitian metric $h$ on the canonical bundle $K_X$ defined by the equality $i^{n^2} \alpha \wedge \overline \beta = h(\alpha,\overline \beta) dV$, and take $\rho$ to be its curvature form.
Since there are at least three ways to define the Ricci tensor of a hermitian metric, but the volume form of any hermitian metric $\omega$ is $dV_\omega = \omega^n/n!$, we'll fix a volume form $dV$ such that $Vol(X,dV) = 1$.
Question: The ACY theorem gives Kahler metrics $\omega$ with $dV_\omega = dV$. Can there be non-Kahler metrics on $X$ whose volume form is $dV$?
2 Answers
Being conformally Kähler is a very restrictive property for a hermitian metric, and a generic hermitian metric is not conformally Kähler. Moreover, fixing the volume form is no restriction at all if we are allowed to multiply the metric by a conformal factor. Combining these, we see that a generic hermitian metric with a given fixed volume form is not Kähler, in both the local and the global setting.
I see how the second statement implies that every conformal class of hermitian metrics contains at most one representative with the given volume form, but is it clear that we have at least one such metric in each conformal class? – Gunnar Magnusson Apr 16 '12 at 11:25
Oh of course, I'm sorry, I was being thick. – Gunnar Magnusson Apr 16 '12 at 11:29
I really should think more about these things before asking. The answer is "yes".
K. Yang considers the flag manifold $F := F_{1,2,3} := SU(3)/S(U(1)^3)$ in Invariant Kahler metrics and projective embeddings of the flag manifolds. He shows that the space of hermitian metrics on $F$ is parametrized by $(\mathbb R_+)^3$, that is, any hermitian metric on $F$ is given by $$ h_{a,b,c} = a^2 \theta_1\otimes\overline\theta_1 + b^2 \theta_2\otimes\overline\theta_2 + b^2 \theta_3\otimes\overline\theta_3 $$ where $\theta_j$ are holomorphic 1-forms on $F$ and $a$, $b$ and $c$ are positive constants. He also shows that if $h$ is a Kahler metric on $F$, then $h$ is a constant multiple of $h_{1,\sqrt 2,1}$.
Note that the volume form of $h_{a,b,c}$ is $(abc)^2 dV$, where $dV$ is the volume form obtained by wedging the $(1,1)$-forms $(i/2)\theta_j \wedge \overline\theta_j$ together. A Kahler metric $h = \lambda h_{1,\sqrt 2,1}$ thus has the volume form $dV_h = 2\lambda^2 dV$ and we can find lots of hermitian metrics $h_{a,b,c}$ with that same volume form.
In the expression for $h_{a,b,c}$, should the coefficient of $\theta_3\otimes\bar{\theta}_3$ be $c^2$ instead of $b^2$? – Michael Albanese Apr 19 '12 at 12:06
Yes, thank you. – Gunnar Magnusson Apr 19 '12 at 15:00
maybe this question help mathoverflow.net/questions/154093/… – Hassan Jolany Jan 9 at 21:15
Delegates
Delegates are types whose instances refer to methods. It is that simple. Since delegates are types in .NET, we can use methods as values: we can pass methods as parameters and we can return methods.
The below code shows different ways we can instantiate a delegate.
1: namespace DelegatesVsInterfaces
2: {
3: public delegate string MyDelegate(string param);
4:
5: class Program
6: {
7: static void Main(string[] args)
8: {
9: // delegate initialization based on history
10:
11: // C# 1.0
12: MyDelegate md1 = new MyDelegate(MyMethod);
13: Console.WriteLine(md1("C# 1.0"));
14:
15: // C# 2.0
16: MyDelegate md2 = MyMethod;
17: Console.WriteLine(md2("C# 2.0"));
18:
19: // C# 2.0 - anonymous method
20: MyDelegate md3 = delegate(string message)
21: {
22: return String.Format("Your message, {0} - from anonymous method",message);
23: };
24: Console.WriteLine(md3("C# 2.0 and later"));
25:
26: // C# 3.0 - lambda expressions
27: MyDelegate md4 = message =>
28: {
29: return String.Format("Your message, {0} - from lambda methods", message);
30: };
31: Console.WriteLine(md4("C# 3.0 and later"));
32:
33: Console.ReadKey();
34: }
35:
36: public static string MyMethod(string greeting)
37: {
38: //Thread.Sleep(5000);
39: return String.Format("Your message, {0} - relayed from MyMethod",greeting);
40: }
41: }
42: }
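Because a delegate is an ordinary type, a method can also be passed to another method like any other value. A small sketch of my own, not from the original post:

public static string Process(MyDelegate formatter, string input)
{
    // invoke whatever method the caller handed us
    return formatter(input);
}

// usage: Console.WriteLine(Process(MyMethod, "passed as a parameter"));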
A delegate can be invoked in several ways. The most direct way is calling the delegate itself with the parameters, which indirectly invokes the delegate's Invoke() method. Invoke() is generated by the compiler with the correct parameter list.
The following code shows the direct invocation.
1: string result = md1("parameter");
2: result = md1.Invoke("parameter");
Another way of handling a delegate is invoking the method asynchronously, using the delegate's BeginInvoke() and EndInvoke() methods.
BeginInvoke() is generated for a delegate with that delegate's own parameters as the first set of parameters; the next parameter is an AsyncCallback delegate, and the last one is an object that can carry state to the callback method.
The following code explains how to use BeginInvoke() and EndInvoke() for asynchronous execution. (Note that the AsyncResult class used in the callback lives in the System.Runtime.Remoting.Messaging namespace.)
1: // put this in your method invocation code
2: MyDelegate md = MyMethod;
3: AsyncCallback callback = CallBackMethod;
4:
5: IAsyncResult result = md.BeginInvoke("Param", callback, null);
6:
7:
8: // callback method
9: public static void CallBackMethod(IAsyncResult result)
10: {
11: AsyncResult asyncResult = (AsyncResult)result;
12: MyDelegate caller = (MyDelegate)asyncResult.AsyncDelegate;
13:
14: string value = caller.EndInvoke(result);
15:
16: Console.WriteLine(value);
17: }
If the method hasn't finished executing when EndInvoke() is called, the calling thread blocks until execution finishes.
Another way of invoking a delegate is by using its DynamicInvoke() method. It behaves much like Invoke(), but differs in how parameters are passed: DynamicInvoke() takes an object array. You must pass parameters of the correct data types and the correct count; a wrong type throws an ArgumentException, and a wrong count throws a TargetParameterCountException.
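A short sketch of my own showing DynamicInvoke():

MyDelegate md = MyMethod;

// parameters travel as an object array and are checked at runtime
object[] args = { "parameter" };
string result = (string)md.DynamicInvoke(args);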
How to Configure Algorithms
List of supported algorithms for hyperparameter tuning
This page describes hyperparameter (HP) tuning algorithms that Katib supports and how to configure them.
HP Tuning Algorithms
Katib currently supports several search algorithms for hyperparameter tuning:
Grid Search

The algorithm name in Katib is grid.
Grid sampling is useful when all variables are discrete (as opposed to continuous) and the number of possibilities is low. A grid search performs an exhaustive combinatorial search over all possibilities, making the search process extremely long even for medium sized problems.
Katib uses the Optuna optimization framework for its grid search.
| Setting name | Description | Example |
| --- | --- | --- |
| random_state | [int]: Set random_state to something other than None for reproducible results. | 10 |
Random Search

The algorithm name in Katib is random.
Random sampling is an alternative to grid search and is used when the number of discrete variables to optimize is large and the time required for each evaluation is long. When all parameters are discrete, random search performs sampling without replacement. Random search is therefore the best algorithm to use when combinatorial exploration is not possible. If the number of continuous variables is high, you should use quasi random sampling instead.
Katib uses the Hyperopt, Goptuna or Optuna optimization framework for its random search.
Katib supports the following algorithm settings:
| Setting name | Description | Example |
| --- | --- | --- |
| random_state | [int]: Set random_state to something other than None for reproducible results. | 10 |
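These settings are passed through the algorithmSettings field of the Experiment spec. A minimal sketch (the values are illustrative; the same pattern applies to the other algorithms on this page):

algorithm:
  algorithmName: random
  algorithmSettings:
    - name: random_state
      value: "10"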
Bayesian optimization
The algorithm name in Katib is bayesianoptimization.
The Bayesian optimization method uses Gaussian process regression to model the search space. This technique calculates an estimate of the loss function and the uncertainty of that estimate at every point in the search space. The method is suitable when the number of dimensions in the search space is low. Since the method models both the expected loss and the uncertainty, the search algorithm converges in a few steps, making it a good choice when the time to complete the evaluation of a parameter configuration is long.
Katib uses the Scikit-Optimize optimization framework for its Bayesian search. Scikit-Optimize is also known as skopt.
Katib supports the following algorithm settings:
| Setting name | Description | Example |
| --- | --- | --- |
| base_estimator | ["GP", "RF", "ET", "GBRT" or sklearn regressor, default="GP"]: Should inherit from sklearn.base.RegressorMixin. The predict method should have an optional return_std argument, which returns std(Y \| x) along with E[Y \| x]. If base_estimator is one of ["GP", "RF", "ET", "GBRT"], the system uses a default surrogate model of the corresponding type. Learn more information in the skopt documentation. | GP |
| n_initial_points | [int, default=10]: Number of evaluations of func with initialization points before approximating it with base_estimator. Points provided as x0 count as initialization points. If len(x0) < n_initial_points, the system samples additional points at random. Learn more information in the skopt documentation. | 10 |
| acq_func | [string, default="gp_hedge"]: The function to minimize over the posterior distribution. Learn more information in the skopt documentation. | gp_hedge |
| acq_optimizer | [string, "sampling" or "lbfgs", default="auto"]: The method to minimize the acquisition function. The system updates the fit model with the optimal value obtained by optimizing acq_func with acq_optimizer. Learn more information in the skopt documentation. | auto |
| random_state | [int]: Set random_state to something other than None for reproducible results. | 10 |
Hyperband
The algorithm name in Katib is hyperband.
Katib supports the Hyperband optimization framework. Instead of using Bayesian optimization to select configurations, Hyperband focuses on early stopping as a strategy for optimizing resource allocation and thus for maximizing the number of configurations that it can evaluate. Hyperband also focuses on the speed of the search.
Tree of Parzen Estimators (TPE)
The algorithm name in Katib is tpe.
Katib uses the Hyperopt, Goptuna or Optuna optimization framework for its TPE search.
This method uses tree-structured Parzen estimators to model the objective and suggest new configurations.
Katib supports the following algorithm settings:
| Setting name | Description | Example |
| --- | --- | --- |
| n_EI_candidates | [int]: Number of candidate samples used to calculate the expected improvement. | 25 |
| random_state | [int]: Set random_state to something other than None for reproducible results. | 10 |
| gamma | [float]: The threshold to split between l(x) and g(x); check equation 2 in this paper. Value must be in the (0, 1) range. | 0.25 |
| prior_weight | [float]: Smoothing factor for counts, to avoid having 0 probability. Value must be > 0. | 1.1 |
Multivariate TPE
The algorithm name in Katib is multivariate-tpe.
Katib uses the Optuna optimization framework for its Multivariate TPE search.
Multivariate TPE is improved version of independent (default) TPE. This method finds dependencies among hyperparameters in search space.
Katib supports the following algorithm settings:
| Setting name | Description | Example |
| --- | --- | --- |
| n_ei_candidates | [int]: Number of Trials used to calculate the expected improvement. | 25 |
| random_state | [int]: Set random_state to something other than None for reproducible results. | 10 |
| n_startup_trials | [int]: Number of initial Trials for which the random search algorithm generates hyperparameters. | 5 |
Covariance Matrix Adaptation Evolution Strategy (CMA-ES)
The algorithm name in Katib is cmaes.
Katib uses the Goptuna or Optuna optimization framework for its CMA-ES search.
The Covariance Matrix Adaptation Evolution Strategy is a stochastic derivative-free numerical optimization algorithm for optimization problems in continuous search spaces. You can also use IPOP-CMA-ES and BIPOP-CMA-ES, variant algorithms for restarting optimization when converges to local minimum.
Katib supports the following algorithm settings:
| Setting name | Description | Example |
| --- | --- | --- |
| random_state | [int]: Set random_state to something other than None for reproducible results. | 10 |
| sigma | [float]: Initial standard deviation of CMA-ES. | 0.001 |
| restart_strategy | [string, "none", "ipop", or "bipop", default="none"]: Strategy for restarting CMA-ES optimization when it converges to a local minimum. | "ipop" |
Sobol Quasirandom Sequence
The algorithm name in Katib is sobol.
Katib uses the Goptuna optimization framework for its Sobol’s quasirandom search.
The Sobol quasirandom sequence is a low-discrepancy sequence, and it is known to provide good uniformity properties.
Population Based Training
The algorithm name in Katib is pbt.
Review the population based training paper for more details about the algorithm.
The PBT service requires a Persistent Volume Claim with RWX access mode to share resources between Suggestion and Trials. Currently, Katib Experiments should have resumePolicy: FromVolume to run the PBT algorithm. Learn more about resume policies in this guide.
Katib supports the following algorithm settings:
| Setting name | Description | Example |
| --- | --- | --- |
| suggestion_trial_dir | The location within the Trial container where checkpoints are saved | /var/log/katib/checkpoints/ |
| n_population | Number of Trial seeds per generation | 40 |
| resample_probability | null (default): perturbs the hyperparameter by 0.8 or 1.2. 0-1: resamples the original distribution by the specified probability | 0.3 |
| truncation_threshold | Exploit threshold for pruning low performing seeds | 0.4 |
Use Custom Algorithm in Katib
You can add an HP tuning algorithm to Katib yourself. The design of Katib follows the ask-and-tell pattern:
1. Ask for a new set of parameters
2. Walk to the Experiment and program in the new parameters
3. Observe the outcome of running the Experiment
4. Walk back to your laptop, tell the optimizer about the outcome, and go to step 1
When an Experiment is created, an algorithm service is created as a Kubernetes Deployment. Katib then asks it for new sets of parameters via the GetSuggestions gRPC call. After that, Katib creates new Trials according to those sets and observes the outcome. When the Trials are finished, Katib tells the algorithm the metrics of the finished Trials and asks for further new sets.
Create a new Algorithm Service
The new algorithm needs to implement the Suggestion service defined in api.proto.
A sample algorithm looks like this:
from pkg.apis.manager.v1beta1.python import api_pb2
from pkg.apis.manager.v1beta1.python import api_pb2_grpc
from pkg.suggestion.v1beta1.internal.search_space import HyperParameter, HyperParameterSearchSpace
from pkg.suggestion.v1beta1.internal.trial import Trial, Assignment
from pkg.suggestion.v1beta1.hyperopt.base_service import BaseHyperoptService
from pkg.suggestion.v1beta1.internal.base_health_service import HealthServicer
# Inherit SuggestionServicer and implement GetSuggestions.
class HyperoptService(
api_pb2_grpc.SuggestionServicer, HealthServicer):
def ValidateAlgorithmSettings(self, request, context):
# Optional, it is used to validate algorithm settings defined by users.
pass
def GetSuggestions(self, request, context):
# Convert the Experiment in GRPC request to the search space.
# search_space example:
# HyperParameterSearchSpace(
# goal: MAXIMIZE,
# params: [HyperParameter(name: param-1, type: INTEGER, min: 1, max: 5, step: 0),
# HyperParameter(name: param-2, type: CATEGORICAL, list: cat1, cat2, cat3),
# HyperParameter(name: param-3, type: DISCRETE, list: 3, 2, 6),
# HyperParameter(name: param-4, type: DOUBLE, min: 1, max: 5, step: )]
# )
search_space = HyperParameterSearchSpace.convert(request.experiment)
# Convert the Trials in GRPC request to the Trials in algorithm side.
# Trials example:
# [Trial(
# assignment: [Assignment(name=param-1, value=2),
# Assignment(name=param-2, value=cat1),
# Assignment(name=param-3, value=2),
# Assignment(name=param-4, value=3.44)],
# target_metric: Metric(name="metric-2" value="5643"),
# additional_metrics: [Metric(name=metric-1, value=435),
# Metric(name=metric-3, value=5643)],
# Trial(
# assignment: [Assignment(name=param-1, value=3),
# Assignment(name=param-2, value=cat2),
# Assignment(name=param-3, value=6),
# Assignment(name=param-4, value=4.44)],
# target_metric: Metric(name="metric-2" value="3242"),
# additional_metrics: [Metric(name=metric=1, value=123),
# Metric(name=metric-3, value=543)],
trials = Trial.convert(request.trials)
#--------------------------------------------------------------
# Your code here
# Implement the logic to generate new assignments for the given current request number.
# For example, if request.current_request_number is 2, you should return:
# [
# [Assignment(name=param-1, value=3),
# Assignment(name=param-2, value=cat2),
# Assignment(name=param-3, value=3),
# Assignment(name=param-4, value=3.22)
# ],
# [Assignment(name=param-1, value=4),
# Assignment(name=param-2, value=cat4),
# Assignment(name=param-3, value=2),
# Assignment(name=param-4, value=4.32)
# ],
# ]
list_of_assignments = your_logic(search_space, trials, request.current_request_number)
#--------------------------------------------------------------
# Convert list_of_assignments to
return api_pb2.GetSuggestionsReply(
trials=Assignment.generate(list_of_assignments)
)
Build Docker Image for Algorithm Service
You should build a Docker image for your algorithm service. For that, add a new Dockerfile under cmd/suggestion, for example: cmd/suggestion/hyperopt. The new gRPC server should serve on port 6789.
After that you can build the Docker image for your algorithm:
docker build . -f cmd/suggestion/<PATH_TO_DOCKER> -t <DOCKER_IMAGE>
Update the Katib Config
Update the Katib config with the new algorithm entity:
runtime:
suggestions:
- algorithmName: random
image: docker.io/kubeflowkatib/suggestion-hyperopt:$(KATIB_VERSION)
- algorithmName: tpe
image: docker.io/kubeflowkatib/suggestion-hyperopt:$(KATIB_VERSION)
+ - algorithmName: <new-algorithm-name>
+ image: <DOCKER_IMAGE>
Contribute the Algorithm to Katib
If you want to contribute the algorithm to Katib, you can add unit tests and/or e2e tests for it in the CI and submit a PR.
Add Unit Tests for the Algorithm
Here is an example test_hyperopt_service.py:
import grpc
import grpc_testing
import unittest
from pkg.apis.manager.v1beta1.python import api_pb2_grpc
from pkg.apis.manager.v1beta1.python import api_pb2
from pkg.suggestion.v1beta1.hyperopt.service import HyperoptService
class TestHyperopt(unittest.TestCase):
def setUp(self):
servicers = {
api_pb2.DESCRIPTOR.services_by_name['Suggestion']: HyperoptService()
}
self.test_server = grpc_testing.server_from_dictionary(
servicers, grpc_testing.strict_real_time())
if __name__ == '__main__':
unittest.main()
You can set up the gRPC server using grpc_testing, then define your own test cases.
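For instance, a test method added inside the TestHyperopt class could drive the servicer through grpc_testing like this (a sketch; building a complete GetSuggestionsRequest for a real experiment is elided):

    def test_get_suggestions(self):
        request = api_pb2.GetSuggestionsRequest()  # fill in experiment, trials, current_request_number
        rpc = self.test_server.invoke_unary_unary(
            method_descriptor=(api_pb2.DESCRIPTOR
                               .services_by_name['Suggestion']
                               .methods_by_name['GetSuggestions']),
            invocation_metadata={}, request=request, timeout=1)
        response, metadata, code, details = rpc.termination()
        self.assertEqual(code, grpc.StatusCode.OK)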
/*****************************************************************************
*
* This file is part of Mapnik (c++ mapping toolkit)
*
* Copyright (C) 2011 Artem Pavlenko
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*****************************************************************************/
// mapnik
#include <mapnik/debug.hpp>
// stl
#include <ctime>
#ifndef MAPNIK_LOG_FORMAT
#define MAPNIK_LOG_FORMAT "Mapnik LOG> %Y-%m-%d %H:%M:%S:"
#endif
#ifndef MAPNIK_DEFAULT_LOG_SEVERITY
#ifdef MAPNIK_DEBUG
#define MAPNIK_DEFAULT_LOG_SEVERITY 1
#else
#define MAPNIK_DEFAULT_LOG_SEVERITY 3
#endif
#endif
namespace mapnik {
// mutexes
#ifdef MAPNIK_THREADSAFE
boost::mutex logger::severity_mutex_;
boost::mutex logger::format_mutex_;
#endif
// first time checks
bool logger::severity_env_check_ = true;
bool logger::format_env_check_ = true;
// severity
logger::severity_type logger::severity_level_ =
#if MAPNIK_DEFAULT_LOG_SEVERITY == 0
logger::info
#elif MAPNIK_DEFAULT_LOG_SEVERITY == 1
logger::debug
#elif MAPNIK_DEFAULT_LOG_SEVERITY == 2
logger::warn
#elif MAPNIK_DEFAULT_LOG_SEVERITY == 3
logger::error
#elif MAPNIK_DEFAULT_LOG_SEVERITY == 4
logger::fatal
#elif MAPNIK_DEFAULT_LOG_SEVERITY == 5
logger::none
#else
#error "Wrong default log severity level specified!"
#endif
;
logger::severity_map logger::object_severity_level_ = logger::severity_map();
// format
#define __xstr__(s) __str__(s)
#define __str__(s) #s
std::string logger::format_ = __xstr__(MAPNIK_LOG_FORMAT);
#undef __xstr__
#undef __str__
std::string logger::str()
{
#if 0
// update the format from getenv if this is the first time
if (logger::format_env_check_)
{
logger::format_env_check_ = false;
const char* log_format = getenv("MAPNIK_LOG_FORMAT");
if (log_format != NULL)
{
logger::format_ = log_format;
}
}
#endif
char buf[256];
const time_t tm = time(0);
strftime(buf, sizeof(buf), logger::format_.c_str(), localtime(&tm));
return buf;
}
// output
std::ofstream logger::file_output_;
std::string logger::file_name_;
std::streambuf* logger::saved_buf_ = 0;
void logger::use_file(const std::string& filepath)
{
// save clog rdbuf
if (saved_buf_ == 0)
{
saved_buf_ = std::clog.rdbuf();
}
// use a file to output as clog rdbuf
if (file_name_ != filepath)
{
file_name_ = filepath;
if (file_output_.is_open())
{
file_output_.close();
}
file_output_.open(file_name_.c_str(), std::ios::out | std::ios::app);
if (file_output_)
{
std::clog.rdbuf(file_output_.rdbuf());
}
else
{
std::stringstream s;
s << "cannot redirect log to file " << file_output_;
throw std::runtime_error(s.str());
}
}
}
void logger::use_console()
{
// save clog rdbuf
if (saved_buf_ == 0)
{
saved_buf_ = std::clog.rdbuf();
}
// close the file to force a flush
if (file_output_.is_open())
{
file_output_.close();
}
std::clog.rdbuf(saved_buf_);
}
} // namespace mapnik
1
(********************************************************************)
2
(* *)
3
(* The LustreC compiler toolset / The LustreC Development Team *)
4
(* Copyright 2012 - -- ONERA - CNRS - INPT - ISAE-SUPAERO *)
5
(* *)
6
(* LustreC is free software, distributed WITHOUT ANY WARRANTY *)
7
(* under the terms of the GNU Lesser General Public License *)
8
(* version 2.1. *)
9
(* *)
10
(********************************************************************)
11
12
open Format
13
14
open Machine_code_types
15
open Lustre_types
16
open Corelang
17
open Machine_code_common
18
19
open Misc_printer
20
open Misc_lustre_function
21
open Ada_printer
22
open Ada_backend_common
23
24
(** Functions printing the .ads file **)
25
module Main =
26
struct
27
28
29
let suffixOld = "_old"
30
let suffixNew = "_new"
31
let pp_transition_name fmt = fprintf fmt "transition"
32
let pp_state_name_transition suffix fmt = fprintf fmt "%t%s" pp_state_name suffix
33
34
35
36
37
(** Printing function for basic assignement [var := value].
38
39
@param fmt the formater to print on
40
@param var_name the name of the variable
41
@param value the value to be assigned
42
**)
43
let pp_local_eq env fmt var value =
44
fprintf fmt "%t = %a"
45
(pp_var_name var)
46
(pp_value env) value
47
48
(** Printing function for basic assignement [var := value].
49
50
@param fmt the formater to print on
51
@param var_name the name of the variable
52
@param value the value to be assigned
53
**)
54
let pp_state_eq env fmt var value =
55
fprintf fmt "%t = %a"
56
(pp_access (pp_state_name_transition suffixNew) (pp_var_name var))
57
(pp_value env) value
58
59
(** Printing function for instruction. See
60
{!type:Machine_code_types.instr_t} for more details on
61
machine types.
62
63
@param typed_submachines list of all typed machine instances of this machine
64
@param machine the current machine
65
@param fmt the formater to print on
66
@param instr the instruction to print
67
**)
68
let rec pp_machine_instr typed_submachines env instr fmt =
69
let pp_instr = pp_machine_instr typed_submachines env in
70
let pp_state suffix i fmt = fprintf fmt "%t.%s" (pp_state_name_transition suffix) i in
71
match get_instr_desc instr with
72
(* no reset *)
73
| MNoReset _ -> ()
74
(* reset *)
75
| MReset i when List.mem_assoc i typed_submachines ->
76
let (substitution, submachine) = get_instance i typed_submachines in
77
let pp_package = pp_package_name_with_polymorphic substitution submachine in
78
let args = if is_machine_statefull submachine then [[pp_state suffixOld i;pp_state suffixNew i]] else [] in
79
pp_call fmt (pp_package_access (pp_package, pp_reset_procedure_name), args)
80
| MLocalAssign (ident, value) ->
81
pp_local_eq env fmt ident value
82
| MStateAssign (ident, value) ->
83
pp_state_eq env fmt ident value
84
| MStep ([i0], i, vl) when is_builtin_fun i ->
85
let value = mk_val (Fun (i, vl)) i0.var_type in
86
if List.mem_assoc i0.var_id env then
87
pp_state_eq env fmt i0 value
88
else
89
pp_local_eq env fmt i0 value
90
91
| MStep (il, i, vl) when List.mem_assoc i typed_submachines ->
92
let (substitution, submachine) = get_instance i typed_submachines in
93
let pp_package = pp_package_name_with_polymorphic substitution submachine in
94
let input = List.map (fun x fmt -> pp_value env fmt x) vl in
95
let output = List.map pp_var_name il in
96
let args =
97
(if is_machine_statefull submachine then [[pp_state suffixOld i;pp_state suffixNew i]] else [])
98
@(if input!=[] then [input] else [])
99
@(if output!=[] then [output] else [])
100
in
101
fprintf fmt "(%a)" pp_call (pp_package_access (pp_package, pp_transition_name), args)
102
| MComment s -> ()
103
| _ -> assert false
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
(** Print the expression function representing the transition predicate.
123
@param fmt the formater to print on
124
   @param machine the machine
**)
let pp_transition_predicate prototype typed_submachines fmt (opt_spec_machine, m) =
  let old_state = (AdaIn, pp_state_name_transition suffixOld, pp_state_type, None) in
  let new_state = (AdaIn, pp_state_name_transition suffixNew, pp_state_type, None) in
  let env = List.map (fun x -> x.var_id, pp_state_name_transition suffixOld) m.mmemory in
  let inputs = build_pp_var_decl_step_input AdaIn None m in
  let outputs = build_pp_var_decl_step_output AdaIn None m in
  let instrs = push_if_in_expr m.mstep.step_instrs in
  let content = List.map (pp_machine_instr typed_submachines env) instrs in
  let locals = List.map (fun x -> (pp_var_name x, fun fmt -> pp_var_type fmt x)) m.mstep.step_locals in
  pp_predicate pp_transition_name ([[old_state; new_state]]@inputs@outputs) fmt (if prototype then None else Some (locals, content))

(** Print a new statement instantiating a generic package.
   @param fmt the formatter to print on
   @param substitutions the instantiation substitution
   @param machine the machine to instantiate
**)
let pp_new_package fmt (substitutions, machine) =
  let pp_name = pp_package_name machine in
  let pp_new_name = pp_package_name_with_polymorphic substitutions machine in
  let instanciations = List.map (fun (id, typ) -> (pp_polymorphic_type id, fun fmt -> pp_type fmt typ)) substitutions in
  pp_package_instanciation pp_new_name pp_name fmt instanciations

(** Remove duplicates from a list according to a given predicate.
   @param eq the predicate defining equality
   @param l the list to parse
**)
let remove_duplicates eq l =
  let aux l x = if List.exists (eq x) l then l else x::l in
  List.fold_left aux [] l

(** Compare two typed machines.
**)
let eq_typed_machine (subst1, machine1) (subst2, machine2) =
  (String.equal machine1.mname.node_id machine2.mname.node_id) &&
  (List.for_all2 (fun a b -> pp_eq_type (snd a) (snd b)) subst1 subst2)

(** Print the package declaration (ads) of a machine.
   It requires the list of all typed instances.
   A typed submachine is a (ident, typed_machine) with:
   - ident: the name
   - typed_machine: a (substitution, machine) with:
     - machine: the submachine struct
     - substitution: the instantiation of all its polymorphic types
   @param fmt the formatter to print on
   @param typed_submachines list of all typed submachines of this machine
   @param m the machine
**)
let pp_file fmt (typed_submachines, ((m_spec_opt, guarantees), m)) =
  let typed_machines = snd (List.split typed_submachines) in
  let typed_machines_set = remove_duplicates eq_typed_machine typed_machines in

  let machines_to_import = List.map pp_package_name (snd (List.split typed_machines_set)) in

  let polymorphic_types = find_all_polymorphic_type m in

  let typed_machines_to_instanciate =
    List.filter (fun (l, _) -> l != []) typed_machines_set in

  let typed_instances = List.filter is_submachine_statefull typed_submachines in

  let pp_state_decl_and_reset fmt = fprintf fmt "%t;@,@,%a;@,@,"
    (* Declare the state type *)
    (pp_type_decl pp_state_type AdaPrivate)
    (* Declare the reset procedure *)
    (pp_procedure pp_reset_procedure_name (build_pp_arg_reset m) None) AdaNoContent
  in

  let vars_spec = match m_spec_opt with
    | None -> []
    | Some m_spec -> List.map (build_pp_var_decl AdaNoMode None) m_spec.mstep.step_locals
  in
  let vars = List.map (build_pp_var_decl AdaNoMode None) m.mmemory in
  let states = List.map (build_pp_state_decl_from_subinstance AdaNoMode None) typed_instances in
  let var_lists =
    (if states = [] then [] else [states]) @
    (if vars = [] then [] else [vars]) @
    (if vars_spec = [] then [] else [vars_spec]) in

  let pp_ifstatefull fmt pp =
    if is_machine_statefull m then
      fprintf fmt "%t" pp
    else
      fprintf fmt ""
  in

  let pp_private_section fmt = fprintf fmt "@,private@,@,%a%t%a;@,@,%a"
    (* Instantiate the polymorphic types that need to be instantiated *)
    (Utils.fprintf_list ~sep:";@," pp_new_package) typed_machines_to_instanciate
    (Utils.pp_final_char_if_non_empty ";@,@," typed_machines_to_instanciate)
    (* Define the state type *)
    pp_ifstatefull (fun fmt -> pp_record pp_state_type fmt var_lists)
    (* Declare the transition predicate *)
    (pp_transition_predicate false typed_submachines) (m_spec_opt, m)
  in

  let pp_content fmt =
    let pp_contract_opt =
      let ghost_memory = match m_spec_opt with
        | None -> []
        | Some m_spec -> m_spec.mstep.step_locals
      in
      let pp_var x fmt =
        if List.exists (fun var -> var.var_id == x) ghost_memory then
          pp_access pp_state_name (fun fmt -> pp_clean_ada_identifier fmt x) fmt
        else
          pp_clean_ada_identifier fmt x
      in
      let input = List.map pp_var_name m.mstep.step_inputs in
      let output = List.map pp_var_name m.mstep.step_outputs in
      let args =
        (if is_machine_statefull m then [[pp_old pp_state_name; pp_state_name]] else [])
        @ (if input != [] then [input] else [])
        @ (if output != [] then [output] else [])
      in
      let transition fmt = pp_call fmt (pp_transition_name, args) in
      match guarantees with
      | [] -> Some (false, [], [transition])
      | _ -> Some (false, [], transition::(List.map pp_var guarantees))
    in
    fprintf fmt "%a%a%a@,@,%a;%t" (* %a;@, *)
      pp_ifstatefull pp_state_decl_and_reset
      (* Declare the step procedure *)
      (pp_procedure pp_step_procedure_name (build_pp_arg_step m) pp_contract_opt) AdaNoContent
      pp_ifstatefull (fun fmt -> fprintf fmt ";@,")
      (* Declare the transition predicate *)
      (pp_transition_predicate true typed_submachines) (m_spec_opt, m)
      (* Print the private section *)
      pp_private_section
  in

  let pp_poly_types id = pp_type_decl (pp_polymorphic_type id) AdaPrivate in
  let pp_generics = List.map pp_poly_types polymorphic_types in

  fprintf fmt "@[<v>%a%t%a;@]@."
    (* Include all the subinstance packages *)
    (Utils.fprintf_list ~sep:";@," (pp_with AdaPrivate)) machines_to_import
    (Utils.pp_final_char_if_non_empty ";@,@," machines_to_import)
    (* Begin the package *)
    (pp_package (pp_package_name m) pp_generics false) pp_content

end
HTML
<div class="main">
<div class="top half">
<img src="https://s3-us-west-2.amazonaws.com/s.cdpn.io/142996/avengers2.jpg" />
</div>
<div class="bottom half">
<img src="https://s3-us-west-2.amazonaws.com/s.cdpn.io/142996/avengers2.jpg" />
</div>
<button class="super-button">Enter</button>
<div class="overlay"></div>
<div class="button-line left">
<div class="inner"></div>
</div>
<div class="button-line right">
<div class="inner"></div>
</div>
</div>
<div class="content">
<div class="hello">Welcome!</div>
<nav>
<ul>
<li class="active"><a href="#">Hulk crush!</a></li>
<li><a href="#">Such movie</a></li>
<li><a href="#">Wow!</a></li>
<li><a href="#">Very avengers</a></li>
<li><a href="#">Much comics</a></li>
</ul>
</nav>
</div>
CSS
@import "compass/css3";
@font-face {
font-family: decade;
src: url('https://s3-us-west-2.amazonaws.com/s.cdpn.io/142996/decade.ttf');
}
@mixin transition($dur) {
-webkit-transition: $dur;
-moz-transition: $dur;
-ms-transition: $dur;
-o-transition: $dur;
transition: $dur;
}
@mixin transitionDelay($dur) {
-webkit-transition-delay: $dur;
-moz-transition-delay: $dur;
-ms-transition-delay: $dur;
-o-transition-delay: $dur;
transition-delay: $dur;
}
@mixin translateXY($x: 0, $y: 0) {
-webkit-transform: translateX($x) translateY($y);
-moz-transform: translateX($x) translateY($y);
-ms-transform: translateX($x) translateY($y);
-o-transform: translateX($x) translateY($y);
transform: translateX($x) translateY($y);
}
@mixin translateXYandScale($x: 0, $y: 0, $scale: 0) {
-webkit-transform: translateX($x) translateY($y) scale($scale);
-moz-transform: translateX($x) translateY($y) scale($scale);
-ms-transform: translateX($x) translateY($y) scale($scale);
-o-transform: translateX($x) translateY($y) scale($scale);
transform: translateX($x) translateY($y) scale($scale);
}
@mixin rotate($deg) {
-webkit-transform: rotate($deg);
-moz-transform: rotate($deg);
-ms-transform: rotate($deg);
-o-transform: rotate($deg);
transform: rotate($deg);
}
@mixin scale($val) {
-webkit-transform: scale($val);
-moz-transform: scale($val);
-ms-transform: scale($val);
-o-transform: scale($val);
transform: scale($val);
}
@mixin clearfix {
content: "";
display: table;
clear: both;
}
$green: #c7ff66;
$gray: #dcdcdc;
$lightGray: #8f8f8f;
$darkBg: rgba(49,49,49, .7);
$red: #ff5954;
*, *:before, *:after {
-webkit-box-sizing: border-box;
-moz-box-sizing: border-box;
box-sizing: border-box;
@include transition(.3s);
list-style-type: none;
margin: 0;
padding: 0;
}
a {
color: inherit;
text-decoration: none;
}
html, body {
width: 100%;
height: 100%;
overflow: hidden;
}
body {
font-family: decade;
}
.half {
position: absolute;
overflow: hidden;
z-index: 50;
left: 0;
width: 100%;
height: 50%;
@include transition(3s);
will-change: transform;
img {
position: absolute;
top: -10%;
left: 0;
width: 100%;
}
&.top {
top: 0;
}
&.bottom {
top: 50%;
img {
top: -110%;
}
}
&.active.top {
@include translateXY(0, -100%);
}
&.active.bottom {
@include translateXY(0, 100%);
}
}
.super-button {
position: absolute;
left: 50%;
top: 50%;
z-index: 200;
width: 200px;
height: 80px;
opacity: 0.8;
@include translateXY(-50%, -50%);
text-transform: uppercase;
font-size: 40px;
font-weight: bold;
color: $lightGray;
background-color: transparent;
border: 4px solid rgba(143,143,143, 0.5);
@include transition(1.5s);
will-change: color, opacity, border-color;
&:hover {
color: #fff;
opacity: 1;
border-color: #fff;
}
&:hover ~ .overlay {
background-color: rgba(155, 0, 0, .5);
}
&:hover ~ .overlay.active {
background-color: transparent;
}
&:hover ~ .button-line .inner {
max-width: 100%;
}
&.active {
@include transition(4.5s);
@include translateXY(-50%, -1000px);
}
}
.overlay {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
z-index: 100;
background-color: rgba(155, 0, 0, 0);
@include transition(1.5s);
will-change: background-color;
&.active {
background-color: transparent;
}
}
.button-line {
position: absolute;
top: -webkit-calc(50% - 2px);
top: -moz-calc(50% - 2px);
top: calc(50% - 2px);
left: 0;
z-index: 200;
width: -webkit-calc(50% - 100px);
width: -moz-calc(50% - 100px);
width: calc(50% - 100px);
height: 4px;
background-color: rgba(143,143,143, 0.5);
@include transition(.5s);
will-change: width, max-width;
&.left {
@include rotate(180deg);
}
&.right {
left: -webkit-calc(50% + 100px);
left: -moz-calc(50% + 100px);
left: calc(50% + 100px);
}
.inner {
width: 100%;
height: 100%;
background-color: #fff;
max-width: 0;
@include transition(1.5s);
}
&.active {
opacity: 0;
}
}
.content {
position: absolute;
padding: 50px;
top: 0;
left: 0;
width: 100%;
height: 100%;
z-index: 40;
background-color: #1d1f20;
.hello {
font-size: 50px;
font-weight: bold;
color: $red;
text-transform: uppercase;
position: absolute;
top: 50%;
left: 50%;
opacity: 0;
@include translateXYandScale(-50%, -50%, 0.5);
@include transition(1s);
}
nav {
margin: 0 auto;
border: 2px solid #fff;
opacity: 0;
@include scale(0.5);
@include transition(1s);
max-width: 900px;
ul {
font-size: 0;
li {
display: inline-block;
font-size: 25px;
text-transform: uppercase;
color: #fff;
padding: 5px 10px;
border-right: 2px solid #fff;
&:last-child {
border-right: none;
}
&.active {
color: $red;
}
&:hover {
color: $red;
}
}
}
}
&.active {
.hello {
opacity: 1;
@include transitionDelay(1.5s);
@include translateXYandScale(-50%, -50%, 1);
}
nav {
@include transitionDelay(1.5s);
@include scale(1);
opacity: 1;
}
}
}
JS
$(document).ready(function() {
$(document).on("click", ".super-button", function() {
$(".main, .half, .overlay, .button-line, .super-button, .content").addClass("active");
setTimeout(function() {
$(".main *").css("z-index", "1");
}, 3000);
});
});
Union in C
A union is a special data type available in C that allows different data types to be stored in the same memory location. You can define a union with many members, but only one member can contain a value at any given time. Unions provide an efficient way of using the same memory location for multiple purposes.
Syntax:
union [union tag]
{
member definition;
member definition;
...
member definition;
} [one or more union variables];
Create union variables and access members of a union
When a union is defined, it creates a user-defined type. However, no memory is allocated. To allocate memory for a given union type and work with it, we need to create variables.
Here's how we create union variables:
union car
{
char name[50];
int price;
};
int main()
{
union car car1, car2, *car3;
return 0;
}
Another way of creating union variables is:
union car
{
char name[50];
int price;
} car1, car2, *car3;
In both cases, union variables car1, car2, and a union pointer car3 of union car type are created.
How to access members of a union?
We use the '.' operator to access the members of a union variable. To access members through a pointer to a union, we use the -> operator.
In the above example,
price for car1 can be accessed using car1.price
price for car3 can be accessed using car3->price
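For example, a minimal sketch (the price values are illustrative) that exercises both access forms:
#include <stdio.h>

union car
{
    char name[50];
    int price;
};

int main()
{
    union car car1, *car3;
    car3 = &car1;

    car1.price = 20000;              /* '.' on a normal union variable */
    printf("%d\n", car1.price);

    car3->price = 25000;             /* '->' on a union pointer */
    printf("%d\n", car3->price);
    return 0;
}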
Similarities between struct and union
1. Both are user-defined data types used to store data of different types as a single unit.
2. Their members can be objects of any type, including other structures and unions or arrays. A member can also be a bit field.
3. Both structures and unions support only the assignment (=) and sizeof operators. The two structures or unions in an assignment must have the same members and member types (see the sketch after this list).
4. A structure or a union can be passed by value to functions and returned by value by functions. The argument must have the same type as the function parameter; a structure or union is passed by value just like a scalar variable.
5. The '.' operator is used for accessing members.
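As a minimal illustration of point 3 (the struct point type here is hypothetical, not from the article):
#include <stdio.h>

struct point
{
    int x;
    int y;
};

int main()
{
    struct point a = {1, 2};
    struct point b;

    b = a;                                  /* assignment copies every member */
    printf("%d %d\n", b.x, b.y);            /* prints: 1 2 */
    printf("%zu\n", sizeof(struct point));  /* sizeof works on the type */
    return 0;
}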
Difference between struct and union
keyword
• struct keyword is used to declare the structure
• union keyword is used to declare the Union
Memory allocation
• A structure variable allocates separate memory for each of its members.
• A union variable allocates one common block of memory shared by all of its members.
Example
struct Employee
{
    int age;
    char name[50];
    float salary;
};

union Employee
{
    int age;
    char name[50];
    float salary;
};
Memory Space
• Structures occupy more memory space. Memory_Size = sum of the sizes of all structure members. Here, Memory_Size = int + char array[50] + float = 2 + 50 + 4 = 56 bytes (assuming a 2-byte int; actual sizes are platform-dependent and the compiler may add alignment padding).
• A union occupies less memory space than the corresponding structure. Memory_Size = size of the largest union member. In the above example the largest member is the char array, so Memory_Size = 50 bytes (again, before any alignment padding).
Access
• A structure allows us to access any or all of its members at any time.
• A union allows us to access only one member at a time.
Example
#include <stdio.h>
union test1
{
int x;
int y;
} Test1;
union test2
{
int x;
char y;
} Test2;
union test3
{
int arr[10];
char y;
} Test3;
int main()
{
printf("sizeof(test1) = %lu, sizeof(test2) = %lu, "
"sizeof(test3) = %lu",
sizeof(Test1),
sizeof(Test2), sizeof(Test3));
return 0;
}
Output :
sizeof(test1) = 4, sizeof(test2) = 4, sizeof(test3) = 40
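Because all members share the same bytes, writing one member invalidates the others. A short sketch (the union job type is hypothetical; reading an overwritten member yields an indeterminate value):
#include <stdio.h>

union job
{
    char name[32];
    int salary;
};

int main()
{
    union job j;

    j.salary = 40000;
    printf("salary = %d\n", j.salary);        /* salary is the valid member */

    snprintf(j.name, sizeof j.name, "engineer");
    printf("name = %s\n", j.name);            /* name is now the valid member */
    /* j.salary shares storage with name here, so its value is indeterminate */
    return 0;
}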
Question
Consider the following C code:
union test
{
int x;
char arr[8];
int y;
};
int main()
{
printf("%d", sizeof(union test));
return 0;
}
What will be the output of the above C code?
8
12
6
compiler error
When we declare a union, the memory allocated for a variable of that type is equal to the memory needed for its largest member, and all members share this same memory space. In the above example, char arr[8] is the largest member, so the size of union test is 8 bytes.
United States Patent Application 20180321906
Kind Code A1
Laaksonen; Lasse Juhani November 8, 2018
Metadata-free Audio-object Interactions
Abstract
A method including, detecting interaction between the user and an audio-object via local tracking, determining an audio-object state modification based on the local tracking, and performing an audio-object interaction based on the audio-object state modification.
Inventors: Laaksonen; Lasse Juhani; (Tampere, FI)
Applicant: Nokia Technologies Oy, Espoo, FI
Family ID: 1000002622525
Appl. No.: 15/587582
Filed: May 5, 2017
Current U.S. Class: 1/1
Current CPC Class: G06F 3/167 20130101; G06F 3/165 20130101; G06F 3/017 20130101
International Class: G06F 3/16 20060101 G06F003/16; G06F 3/01 20060101 G06F003/01
Claims
1. A method comprising: detecting interaction between a user and an audio-object via local tracking, wherein the local tracking includes determining a distance between the user and the audio-object; determining an audio-object state modification based on the local tracking; and performing an audio-object interaction based on the audio-object state modification.
2. The method of claim 1, wherein detecting the interaction between the user and the audio-object via the local tracking further comprises: detecting the user entering a vicinity of the audio-object; initializing the local tracking; performing the local tracking and updating a local tracking area; and updating an interaction event state based on the local tracking.
3. The method of claim 2, wherein updating the interaction event state based on the local tracking further comprises: accessing the interaction event state based on information from a database.
4. The method of claim 2, wherein updating the interaction event state based on local tracking further comprises: determining an interaction area based on the distance between the user and the audio-object.
5. The method of claim 4, wherein updating the interaction event state further comprises: determining a transition between a plurality of interaction event states based on tracking a user movement of the user relative to the audio-object within the interaction area; and updating the interaction event state based on the determined transition.
6. The method of claim 5, further comprising: triggering a predetermined response based on the determined transition.
7. The method of claim 2, wherein initializing the local tracking further comprises: initializing the local tracking based on at least one local tracking distance.
8. The method of claim 7, wherein the at least one local tracking distance further comprises a distance derived based on past user behavior consuming free-viewpoint audio.
9. The method of claim 2, wherein the at least one local tracking distance further comprises a distance received via a device associated with a content creator.
10. The method of claim 2, wherein performing the local tracking and updating the interaction area further comprises: stabilizing an audio-object rendering at a variable distance to the audio-object based on real user activity of the user.
11. The method of claim 1, further comprising: determining the local tracking area; and determining an interaction area, wherein the interaction area is different from the local tracking area.
12. The method of claim 1, further comprising: determining an interaction area based on a center of mass (CoM) associated with the user and the audio-object.
13. The method of claim 1, wherein determining whether metadata associated with a user for audio-object interactions has been received further comprises: determining a level of network congestion associated with the audio-object interactions.
14. The method of claim 1, further comprising: prior to detecting interaction between the user and the audio-object via local tracking, determining whether metadata associated with a user for audio-object interactions has been received; wherein the interaction between the user and the audio-object is detected via local tracking in response to determination that metadata associated with the user for audio-object interactions has not been received.
15. An apparatus comprising: at least one processor; and at least one non-transitory memory including computer program code, the at least one memory and the computer program code configured to, with the at least one processor, cause the apparatus to: detect interaction between a user and an audio-object via local tracking, wherein the local tracking includes determining a distance between the user and the audio-object; determine an audio-object state modification based on the local tracking; and perform an audio-object interaction based on the audio-object state modification.
16. An apparatus as in claim 15, where, when detecting the interaction between the user and the audio-object via the local tracking, the at least one memory and the computer program code are configured to, with the at least one processor, cause the apparatus to: detect the user entering a vicinity of the audio-object; initialize the local tracking; perform the local tracking and update a local tracking area; and update an interaction event state based on the local tracking.
17. An apparatus as in claim 16, where, when updating the interaction event state based on local tracking, the at least one memory and the computer program code are configured to, with the at least one processor, cause the apparatus to: access the interaction event state based on information from a database.
18. An apparatus as in claim 17, where, when updating the interaction event state, the at least one memory and the computer program code are configured to, with the at least one processor, cause the apparatus to: determine a transition between a plurality of interaction event states based on tracking a user movement of the user relative to the audio-object within the interaction area; and update the interaction event state based on the determined transition.
19. An apparatus as in claim 16, wherein, when initializing the local tracking, the at least one memory and the computer program code are configured to, with the at least one processor, cause the apparatus to: initialize the local tracking based on at least one local tracking distance.
20. A non-transitory program storage device readable by a machine, tangibly embodying a program of instructions executable by the machine for performing operations, the operations comprising: detecting interaction between a user and an audio-object via local tracking, wherein the local tracking includes determining a distance between the user and the audio-object; determining an audio-object state modification based on the local tracking; and performing an audio-object interaction based on the audio-object state modification.
Description
BACKGROUND
Technical Field
[0001] The exemplary and non-limiting embodiments relate generally to rendering of free-viewpoint audio for presentation to a user using a spatial rendering engine.
Brief Description of Prior Developments
[0002] Free-viewpoint audio generally allows for a user to move around in the audio (or generally, audio-visual or mediated reality) space and experience the audio space in a manner that correctly corresponds to his location and orientation in it. This may enable various virtual reality (VR) and augmented reality (AR) use cases. The spatial audio may consist, for example, of a channel-based bed and audio-objects, audio-objects only, or any equivalent spatial audio representation. While moving in the space, the user may come into contact with audio-objects, the user may distance themselves considerably from other objects, and new objects may also appear. The listening/rendering point may thereby adapt to the user's movement, and the user may interact with the audio-objects, and/or the audio content may otherwise evolve due to the changes relative to the rendering point or user action.
SUMMARY
[0003] The following summary is merely intended to be exemplary. The summary is not intended to limit the scope of the claims.
[0004] In accordance with one aspect, an example method comprises detecting interaction between the user and an audio-object via local tracking, determining an audio-object state modification based on the local tracking, and sending the audio-object state modification to an audio-object spatial rendering engine.
[0005] In accordance with another aspect, an example apparatus comprises at least one processor; and at least one non-transitory memory including computer program code, the at least one memory and the computer program code configured to, with the at least one processor, cause the apparatus to: detect interaction between the user and an audio-object via local tracking, determine an audio-object state modification based on the local tracking, and send the audio-object state modification to an audio-object spatial rendering engine.
[0006] In accordance with another aspect, an example apparatus comprises a non-transitory program storage device readable by a machine, tangibly embodying a program of instructions executable by the machine for performing operations, the operations comprising: detecting interaction between the user and an audio-object via local tracking, determining an audio-object state modification based on the local tracking, and sending the audio-object state modification to an audio-object spatial rendering engine.
BRIEF DESCRIPTION OF THE DRAWINGS
[0007] The foregoing aspects and other features are explained in the following description, taken in connection with the accompanying drawings, wherein:
[0008] FIG. 1 is a diagram illustrating a reality system comprising features of an example embodiment;
[0009] FIG. 2 is a diagram illustrating some components of the system shown in FIG. 1;
[0010] FIGS. 3a and 3b are diagrams illustrating characteristics of free-viewpoint content consumption;
[0011] FIG. 4 is an example illustration of a relationship between a user rendering position and an audio-object position;
[0012] FIG. 5 is an example illustration of a user rendering position against audio-object position over time;
[0013] FIG. 6 is another example illustration of a user rendering position against audio-object position over time;
[0014] FIG. 7 is an example illustration of a user relationship to a local tracking area of an audio-object;
[0015] FIG. 8 is an illustration of interaction-area modification control instructions;
[0016] FIG. 9 is an example state machine illustration of audio-interaction events;
[0017] FIG. 10 is an example diagram illustrating components of a rendering system;
[0018] FIG. 11 is an example illustration of a High-level block diagram for metadata-based audio-object interactions; and
[0019] FIG. 12 is an example illustration of a high-level block diagram of a switched system implementing a reduced metadata rendering system as a backup system for a rendering system.
DETAILED DESCRIPTION OF EMBODIMENTS
[0020] Referring to FIG. 1, a diagram is shown illustrating a reality system 100 incorporating features of an example embodiment. The reality system 100 may be used by a user for augmented-reality (AR), virtual-reality (VR), or presence-captured (PC) experiences and content consumption, for example, which incorporate free-viewpoint audio. Although the features will be described with reference to the example embodiments shown in the drawings, it should be understood that features can be embodied in many alternate forms of embodiments.
[0021] The system 100 generally comprises a visual system 110, an audio system 120, a relative location system 130 and a reduced metadata (or a metadata free) rendering system 140. The visual system 110 is configured to provide visual images to a user. For example, the visual system 110 may comprise a virtual reality (VR) headset, goggles or glasses. The audio system 120 is configured to provide audio sound to the user, such as by one or more speakers, a VR headset, or ear buds for example. The relative location system 130 is configured to sense a location of the user, such as the user's head for example, and determine the location of the user in the realm of the reality content consumption space. The movement in the reality content consumption space may be based on actual user movement, user-controlled movement, and/or some other externally-controlled movement or pre-determined movement, or any combination of these. The user is able to move and turn their head in the content consumption space of the free-viewpoint. The relative location system 130 may be able to change what the user sees and hears based upon the user's movement in the real-world; that real-world movement changing what the user sees and hears in the free-viewpoint rendering.
[0022] The movement of the user, interaction with audio-objects and things seen and heard by the user may be defined by predetermined parameters including an effective distance parameter and a reversibility parameter. An effective distance parameter may be a core parameter that defines the distance from which user interaction is considered for the current audio-object. In some embodiments, the effective distance parameter may also be considered a modification adjustment parameter, which may be applied to modification of interactions, as described in U.S. patent application Ser. No. 15/293,607, filed Oct. 14, 2016, which is hereby incorporated by reference. A reversibility parameter may also be considered a core parameter, and may define the reversibility of the interaction response. The reversibility parameter may also be considered a modification adjustment parameter. Although particular modes of audio-object interaction are described herein for ease of explanation, brevity and simplicity, it should be understood that the methods described herein may be applied to other types of audio-object interactions.
[0023] The user may be virtually located in the free-viewpoint content space, or in other words, receive a rendering corresponding to a location in the free-viewpoint rendering. Audio-objects may be rendered to the user at this user location. The area around a selected listening point may be defined based on user input, based on use case or content specific settings, and/or based on particular implementations of the audio rendering. Additionally, the area may in some embodiments be defined at least partly based on an indirect user or system setting such as the overall output level of the system (for example, some sounds may not be audible when the sound pressure level at the output is reduced). In such instances the output level input to an application may result in particular sounds being not rendered because the sound level associated with these audio-objects may be considered imperceptible from the listening point. In other instances, distant sounds with higher output levels (such as, for example, an explosion or similar loud event) may be exempted from the requirement (in other words, these sounds may be rendered). A process such as dynamic range control may also affect the rendering, and therefore the area, if the audio output level is considered in the area definition.
[0024] The reduced metadata rendering system 140 is configured to enable controlled audio-object interactions without needing transmission of any associated metadata. Thus, the method allows for a new alternative implementation of an audio-object interaction system. The reduced metadata rendering system 140 may furthermore enable audio-object interactions in free-viewpoint audio experiences for such content that does not include the metadata required by other rendering systems. The reduced metadata rendering system 140 may implement rendering of free-viewpoint (or free-listening point; six-degrees-of-freedom; 6DoF, for example) audio for presentation to a user using a spatial rendering engine. In some implementations, reduced metadata rendering system 140 may use an audio-object spatial modification engine or the spatial rendering engine may include functionality of an audio-object spatial modification engine.
[0025] The reduced metadata rendering system 140 may implement processes for controlled audio-object interactions without needing transmission of any associated metadata, or metadata-free controlled audio-object interactions, based on a local tracking of user movement and activity. Specifically, reduced metadata rendering system 140 may track 1) a distance between the user and the audio-object to determine an interaction area in which we consider audio-object interactions, and 2) a user movement relative to the audio-object (within the interaction area) to determine transitions between interaction states.
[0026] The interaction states may each correspond to an interaction event (a user activity model and an audio-object interaction response). The interaction states may be defined by the implementer or derived, for example, from an interaction event database. The transitions between the states (or interaction events) may thereby be used to trigger each separate type of audio-object interaction response. The relevant responses may differ between content, use case, and implementation. None of the responses depend on any transmitted metadata.
[0027] At least one distance related to initializing the local tracking may be defined by the implementer or, for example, a content creator. In some embodiments, this distance may be derived automatically based, for example, on past user behavior while consuming free-viewpoint audio.
[0028] Reduced metadata rendering system 140 may define the interaction area via local tracking and thereby enable stabilization of the audio-object rendering at a variable distance to the audio-object depending on real user activity. In other words, the response of the reduced metadata rendering system 140 may be altered (for example, the response may be slightly different) each time, thereby improving the realism of the interaction. The reduced metadata rendering system 140 may track the user's local activity and further enable making of intuitive decisions on when to apply specific interaction rendering effects to the audio presented to the user. Reduced metadata rendering system 140 may implement these steps together to significantly enhance the user experience of free-viewpoint audio where no or only a reduced set of metadata is available.
[0029] Referring also to FIG. 2, the reality system 100 generally comprises one or more controllers 210, one or more inputs 220 and one or more outputs 230. The input(s) 220 may comprise, for example, location sensors of the relative location system 130 and the reduced metadata rendering system 140, rendering information for reduced metadata rendering system 140, reality information from another device, such as over the Internet for example, or any other suitable device for inputting information into the system 100. The output(s) 230 may comprise, for example, a display on a VR headset of the visual system 110, speakers of the audio system 120, and a communications output to communicate information to another device. The controller(s) 210 may comprise one or more processors 240 and one or more memory 250 having software 260 (or machine-readable instructions).
[0030] Referring also to FIGS. 3a and 3b, diagrams 300, 350 illustrating characteristics of free-viewpoint content consumption are shown.
[0031] FIG. 3a illustrates a user 310 navigating around an audiovisual free-viewpoint VR experience 300. The user 310 is surrounded by a nature scene, where the user 310 hears, for example, birds singing 320 around the user 310 and bees buzzing 330 at some distance in front of the user 310. As the user 310 moves forward (FIG. 3b), the user 310 may come into contact with the beehive 340 that may, in terms of audio (or audio-wise), consist, for example, of a single audio-object. This is an example use case in which a definition for an interaction between the user and the audio-object is required for an immersive free-viewpoint audio experience.
[0032] Existing systems for audio-object interactions in free-viewpoint audio (for example, systems as described in U.S. patent application Ser. No. 15/293,607, and similar systems) may utilize metadata for defining how the interactions should be detected and how they modify the rendering. For example, in FIG. 3b, the audio-object rendering may be modified based on instructions derived from the metadata to amplify and accelerate the buzzing of the bees in a circle around the user 310 and make the sound of the bees 330 to follow the user 310 for a while even if the user tried to leave this part of the scene. While this approach to modification may allow for well-defined and truly immersive user experiences, there may also be problems arising from mobile use cases as well as with regard to rendering of content that does not include metadata (for example, legacy content for some systems).
[0033] In some instances, the metadata associated with the audio-object interactions may add a considerable amount of overhead to the transmission. In instances of high-end VR applications the overhead associated with audio-object interactions may not significantly affect performance (especially if there is also high-quality video content whose bit rate typically dominates the consumption). However, in instances of low bit rate AR audio solutions or while under severe network congestion, the overhead associated with audio-object interactions may make it difficult to apply these advanced features (especially if the audio-object interactions are dynamic and thus require frequent metadata updates). On the other hand, in other instances there may be (a considerable amount of) VR/AR content, or other audiovisual content that may be adapted for free-viewpoint VR/AR use, that does not include these advanced metadata for audio-object interactions.
[0034] Reduced metadata rendering system 140 may implement a metadata-free system as an alternative and/or a backup for a system that includes advanced metadata for audio-object interactions to better allow audio-object interactions also for low bit rate AR scenarios and for improved rendering of content (for example, legacy content) that does not have the required metadata.
[0035] Reduced metadata rendering system 140 may enable audio-object interactions without metadata based on instructions, which, from a content creator's perspective, may appear to be arbitrary. However, the interaction rendering cannot follow the content creator's instructions without any metadata indicating this. In instances of content that does not consider audio-object interactions (such as, for example, legacy content) there may be no content creator input initially. Reduced metadata rendering system 140 may provide an improved user experience in these instances.
[0036] Referring also to FIG. 4, an example illustration 400 of a relationship between a user rendering position 410 and an audio-object position 420 based on a main trackable parameter 430 (in this instance, a distance between the user and audio-object) when no metadata related to audio-object interactions is used is shown.
[0037] Referring also to FIG. 5, an example illustration 500 of a user rendering position 410 against an audio-object position 420 over time 510 is shown. A distance of the user 410 from a middle line 550 of the audio-object 420 is shown (530-L denotes a distance from the middle line 550 in a left area while 530-R denotes the distance from the middle line 550 in the right area). The user position (for example, in relation to the audio-object position) is illustrated in a single dimension (left-to-right) with time 510 being illustrated on the vertical axis.
[0038] FIGS. 4 and 5, present illustrations showing the distance between the user rendering position 410 and the audio-object position 420. This distance may be tracked by reduced metadata rendering system 140. Reduced metadata rendering system 140 may thereby determine a portion of the information related to the overlap of the two positions before the actual overlap takes place and as the user 410 moves towards the audio-object. FIG. 5 illustrates the user in FIG. 3a moving towards the audio-object. In this case, reduced metadata rendering system 140 may track the distance 540 (of the user) along a single dimension (for example, the left to right movement of FIG. 4) at several time instances 520 (shown in FIG. 5 as 1, 2, 3, etc.).
[0039] In FIG. 5, the user 410 is at time instance 1 (shown as 520-1) at a considerable distance (shown as 540-1-L) from the audio-object. This corresponds to the situation of FIG. 4. At time instance 520 2, the user 410 has moved significantly closer to the audio-object. The audio-object may now appear very close to the user's head. The user 410 stops, but there may still be small changes to the distance due to subtle movements of the user 410. The user 410 may, for example, turn their head to view the scene, make small nods, correct their posture, or take a small step in any direction. The audio-object 420 may thus end up oscillating around the user 410 along at least one dimension, as illustrated for time instances 520 2-9 in FIG. 5 (shown as single numerals 2, 3 to 9, in FIG. 5). This may provide a very disturbing user experience when the audio-object 420 is rendered to the user 410. Reduced metadata rendering system 140 may control the rendering such that it would appear more pleasant (for example, stable with smooth transitions) for the user 410. In addition to removing disturbances, reduced metadata rendering system 140 may implement processes (for example, based on a second target of control for the audio-object interaction) to provide new information or an enhanced experience, for example as discussed above with respect to FIGS. 3a and 3b.
[0040] FIG. 6 is an example illustration 600 of user rendering position against audio-object position over time. The position is illustrated in a single dimension (left-to-right) with time 510 being illustrated on the vertical axis.
[0041] As illustrated with respect to FIG. 6, reduced metadata rendering system 140 may determine an additional area (for example, reduced metadata rendering system 140 may augment FIG. 5) by adding a region 610 (for example, in place of the "audio-object center point line" 540, shown in FIG. 5) to better indicate that the user rendering position and audio-object distances fall into two categories: the audio-object 420 is either close to the rendering position (within region 610) or not close to it (within area 620-L or 620-R, which may correspond to extended areas outside of region 610). Reduced metadata rendering system 140 may implement processes to ensure that the user 410 1) does not hear blurry or oscillating audio-object position changes in the rendering for time instances 520 2-9 and 12-15, and 2) instead hears a change in the audio-object rendering corresponding to an interaction trigger. Reduced metadata rendering system 140 (or any audio rendering system) may require metadata to implement response 2) (for example, without metadata, response 2 may be difficult or impossible to execute). Reduced metadata rendering system 140 may implement processes for time instances 520 10-11 in FIG. 6, in which the user leaves and is outside of the region 610.
[0042] According to an example, reduced metadata rendering system 140 may implement a distance tracking process for triggering and maintaining an audio-object interaction. Reduced metadata rendering system 140 may implement the distance tracking process to calculate a distance between the user rendering position and the audio-object position. This value (for example, the distance) and its change may be tracked over time. Reduced metadata rendering system 140 may thereby define whether the user rendering position relative to the audio-object position is within an area (for example region 610) where audio-object interaction may be considered.
[0043] Referring back to FIG. 6, reduced metadata rendering system 140 may define a value for the "size of the area" 610 that is to be considered "close" and correspondingly, areas that are "not so close". The value(s) may be adaptive or dynamic. Reduced metadata rendering system 140 may define for each implementation or each general content type a distance/radius that corresponds to the correct size of region 610. Reduced metadata rendering system 140 may define the area in a dynamic free-viewpoint audio use case in specific ways and additionally, or alternatively, reduced metadata rendering system 140 may trigger and control an audio-object interaction using this area definition and related criteria.
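The patent text does not give an implementation, but a minimal C sketch of this distance tracking might look as follows. The separate enter/exit radii (hysteresis) are an assumption added here to give the "close"/"not close" classification of paragraph [0041] the stability the text asks for:
#include <math.h>
#include <stdbool.h>

typedef struct { float x, y, z; } vec3;

static float distance(vec3 a, vec3 b)
{
    float dx = a.x - b.x, dy = a.y - b.y, dz = a.z - b.z;
    return sqrtf(dx * dx + dy * dy + dz * dz);
}

/* Classify the user as inside/outside the interaction area.
 * enter_radius < exit_radius gives hysteresis, so small user
 * movements near the boundary do not toggle the state. */
static bool update_in_area(bool was_inside, vec3 user, vec3 object,
                           float enter_radius, float exit_radius)
{
    float d = distance(user, object);
    if (was_inside)
        return d < exit_radius;   /* stay inside until clearly out */
    return d < enter_radius;      /* enter only when clearly in */
}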
[0044] Reduced metadata rendering system 140 may implement area definition to provide a way for triggering and controlling audio-object interactions. Reduced metadata rendering system 140 may implement a dynamic area definition in instances in which a static area definition is not optimal. Reduced metadata rendering system 140 may implement a static area definition for a simple stabilization of the rendering. In addition, due to the nature of the user-on-audio-object overlaps and interaction in the virtual space, reduced metadata rendering system 140 may center the area 610 at positions other than the audio-object 420 although the area 610 is to cover the audio-object 420. For example, in a particular instance, the user 410 may be interacting with an audio-object 420 on one side of the audio-object 420, and then decide to move away, for example, through said audio-object 420 (for example, to the other side of said audio-object 420). If the area was centered at the audio-object 420, the audio-object interaction would continue longer than required by the corresponding (for example, real world based) logic of the implementation. Reduced metadata rendering system 140 may therefore define a dynamic interaction area that may change at least one of its size or its location based on the observed action of the user 410 in relation to the audio-object 420 and the audio-object location. Reduced metadata rendering system 140 may, in other words, track the local activity of the user 410 relative to the audio-object 420 while the user 410 is in the vicinity of the audio-object 420.
[0045] Referring also to FIG. 7, an example illustration of a user relationship 700 to a local tracking area of an audio-object is shown.
[0046] An initial stage 710 illustrates a user 410 approaching a local tracking area 610 of an audio-object 420; stage 720 illustrates the user 410 entering the local tracking area 610 and triggering an audio-object interaction; and stage 730 illustrates the local tracking area 610 being adjusted based on local user tracking.
[0047] Reduced metadata rendering system 140 may implement location tracking and area definition. FIG. 7 illustrates (different stages of) a user approaching (and entering a local tracking area of) an audio-object 420. Reduced metadata rendering system 140 may specify an implementation-specific distance around the audio-object 420 where the local tracking is initially considered (and where reduced metadata rendering system 140 may also begin to consider the audio-object interaction). The distance may also be user-configurable, defined by the content creator (and loaded, for example, once per session) or, in advanced embodiments, based on an ongoing process (for example, based on a learning algorithm) that accumulates user-specific data over time and thus allows automatic personalization of the experience.
[0048] The distance may correspond to a static area centered at the audio-object 420, for example as shown at stage 710. As the user moves closer, he reaches the border of the tracked distance and triggers an audio-object interaction, for example as shown at stage 720. We may thus begin the local tracking when the user enters this pre-defined area around the audio-object. Alternatively, there may be different decision distances for the tracking and the actual interaction part. In this example, to simplify the description, the decision distances for the tracking and the actual interaction may be considered a single, shared distance. The local tracking may be seen as defining a "center of mass" that is based on the user rendering position and the audio-object position, for example as shown at stage 730.
[0049] Referring also to FIG. 8, an illustration of interaction-area modification control instructions is shown. These interaction-area modification control instructions may be based on corresponding basic instructions (corresponding to implementer-defined logic or principles, such as those based on real-world scenarios) regarding the modification of interaction-areas in response to particular user motion.
[0050] Reduced metadata rendering system 140 may implement instructions based on a model for controlling the dynamic interaction area (or center of mass/CoM) 810. Reduced metadata rendering system 140 may implement interaction-area tracking and modification based on core instructions or guidelines (for example, basic principles), such as shown in FIG. 8. With the user 410 entering the tracking distance 820 (shown at stage 1, block 815 in FIG. 8), the reduced metadata rendering system 140 may initialize CoM 810 at a point between the user position and the audio-object 420. This may correspond to interaction area bounds 830, 840 for the user 410 and audio-object 420. The exact location of the CoM 810 may depend on the implementation and/or attributes such as the audio-object size. The initial location may be, for example, the center point between the user position and the audio-object position.
[0051] After initialization, reduced metadata rendering system 140 may begin the local tracking. Step 2, block 825, of FIG. 8 illustrates the user 410 approaching 850 the CoM 810, the position of which is maintained.
[0052] As the user 410 stops (or the absolute distance between the user 410 and the audio-object 420 is otherwise maintained), the CoM 810 may move towards the user 410 as seen in step 3, block 835. This movement may become slower the farther away from the audio-object the CoM 810 goes. At some point the CoM 810 may meet the user position, and reduced metadata rendering system 140 may center the interaction area at the user position 860. The interaction area may, in some embodiments, also cover the actual audio-object position. Reduced metadata rendering system 140 may therefore render the audio-object 420 with the user 410 (and not render the audio-object separately). Reduced metadata rendering system 140 may control the rendering via the interaction area.
[0053] Steps 4a and 4b (blocks 845 and 855) demonstrate two possibilities where the user-to-audio-object distance is changed (after the user position and CoM 810 have merged). In step 4a, block 845, the user 410 may move towards 870 the audio-object and the CoM 810 may follow the user position.
[0054] In step 4b, block 855, the user 410 may move away from the audio-object position. In this case, the CoM 810 may separate 880 from the user position. The separation may, depending on the implementation (and various criteria such as the current distance to the audio-object) result in the CoM 810 being maintained or the CoM 810 following the user position with a lag (inertia).
[0055] In some embodiments, the CoM 810 may move towards the audio-object 420 if the user-to-audio-object distance becomes larger following the initialization. Or, an increase of the user-to-audio-object distance prior to merging of the user position and the CoM 810 may result in the CoM 810 moving towards the audio-object 420.
[0056] Reduced metadata rendering system 140 may define an interaction area for the audio-object 420 that depends on the local tracking of the user activity based on at least the user position (or analysis of the user position). Reduced metadata rendering system 140 may also use other aspects associated with the user or audio-object, such as speed of user movement, past user movement, etc. In some embodiments, reduced metadata rendering system 140 may provide instructions for the CoM 810 to follow the user 410 while still providing a preference (or secondary instructions) for the CoM 810 to stay close to (or return to) the audio-object 420.
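A sketch of the center-of-mass update along the lines of FIG. 8, continuing the previous sketch (the smoothing factor and the inverse-distance inertia are illustrative assumptions, not values from the patent):
/* Reuses vec3, distance() and <stdbool.h> from the previous sketch. */

static vec3 lerp(vec3 a, vec3 b, float t)
{
    vec3 r = { a.x + t * (b.x - a.x),
               a.y + t * (b.y - a.y),
               a.z + t * (b.z - a.z) };
    return r;
}

typedef struct {
    vec3 com;      /* current center of mass of the interaction area */
    bool active;   /* has local tracking been initialized? */
} interaction_area;

static void com_update(interaction_area *ia, vec3 user, vec3 object)
{
    if (!ia->active) {
        /* Step 1: initialize the CoM between the user and the audio-object. */
        ia->com = lerp(user, object, 0.5f);
        ia->active = true;
        return;
    }
    /* Steps 3-4: drift towards the user position, more slowly the
     * farther the CoM already is from the audio-object (inertia). */
    float d_obj = distance(ia->com, object);
    float rate = 0.1f / (1.0f + d_obj);    /* assumed smoothing factor */
    ia->com = lerp(ia->com, user, rate);
}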
[0057] FIG. 9 is an example illustration 900 of audio-interaction events that may be detected based on the local tracking of the user movement and behavior in the proximity of an audio-object 420. Reduced metadata rendering system 140 may implement different states (for example, via a state machine, such as shown in FIG. 9) based on audio-interaction events derived by local tracking.
[0058] Reduced metadata rendering system 140 may control rendering for audio interactions without transmitted metadata. Reduced metadata rendering system 140 may analyze the particular audio interactions. When no metadata is available, reduced metadata rendering system 140 may derive possible interaction effect by other means. Reduced metadata rendering system 140 may use analysis of local tracking of the user 410, such as may already be performed for defining the interaction area 810. As described hereinabove, reduced metadata rendering system 140 may define the interaction area based on the user position (distance). On the other hand, reduced metadata rendering system 140 may determine the audio-object interaction effect to be applied to the rendering based on other information derived from the local tracking of the user activity. As described above, the user 410 may, for example, consume/interact with an audio-object 420 firstly on one side of the audio-object 420 and then proceed to leave this part of the scene, for example, through the audio-object 420. Reduced metadata rendering system 140 may determine that there are at least two parts in this activity. The first part may be mostly a static consumption part, while the other part may be a faster movement (which, in this instance, may furthermore be through the audio-object 420). Reduced metadata rendering system 140 may base the metadata-free interaction effect decisions on these types of user action transitions.
[0059] Referring again to FIG. 9, at 910 a first state is shown in which the user 410 is not interacting with an audio-object 420. The user 410 may then enter a first interaction state with the audio-object at 920. This is denoted as an initial state, which may assume a first static user behavior and trigger a first interaction response for the rendering of the audio-object 420. For example, the audio-object 420 may grow in size and be stabilized in rendering position relative to the user 410. The size may be, for example, relative to the user-to-audio-object distance. From this first static state, we may then trigger new responses every time the state changes.
[0060] Reduced metadata rendering system 140 may have, for example, at least a non-static state 930 and/or a second static state differing from the first static state 920, where the user 410 has left the audio-object interaction area and then returned to interact with the same audio-object (`return`) 940. For example, time instances 520 9-12 in FIG. 6 may correspond to an ending of, and subsequent relaunching of, an audio-object interaction. The at least second interaction with the same audio-object may trigger a different response rendering.
[0061] In some embodiments, a user returning to interact with an audio object after a particular time has elapsed (for example, a minimum time has passed) may trigger the initial state 920 instead of the `return` state 940. The reduced metadata rendering system 140 may therefore utilize at least one rule, which may be based on time, for selecting between the at least two static states that may be entered from the `no interaction` state 910. In further embodiments, the rule may be carried over from a first user session to a second user session. It is understood that in some embodiments, only a single static state may be defined. Different audio objects may, depending on the implementation, have different number of states.
[0062] The static state 920 may occur in instances in which there is user movement (in addition to instances in which there is no user movement). For example, the static state may include instances in which there is a relatively local (on one side, in a certain segment, etc.) movement, and/or a relatively slow movement. Reduced metadata rendering system 140 may thereby trigger a transition from a static state when at least a user movement distance is over a threshold and/or a user movement speed is over a threshold. Reduced metadata rendering system 140 may determine that the interaction is entering a non-static state based on a particular amount of said movement over a time period (a time threshold). Transition from a non-static state to a static state may require a more stable user activity than firstly remaining in a static state. The reduced metadata rendering system 140 may implement the transition based on instructions provided by an implementer (for example, based on a particular virtual environment, etc.) and, at least in some cases, also based on the type of content.
[0063] The interaction states may each correspond to an interaction event (which may be a user activity model and a corresponding audio-object interaction response). These may be defined by the implementer or the content creator for the said implementation or content, respectively. Or they may be derived, for example, from an interaction event database.
[0064] FIG. 10 is a diagram 1000 illustrating components of (corresponding to steps in a process implementing) a reduced metadata rendering system 140.
[0065] Reduced metadata rendering system 140 may, based on the local tracking and within the defined tracking area, determine when a user movement triggers a state transition (FIG. 9) and start applying the audio-object interaction for the new state. In some example embodiments, reduced metadata rendering system 140 may use a separate database for defining audio-object interaction events. Reduced metadata rendering system 140 may derive at least the user activity model for each state and/or the audio-object interaction response for each state from a database.
[0066] At block 1010, reduced metadata rendering system 140 may detect that a user is entering an audio-object vicinity (or distance). Reduced metadata rendering system 140 may initialize local tracking at block 1020 and perform local tracking and update of the local tracking area at block 1030. Reduced metadata rendering system 140 may update interaction event states based on local tracking at block 1040. Alternatively, or additionally, reduced metadata rendering system 140 may read interaction state data from a database at block 1050. At block 1060, reduced metadata rendering system 140 may perform audio-object state modification based on a current interaction state. At block 1070, reduced metadata rendering system 140 may send modification information to an audio object spatial rendering engine. Additionally, at block 1080, reduced metadata rendering system 140 may, while the user is in the local tracking area, perform local tracking and update of the local tracking area.
[0067] In alternative implementations, reduced metadata rendering system 140 may utilize, for example, deep learning processes to further distinguish between various user activities or ways of reacting to audio-objects. Reduced metadata rendering system 140 may thereby allow for personalization of the system response. For example, reduced metadata rendering system 140 may utilize a training sequence of metadata-based audio-object interactions, where user movement is tracked, or reduced metadata rendering system 140 may learn how the user responds to default interaction responses.
[0068] In further example embodiments, reduced metadata rendering system 140 may analyze the audio-object 420 and the analysis result may affect at least some of the audio-object interaction parameters, such as states and thresholds. In further example embodiments, reduced metadata rendering system 140 may also analyze the physical rendering environment (user's room properties). Reduced metadata rendering system 140 may similarly affect the audio-object interaction rendering when no pre-defined metadata is used.
[0069] Referring now to FIG. 11, a high-level block diagram illustrating rendering for metadata-based audio-object interactions is shown.
[0070] Reduced metadata rendering system 140 may implement processes to provide backwards compatibility with previous systems, when compared to metadata-based systems, such as described in U.S. patent application Ser. No. 15/293,607, which include metadata-based audio-object interactions. The metadata-based system may read the metadata (block 1110), detect interaction (block 1120), and determine information for an audio-object state modification (block 1130). The metadata-based system may then send the modification information to an audio-object spatial rendering engine (block 1140).
[0071] While reduced metadata rendering system 140 may allow controlled audio interactions without additional metadata (as shown in FIG. 9), reduced metadata rendering system 140 may also implement the metadata-free processes in conjunction with systems, such as the metadata-based system described with respect to FIG. 11, which implement metadata-based audio interactions. For example, in instances of network congestion, if a user is receiving a low-rate representation of a free-viewpoint audio scene, for example, over a wireless data link or communications system, complicated metadata may require a substantial amount of the overall bandwidth that would be better utilized for source-coding of the audio waveform. Therefore, reduced metadata rendering system 140 may utilize the metadata-free processes for rendering audio-object interactions and allow for the transmitter or a network element to drop the metadata and only transmit the audio payload in the downlink.
[0072] Referring to FIG. 12, a high-level block diagram of a switched system implementing metadata-free processes for rendering audio-object interactions as a backup system for a system that uses metadata for rendering audio-object interactions (for example, a system such as described in U.S. patent application Ser. No. 15/293,607) is shown.
[0073] As shown in FIG. 12, reduced metadata rendering system 140 may mirror (or perform as a substitute or alternate to) a metadata-based system, such as the system described with respect to FIG. 11, for instances in which metadata is not available. The functional blocks of FIG. 10 are thus re-arranged on the left-hand side of FIG. 12 to illustrate the high-level implementation of the two processes. The combined system may determine if metadata has been received at block 1210. If metadata has not been received, reduced metadata rendering system 140 may detect interaction via local tracking at block 1220 and determine an audio-object state modification based on local tracking at block 1230. Reduced metadata rendering system 140 may then send modification information to audio-object spatial rendering engine at block 1140. However, if metadata has been received, metadata-based system may perform steps 1110 to 1140, as described herein above with respect to FIG. 11.
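For illustration only (the class and method names below are assumptions, not the patent's), the FIG. 12 branch can be sketched as:

    // Illustrative sketch of the switched system of FIG. 12: use the metadata
    // branch when metadata arrived, otherwise fall back to the metadata-free
    // (local tracking) branch; both paths end at block 1140.
    public class SwitchedRenderer {
        public void renderFrame(boolean metadataReceived) {
            if (metadataReceived) {
                readMetadata();               // block 1110
                detectInteraction();          // block 1120
                determineStateModification(); // block 1130
            } else {
                detectInteractionViaLocalTracking();      // block 1220
                determineStateModificationFromTracking(); // block 1230
            }
            sendModificationToSpatialRenderingEngine();   // block 1140 (shared)
        }

        private void readMetadata() {}
        private void detectInteraction() {}
        private void determineStateModification() {}
        private void detectInteractionViaLocalTracking() {}
        private void determineStateModificationFromTracking() {}
        private void sendModificationToSpatialRenderingEngine() {}
    }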
[0074] Reduced metadata rendering system 140 may implement processes, for example, under heavy network congestion when metadata transmission may need to be dropped to save bandwidth or to allocate it in a way that is perceptually more beneficial. Thus, an administrator (or a combined system, such as described with respect to FIG. 12) may run the metadata-based system when metadata is received and switch to the reduced metadata rendering system 140 when no metadata is available. However, in these instances the combined system may be required to interpolate the effects in order not to create discontinuities if the switching between the two modes (branches) is frequent. For example, reduced metadata rendering system 140 may determine a metadata-free rendering utilizing the information about the past states derived from the received metadata in previous frames. The combined system may have a default setting so that as long as data rate is sufficient, the combined (or switched) system processes the rendering for the audio-object interactions via the metadata-based system (and only uses the reduced metadata rendering system 140 when no metadata is available).
[0075] Note that although the preceding implementations are described with respect to user movement, dynamic audio-objects may also move themselves, which may also affect the distance between the user position and the audio-object. In instances where the user-to-audio-object distance is determined, the relative distance is measured, and reduced metadata rendering system 140 may disregard whether the movement is due to the user moving or the audio-object moving. However, in instances of using local tracking of the user activity for determining the audio-interaction effects, the actual user movement is of interest. If the audio-object is also moving, reduced metadata rendering system 140 may compensate for this in the tracking in at least some embodiments.
[0076] The metadata-free rendering of audio-object interactions may provide technical advantages and/or enhance the end-user experience. At a high level, the processes may enable a stable audio-object rendering under audio-object interaction with no metadata, or only a reduced set of metadata, available at the renderer. These processes are thereby suitable, for example, for very low bit rate VR systems in which metadata transmission may not be favored, and for free-viewpoint rendering of legacy content that is not supported by the full-metadata system.
[0077] Reduced metadata rendering system 140 may thereby make it possible for the user to experience free-viewpoint audio based on both legacy content and new VR-specific content (for example, content that includes metadata for audio-object interactions). The interaction-area tracking may enable stabilizing the audio-object rendering. The tracking of the user's local activity may further enable making further decisions on when to apply specific interaction rendering effects to the audio. Together these steps may significantly enhance the user experience. While reduced metadata rendering system 140 may not follow content creator or implementer instructions when no metadata is available, reduced metadata rendering system 140 may, in some instances, provide options (or make decisions) that correspond, for example, to typical content creator decisions at the renderer. Reduced metadata rendering system 140 may also implement personalization of audio-object interactions based on the modeling of a specific user's typical interaction style while experiencing free-viewpoint audio.
[0078] One advantage of the metadata-free rendering of audio-object interactions described herein is that it can be implemented as a stand-alone system (thus, it offers a new, alternative implementation for free-viewpoint audio-object interaction rendering) and in addition as a backup system for metadata-based systems (thus, improving the existing system for lower bit rates and legacy content). It can thus be used independently from metadata-based systems (in terms of not needing metadata) or in conjunction with metadata-based systems (when metadata is offered but it is not available due to transmission issues).
[0079] In accordance with an example, a method may include detecting interaction between the user and an audio-object via local tracking, determining an audio-object state modification based on the local tracking, and sending the audio-object state modification to an audio-object spatial rendering engine. The method may also include performing an audio-object interaction based on the audio-object state modification.
[0080] In accordance with another example, an example apparatus may comprise at least one processor; and at least one non-transitory memory including computer program code, the at least one memory and the computer program code configured to, with the at least one processor, cause the apparatus to: determine whether metadata associated with a user for audio-object interactions has been received, detect interaction between the user and an audio-object via local tracking in response to determination that metadata associated with the user for audio-object interactions has not been received, determine an audio-object state modification based on the local tracking, and send the audio-object state modification to an audio-object spatial rendering engine.
[0081] In accordance with another example, an example apparatus may comprise a non-transitory program storage device readable by a machine, tangibly embodying a program of instructions executable by the machine for performing operations, the operations comprising: detecting interaction between the user and an audio-object via local tracking, determining an audio-object state modification based on the local tracking, and sending the audio-object state modification to an audio-object spatial rendering engine.
[0082] In accordance with another example, an example apparatus comprises: means for determining whether metadata associated with a user for audio-object interactions has been received, means for detecting interaction between the user and an audio-object via local tracking in response to determination that metadata associated with the user for audio-object interactions has not been received, means for determining an audio-object state modification based on the local tracking, and means for sending the audio-object state modification to an audio-object spatial rendering engine.
[0083] Any combination of one or more computer readable medium(s) may be utilized as the memory. The computer readable medium may be a computer readable signal medium or a non-transitory computer readable storage medium. A non-transitory computer readable storage medium does not include propagating signals and may be, for example, but not limited to, an electronic, magnetic, optical, electromagnetic, infrared, or semiconductor system, apparatus, or device, or any suitable combination of the foregoing. More specific examples (a non-exhaustive list) of the computer readable storage medium would include the following: an electrical connection having one or more wires, a portable computer diskette, a hard disk, a random access memory (RAM), a read-only memory (ROM), an erasable programmable read-only memory (EPROM or Flash memory), an optical fiber, a portable compact disc read-only memory (CD-ROM), an optical storage device, a magnetic storage device, or any suitable combination of the foregoing.
[0084] It should be understood that the foregoing description is only illustrative. Various alternatives and modifications can be devised by those skilled in the art. For example, features recited in the various dependent claims could be combined with each other in any suitable combination(s). In addition, features from different embodiments described above could be selectively combined into a new embodiment. Accordingly, the description is intended to embrace all such alternatives, modifications and variances which fall within the scope of the appended claims.
* * * * *
source: flair-src/trunk/lib/FlairSensorActuator/src/VrpnObject_impl.cpp@ 135
Last change on this file since 135 was 135, checked in by Sanahuja Guillaume, 7 years ago
VrpnObject no longer outputs Euler (only Quaternion): warning, output matrix has changed!
File size: 5.7 KB
// %flair:license{
// This file is part of the Flair framework distributed under the
// CECILL-C License, Version 1.0.
// %flair:license}
// created: 2013/04/03
// filename: VrpnObject.cpp
//
// author: César Richard, Guillaume Sanahuja
// Copyright Heudiasyc UMR UTC/CNRS 7253
//
// version: $Id: $
//
// purpose: vrpn object
//
//
/*********************************************************************/
#include "VrpnObject_impl.h"
#include "VrpnObject.h"
#include "VrpnClient.h"
#include "VrpnClient_impl.h"
#include <vrpn_Tracker.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <vrpn_Connection.h>
#include <cvmatrix.h>
#include <Tab.h>
#include <TabWidget.h>
#include <DataPlot1D.h>
#include <OneAxisRotation.h>
#include <Vector3D.h>
#include <Euler.h>
#include <math.h>

using std::string;
using namespace flair::core;
using namespace flair::gui;
using namespace flair::sensor;

VrpnObject_impl::VrpnObject_impl(VrpnObject *self,
                                 string name, int id, const TabWidget *tab) {
  parent = GetVrpnClient();
  this->self = self;

  if (parent == NULL) {
    self->Err("VrpnClient must be instanced before creating VrpnObject\n");
    return;
  }
  if (id == -1 && parent->UseXbee()) {
    self->Err("erreur aucun identifiant specifie pour la connexion Xbee\n");
  }
  if (id != -1 && !parent->UseXbee()) {
    self->Warn(
        "identifiant pour la connexion Xbee ignore car pas en mode Xbee\n");
  }

  if (parent->UseXbee()) {
    parent->pimpl_->AddTrackable(this, id);
    tracker = NULL;
  } else {
    parent->pimpl_->AddTrackable(self);
    tracker = new vrpn_Tracker_Remote(name.c_str(), parent->pimpl_->connection);
    tracker->register_change_handler(this, handle_pos);
    tracker->shutup = true;
  }

  // state
  cvmatrix_descriptor *desc = new cvmatrix_descriptor(7, 1);
  desc->SetElementName(0, 0, "q0");
  desc->SetElementName(1, 0, "q1");
  desc->SetElementName(2, 0, "q2");
  desc->SetElementName(3, 0, "q3");
  desc->SetElementName(4, 0, "x");
  desc->SetElementName(5, 0, "y");
  desc->SetElementName(6, 0, "z");
  output = new cvmatrix(self, desc, floatType);

  desc = new cvmatrix_descriptor(3, 1);
  desc->SetElementName(0, 0, "roll");
  desc->SetElementName(1, 0, "pitch");
  desc->SetElementName(2, 0, "yaw");
  state = new cvmatrix(self, desc, floatType);

  // ui
  plot_tab = new Tab(tab, "Mesures " + name);
  x_plot = new DataPlot1D(plot_tab->NewRow(), "x", -10, 10);
  x_plot->AddCurve(output->Element(4));
  y_plot = new DataPlot1D(plot_tab->LastRowLastCol(), "y", -10, 10);
  y_plot->AddCurve(output->Element(5));
  z_plot = new DataPlot1D(plot_tab->LastRowLastCol(), "z", -2, 0);
  z_plot->AddCurve(output->Element(6));
}

VrpnObject_impl::~VrpnObject_impl(void) {
  if (tracker != NULL) // normal
  {
    parent->pimpl_->RemoveTrackable(self);
    tracker->unregister_change_handler(this, handle_pos);
    delete tracker;
  } else // xbee
  {
    parent->pimpl_->RemoveTrackable(this);
  }
  delete plot_tab;
}

void VrpnObject_impl::mainloop(void) { tracker->mainloop(); }

bool VrpnObject_impl::IsTracked(unsigned int timeout_ms) {
  output->GetMutex();
  Time a = GetTime();
  Time dt = a - output->DataTime();
  output->ReleaseMutex();

  if (dt > (Time)(timeout_ms * 1000000)) {
    // self->Printf("%lld %lld %lld
    // %lld\n",a,output->DataTime(),dt,(Time)(timeout_ms*1000000));
    return false;
  } else {
    return true;
  }
}

void VrpnObject_impl::GetQuaternion(Quaternion &quaternion) {
  output->GetMutex();
  quaternion.q0 = output->ValueNoMutex(0, 0);
  quaternion.q1 = output->ValueNoMutex(1, 0);
  quaternion.q2 = output->ValueNoMutex(2, 0);
  quaternion.q3 = output->ValueNoMutex(3, 0);
  output->ReleaseMutex();
}

void VrpnObject_impl::GetPosition(Vector3D &point) {
  output->GetMutex();
  point.x = output->ValueNoMutex(4, 0);
  point.y = output->ValueNoMutex(5, 0);
  point.z = output->ValueNoMutex(6, 0);
  output->ReleaseMutex();
}

void VRPN_CALLBACK
VrpnObject_impl::handle_pos(void *userdata, const vrpn_TRACKERCB t) {
  bool is_nan = false;
  VrpnObject_impl *caller = reinterpret_cast<VrpnObject_impl *>(userdata);
  Time time = GetTime();

  // check if something is nan
  for (int i = 0; i < 3; i++) {
    if (isnan(t.pos[i]) == true)
      is_nan = true;
  }
  for (int i = 0; i < 4; i++) {
    if (isnan(t.quat[i]) == true)
      is_nan = true;
  }
  if (is_nan == true) {
    caller->self->Warn("data is nan, skipping it (time %lld)\n", time);
    return;
  }

  // take the mutex once and for all, then use direct (NoMutex) accesses
  caller->output->GetMutex();

  // warning: t.quat is defined as (qx,qy,qz,qw), which is different from
  // flair::core::Quaternion
  Quaternion quaternion(t.quat[3], t.quat[0], t.quat[1], t.quat[2]);
  Vector3D pos((float)t.pos[0], (float)t.pos[1], (float)t.pos[2]);

  // apply the rotations
  caller->parent->pimpl_->ComputeRotations(pos);
  caller->parent->pimpl_->ComputeRotations(quaternion);

  caller->output->SetValueNoMutex(0, 0, quaternion.q0);
  caller->output->SetValueNoMutex(1, 0, quaternion.q1);
  caller->output->SetValueNoMutex(2, 0, quaternion.q2);
  caller->output->SetValueNoMutex(3, 0, quaternion.q3);
  caller->output->SetValueNoMutex(4, 0, pos.x);
  caller->output->SetValueNoMutex(5, 0, pos.y);
  caller->output->SetValueNoMutex(6, 0, pos.z);

  caller->output->SetDataTime(time);
  caller->output->ReleaseMutex();

  Euler euler = quaternion.ToEuler();
  caller->state->GetMutex();
  caller->state->SetValueNoMutex(0, 0, Euler::ToDegree(euler.roll));
  caller->state->SetValueNoMutex(1, 0, Euler::ToDegree(euler.pitch));
  caller->state->SetValueNoMutex(2, 0, Euler::ToDegree(euler.yaw));
  caller->state->ReleaseMutex();

  caller->self->ProcessUpdate(caller->output);
}
3D Trigonometry
Related Topics:
More Math Lessons for GCSE
More Trigonometry Lessons
Trigonometry Worksheets
Collection of videos, solutions, games, activities and worksheets that are suitable for GCSE Maths.
How to solve problems that involve 3D shapes using trigonometry and Pythagoras' Theorem?
The following diagram gives an example of the projection of a line on a plane and the angle between a line and a plane. Scroll down the page for more examples and explanations on using Trigonometry and Pythagoras' Theorem to solve 3D word problems.
GCSE Maths 3D Trigonometry A* Question
Example:
ABCDEFG is a solid cuboid.
a) Find the length BE
b) Calculate the angle that BH makes with the plane ABFE.
IB Math Studies: 3D Trigonometry
Examples:
1. A room is in the shape of a cuboid. Its floor measures 7.2m by 9.6m and its height is 3.5m.
a) Calculate the length of AC
b) Calculate the length of AG
c) Calculate the angle that AG makes with the floor.
2. The right pyramid shown in the diagram has a square base with sides of length 40 cm. The height of the pyramid is also 40 cm.
a) Find the length of OB.
b) Find the size of angle OBP.
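A worked sketch of these two examples, using Pythagoras' Theorem and right-angled trigonometry (assuming G lies directly above C in example 1, and that O is the apex and P the centre of the base in example 2):

$$AC = \sqrt{7.2^2 + 9.6^2} = \sqrt{144} = 12 \text{ m}, \qquad AG = \sqrt{AC^2 + 3.5^2} = \sqrt{156.25} = 12.5 \text{ m}$$

$$\tan\theta = \frac{3.5}{12} \;\Rightarrow\; \theta \approx 16.3^\circ \quad \text{(angle between } AG \text{ and the floor)}$$

$$PB = \tfrac{1}{2}\sqrt{40^2 + 40^2} = 20\sqrt{2} \text{ cm}, \qquad OB = \sqrt{40^2 + (20\sqrt{2})^2} = \sqrt{2400} \approx 49.0 \text{ cm}$$

$$\tan(\angle OBP) = \frac{40}{20\sqrt{2}} = \sqrt{2} \;\Rightarrow\; \angle OBP \approx 54.7^\circ$$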
Trigonometry and Pythagoras in 3D Shapes
Calculating an angle between an edge and a plane in a 3D shape using Pythagoras and trigonometry.
Example:
For this cuboid calculate
a) Length AG
b) Angle between AG and the plane ABCD
Angle between a line and a plane
AddMaths Ex8A Q1
3D Trigonometry Problem
The worked solution to a three-dimensional trigonometry problem.
A tree on the far side of a river bank is used to determine the width of the river with the help of a few right angle triangles.
Step 1: Identify the right angles in your diagram.
Step 2: Include all the measurements in your diagram.
How to do Trigonometry in Three dimensions?
3D trig Pythagoras cuboid Part 1
Example:
ABCDEFGH is a cuboid with dimensions 4m, 2m and 1.5m. X is the midpoint of the side EF. Find the lengths AC, AG and AX. Find the angles GAC and AXB.
3D Trig Pythagoras cuboid part 2. 3D Trig Pythagoras cuboid part 3. 3D Trig Pythagoras cuboid part 4. 3D Trig Pythagoras cuboid part 5.
5 x steps to make Magento data a piece of Cake
By in Ecommerce, Magento
Magento is quite a beast. An open source e-commerce platform that’s able to accommodate all types of sale. From Burger King to Kurt Geiger, Kennards to Seat, its versatility has been harnessed and modified to suit all manner of business.
And yes, there are many resources and themes out there on setting up and creating the perfect shop. But having recently spent far, far too many hours lately inputting attributes, creating sets, and developing products, one thing I have never found was any information on the logic behind data-loading.
Working in data management for well over a decade, I have seen all too often that basic common sense gets overlooked in favour of 'quick fixes' or 'desperate urgency', and the subsequent cleanup operation can cost the business more than getting it right from the off. Like baking a cake: one wouldn't start without knowing the recipe and making sure every ingredient is to hand, otherwise the cake won't be edible by the end of it.
So please read on to find my 5 things to bear in mind when entering data into Magento (or any other database for that matter), and how to ensure the results aren’t half-baked.
1. Preparation, Preparation, Preparation
Preparation is vitally important. I’m not talking about the data gathering here. I should hope this has already been done, otherwise there is little point bothering to data load at all. But cleaning, normalising, and prepping, ready for the load, is so often missed and mistakenly overlooked for the sake of firing data in.
The database is broken up into the following 4 x segments:
1. Attributes – the pieces of information that make up a product
2. Attribute Sets – the template of attributes that define what goes into each product
3. Categories – the menu structure that defines where the product appears in the shop (the types of products)
4. Products – the accumulated sum of the previous segments in this list
Quite simply, if you build your products in the above order, you should have no issues with loading the product. But ensuring you have all of the attributes in the beginning is key to moving forwards with ease and efficiency.
2. Attributes – Gotta love a spreadsheet
Ultimately, the data is the Attributes. And like a baker who won’t start creating their gateau without knowing they have noted (and sourced) all of the ingredients to make it properly, so no-one should start building a database without having listed all of the attributes they are going to need from the base up, and then subsequently getting the details on them.
The simplest form of database is a spreadsheet, and it's a great place to start when gathering data together. Just as a written-down recipe has certain parts to it (e.g. the base; the filling; the ganache etc.), so the attributes of a product have their own parts to them.
Imagine a black forest gateau:
Attribute | Black Forest Gateau
--- | ---
Sponge | eggs, caster sugar, cocoa powder, dark chocolate
Filling | tinned cherries, kirsch, cherry jam, whipping cream
Decoration | fresh cherries
The gateau itself is made up of three types of attribute, and these attributes have values associated to them. If you added another cake, it may share the same attribute values (e.g. eggs, sugar) or have different ones (e.g. in a carrot cake: carrots). If you subsequently added more products, you may realise that a large number of attributes and values are shared, and there may be a few more attributes to add to the list.
3. Sort it out!
Once the attributes are all listed in the spreadsheet, you can then play with it to get a list of unique attribute values, ready to be imported, and separated from the products. The beauty of going product by product to get the attribute list together in the first place means you still have a sheet with all of the information related to its specific product, but you can take the sections you need, one at a time, and build this into the relational database in Magento.
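If it helps to picture that step, here is a tiny sketch of the de-duplication (the CSV layout and file name are assumptions; a spreadsheet's 'remove duplicates' feature does the same job):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.Set;
    import java.util.TreeSet;

    public class UniqueAttributeValues {
        public static void main(String[] args) throws IOException {
            Set<String> unique = new TreeSet<>(); // sorted, duplicates dropped
            for (String line : Files.readAllLines(Paths.get("products.csv"))) {
                String[] cells = line.split(",");
                // assumed columns: 0 = product, 1 = attribute, 2 = value
                if (cells.length > 2)
                    unique.add(cells[1].trim() + ": " + cells[2].trim());
            }
            unique.forEach(System.out::println);
        }
    }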
4. Ready, set, go!
So the attributes have been sorted and loaded into the database, now you need to start looking at attribute sets. The sets collate the attributes that will be associated to specific types of products. Not all products share the same attributes. In fact, there may be certain types that have some that are almost identical, like models of cars, or t-shirts that come in different sizes or colours. So the attribute set is the collation of attributes that make the products what they are.
Ensure the attribute sets cover the various models of products; these make it much easier, when creating the products, to select the relevant attribute values for them. So with ingredients, there may be an attribute set that covers sponge cakes, but also one that covers tray bakes!
5. Where d’ya want it?
Once everything is in, it only remains to categorise them and actually create your products. The reason the categories go in before the products is because there are fewer of them, and the products will subsequently be associated to them. You could manage a whole database of courses! For instance: the database could have 30 cakes, but they are only to be listed under the ‘cake’ category. There could be 20 ‘main courses’, but you wouldn’t want them popping up in the ‘dessert’ section. Categories can therefore work this way:
• Starter
  • Fish dishes
  • Meat dishes
  • Vegetarian
• Main
  • Fish dishes
  • Meat dishes
  • Vegetarian
• Dessert
  • Sweet
  • Savoury
  • Cake
• Drinks
  • Alcoholic
  • Non-Alcoholic
  • Hot
Be methodical, and copy your products. If you prepped correctly, you will still have all of the products listed in your spreadsheet against their relevant attributes and values. Those that are almost identical can be duplicated easily, and the minor changes made. In all, Magento is a very simple relational database and should, if prepared correctly, be a simple and easy-to-use tool. So easy, in fact, you can have your cake and eat it too.
Commit f6f63c91 authored by Marco Lorini
2020-10-12 ML: add ExternalDNS section
parent d6d9bdd5
@@ -193,11 +193,13 @@ $ kubefedctl disable services --kubefed-namespace kube-federation-system
**N.B.**: Do not federate the “kind: pod” resource. In this case, federating a whole namespace will federate pods and deployments at the same time. Deployments will create replicasets which in turn create pods. It will result in a duplication of the pods resources.
## Federate an application

Below is the procedure to deploy and federate an application and enable ExternalDNS.

**N.B.**: all commands must be run on the Host Cluster.

### Create namespace

The first step is to create a namespace in the host cluster:
@@ -244,3 +246,98 @@ $ kubectl --context=<host-cluster-context> create -f resource/federated_namespac
```
**N.B.**: the option `--context` is not necessary but make sure that the right context is selected.
### Create RBAC for ExternalDNS
Now it is possible to deploy ExternalDNS in the federated namespace. Create the RBAC for the ExternalDNS:
```yaml
# rbac_externaldns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: external-dns
namespace: fed-namespace
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: external-dns
namespace: fed-namespace
rules:
- apiGroups: [""]
resources: ["services","endpoints","pods"]
verbs: ["get","watch","list"]
- apiGroups: ["extensions"]
resources: ["ingresses"]
verbs: ["get","watch","list"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get","watch","list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: external-dns-viewer
namespace: fed-namespace
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: external-dns
subjects:
- kind: ServiceAccount
name: external-dns
namespace: fed-namespace
```
```bash
$ kubectl --context=<host-cluster-context> create -f resource/rbac_externaldns.yaml
```
### Deploy ExternalDNS for PowerDNS
Create the ExternalDNS deployment and configure it for PowerDNS (in our case):
```yaml
# externaldns.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: external-dns
namespace: fed-namespace
spec:
strategy:
type: Recreate
selector:
matchLabels:
app: external-dns
template:
metadata:
labels:
app: external-dns
spec:
# Only use if you're also using RBAC
serviceAccountName: external-dns
containers:
- name: external-dns
image: registry.opensource.zalan.do/teapot/external-dns:latest
args:
- --source=crd # or service or ingress
- --crd-source-apiversion=multiclusterdns.kubefed.io/v1alpha1
- --crd-source-kind=DNSEndpoint
- --provider=pdns
- --pdns-server=http://<ip>:<port>
- --pdns-api-key=<api-key>
- --registry=txt
- --txt-prefix=cname
- --domain-filter=<domain> # will make ExternalDNS see only the zones matching provided domain; omit to process all available zones in PowerDNS
- --policy=upsert-only # would prevent ExternalDNS from deleting any records, omit to enable full synchronization
```
```bash
$ kubectl --context=<host-cluster-context> create -f resource/externaldns.yaml
```
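Once the deployment is created, a quick sanity check can help (an illustrative extra step, not part of the original procedure; these are standard kubectl commands):

```bash
$ kubectl --context=<host-cluster-context> -n fed-namespace get pods
$ kubectl --context=<host-cluster-context> -n fed-namespace logs deployment/external-dns
```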
```yaml
# externaldns.yaml (renamed from create_externaldns.yaml)
apiVersion: apps/v1
kind: Deployment
metadata:
...
```

```yaml
# rbac_externaldns.yaml (renamed from create_rbac_externaldns.yaml)
apiVersion: v1
kind: ServiceAccount
metadata:
...
```
Thread: Sending email using JSP
#1
Join Date: Nov 2004
Posts: 34
Sending email using JSP
Hi
I have a web application that requires me to send (not required to receive) emails to others. Is it possible to use JSP to do it and if so, are there any other tools required for it? Is ASP a better alternative for this?
Thanks
#2
Join Date: Nov 2004
Posts: 1
Hi,
If an IIS server is used then it is very easy to send mail by using CDONTS in an ASP page.
enjoy
#3
Join Date: Nov 2003
Location: Jerryville, Tejas
Posts: 11,715
Sending mail is pretty simple using the JavaMail bundled with the newer versions (since 1.3 I think). You can do it directly from a JSP but the proper way to do web design is the Model View Controller pattern (JSP2) where the JSP is the View. The form on the JSP would contain a text area for the body of the email and potentially Subject and To inputs. The servlet handling the form would create a mail object, set the to, subject and body then send it (by default using the SMTP server the host system uses).
The JSP1.5 way to do it would be create a mailer bean that the JSP would use to send the mail in the same way as above except using the the CGI style "post to itself" form handler.
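Something like this, assuming JavaMail and the Servlet API are on the classpath (the SMTP host, form field names, and redirect target are placeholders — a sketch rather than a definitive implementation):

Code:
import java.io.IOException;
import java.util.Properties;
import javax.mail.Message;
import javax.mail.MessagingException;
import javax.mail.Session;
import javax.mail.Transport;
import javax.mail.internet.InternetAddress;
import javax.mail.internet.MimeMessage;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

public class MailServlet extends HttpServlet {
    protected void doPost(HttpServletRequest req, HttpServletResponse resp)
            throws ServletException, IOException {
        // Mail session using the local SMTP server (assumed host)
        Properties props = new Properties();
        props.put("mail.smtp.host", "localhost");
        Session session = Session.getDefaultInstance(props, null);
        try {
            // Build the message from the form fields (assumed field names)
            Message msg = new MimeMessage(session);
            msg.setFrom(new InternetAddress("[email protected]")); // placeholder
            msg.setRecipient(Message.RecipientType.TO,
                    new InternetAddress(req.getParameter("to")));
            msg.setSubject(req.getParameter("subject"));
            msg.setText(req.getParameter("body"));
            Transport.send(msg);
            resp.sendRedirect("sent.jsp"); // assumed confirmation page (the View)
        } catch (MessagingException e) {
            throw new ServletException(e);
        }
    }
}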
#4
Join Date: Aug 2003
Posts: 74
Hi,
I'm trying to send email from my JSP page in HTML format, but I don't know how to do it.
Kindly give me some solution; here's my code:
<%@page contentType="text/html"%>
<%@ page import="util.*" %>
<%
String Email = request.getParameter("Email");
String FirstName = request.getParameter("FirstName");
String LastName = request.getParameter("LastName");
String PhoneNumber = request.getParameter("PhoneNumber");
String Country = request.getParameter("Country");
String Occasion = request.getParameter("Occasion");
String Age = request.getParameter("Age");
String Gender = request.getParameter("Gender");
String Interests = request.getParameter("Interests");
String PriceRange = request.getParameter("PriceRange");
String Colors = request.getParameter("Colors");
String Relationship = request.getParameter("Relationship");
String mailbody = "<b>Your Information<br>";
mailbody = mailbody + "Email" + Email + "<br>";
mailbody = mailbody + "First Name" + FirstName + "<br>";
mailbody = mailbody + "Last Name" + LastName + "<br>";
mailbody = mailbody + "Phone Number" + PhoneNumber + "<br>";
mailbody = mailbody + "Country" + Country + "<br>";
mailbody = mailbody + "<b>Gifting Information<br>";
mailbody = mailbody + "Occasion" + Occasion + "<br>";
mailbody = mailbody + "Recipient Age" + Age + "<br>";
mailbody = mailbody + "Recipient Gender" + Gender + "<br>";
mailbody = mailbody + "Interests" + Interests + "<br>";
mailbody = mailbody + "Price Range" + PriceRange + "<br>";
mailbody = mailbody + "Favorite Colors" + Colors + "<br>";
mailbody = mailbody + "Relationship to You" + Relationship + "<br>";
String to = "[email protected]";
String subject = "Gift Consultant";
SendMail sendMail = new SendMail();
sendMail.sendMail(to, Email, subject, mailbody, "localhost");
SendMail.setContent(mailbody, "text/html");
%>
#5
Join Date: Dec 2005
Posts: 38
Create a javamail bean and use it to send mail.
See the following code:
Code:
import java.io.*;
import javax.mail.*;
import javax.mail.internet.*;
import javax.activation.*;

public class SendEmail {

    public static void send(String smtpHost, int smtpPort,
                            String from, String to,
                            String subject, String content)
            throws AddressException, MessagingException {
        // Create a mail session
        java.util.Properties props = new java.util.Properties();
        props.put("mail.smtp.host", smtpHost);
        props.put("mail.smtp.port", "" + smtpPort);
        Session session = Session.getDefaultInstance(props, null);

        // Construct the message
        Message msg = new MimeMessage(session);
        msg.setFrom(new InternetAddress(from));
        msg.setRecipient(Message.RecipientType.TO, new InternetAddress(to));
        msg.setSubject(subject);
        msg.setText(content);

        // Send the message
        Transport.send(msg);
    }

    public static void main(String[] args) throws Exception {
        // Send a test message
        send("hostname", 25, "[email protected]", "[email protected]",
             "Hello", "Hello, \n\n How are you ?");
    }
}
See Javamail tutorial
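For the HTML question above: with this bean, it should be enough to swap msg.setText(content) for setContent — a minimal sketch, assuming JavaMail's MimeMessage:

Code:
import javax.mail.Message;
import javax.mail.MessagingException;

public class HtmlMailHelper {
    // Sets an HTML body on an existing JavaMail message instead of plain text.
    public static void setHtmlBody(Message msg, String html)
            throws MessagingException {
        msg.setContent(html, "text/html; charset=utf-8");
    }
}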
Web Development
The term web development refers to building and maintaining websites. It includes various web-related activities including web design, web programming, and database management.
Frontend and backend development together constitute web development.
Frontend web development refers to the part of a website or application that the user interacts and communicates with. It is also known as the client side of the website or application.
Backend development refers to developing and maintaining backend processes, including the database management system, dynamic web page generation, and server-side management. This is also referred to as server-side development, where data storage and all the backend processes are grouped.
Frontend code is used to create static websites, which simply display web pages, and is written with frontend technologies like HTML, vanilla JavaScript, jQuery, etc.
Dynamic websites, by contrast, involve managing databases and files across the system, user access control, and much more; this is done using backend technologies like PHP, Node.js, Python, etc.
A database is an organized collection of data in the form of tables or collections, and this data can be accessed electronically from anywhere over the internet.
The backend database development process involves maintaining, storing, deleting, and updating the data related to the website. This is usually done by database administrators, who manage the data related to an application. It also involves creating tables or collections in a database (DB) to store data, manipulating the data using various data manipulation techniques, backing the data up for later use, and generating reports that provide analytical or statistical insight into a particular web application.
Examples of database technologies and software are MySQL, Microsoft Access, Microsoft SQL Server, Oracle, MongoDB, etc.
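As a minimal sketch of backend code talking to one of these databases (the JDBC URL, credentials, and table here are assumptions for illustration, and a MySQL JDBC driver is assumed to be on the classpath):

    import java.math.BigDecimal;
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;

    public class ProductDao {
        public static void main(String[] args) throws SQLException {
            String url = "jdbc:mysql://localhost:3306/shop"; // assumed database
            try (Connection con = DriverManager.getConnection(url, "user", "password");
                 PreparedStatement insert = con.prepareStatement(
                         "INSERT INTO products (name, price) VALUES (?, ?)")) {
                insert.setString(1, "Widget");                  // store a new row
                insert.setBigDecimal(2, new BigDecimal("9.99"));
                insert.executeUpdate();
            }
        }
    }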
I have a simple domain object
def MyDomain{
String id
String userId
String blah
}
and a user Id domain object
class User {
    String id // maps to user_id column, omitted from example
    String firstName
    String lastName
}
Within grails 1.3.7, is there a way to join the tables together using criteria? We have many (80+) domain objects that use dynamically built criteria to do row lookups. We recently have needed to add a User Name to each of these row lookups but would like to avoid going through and modifying that many domain objects if possible, especially since the data is small.
I have looked at DetachedCriteria, but this is only applicable for grails 2.0+ and upgrading is not an option just for this. Other options I have seen involve putting User as a field in the MyDomain definition. Is there a better way of doing this without all that overhead? The corresponding query would be similar to:
select md.id, md.userId from myDomain md, user u where md.user_id = u.user_id order by u.last_name, u.first_name;
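In HQL terms, this is roughly what I'm after — a hedged sketch only (GORM's executeQuery accepts HQL, and an HQL theta-join should not require a mapped association):

    // Hedged sketch: HQL theta-join between unrelated domains via executeQuery.
    def rows = MyDomain.executeQuery(
        "select md.id, md.userId, u.firstName, u.lastName " +
        "from MyDomain md, User u " +
        "where md.userId = u.id " +
        "order by u.lastName, u.firstName")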
What overhead are you talking about? Why would adding a field of type User have more overhead than adding a field of type String? – JB Nizet Nov 19 '12 at 23:02
The actual coding overhead of adding a field to over 80 domain objects, not overhead in regards to performance – Joseph Nov 19 '12 at 23:52
Another option which I don't think will work (I would try, but away from my work computer now) is adding SQL to the end of the criteria. As I understand it though, SQLRestriction is for adding a 'restriction' to the end of the query, rather than simply adding a subquery – Joseph Nov 20 '12 at 0:45
For now we are adding the domain reference to User into the domains. However, I'd still be interested in seeing if there is a solution to this. – Joseph Nov 20 '12 at 21:46
Install Sage X3 using Azure SQL Elastic Pools as your Database
In my previous post, I explained how to deploy a SQL Elastic Pool in Azure Portal. In this post, I am going to show how to perform a Sage X3 Solution configuration using the deployed Database, so let's dive right in.
To create a solution, open the Solutions component on the left list panel, and click the New Solution button of the general tool bar.
The following dialog box entitled Create a new solution is displayed; it enables to build the basic solution using the components that have already been installed on the servers:
Perform the following steps:
1. Click the New button.
2. Enter the name of the server where the Application and Runtime components are installed.
3. Enter the port, user, and password for the AdxAdmin service. The AdxAdmin default port is 1895.
4. Click the Next button.
The process starts with the selection of the application component that refers to the product and determines the solution type.
The following dialog box entitled Create a new solution is displayed:
To select the application component to be used in your solution:
1. Set the cursor on the selected Application component.
2. Click the Next button.
Then the Console submits a list of main runtimes available for your solution in the following dialog box:
To select the main runtime to be used in your solution:
1. Set the cursor on the selected Runtime component.
2. Click the Next button.
The next step is to choose via the following dialog box the configuration method for the database to be used by the solution. For Elastic Pools, you need to choose Manual configuration. Click the Next button.
Enter the following details in the following window to connect to the Pools:
1. Name of data server: Your Azure SQL Server name noted in the previous article. In usually ends with *database.windows.net
2. Type of database: Elastic Pools Azure
3. ElasticPool Name: The name of the pool created in the previous article.
4. ODBC Tools Path: The directory in which SQL Server Command Line Utilities V15 is installed. You need to install this manually if you have not already done so: https://go.microsoft.com/fwlink/?linkid=2142258
5. Click the Next button.
6. Click the OK button
In the Solution Configuration window, provide the following details:
• DB Login: This is the database administrator login username we created in the previous article.
• DB Login user password: This is the database administrator login password we created in the previous article.
• Database name: Choose a database name to be created. The default value is “sagex3
• Use Apache: It is recommended to choose No in this field, but you may choose to use Apache if you wish to do so.
• ODBC Tools path: The directory in which SQL Server Command Line Utilities V15 is installed.
• Pwsh[.exe] executable fullpath: The directory in which PowerShell 7.2 and above is installed
• User login for Sage X3 service: The Windows login account to use by the Runtime component service
• User account password: The password of the above user.
Once these parameters are entered, click the Save button of the Configuration Console.
To start the configuration of the database server, click the Configure Database button.
Choose a local directory in which the temporary Database generation scripts will be saved. I am choosing to save them on my desktop:
Two files will be saved to the local directory. A bat file and a sql file including the scripts to generate the database.
Open SQL Server Management Studio, connect to your Pools and run the query which is in the database_CrDb.sql file generated above.
Paste the contents of database_CrDb.sql and if necessary, change the EDITION to Standard if, as per the last article, you chose to go with the DTU Based edition of the Pools, rather than Core based edition. You will also need to remove the MAXSIZE argument if going for the Standard edition.
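For reference, a hedged illustration of that tweak (pool and database names are placeholders; MAXSIZE is omitted, as required for the Standard edition):

    -- Illustrative only: create the database inside an existing elastic pool.
    CREATE DATABASE [sagex3] (
        EDITION = 'Standard',
        SERVICE_OBJECTIVE = ELASTIC_POOL ( name = [myElasticPool] )
    );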
Upon the successful execution of this script, you may go back to the Management Console and continue with the configuration of the Application component and the rest of the Sage X3 installation process as usual.
cregit-Linux how code gets into the kernel
Release 4.12 include/media/v4l2-tpg.h
Directory: include/media
/*
* v4l2-tpg.h - Test Pattern Generator
*
* Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _V4L2_TPG_H_
#define _V4L2_TPG_H_
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <media/v4l2-tpg-colors.h>
enum tpg_pattern {
TPG_PAT_75_COLORBAR,
TPG_PAT_100_COLORBAR,
TPG_PAT_CSC_COLORBAR,
TPG_PAT_100_HCOLORBAR,
TPG_PAT_100_COLORSQUARES,
TPG_PAT_BLACK,
TPG_PAT_WHITE,
TPG_PAT_RED,
TPG_PAT_GREEN,
TPG_PAT_BLUE,
TPG_PAT_CHECKERS_16X16,
TPG_PAT_CHECKERS_2X2,
TPG_PAT_CHECKERS_1X1,
TPG_PAT_COLOR_CHECKERS_2X2,
TPG_PAT_COLOR_CHECKERS_1X1,
TPG_PAT_ALTERNATING_HLINES,
TPG_PAT_ALTERNATING_VLINES,
TPG_PAT_CROSS_1_PIXEL,
TPG_PAT_CROSS_2_PIXELS,
TPG_PAT_CROSS_10_PIXELS,
TPG_PAT_GRAY_RAMP,
/* Must be the last pattern */
TPG_PAT_NOISE,
};
extern const char * const tpg_pattern_strings[];
enum tpg_quality {
TPG_QUAL_COLOR,
TPG_QUAL_GRAY,
TPG_QUAL_NOISE
};
enum tpg_video_aspect {
TPG_VIDEO_ASPECT_IMAGE,
TPG_VIDEO_ASPECT_4X3,
TPG_VIDEO_ASPECT_14X9_CENTRE,
TPG_VIDEO_ASPECT_16X9_CENTRE,
TPG_VIDEO_ASPECT_16X9_ANAMORPHIC,
};
enum tpg_pixel_aspect {
TPG_PIXEL_ASPECT_SQUARE,
TPG_PIXEL_ASPECT_NTSC,
TPG_PIXEL_ASPECT_PAL,
};
enum tpg_move_mode {
TPG_MOVE_NEG_FAST,
TPG_MOVE_NEG,
TPG_MOVE_NEG_SLOW,
TPG_MOVE_NONE,
TPG_MOVE_POS_SLOW,
TPG_MOVE_POS,
TPG_MOVE_POS_FAST,
};
enum tgp_color_enc {
TGP_COLOR_ENC_RGB,
TGP_COLOR_ENC_YCBCR,
TGP_COLOR_ENC_HSV,
TGP_COLOR_ENC_LUMA,
};
extern const char * const tpg_aspect_strings[];
#define TPG_MAX_PLANES 3
#define TPG_MAX_PAT_LINES 8
struct tpg_data {
/* Source frame size */
unsigned src_width, src_height;
/* Buffer height */
unsigned buf_height;
/* Scaled output frame size */
unsigned scaled_width;
u32 field;
bool field_alternate;
/* crop coordinates are frame-based */
struct v4l2_rect crop;
/* compose coordinates are format-based */
struct v4l2_rect compose;
/* border and square coordinates are frame-based */
struct v4l2_rect border;
struct v4l2_rect square;
/* Color-related fields */
enum tpg_quality qual;
unsigned qual_offset;
u8 alpha_component;
bool alpha_red_only;
u8 brightness;
u8 contrast;
u8 saturation;
s16 hue;
u32 fourcc;
enum tgp_color_enc color_enc;
u32 colorspace;
u32 xfer_func;
u32 ycbcr_enc;
u32 hsv_enc;
/*
* Stores the actual transfer function, i.e. will never be
* V4L2_XFER_FUNC_DEFAULT.
*/
u32 real_xfer_func;
/*
* Stores the actual Y'CbCr encoding, i.e. will never be
* V4L2_YCBCR_ENC_DEFAULT.
*/
u32 real_hsv_enc;
u32 real_ycbcr_enc;
u32 quantization;
/*
* Stores the actual quantization, i.e. will never be
* V4L2_QUANTIZATION_DEFAULT.
*/
u32 real_quantization;
enum tpg_video_aspect vid_aspect;
enum tpg_pixel_aspect pix_aspect;
unsigned rgb_range;
unsigned real_rgb_range;
unsigned buffers;
unsigned planes;
bool interleaved;
u8 vdownsampling[TPG_MAX_PLANES];
u8 hdownsampling[TPG_MAX_PLANES];
/*
* horizontal positions must be ANDed with this value to enforce
* correct boundaries for packed YUYV values.
*/
unsigned hmask[TPG_MAX_PLANES];
/* Used to store the colors in native format, either RGB or YUV */
u8 colors[TPG_COLOR_MAX][3];
u8 textfg[TPG_MAX_PLANES][8], textbg[TPG_MAX_PLANES][8];
/* size in bytes for two pixels in each plane */
unsigned twopixelsize[TPG_MAX_PLANES];
unsigned bytesperline[TPG_MAX_PLANES];
/* Configuration */
enum tpg_pattern pattern;
bool hflip;
bool vflip;
unsigned perc_fill;
bool perc_fill_blank;
bool show_border;
bool show_square;
bool insert_sav;
bool insert_eav;
/* Test pattern movement */
enum tpg_move_mode mv_hor_mode;
int mv_hor_count;
int mv_hor_step;
enum tpg_move_mode mv_vert_mode;
int mv_vert_count;
int mv_vert_step;
bool recalc_colors;
bool recalc_lines;
bool recalc_square_border;
/* Used to store TPG_MAX_PAT_LINES lines, each with up to two planes */
unsigned max_line_width;
u8 *lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
u8 *downsampled_lines[TPG_MAX_PAT_LINES][TPG_MAX_PLANES];
u8 *random_line[TPG_MAX_PLANES];
u8 *contrast_line[TPG_MAX_PLANES];
u8 *black_line[TPG_MAX_PLANES];
};
void tpg_init(struct tpg_data *tpg, unsigned w, unsigned h);
int tpg_alloc(struct tpg_data *tpg, unsigned max_w);
void tpg_free(struct tpg_data *tpg);
void tpg_reset_source(struct tpg_data *tpg, unsigned width, unsigned height,
u32 field);
void tpg_log_status(struct tpg_data *tpg);
void tpg_set_font(const u8 *f);
void tpg_gen_text(const struct tpg_data *tpg,
u8 *basep[TPG_MAX_PLANES][2], int y, int x, char *text);
void tpg_calc_text_basep(struct tpg_data *tpg,
u8 *basep[TPG_MAX_PLANES][2], unsigned p, u8 *vbuf);
unsigned tpg_g_interleaved_plane(const struct tpg_data *tpg, unsigned buf_line);
void tpg_fill_plane_buffer(struct tpg_data *tpg, v4l2_std_id std,
unsigned p, u8 *vbuf);
void tpg_fillbuffer(struct tpg_data *tpg, v4l2_std_id std,
unsigned p, u8 *vbuf);
bool tpg_s_fourcc(struct tpg_data *tpg, u32 fourcc);
void tpg_s_crop_compose(struct tpg_data *tpg, const struct v4l2_rect *crop,
const struct v4l2_rect *compose);
static inline void tpg_s_pattern(struct tpg_data *tpg, enum tpg_pattern pattern)
{
	if (tpg->pattern == pattern)
		return;
	tpg->pattern = pattern;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              37      100.00%  1        100.00%
  Total                     37      100.00%  1        100.00%

static inline void tpg_s_quality(struct tpg_data *tpg, enum tpg_quality qual,
				 unsigned qual_offset)
{
	if (tpg->qual == qual && tpg->qual_offset == qual_offset)
		return;
	tpg->qual = qual;
	tpg->qual_offset = qual_offset;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              52      100.00%  1        100.00%
  Total                     52      100.00%  1        100.00%

static inline enum tpg_quality tpg_g_quality(const struct tpg_data *tpg)
{
	return tpg->qual;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              19      100.00%  1        100.00%
  Total                     19      100.00%  1        100.00%

static inline void tpg_s_alpha_component(struct tpg_data *tpg,
					 u8 alpha_component)
{
	if (tpg->alpha_component == alpha_component)
		return;
	tpg->alpha_component = alpha_component;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%

static inline void tpg_s_alpha_mode(struct tpg_data *tpg, bool red_only)
{
	if (tpg->alpha_red_only == red_only)
		return;
	tpg->alpha_red_only = red_only;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%
static inline void tpg_s_brightness(struct tpg_data *tpg, u8 brightness)
{
	if (tpg->brightness == brightness)
		return;
	tpg->brightness = brightness;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%

static inline void tpg_s_contrast(struct tpg_data *tpg, u8 contrast)
{
	if (tpg->contrast == contrast)
		return;
	tpg->contrast = contrast;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%

static inline void tpg_s_saturation(struct tpg_data *tpg, u8 saturation)
{
	if (tpg->saturation == saturation)
		return;
	tpg->saturation = saturation;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%

static inline void tpg_s_hue(struct tpg_data *tpg, s16 hue)
{
	if (tpg->hue == hue)
		return;
	tpg->hue = hue;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%

static inline void tpg_s_rgb_range(struct tpg_data *tpg, unsigned rgb_range)
{
	if (tpg->rgb_range == rgb_range)
		return;
	tpg->rgb_range = rgb_range;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%
static inline void tpg_s_real_rgb_range(struct tpg_data *tpg, unsigned rgb_range)
{
	if (tpg->real_rgb_range == rgb_range)
		return;
	tpg->real_rgb_range = rgb_range;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%

static inline void tpg_s_colorspace(struct tpg_data *tpg, u32 colorspace)
{
	if (tpg->colorspace == colorspace)
		return;
	tpg->colorspace = colorspace;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%

static inline u32 tpg_g_colorspace(const struct tpg_data *tpg)
{
	return tpg->colorspace;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              18      100.00%  1        100.00%
  Total                     18      100.00%  1        100.00%

static inline void tpg_s_ycbcr_enc(struct tpg_data *tpg, u32 ycbcr_enc)
{
	if (tpg->ycbcr_enc == ycbcr_enc)
		return;
	tpg->ycbcr_enc = ycbcr_enc;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%

static inline u32 tpg_g_ycbcr_enc(const struct tpg_data *tpg)
{
	return tpg->ycbcr_enc;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              18      100.00%  1        100.00%
  Total                     18      100.00%  1        100.00%
static inline void tpg_s_hsv_enc(struct tpg_data *tpg, u32 hsv_enc)
{
	if (tpg->hsv_enc == hsv_enc)
		return;
	tpg->hsv_enc = hsv_enc;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Ricardo Ribalda Delgado   36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%

static inline u32 tpg_g_hsv_enc(const struct tpg_data *tpg)
{
	return tpg->hsv_enc;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Ricardo Ribalda Delgado   18      100.00%  1        100.00%
  Total                     18      100.00%  1        100.00%

static inline void tpg_s_xfer_func(struct tpg_data *tpg, u32 xfer_func)
{
	if (tpg->xfer_func == xfer_func)
		return;
	tpg->xfer_func = xfer_func;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%

static inline u32 tpg_g_xfer_func(const struct tpg_data *tpg)
{
	return tpg->xfer_func;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              18      100.00%  1        100.00%
  Total                     18      100.00%  1        100.00%

static inline void tpg_s_quantization(struct tpg_data *tpg, u32 quantization)
{
	if (tpg->quantization == quantization)
		return;
	tpg->quantization = quantization;
	tpg->recalc_colors = true;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              36      100.00%  1        100.00%
  Total                     36      100.00%  1        100.00%

static inline u32 tpg_g_quantization(const struct tpg_data *tpg)
{
	return tpg->quantization;
}

Contributors
  Person                    Tokens  Prop     Commits  CommitProp
  Hans Verkuil              18      100.00%  1        100.00%
  Total                     18      100.00%  1        100.00%
static inline unsigned tpg_g_buffers(const struct tpg_data *tpg) { return tpg->buffers; }
Contributors
PersonTokensPropCommitsCommitProp
Hans Verkuil18100.00%1100.00%
Total18100.00%1100.00%
static inline unsigned tpg_g_planes(const struct tpg_data *tpg) { return tpg->interleaved ? 1 : tpg->planes; }
Contributors
PersonTokensPropCommitsCommitProp
Hans Verkuil24100.00%2100.00%
Total24100.00%2100.00%
static inline bool tpg_g_interleaved(const struct tpg_data *tpg) { return tpg->interleaved; }
Contributors
PersonTokensPropCommitsCommitProp
Hans Verkuil18100.00%1100.00%
Total18100.00%1100.00%
static inline unsigned tpg_g_twopixelsize(const struct tpg_data *tpg, unsigned plane) { return tpg->twopixelsize[plane]; }
Contributors
PersonTokensPropCommitsCommitProp
Hans Verkuil24100.00%1100.00%
Total24100.00%1100.00%
static inline unsigned tpg_hdiv(const struct tpg_data *tpg, unsigned plane, unsigned x)
{
	return ((x / tpg->hdownsampling[plane]) & tpg->hmask[plane]) *
		tpg->twopixelsize[plane] / 2;
}

static inline unsigned tpg_hscale(const struct tpg_data *tpg, unsigned x)
{
	return (x * tpg->scaled_width) / tpg->src_width;
}

static inline unsigned tpg_hscale_div(const struct tpg_data *tpg, unsigned plane, unsigned x)
{
	return tpg_hdiv(tpg, plane, tpg_hscale(tpg, x));
}

static inline unsigned tpg_g_bytesperline(const struct tpg_data *tpg, unsigned plane)
{
	return tpg->bytesperline[plane];
}

static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsigned bpl)
{
	unsigned p;

	if (tpg->buffers > 1) {
		tpg->bytesperline[plane] = bpl;
		return;
	}

	for (p = 0; p < tpg_g_planes(tpg); p++) {
		unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];

		tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p];
	}
	if (tpg_g_interleaved(tpg))
		tpg->bytesperline[1] = tpg->bytesperline[0];
}

static inline unsigned tpg_g_line_width(const struct tpg_data *tpg, unsigned plane)
{
	unsigned w = 0;
	unsigned p;

	if (tpg->buffers > 1)
		return tpg_g_bytesperline(tpg, plane);
	for (p = 0; p < tpg_g_planes(tpg); p++) {
		unsigned plane_w = tpg_g_bytesperline(tpg, p);

		w += plane_w / tpg->vdownsampling[p];
	}
	return w;
}

static inline unsigned tpg_calc_line_width(const struct tpg_data *tpg, unsigned plane, unsigned bpl)
{
	unsigned w = 0;
	unsigned p;

	if (tpg->buffers > 1)
		return bpl;
	for (p = 0; p < tpg_g_planes(tpg); p++) {
		unsigned plane_w = bpl * tpg->twopixelsize[p] / tpg->twopixelsize[0];

		plane_w /= tpg->hdownsampling[p];
		w += plane_w / tpg->vdownsampling[p];
	}
	return w;
}

static inline unsigned tpg_calc_plane_size(const struct tpg_data *tpg, unsigned plane)
{
	if (plane >= tpg_g_planes(tpg))
		return 0;

	return tpg_g_bytesperline(tpg, plane) * tpg->buf_height /
	       tpg->vdownsampling[plane];
}
static inline void tpg_s_buf_height(struct tpg_data *tpg, unsigned h)
{
	tpg->buf_height = h;
}

static inline void tpg_s_field(struct tpg_data *tpg, unsigned field, bool alternate)
{
	tpg->field = field;
	tpg->field_alternate = alternate;
}

static inline void tpg_s_perc_fill(struct tpg_data *tpg, unsigned perc_fill)
{
	tpg->perc_fill = perc_fill;
}

static inline unsigned tpg_g_perc_fill(const struct tpg_data *tpg)
{
	return tpg->perc_fill;
}

static inline void tpg_s_perc_fill_blank(struct tpg_data *tpg, bool perc_fill_blank)
{
	tpg->perc_fill_blank = perc_fill_blank;
}

static inline void tpg_s_video_aspect(struct tpg_data *tpg, enum tpg_video_aspect vid_aspect)
{
	if (tpg->vid_aspect == vid_aspect)
		return;
	tpg->vid_aspect = vid_aspect;
	tpg->recalc_square_border = true;
}

static inline enum tpg_video_aspect tpg_g_video_aspect(const struct tpg_data *tpg)
{
	return tpg->vid_aspect;
}

static inline void tpg_s_pixel_aspect(struct tpg_data *tpg, enum tpg_pixel_aspect pix_aspect)
{
	if (tpg->pix_aspect == pix_aspect)
		return;
	tpg->pix_aspect = pix_aspect;
	tpg->recalc_square_border = true;
}

static inline void tpg_s_show_border(struct tpg_data *tpg, bool show_border)
{
	tpg->show_border = show_border;
}

static inline void tpg_s_show_square(struct tpg_data *tpg, bool show_square)
{
	tpg->show_square = show_square;
}

static inline void tpg_s_insert_sav(struct tpg_data *tpg, bool insert_sav)
{
	tpg->insert_sav = insert_sav;
}

static inline void tpg_s_insert_eav(struct tpg_data *tpg, bool insert_eav)
{
	tpg->insert_eav = insert_eav;
}
void tpg_update_mv_step(struct tpg_data *tpg);

static inline void tpg_s_mv_hor_mode(struct tpg_data *tpg, enum tpg_move_mode mv_hor_mode)
{
	tpg->mv_hor_mode = mv_hor_mode;
	tpg_update_mv_step(tpg);
}

static inline void tpg_s_mv_vert_mode(struct tpg_data *tpg, enum tpg_move_mode mv_vert_mode)
{
	tpg->mv_vert_mode = mv_vert_mode;
	tpg_update_mv_step(tpg);
}

static inline void tpg_init_mv_count(struct tpg_data *tpg)
{
	tpg->mv_hor_count = tpg->mv_vert_count = 0;
}

static inline void tpg_update_mv_count(struct tpg_data *tpg, bool frame_is_field)
{
	tpg->mv_hor_count += tpg->mv_hor_step * (frame_is_field ? 1 : 2);
	tpg->mv_vert_count += tpg->mv_vert_step * (frame_is_field ? 1 : 2);
}

static inline void tpg_s_hflip(struct tpg_data *tpg, bool hflip)
{
	if (tpg->hflip == hflip)
		return;
	tpg->hflip = hflip;
	tpg_update_mv_step(tpg);
	tpg->recalc_lines = true;
}

static inline bool tpg_g_hflip(const struct tpg_data *tpg)
{
	return tpg->hflip;
}

static inline void tpg_s_vflip(struct tpg_data *tpg, bool vflip)
{
	tpg->vflip = vflip;
}

static inline bool tpg_g_vflip(const struct tpg_data *tpg)
{
	return tpg->vflip;
}

static inline bool tpg_pattern_is_static(const struct tpg_data *tpg)
{
	return tpg->pattern != TPG_PAT_NOISE &&
	       tpg->mv_hor_mode == TPG_MOVE_NONE &&
	       tpg->mv_vert_mode == TPG_MOVE_NONE;
}
#endif
Overall contributors: Hans Verkuil (2371 tokens, 96.78%; 15 commits, 75.00%), Ricardo Ribalda Delgado (75 tokens, 3.06%; 4 commits, 20.00%), Helen Mae Koike Fornazier (4 tokens, 0.16%; 1 commit, 5.00%). Total: 2450 tokens across 20 commits.
Directory: include/media
What is M Soft Update? And how does it work?
M Soft Update is a browser extension that currently works only on Google Chrome and advertises itself with the blurb "Get latest product comparison on Furniture deals. Visit product specific page on sites like..". Once installed, MSoft Update displays pop-ups that go through yourshoppingwizard.com to redirect you to unknown sites, which vary depending on the sites you visit. It also changes both your browser's default home page and its search provider. While it runs in your browser, you will see a constant stream of third-party sponsored content (ads, pop-ups, banners, and links) whose purpose is to generate profit through pay-per-click marketing.
The name MSoft Update may be intended to trick users into thinking it is a Microsoft update so that they install it. This deceptive trait, among others, is why it falls under the category of browser hijackers and potentially unwanted programs (PUPs). On top of that, the extension is distributed stealthily, so you should not let this kind of threat linger on your computer; remove it as soon as you can.
How does Msoft Update spread online?
Even though it only works on Google Chrome, this unwanted program can't be found on the Chrome Web Store. Put simply, MSoft Update has no reputable source. Instead, it is distributed in software bundles found on free file-sharing sites: when you install such a bundle, the unwanted programs packed alongside the legitimate one, MSoft Update among them, are installed too. To avoid this, always choose the Advanced or Custom installation option when installing software bundles.
To get rid of MSoft Update in Google Chrome, follow the removal guide below.
Step 1: Launch Google Chrome.
Step 2: Click the wrench icon. And from the drop-down menu click on Tools and then select Extensions.
Step 3: Under the Extensions tab, select Msoft Update or other suspicious browser extensions related to it and then click on the Trash icon.
Step 4: Click on Remove button from the confirmation dialog box.
Step 5: Reset Google Chrome. To do that, click the three stripes icon located at the upper-right corner of your screen. And then select Settings.
Step 6: Under Settings, look for Show advanced settings at the bottom of the page and click on it.
Step 7: Then click on the Reset browser settings button.
Step 8: After that, a dialog box pops up, click the Reset button to Reset Chrome.
Step 9: After you reset Chrome, enable the Phishing and Malware Protection in Chrome to prevent PUPs like Msoft Update from infecting your computer again in the future. To do that, click Settings once again.
Step 10: Select the Show advanced settings again.
Step 11: Go to the Privacy section and check the Enable phishing and malware protection option.
Step 12: Restart Chrome.
Follow the advanced steps below to make sure MSoft Update is fully removed:
Perform a full system scan using SpyRemover Pro.
1. Turn on your computer. If it's already on, reboot it.
2. When the BIOS screen is displayed, repeatedly press F8 until the Advanced Options menu shows up. If Windows starts instead, reboot your computer and try again.
3. In the Advanced Options menu, use the arrow keys to select Safe Mode with Networking and hit Enter. Windows will load Safe Mode with Networking.
4. Press and hold the Windows key and the R key; if done correctly, the Windows Run box will show up.
5. Type in explorer http://www.fixmypcfree.com/install/spyremoverpro (a single space must be between explorer and http) and click OK.
6. Internet Explorer will display a dialog box. Click Run to begin downloading SpyRemover Pro; installation starts automatically once the download is done.
7. Click OK to launch SpyRemover Pro, then run a full system scan.
8. After all the infections are identified, click REMOVE ALL.
9. Register SpyRemover Pro to protect your computer from future threats.
You already know how to iterate over arrays, but what about the Promise versions?
The iteration methods meant here are map, reduce, reduceRight, forEach, filter, some, and every. I recently had to do some data aggregation, and since the Node version was already 8.11.1, I wrote the script directly with async/await. But while iterating over arrays, I found that several of these methods don't react to Promises the way we want.
Granted, some of them, such as some and every, aren't strictly iteration methods, but they do invoke the supplied callback repeatedly based on the array's elements.
All of these methods are very common, but when your callback is a Promise, everything changes.
Preface
async/await is syntactic sugar for Promise; this article uses async/await in place of raw Promises throughout.
let result = await func()
// => equivalent to
func().then(result => {
// code here
})
// ======
async function func () {
return 1
}
// => equivalent to
function func () {
return new Promise(resolve => resolve(1))
}
map
map is arguably the friendliest of these functions toward Promises. As we all know, map takes two arguments:
1. A callback executed for each element; its return value becomes the element at the corresponding index of the resulting array
2. An optional argument that sets the this binding for the callback
[1, 2, 3].map(item => item ** 2) // square each element
// > [1, 4, 9]
That's an ordinary map call. But once the computation becomes asynchronous:
[1, 2, 3].map(async item => item ** 2) // square each element
// > [Promise, Promise, Promise]
Now the value we get back is actually an array of Promise objects.
This is why map is called the friendliest method: Promise provides Promise.all, which takes an array of Promises and returns a single Promise whose result is the array of all their resolved values.
await Promise.all([1, 2, 3].map(async item => item ** 2))
// > [1, 4, 9]
We first wrap the array with Promise.all, then use await to get the results.
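One caveat worth keeping in mind (a short sketch of my own, not from the original post): Promise.all rejects as soon as any of the mapped callbacks rejects, so if a single failure should not abort the whole batch, handle it around the await:
async function squareAll(items) {
  try {
    return await Promise.all(items.map(async item => {
      if (typeof item !== 'number') throw new TypeError(item + ' is not a number')
      return item ** 2
    }))
  } catch (err) {
    // the first rejection wins and the other results are discarded
    console.error('batch failed:', err.message)
    return []
  }
}
squareAll([1, 2, 'x']).then(console.log)
// > batch failed: x is not a number
// > []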
reduce/reduceRight
reduce's signature should be familiar to everyone; it takes two arguments:
1. A callback executed for each element, whose return value is carried into the next invocation. The callback's signature:
1. accumulator: the accumulated value
2. currentValue: the element currently being processed
3. array: the array reduce was called on
2. An optional initial value, used as the initial accumulator
[1, 2, 3].reduce((accumulator, item) => accumulator + item, 0) // sum the elements
// > 6
This code is fine as well. But again, if the summing operation becomes asynchronous:
[1, 2, 3].reduce(async (accumulator, item) => accumulator + item, 0) // sum the elements
// > Promise {<resolved>: "[object Promise]3"}
Now the return value gets weird. Look back at reduce's signature:
the callback's return value is carried into the next invocation
Then look at the code: async (accumulator, item) => accumulator + item. As mentioned at the start, async is syntactic sugar for Promise; to see it more clearly, we can write it as:
(accumulator, item) => new Promise(resolve =>
resolve(accumulator += item)
)
In other words, our reduce callback actually returns a Promise object, and we then apply + to that Promise object, so the strange return value is entirely reasonable.
Of course, adjusting reduce is easy too:
await [1, 2, 3].reduce(async (accumulator, item) => await accumulator + item, 0)
// > 6
We await the accumulator first and then add the current item. Since the final return value of reduce is also a Promise, we put an await on the outside as well. That is, every reduce step returns a new Promise object that internally retrieves the result of the previous Promise. What we actually get from calling reduce is a Promise object along these lines:
new Promise(resolve => {
let item = 3
new Promise(resolve => {
let item = 2
new Promise(resolve => {
let item = 1
Promise.resolve(0).then(result => resolve(item + result))
}).then(result => resolve(item + result))
}).then(result => resolve(item + result))
})
reduceRight
Not much to say here; it's just reduce with the iteration order reversed.
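A quick illustration (mine, not from the original post): the async-aware summation from above works unchanged with reduceRight, only the visiting order flips:
await [1, 2, 3].reduceRight(async (accumulator, item) => await accumulator + item, 0)
// > 6, with the items visited in the order 3, 2, 1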
forEach
forEach is probably the most heavily used iteration method. Its signature:
1. callback, the function invoked for every element
1. currentValue, the current element
2. index, the index of the current element
3. array, a reference to the array forEach was called on
2. thisArg, an optional this binding for the callback
Say we do the following:
// log the square of each element
[1, 2, 3].forEach(item => {
console.log(item ** 2)
})
// > 1
// > 4
// > 9
The plain version prints the values directly, but once a Promise is involved:
// log the square of each element (after waiting one tick)
[1, 2, 3].forEach(async item => {
  await new Promise(resolve => setTimeout(resolve))
  console.log(item ** 2)
})
console.log('done')
// > done
// > 1
// > 4
// > 9
forEach doesn't care about the callback's return value, so it just fires three Promise-returning functions and moves on without waiting for them, which is why 'done' is printed first. To get the behavior we actually want, we have to enhance the prototype ourselves:
Array.prototype.forEachSync = async function (callback, thisArg) {
for (let [index, item] of Object.entries(this)) {
await callback(item, index, this)
}
}
await [1, 2, 3].forEachSync(async item => {
console.log(item ** 2)
})
// > 1
// > 4
// > 9
await ignores non-Promise values: await 0 or await undefined behaves no differently from ordinary code.
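Note that forEachSync awaits each callback before starting the next one. When the iterations are independent, a parallel variant (a sketch of my own, not from the original post) can simply lean on map plus Promise.all:
Array.prototype.forEachParallel = async function (callback, thisArg) {
  // start every callback at once, then wait for all the returned Promises
  await Promise.all(this.map((item, index) => callback.call(thisArg, item, index, this)))
}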
filter
filter, the array-filtering function, iterates as well. Its signature is the same as forEach's, but only the elements whose callback returns true end up in filter's return value.
To select the odd numbers, we write:
[1, 2, 3].filter(item => item % 2 !== 0)
// > [1, 3]
Then we switch to the Promise version:
[1, 2, 3].filter(async item => item % 2 !== 0)
// > [1, 2, 3]
This breaks the filtering, because filter doesn't check for strict equality with true: any return value that coerces to true counts as passing. A Promise object is always truthy, so the filter lets everything through. The remedy is similar to forEach's, enhancing the prototype ourselves, but here we can take a shortcut:
Array.prototype.filterSync = async function (callback, thisArg) {
let filterResult = await Promise.all(this.map(callback))
// > [true, false, true]
return this.filter((_, index) => filterResult[index])
}
await [1, 2, 3].filterSync(item => item % 2 !== 0)
We can call map directly inside, since map collects every return value into a new array. That means map hands us the verdict, true or false, for each item; we then keep each element of the original array whose verdict at the corresponding index is true.
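For example, with an asynchronous predicate (the setTimeout below just stands in for a real async check, such as a network request):
const isOddAsync = async n => {
  await new Promise(resolve => setTimeout(resolve, 10)) // pretend to ask a server
  return n % 2 !== 0
}
await [1, 2, 3].filterSync(isOddAsync)
// > [1, 3]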
some
some exists to test whether an array satisfies some condition, and it iterates too. Its signature is the same as forEach's; the difference is that it returns true as soon as any callback's return value counts as true, and false if every callback's return value counts as false.
To check whether the array contains an element equal to 2:
[1, 2, 3].some(item => item === 2)
// > true
Then we switch it to a Promise:
[1, 2, 3].some(async item => item === 2)
// > true
The function still returns true, but not for the reason we want: the Promise object returned by the async callback is itself treated as true.
So we handle it like this:
Array.prototype.someSync = async function (callback, thisArg) {
for (let [index, item] of Object.entries(this)) {
if (await callback(item, index, this)) return true
}
return false
}
await [1, 2, 3].someSync(async item => item === 2)
// > true
Because some stops iterating as soon as it hits the first true, using forEach inside would waste work. Again taking advantage of the fact that await passes plain values straight through, we implement it with for-of.
every
And finally, every. Its signature is once more the same as forEach's, but the callback is handled a bit differently. Seen from another angle, every is an inverted some: some stops at the first true, while every stops at the first false and returns true only if every element passes.
To check whether all elements are greater than 3:
[1, 2, 3].every(item => item > 3)
// > false
Clearly none of them match, and the callback stops after its very first run rather than continuing. Switching to the Promise version:
[1, 2, 3].every(async item => item > 3)
// > true
This is inevitably true, because what we are testing is a Promise object. So we take the someSync implementation above and tweak it slightly:
Array.prototype.everySync = async function (callback, thisArg) {
for (let [index, item] of Object.entries(this)) {
if (!await callback(item, index, this)) return false
}
return true
}
await [1, 2, 3].everySync(async item => item === 2)
// > false
As soon as any callback's result is false, we return false and stop iterating.
Afterword
So much for these array iteration methods. Thanks to their design, map and reduce are the ones needing the fewest changes when going async; reduce's result looks very much like an onion model. For the other iteration functions, for now, you have to implement the behavior yourself.
The implementations of the four *Sync functions: https://github.com/Jiasm/notebook/tree/master/array-sync
Time limit: 2 seconds, Memory limit: 512 MB, Submissions: 38, Accepted: 5, Solvers: 4, Acceptance ratio: 36.364%
Problem
Boyan is playing a computer game. In the beginning there are N balls arranged in a line. Each ball has a number written on it, so that every two consecutive balls have different numbers. The game consists of the following steps:
1. The player removes a ball from the line.
2. While there are consecutive balls with equal numbers, they are automatically removed from the line.
3. If there are balls left in the line, go to step 1, otherwise the game ends.
The score is the number of balls that are automatically removed. The goal of the game is to maximize the score.
Let’s see an example of a game with 6 balls with numbers {1, 2, 3, 2, 1, 5}.
1. Boyan removes the ball with number 3. The balls left are {1, 2, 2, 1, 5}.
2. Removing the consecutive balls with equal numbers we have {1, 2, 2, 1, 5} -> {1, 1, 5} -> {5}. The ball left is {5}.
3. Since there are balls left, we go to step 1.
1. Boyan removes the ball with number 5. The balls left are {}.
2. There are no consecutive balls with equal numbers.
3. There are no balls left, so the game ends.
The number of balls that are automatically removed is 4, which is the maximum possible score for this game. Boyan plays a lot, but he is still not sure when he is playing optimally. Write a program to help him find the best score he can achieve.
Input
The first line contains the positive integer N.
The second line contains N positive integers ─ the numbers written on the balls.
Output
Print the maximum possible score Boyan can achieve.
Constraints
• 1 ≤ N ≤ 500
• 1 ≤ number written on a ball ≤ 1 000 000
Sample Input 1
6
1 2 3 2 1 5
Sample Output 1
4
Sample Input 2
9
1 5 1 3 2 4 2 3 1
Sample Output 2
6
Remove the 9th, 6th and 2nd ball.
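To make the cascade in step 2 concrete, here is a small JavaScript sketch (not part of the original statement) that removes the ball at a given index and then applies the automatic removals, returning how many balls were removed automatically:
function removeAndCascade(balls, index) {
  const line = balls.slice(0, index).concat(balls.slice(index + 1))
  let removed = 0
  for (let i = 0; i + 1 < line.length; ) {
    if (line[i] === line[i + 1]) {
      // strip the whole run of equal consecutive numbers
      let j = i + 1
      while (j < line.length && line[j] === line[i]) j++
      removed += j - i
      line.splice(i, j - i)
      i = 0 // newly adjacent balls may now be equal, so rescan
    } else {
      i++
    }
  }
  return removed
}
removeAndCascade([1, 2, 3, 2, 1, 5], 2) // removing the ball with number 3 cascades away 4 balls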
Source
Olympiad > International Tournament in Informatics > Shumen 2017 (Junior), Problems 1-2
Real-Time Communication (RTC) is Alibaba Cloud's real-time audio/video development platform with worldwide coverage. Built on core audio/video codec, channel transport, and network scheduling technology, it provides highly available, high-quality, ultra-low-latency audio and video communication services. This tutorial walks you through building an online multi-user audio/video chat room on Alibaba Cloud RTC.
Prerequisites
Before using this tutorial, make sure you have completed the following:
1. Registered an Alibaba Cloud account and completed real-name verification; for details, see 阿里云账号注册流程 (account registration guide).
2. Activated the RTC service; for details, see 开通服务 (activate the service).
3. Obtained an application ID; for details, see 创建应用 (create an application).
4. Obtained the AppKey; for details, see 查询AppKey (query the AppKey).
Background
The chat room in this tutorial consists of a server side and a client side. The server's main job is to generate and issue channel authentication tokens; holding a token, the client calls the Alibaba Cloud RTC SDK to obtain channel information, and it can also capture the local camera and microphone and publish the audio/video stream into the channel.
Step 1: Build the App Server
The server-side App Server is responsible for generating and issuing channel authentication tokens. A token is the digest obtained by concatenating the following fields in order and hashing the resulting string with SHA-256:
• AppID: the application ID, created in the console.
• AppKey: the application key, queried in the console.
• ChannelID: the channel ID, generated by the App Server.
• UserID: your unique identifier, generated by the App Server.
• Nonce: the token's random string, generated by the App Server.
• Timestamp: the token's expiry timestamp, generated by the App Server.
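For reference, the digest is easy to reproduce outside Java as well; the following is a minimal Node.js sketch of the same computation (the argument values are placeholders), matching the field order and lowercase-hex output of the Java code below:
const crypto = require('crypto')

function createToken(appId, appKey, channelId, userId, nonce, timestamp) {
  return crypto.createHash('sha256')
    .update(appId)
    .update(appKey)
    .update(channelId)
    .update(userId)
    .update(nonce)
    .update(String(timestamp)) // expiry in seconds, as in the Java server
    .digest('hex')             // lowercase hex digest
}

console.log(createToken('<appid>', '<appkey>', 'room1', 'user-0001', 'AK-nonce', 1700000000))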
1. Build the server code.
1. Add the following to the pom.xml file:
<dependencies>
<!-- lightweight HTTP server -->
<dependency>
<groupId>com.sun.net.httpserver</groupId>
<artifactId>http</artifactId>
<version>20070405</version>
<scope>test</scope>
</dependency>
<!-- command-line argument parser -->
<dependency>
<groupId>commons-cli</groupId>
<artifactId>commons-cli</artifactId>
<version>1.2</version>
</dependency>
<dependency>
<groupId>org.json</groupId>
<artifactId>json</artifactId>
<version>20170516</version>
</dependency>
</dependencies>
<build>
<plugins>
<!-- bundle dependencies into the jar -->
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.1.1</version>
<configuration>
<archive>
<manifest>
<mainClass>com.company.App</mainClass>
</manifest>
</archive>
<descriptorRefs>
<descriptorRef>jar-with-dependencies</descriptorRef>
</descriptorRefs>
</configuration>
<executions>
<!-- executor: mvn assembly:assembly -->
<execution>
<id>make-assembly</id>
<!-- bind to the package lifecycle phase -->
<phase>package</phase>
<goals>
<!-- run this packaging task only once -->
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
2. Create a new App.java file with the following content:
import com.sun.net.httpserver.Headers;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.PosixParser;
import org.json.JSONArray;
import org.json.JSONObject;
import javax.xml.bind.DatatypeConverter;
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.*;
public class App {
// listen port
private int listen;
// application ID
private String appID;
// application key
private String appKey;
// GSLB service address
private String gslb;
// token nonce
private String nonce;
// token timestamp
private Long timestamp;
// unique user ID
private String userID;
// channel join token
private String token;
// generate a token
public static String createToken(
String appId, String appKey, String channelId, String userId,
String nonce, Long timestamp
) throws NoSuchAlgorithmException {
MessageDigest digest = MessageDigest.getInstance("SHA-256");
digest.update(appId.getBytes());
digest.update(appKey.getBytes());
digest.update(channelId.getBytes());
digest.update(userId.getBytes());
digest.update(nonce.getBytes());
digest.update(Long.toString(timestamp).getBytes());
String token = DatatypeConverter.printHexBinary(digest.digest()).toLowerCase();
return token;
}
// generate a userID
public static String createUserID(String channelID, String user) throws NoSuchAlgorithmException {
MessageDigest digest = MessageDigest.getInstance("SHA-256");
digest.update(channelID.getBytes());
digest.update("/".getBytes());
digest.update(user.getBytes());
String uid = DatatypeConverter.printHexBinary(digest.digest()).toLowerCase();
return uid.substring(0, 16);
}
// helper for writing the HTTP response
private void httpWrite(HttpExchange he, int code, String response) throws IOException {
OutputStream os = he.getResponseBody();
he.sendResponseHeaders(code, response.length());
os.write(response.getBytes());
os.close();
}
// request handler
class LoginHandler implements HttpHandler {
public void handle(HttpExchange he) throws IOException {
if (he.getRequestHeaders().containsKey("Origin")) {
// set the response headers
Headers headers = he.getResponseHeaders();
headers.set("Access-Control-Allow-Origin", "*");
headers.set("Access-Control-Allow-Methods", "GET,POST,HEAD,PUT,DELETE,OPTIONS");
headers.set("Access-Control-Expose-Headers", "Server,Range,Content-Length,Content-Range");
headers.set("Access-Control-Allow-Headers", "Origin,Range,Accept-Encoding,Referer,Cache-Control,X-Proxy-Authorization,X-Requested-With,Content-Type");
}
if (he.getRequestMethod().equalsIgnoreCase("OPTIONS")) {
httpWrite(he, 200, "");
return;
}
// collect the query parameters into a map
Map<String, String> query = new HashMap<String, String>();
for (String param : he.getRequestURI().getQuery().split("&")) {
String[] entry = param.split("=");
if (entry.length > 1) {
query.put(entry[0], entry[1]);
} else {
query.put(entry[0], "");
}
}
// channel ID
String channelID = query.get("room");
// user ID
String user = query.get("user");
// reject missing or empty parameters (note: Java strings must be compared with equals()/isEmpty(), not ==)
if (channelID == null || channelID.isEmpty() || user == null || user.isEmpty()) {
httpWrite(he, 500, "invalid parameter");
return;
}
try {
userID = createUserID(channelID, user);
// token nonce; here we use "AK-" plus a UUID
nonce = String.format("AK-%s", UUID.randomUUID().toString());
Calendar nowTime = Calendar.getInstance();
// token expiry time: 48 hours
nowTime.add(Calendar.HOUR_OF_DAY, 48);
timestamp = nowTime.getTimeInMillis() / 1000;
token = createToken(appID, appKey, channelID, userID, nonce, timestamp);
} catch (NoSuchAlgorithmException e) {
e.printStackTrace();
httpWrite(he, 500, e.getMessage());
return;
}
// build a random username
String username = String.format("%s?appid=%s&channel=%s&nonce=%s×tamp=%d",
userID, appID, channelID, nonce, timestamp);
System.out.printf("Login: appID=%s, appKey=%s, channelID=%s, userID=%s, nonce=%s, " +
"timestamp=%d, user=%s, userName=%s, token=%s\n",
appID, appKey, channelID, userID, nonce, timestamp, user, username, token);
// assemble the response payload as JSON
JSONObject response = new JSONObject()
.put("code", 0)
.put("data", new JSONObject()
.put("appid", appID)
.put("userid", userID)
.put("gslb", new JSONArray().put(gslb))
.put("token", token)
.put("nonce", nonce)
.put("timestamp", timestamp)
.put("turn", new JSONObject()
.put("username", username)
.put("password", token)
));
he.getResponseHeaders().set("Content-Type", "application/json");
httpWrite(he, 200, response.toString());
}
}
// parse command-line arguments
public void run(String[] args) throws Exception {
Options options = new Options();
options.addOption(new Option("l", "listen", true, "listen port"));
options.addOption(new Option("a", "appid", true, "the id of app"));
options.addOption(new Option("k", "appkey", true, "the key of app"));
options.addOption(new Option("g", "gslb", true, "the url of gslb"));
CommandLine cli = new PosixParser().parse(options, args);
if (!cli.hasOption("listen")) {
throw new Exception("no listen");
}
if (!cli.hasOption("appid")) {
throw new Exception("no appid");
}
if (!cli.hasOption("appkey")) {
throw new Exception("no appkey");
}
if (!cli.hasOption("gslb")) {
throw new Exception("no gslb");
}
// listen port
listen = Integer.parseInt(cli.getOptionValue("listen"));
// application ID
appID = cli.getOptionValue("appid");
// application key
appKey = cli.getOptionValue("appkey");
// GSLB service address
gslb = cli.getOptionValue("gslb");
System.out.printf("Server listen=%d, appid=%s, appkey=%s, gslb=%s\n", listen, appID, appKey, gslb);
// create the HTTP server
HttpServer server = HttpServer.create(new InetSocketAddress(listen), 0);
server.createContext("/app/v1/login", new LoginHandler());
server.start();
}
public static void main(String[] args) {
try {
new App().run(args);
} catch (Exception e) {
System.out.println(e);
}
}
}
2. Package the project. From the project root, run:
mvn package
3. Run the project. From the target directory, run:
java -jar .\maven-rtc-1.0-SNAPSHOT-jar-with-dependencies.jar --listen=8080 --appid=<appid> --appkey=<appkey> --gslb=https://rgslb.rtc.aliyuncs.com
Note: If you need to set up a local App Server and token verification service, see 搭建验证服务器 (build a verification server).
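As a quick sanity check (a sketch of my own; the host, room, and user values are illustrative), you can call the login endpoint and inspect the issued token, for example with Node 18+'s built-in fetch:
// the demo client fronts this with HTTPS; the raw Java HttpServer itself speaks plain HTTP
const res = await fetch('http://127.0.0.1:8080/app/v1/login?room=1900&user=alice&passwd=1234', {
  method: 'POST'
})
const body = await res.json()
console.log(body.code)       // 0 on success
console.log(body.data.token) // the SHA-256 token issued by the server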
Step 2: Run the Client
The web client implements the basic Alibaba Cloud RTC features, including local preview, joining a channel, publishing the local stream, subscribing to remote streams, and leaving the channel. Set up the web client as follows:
1. Create the client code. Create a new index.html with the following content:
<!DOCTYPE html>
<html>
<head>
<title>AliWebRTC Demo</title>
<meta charset="UTF-8">
<meta name="viewport"
content="width=device-width, height=device-height, initial-scale=1, maximum-scale=1, minimum-scale=1, user-scalable=no" />
<link rel="stylesheet" href="./index.css" />
<script src="./jquery-1.10.2.min.js"></script>
<script src="./aliyun-webrtc-sdk-1.9.1.min.js"></script>
</head>
<body>
<div class='local-display-name'></div>
<div class='channel-input'>
<input type='text'></input>
<button>Switch Channel</button>
</div>
<div class='local-video'>
<video autoplay playsinline></video>
</div>
<div class="video-container"></div>
</body>
</html>
<script>
// 必须使用https
var AppServerUrl="https://127.0.0.1:8080/app/v1/login";
var getQueryString = function (name) {
var vars = [], hash;
var hashes = window.location.href.slice(window.location.href.indexOf('?') + 1).split('&');
for (var i = 0; i < hashes.length; i++) {
hash = hashes[i].split('=');
vars.push(hash[0]);
vars[hash[0]] = hash[1];
}
return vars[name];
}
var channelId = getQueryString('channel') || 1900
userName = Math.random().toString(36).replace(/[^a-z]+/g, '').substr(0, 5);
$('.local-display-name').text("User: " + userName + " Channel Id: " + channelId);
$('.channel-input input').val(channelId);
//AliWebRTC code
AliRtcEngine.isSupport().then(re => {
console.log(re);
init();
}).catch(err => {
alert(err.message);
})
var aliWebrtc;
function init() {
aliWebrtc = new AliRtcEngine("");
// a remote user joined the room
aliWebrtc.on('onJoin', (data) => {
});
// a remote stream was published
aliWebrtc.on('onPublisher', (publisher) => {
receivePublish(publisher);
});
// a remote stream was unpublished
aliWebrtc.on('onUnPublisher', (publisher) => {
removePublish(publisher.userId);
});
// error information
aliWebrtc.on('onError', (error) => {
var msg = error && error.message ? error.message : error;
alert(msg);
});
// after a successful subscription, display the remote stream
aliWebrtc.on('onMediaStream', (subscriber, stream) => {
var video = getDisplayRemoteVideo(subscriber.userId, subscriber.displayName);
aliWebrtc.setDisplayRemoteVideo(subscriber, video, stream);
});
aliWebrtc.on('OnConnecting', (data) => {
console.log(data.displayName + " is connecting...");
});
aliWebrtc.on('OnConnected', (data) => {
console.log(data.displayName + " connected");
});
aliWebrtc.on('onLeave', (data) => {
removePublish(data.userId);
})
// 1. start local preview
var localVideo = $('.local-video video');
aliWebrtc.startPreview(localVideo[0]).then((obj) => {
// 2. fetch the channel auth token parameters
getRTCAuthInfo().then((authInfo) => {
// 3. join the room
aliWebrtc.joinChannel(authInfo, userName).then(() => {
console.log('joined the room');
// 4. publish the local stream
aliWebrtc.publish().then((res) => {
console.log('local stream published');
}, (error) => {
alert(error.message);
});
}).catch((error) => {
alert(error.message);
})
}).catch((error) => {
alert(error.message);
});
}).catch((error) => {
alert(error.message);
});
}
var receivePublish = (publisher) => {
// 5. subscribe to the remote stream
aliWebrtc.subscribe(publisher.userId).then((subscribeCallId) => {
console.log('subscribed');
}, (error) => {
alert(error.message);
});
};
var removePublish = (userId) => {
var id = userId;
var videoWrapper = $('#' + id);
videoWrapper ? videoWrapper.remove() : '';
}
var getDisplayRemoteVideo = function (userId, displayName) {
var id = userId;
var videoWrapper = $('#' + id);
if (videoWrapper.length == 0) {
videoWrapper = $('<div class="remote-subscriber" id=' + id + '> <video autoplay playsinline></video><div class="display-name"></div></div>');
$('.video-container').append(videoWrapper);
}
videoWrapper.find('.display-name').text(displayName);
return videoWrapper.find('video')[0];
}
// fetch the channel auth token
var getRTCAuthInfo = () => {
return new Promise(function (resolve, reject) {
$.ajax({
url: AppServerUrl+"?room=" + channelId + "&user=" + userName + "&passwd=1234",
type: 'POST',
contentType: 'application/json; charset=utf-8',
dataType: 'json',
success: (data) => {
data.data.channel = channelId;
resolve(data.data);
},
failed: (error) => {
reject(error);
}
});
});
}
$('.channel-input button').click(() => {
var value = $('.channel-input input').val();
if (!value) {
return;
}
//aliWebrtc.leaveChannel();
location.href = './index.html?channel=' + value;
});
window.onbeforeunload = function (e) {
//aliWebrtc.leaveChannel();
};
</script>
Note: For the complete client code, see 客户端代码 (client code).
2. Deploy the web service. Place the client code under the Apache httpd server's wwwroot directory, configure an HTTPS certificate, and start the server.
3. Open the client. Use a browser that supports WebRTC, such as Microsoft Edge, to visit https://ip:port/index.html.
1. When the demo runs successfully, you land on the home page, join the default room, and local preview is already on.
[Figure: Web local preview]
2. If other users are in the channel, real-time audio/video communication can begin.
[Figure: Web communication]
Did personalized formatting; example cleanup; added some client commands
This commit is contained in:
itdominator 2023-11-11 23:38:53 -06:00
parent fc6e645261
commit 248771d57e
5 changed files with 350 additions and 247 deletions
pylspclient/__init__.py
@@ -1,8 +1,12 @@
#from __future__ import absolute_import
__all__ = []
from pylspclient.json_rpc_endpoint import JsonRpcEndpoint
from pylspclient.lsp_client import LspClient
from pylspclient.lsp_endpoint import LspEndpoint
from pylspclient import lsp_structs
# Python imports
# Lib imports
# Application imports
from .json_rpc_endpoint import JsonRpcEndpoint
from .lsp_client import LspClient
from .lsp_endpoint import LspEndpoint
from . import lsp_structs
pylspclient/json_rpc_endpoint.py
@@ -1,17 +1,25 @@
# Python imports
from __future__ import print_function
import json
import re
from pylspclient import lsp_structs
import threading
JSON_RPC_REQ_FORMAT = "Content-Length: {json_string_len}\r\n\r\n{json_string}"
LEN_HEADER = "Content-Length: "
TYPE_HEADER = "Content-Type: "
# Lib imports
# Application imports
from . import lsp_structs
JSON_RPC_REQ_FORMAT = "Content-Length: {json_string_len}\r\n\r\n{json_string}"
LEN_HEADER = "Content-Length: "
TYPE_HEADER = "Content-Type: "
# TODO: add content-type
class MyEncoder(json.JSONEncoder):
"""
Encodes an object in JSON
@@ -26,10 +34,10 @@
protocol. More information can be found: https://www.jsonrpc.org/
'''
def __init__(self, stdin, stdout):
self.stdin = stdin
self.stdout = stdout
self.read_lock = threading.Lock()
self.write_lock = threading.Lock()
self.stdin = stdin
self.stdout = stdout
self.read_lock = threading.Lock()
self.write_lock = threading.Lock()
@staticmethod
def __add_header(json_string):
@@ -39,7 +47,7 @@
:param str json_string: The string
:return: the string with the header
'''
return JSON_RPC_REQ_FORMAT.format(json_string_len=len(json_string), json_string=json_string)
return JSON_RPC_REQ_FORMAT.format(json_string_len = len(json_string), json_string = json_string)
def send_request(self, message):
@@ -48,7 +56,7 @@
:param dict message: The message to send.
'''
json_string = json.dumps(message, cls=MyEncoder)
json_string = json.dumps(message, cls = MyEncoder)
jsonrpc_req = self.__add_header(json_string)
with self.write_lock:
self.stdin.write(jsonrpc_req.encode())
@@ -64,18 +72,16 @@
with self.read_lock:
message_size = None
while True:
#read header
line = self.stdout.readline()
if not line:
# server quit
return None
line = line.decode("utf-8")
if not line.endswith("\r\n"):
raise lsp_structs.ResponseError(lsp_structs.ErrorCodes.ParseError, "Bad header: missing newline")
#remove the "\r\n"
line = line[:-2]
line = line[:-2] #remove the "\r\n"
if line == "":
# done with the headers
break
elif line.startswith(LEN_HEADER):
line = line[len(LEN_HEADER):]
@@ -83,12 +89,12 @@
raise lsp_structs.ResponseError(lsp_structs.ErrorCodes.ParseError, "Bad header: size is not int")
message_size = int(line)
elif line.startswith(TYPE_HEADER):
# nothing todo with type for now.
pass
...
else:
raise lsp_structs.ResponseError(lsp_structs.ErrorCodes.ParseError, "Bad header: unkown header")
if not message_size:
raise lsp_structs.ResponseError(lsp_structs.ErrorCodes.ParseError, "Bad header: missing size")
jsonrpc_res = self.stdout.read(message_size).decode("utf-8")
return json.loads(jsonrpc_res)
return json.loads(jsonrpc_res)
pylspclient/lsp_client.py
@@ -1,4 +1,11 @@
from pylspclient import lsp_structs
# Python imports
# Lib imports
# Application imports
from . import lsp_structs
class LspClient(object):
def __init__(self, lsp_endpoint):
@@ -7,20 +14,20 @@
:param lsp_endpoint: TODO
"""
self.lsp_endpoint = lsp_endpoint
self.lsp_endpoint = lsp_endpoint
def initialize(self, processId, rootPath, rootUri, initializationOptions, capabilities, trace, workspaceFolders):
"""
The initialize request is sent as the first request from the client to the server. If the server receives a request or notification
The initialize request is sent as the first request from the client to the server. If the server receives a request or notification
before the initialize request it should act as follows:
1. For a request the response should be an error with code: -32002. The message can be picked by the server.
2. Notifications should be dropped, except for the exit notification. This will allow the exit of a server without an initialize request.
Until the server has responded to the initialize request with an InitializeResult, the client must not send any additional requests or
notifications to the server. In addition the server is not allowed to send any requests or notifications to the client until it has responded
with an InitializeResult, with the exception that during the initialize request the server is allowed to send the notifications window/showMessage,
Until the server has responded to the initialize request with an InitializeResult, the client must not send any additional requests or
notifications to the server. In addition the server is not allowed to send any requests or notifications to the client until it has responded
with an InitializeResult, with the exception that during the initialize request the server is allowed to send the notifications window/showMessage,
window/logMessage and telemetry/event as well as the window/showMessageRequest request to the client.
The initialize request may only be sent once.
@@ -36,9 +43,17 @@
:param list workspaceFolders: The workspace folders configured in the client when the server starts. This property is only available if the client supports workspace folders.
It can be `null` if the client supports workspace folders but none are configured.
"""
self.lsp_endpoint.start()
return self.lsp_endpoint.call_method("initialize", processId=processId, rootPath=rootPath, rootUri=rootUri, initializationOptions=initializationOptions, capabilities=capabilities, trace=trace, workspaceFolders=workspaceFolders)
self.lsp_endpoint.start()
return self.lsp_endpoint.call_method("initialize",
processId = processId,
rootPath = rootPath,
rootUri = rootUri,
initializationOptions = initializationOptions,
capabilities = capabilities,
trace = trace,
workspaceFolders = workspaceFolders
)
def initialized(self):
"""
@@ -46,150 +61,197 @@
but before the client is sending any other request or notification to the server. The server can use the initialized notification
for example to dynamically register capabilities. The initialized notification may only be sent once.
"""
self.lsp_endpoint.send_notification("initialized")
self.lsp_endpoint.send_notification("initialized")
def shutdown(self):
"""
The initialized notification is sent from the client to the server after the client received the result of the initialize request
but before the client is sending any other request or notification to the server. The server can use the initialized notification
for example to dynamically register capabilities. The initialized notification may only be sent once.
"""
self.lsp_endpoint.stop()
return self.lsp_endpoint.call_method("shutdown")
def exit(self):
"""
The initialized notification is sent from the client to the server after the client received the result of the initialize request
but before the client is sending any other request or notification to the server. The server can use the initialized notification
for example to dynamically register capabilities. The initialized notification may only be sent once.
"""
self.lsp_endpoint.send_notification("exit")
self.lsp_endpoint.stop()
def didOpen(self, textDocument):
"""
The document open notification is sent from the client to the server to signal newly opened text documents. The document's truth is
now managed by the client and the server must not try to read the document's truth using the document's uri. Open in this sense
now managed by the client and the server must not try to read the document's truth using the document's uri. Open in this sense
means it is managed by the client. It doesn't necessarily mean that its content is presented in an editor. An open notification must
not be sent more than once without a corresponding close notification send before. This means open and close notification must be
balanced and the max open count for a particular textDocument is one. Note that a server's ability to fulfill requests is independent
not be sent more than once without a corresponding close notification send before. This means open and close notification must be
balanced and the max open count for a particular textDocument is one. Note that a server's ability to fulfill requests is independent
of whether a text document is open or closed.
The DidOpenTextDocumentParams contain the language id the document is associated with. If the language Id of a document changes, the
client needs to send a textDocument/didClose to the server followed by a textDocument/didOpen with the new language id if the server
The DidOpenTextDocumentParams contain the language id the document is associated with. If the language Id of a document changes, the
client needs to send a textDocument/didClose to the server followed by a textDocument/didOpen with the new language id if the server
handles the new language id as well.
:param TextDocumentItem textDocument: The document that was opened.
"""
return self.lsp_endpoint.send_notification("textDocument/didOpen", textDocument=textDocument)
return self.lsp_endpoint.send_notification("textDocument/didOpen", textDocument = textDocument)
def didSave(self, textDocument):
"""
:param TextDocumentIdentifier textDocument: The document that was saved.
"""
return self.lsp_endpoint.send_notification("textDocument/didSave", textDocument = textDocument)
def didClose(self, textDocument):
"""
:param TextDocumentIdentifier textDocument: The document that was closed.
"""
return self.lsp_endpoint.send_notification("textDocument/didClose", textDocument = textDocument)
def didChange(self, textDocument, contentChanges):
"""
The document change notification is sent from the client to the server to signal changes to a text document.
The document change notification is sent from the client to the server to signal changes to a text document.
In 2.0 the shape of the params has changed to include proper version numbers and language ids.
:param VersionedTextDocumentIdentifier textDocument: The initial trace setting. If omitted trace is disabled ('off').
:param TextDocumentContentChangeEvent[] contentChanges: The actual content changes. The content changes describe single state changes
to the document. So if there are two content changes c1 and c2 for a document in state S then c1 move the document
to the document. So if there are two content changes c1 and c2 for a document in state S then c1 move the document
to S' and c2 to S''.
"""
return self.lsp_endpoint.send_notification("textDocument/didChange", textDocument=textDocument, contentChanges=contentChanges)
return self.lsp_endpoint.send_notification("textDocument/didChange", textDocument = textDocument, contentChanges = contentChanges)
def documentSymbol(self, textDocument):
"""
The document symbol request is sent from the client to the server to return a flat list of all symbols found in a given text document.
Neither the symbol's location range nor the symbol's container name should be used to infer a hierarchy.
The document symbol request is sent from the client to the server to
return a flat list of all symbols found in a given text document.
Neither the symbol's location range nor the symbol's container name
should be used to infer a hierarchy.
:param TextDocumentItem textDocument: The text document.
"""
result_dict = self.lsp_endpoint.call_method("textDocument/documentSymbol", textDocument=textDocument)
result_dict = self.lsp_endpoint.call_method( "textDocument/documentSymbol", textDocument=textDocument )
if not result_dict: return []
return [lsp_structs.SymbolInformation(**sym) for sym in result_dict]
def definition(self, textDocument, position):
def declaration(self, textDocument, position):
"""
The goto definition request is sent from the client to the server to resolve the definition location of a symbol at a given text document position.
The go to declaration request is sent from the client to the server to
resolve the declaration location of a symbol at a given text document
position.
The result type LocationLink[] got introduce with version 3.14.0 and
depends in the corresponding client capability
`clientCapabilities.textDocument.declaration.linkSupport`.
:param TextDocumentItem textDocument: The text document.
:param Position position: The position inside the text document.
"""
result_dict = self.lsp_endpoint.call_method("textDocument/definition", textDocument=textDocument, position=position)
return [lsp_structs.Location(**l) for l in result_dict]
result_dict = self.lsp_endpoint.call_method("textDocument/declaration",
textDocument = textDocument,
position = position
)
if not result_dict: return []
if "uri" in result_dict:
return lsp_structs.Location(**result_dict)
return [lsp_structs.Location(**loc) if "uri" in loc else lsp_structs.LinkLocation(**loc) for loc in result_dict]
def definition(self, textDocument, position):
"""
The goto definition request is sent from the client to the server to
resolve the definition location of a symbol at a given text document
position.
:param TextDocumentItem textDocument: The text document.
:param Position position: The position inside the text document.
"""
result_dict = self.lsp_endpoint.call_method("textDocument/definition",
textDocument = textDocument,
position = position
)
if not result_dict: return []
return [lsp_structs.Location(**loc) for loc in result_dict]
def typeDefinition(self, textDocument, position):
"""
The goto type definition request is sent from the client to the server to resolve the type definition location of a symbol at a given text document position.
The goto type definition request is sent from the client to the server
to resolve the type definition location of a symbol at a given text
document position.
:param TextDocumentItem textDocument: The text document.
:param Position position: The position inside the text document.
"""
result_dict = self.lsp_endpoint.call_method("textDocument/definition", textDocument=textDocument, position=position)
return [lsp_structs.Location(**l) for l in result_dict]
result_dict = self.lsp_endpoint.call_method("textDocument/typeDefinition",
textDocument = textDocument,
position = position
)
if not result_dict: return []
return [lsp_structs.Location(**loc) for loc in result_dict]
def signatureHelp(self, textDocument, position):
"""
The signature help request is sent from the client to the server to request signature information at a given cursor position.
"""
The signature help request is sent from the client to the server to
request signature information at a given cursor position.
:param TextDocumentItem textDocument: The text document.
:param Position position: The position inside the text document.
"""
result_dict = self.lsp_endpoint.call_method("textDocument/signatureHelp", textDocument=textDocument, position=position)
return lsp_structs.SignatureHelp(**result_dict)
:param TextDocumentItem textDocument: The text document.
:param Position position: The position inside the text document.
"""
result_dict = self.lsp_endpoint.call_method( "textDocument/signatureHelp",
textDocument = textDocument,
position = position
)
if not result_dict: return []
return lsp_structs.SignatureHelp(**result_dict)
def completion(self, textDocument, position, context):
"""
The signature help request is sent from the client to the server to request signature information at a given cursor position.
"""
The signature help request is sent from the client to the server to
request signature information at a given cursor position.
:param TextDocumentItem textDocument: The text document.
:param Position position: The position inside the text document.
:param CompletionContext context: The completion context. This is only available if the client specifies
to send this using `ClientCapabilities.textDocument.completion.contextSupport === true`
"""
result_dict = self.lsp_endpoint.call_method("textDocument/completion", textDocument=textDocument, position=position, context=context)
if "isIncomplete" in result_dict:
return lsp_structs.CompletionList(**result_dict)
return [lsp_structs.CompletionItem(**l) for l in result_dict]
def declaration(self, textDocument, position):
"""
The go to declaration request is sent from the client to the server to resolve the declaration location of a
symbol at a given text document position.
:param TextDocumentItem textDocument: The text document.
:param Position position: The position inside the text document.
:param CompletionContext context: The completion context. This is only
available if the client specifies
to send this using `ClientCapabilities.textDocument.completion.contextSupport === true`
"""
The result type LocationLink[] got introduce with version 3.14.0 and depends in the corresponding client
capability `clientCapabilities.textDocument.declaration.linkSupport`.
result_dict = self.lsp_endpoint.call_method("textDocument/completion",
textDocument = textDocument,
position = position,
context = context
)
if not result_dict: return []
:param TextDocumentItem textDocument: The text document.
:param Position position: The position inside the text document.
"""
result_dict = self.lsp_endpoint.call_method("textDocument/declaration", textDocument=textDocument, position=position)
if "uri" in result_dict:
return lsp_structs.Location(**result_dict)
if "isIncomplete" in result_dict:
return lsp_structs.CompletionList(**result_dict)
return [lsp_structs.Location(**l) if "uri" in l else lsp_structs.LinkLocation(**l) for l in result_dict]
return [lsp_structs.CompletionItem(**loc) for loc in result_dict]
def definition(self, textDocument, position):
"""
The go to definition request is sent from the client to the server to resolve the declaration location of a
symbol at a given text document position.
def references(self, textDocument, position):
"""
The references request is sent from the client to the server to resolve
project-wide references for the symbol denoted by the given text
document position.
The result type LocationLink[] got introduce with version 3.14.0 and depends in the corresponding client
capability `clientCapabilities.textDocument.declaration.linkSupport`.
:param TextDocumentItem textDocument: The text document.
:param Position position: The position inside the text document.
"""
:param TextDocumentItem textDocument: The text document.
:param Position position: The position inside the text document.
"""
result_dict = self.lsp_endpoint.call_method("textDocument/definition", textDocument=textDocument, position=position)
if "uri" in result_dict:
return lsp_structs.Location(**result_dict)
result_dict = self.lsp_endpoint.call_method("textDocument/references",
textDocument = textDocument,
position = position)
return [lsp_structs.Location(**l) if "uri" in l else lsp_structs.LinkLocation(**l) for l in result_dict]
if not result_dict: return []
return [lsp_structs.Location(**loc) for loc in result_dict]
pylspclient/lsp_endpoint.py
@@ -1,20 +1,27 @@
# Python imports
from __future__ import print_function
import threading
import collections
from pylspclient import lsp_structs
# Lib imports
# Application imports
from . import lsp_structs
class LspEndpoint(threading.Thread):
def __init__(self, json_rpc_endpoint, method_callbacks={}, notify_callbacks={}, timeout=2):
threading.Thread.__init__(self)
self.json_rpc_endpoint = json_rpc_endpoint
self.notify_callbacks = notify_callbacks
self.method_callbacks = method_callbacks
self.event_dict = {}
self.response_dict = {}
self.next_id = 0
self._timeout = timeout
self.shutdown_flag = False
self.notify_callbacks = notify_callbacks
self.method_callbacks = method_callbacks
self.event_dict = {}
self.response_dict = {}
self.next_id = 0
self._timeout = timeout
self.shutdown_flag = False
def handle_result(self, rpc_id, result, error):
@@ -34,25 +41,23 @@
try:
jsonrpc_message = self.json_rpc_endpoint.recv_response()
if jsonrpc_message is None:
print("server quit")
print("Server Quit...")
break
method = jsonrpc_message.get("method")
result = jsonrpc_message.get("result")
error = jsonrpc_message.get("error")
error = jsonrpc_message.get("error")
rpc_id = jsonrpc_message.get("id")
params = jsonrpc_message.get("params")
if method:
if rpc_id:
# a call for method
if method not in self.method_callbacks:
raise lsp_structs.ResponseError(lsp_structs.ErrorCodes.MethodNotFound, "Method not found: {method}".format(method=method))
result = self.method_callbacks[method](params)
self.send_response(rpc_id, result, None)
else:
# a call for notify
if method not in self.notify_callbacks:
# Have nothing to do with this.
print("Notify method not found: {method}.".format(method=method))
else:
self.notify_callbacks[method](params)
@@ -63,13 +68,13 @@
def send_response(self, id, result, error):
message_dict = {}
message_dict = {}
message_dict["jsonrpc"] = "2.0"
message_dict["id"] = id
message_dict["id"] = id
if result:
message_dict["result"] = result
if error:
message_dict["error"] = error
message_dict["error"] = error
self.json_rpc_endpoint.send_request(message_dict)
@@ -77,9 +82,9 @@
message_dict = {}
message_dict["jsonrpc"] = "2.0"
if id is not None:
message_dict["id"] = id
message_dict["method"] = method_name
message_dict["params"] = params
message_dict["id"] = id
message_dict["method"] = method_name
message_dict["params"] = params
self.json_rpc_endpoint.send_request(message_dict)
@@ -101,9 +106,11 @@
self.event_dict.pop(current_id)
result, error = self.response_dict.pop(current_id)
if error:
raise lsp_structs.ResponseError(error.get("code"), error.get("message"), error.get("data"))
raise lsp_structs.ResponseError(error.get("code"),
error.get("message"),
error.get("data"))
return result
def send_notification(self, method_name, **kwargs):
self.send_message(method_name, kwargs)
self.send_message(method_name, kwargs)
pylspclient/lsp_structs.py
@@ -1,17 +1,22 @@
# Python imports
import enum

# Lib imports

# Application imports


def to_type(o, new_type):
    '''
    Helper function that receives an object or a dict and converts it to a
    new given type.

    :param object|dict o: The object to convert
    :param Type new_type: The type to convert to.
    '''
    return o if new_type == type(o) else new_type(**o)
class Position(object):
    # ... (elided)
        Constructs a new Position instance.

        :param int line: Line position in a document (zero-based).
        :param int character: Character offset on a line in a document
                              (zero-based).
        """
        self.line = line
        self.character = character


class Range(object):
    # ... (elided)
        :param Position end: The range's end position.
        """
        self.start = to_type(start, Position)
        self.end = to_type(end, Position)
class Location(object):
    """
    Represents a location inside a resource, such as a line inside a text file.
    """

    def __init__(self, uri, range):
        """
        Constructs a new Location instance.

        :param str uri: Resource file.
        :param Range range: The range inside the file
        """
        self.uri = uri
        self.range = to_type(range, Range)
class LocationLink(object):
    """
    Represents a link between a source and a target location.
    """

    def __init__(self, originSelectionRange, targetUri, targetRange, targetSelectionRange):
        """
        Constructs a new LocationLink instance.

        :param Range originSelectionRange: Span of the origin of this link.
            Used as the underlined span for mouse interaction. Defaults to the word range at the mouse position.
        :param str targetUri: The target resource identifier of this link.
        :param Range targetRange: The full target range of this link. If the target for example is a symbol then target
            range is the range enclosing this symbol not including leading/trailing whitespace but everything else
            like comments. This information is typically used to highlight the range in the editor.
        :param Range targetSelectionRange: The range that should be selected and revealed when this link is being followed,
            e.g. the name of a function. Must be contained by the `targetRange`. See also `DocumentSymbol#range`
        """
        self.originSelectionRange = to_type(originSelectionRange, Range)
        self.targetUri = targetUri
        self.targetRange = to_type(targetRange, Range)
        self.targetSelectionRange = to_type(targetSelectionRange, Range)
class Diagnostic(object):
    def __init__(self, range, severity, code, source, message, relatedInformation):
        """
    # ... (elided)
        :param str source: A human-readable string describing the source of this
            diagnostic, e.g. 'typescript' or 'super lint'.
        :param str message: The diagnostic's message.
        :param list relatedInformation: An array of related diagnostic information, e.g. when symbol-names within
            a scope collide all definitions can be marked via this property.
        """
        self.range = range
        self.severity = severity
        self.code = code
        self.source = source
        self.message = message
        self.relatedInformation = relatedInformation
class DiagnosticRelatedInformation(object):
    # ... (elided)
        :param str message: The message of this related diagnostic information.
        """
        self.location = location
        self.message = message
class Command(object):
    def __init__(self, title, command, arguments):
        """
    # ... (elided)
        :param str command: The identifier of the actual command handler.
        :param list arguments: Arguments that the command handler should be invoked with.
        """
        self.title = title
        self.command = command
        self.arguments = arguments
class TextDocumentItem(object):
    # ... (elided)
    def __init__(self, uri, languageId, version, text):
        """
        Constructs a new TextDocumentItem instance.

        :param DocumentUri uri: The text document's URI.
        :param str languageId: The text document's language identifier.
        :param int version: The version number of this document.
        :param str text: The content of the opened text document.
        """
        self.uri = uri
        self.languageId = languageId
        self.version = version
        self.text = text
class TextDocumentIdentifier(object):
    """
    Text documents are identified using a URI. On the protocol level, URIs are passed as strings.
    """

    def __init__(self, uri):
        """
        Constructs a new TextDocumentIdentifier instance.

        :param DocumentUri uri: The text document's URI.
        """
        self.uri = uri
class VersionedTextDocumentIdentifier(TextDocumentIdentifier):
    # ... (elided)
    def __init__(self, uri, version):
        """
        Constructs a new VersionedTextDocumentIdentifier instance.

        :param DocumentUri uri: The text document's URI.
        :param int version: The version number of this document. If a versioned
            text document identifier is sent from the server to the client and
            the file is not open in the editor (the server has not received an
            open notification before) the server can send `null` to indicate
            that the version is known and the content on disk is the truth (as
            specified with document content ownership).
            The version number of a document will increase after each change, including
            undo/redo. The number doesn't need to be consecutive.
class TextDocumentContentChangeEvent(object):
    # ... (elided)
        :param int rangeLength: The length of the range that got replaced.
        :param str text: The new text of the range/document.
        """
        self.range = range
        self.rangeLength = rangeLength
        self.text = text
class TextDocumentPositionParams(object):
    # ... (elided)
    def __init__(self, textDocument, position):
        """
        Constructs a new TextDocumentPositionParams instance.

        :param TextDocumentIdentifier textDocument: The text document.
        :param Position position: The position inside the text document.
        """
        self.textDocument = textDocument
        self.position = position
class LANGUAGE_IDENTIFIER(object):
    BAT = "bat"
    BIBTEX = "bibtex"
    CLOJURE = "clojure"
    COFFESCRIPT = "coffeescript"
    C = "c"
    CPP = "cpp"
    CSHARP = "csharp"
    CSS = "css"
    DIFF = "diff"
    DOCKERFILE = "dockerfile"
    FSHARP = "fsharp"
    GIT_COMMIT = "git-commit"
    GIT_REBASE = "git-rebase"
    GO = "go"
    GROOVY = "groovy"
    HANDLEBARS = "handlebars"
    HTML = "html"
    INI = "ini"
    JAVA = "java"
    JAVASCRIPT = "javascript"
    JSON = "json"
    LATEX = "latex"
    LESS = "less"
    LUA = "lua"
    MAKEFILE = "makefile"
    MARKDOWN = "markdown"
    OBJECTIVE_C = "objective-c"
    OBJECTIVE_CPP = "objective-cpp"
    Perl = "perl"
    PHP = "php"
    POWERSHELL = "powershell"
    PUG = "jade"
    PYTHON = "python"
    R = "r"
    RAZOR = "razor"
    RUBY = "ruby"
    RUST = "rust"
    SASS = "sass"
    SCSS = "scss"
    ShaderLab = "shaderlab"
    SHELL_SCRIPT = "shellscript"
    SQL = "sql"
    SWIFT = "swift"
    TYPE_SCRIPT = "typescript"
    TEX = "tex"
    VB = "vb"
    XML = "xml"
    XSL = "xsl"
    YAML = "yaml"
class SymbolKind(enum.Enum):
    # ... (elided)

class SymbolInformation(object):
    """
    Represents information about programming constructs like variables, classes, interfaces etc.
    """

    def __init__(self, name, kind, location, containerName=None, deprecated=False):
        """
        Constructs a new SymbolInformation instance.
    # ... (elided)
        symbols.

        :param bool deprecated: Indicates if this symbol is deprecated.
        """
        self.name = name
        self.kind = SymbolKind(kind)
        self.deprecated = deprecated
        self.location = to_type(location, Location)
        self.containerName = containerName
class ParameterInformation(object):
    """
    Represents a parameter of a callable-signature. A parameter can
    have a label and a doc-comment.
    """

    def __init__(self, label, documentation=""):
        """
        Constructs a new ParameterInformation instance.

        :param str label: The label of this parameter. Will be shown in the UI.
        :param str documentation: The human-readable doc-comment of this parameter. Will be shown in the UI but can be omitted.
        """
        self.label = label
        self.documentation = documentation
class SignatureInformation(object):
    """
    Represents the signature of something callable. A signature
    can have a label, like a function-name, a doc-comment, and
    a set of parameters.
    """

    def __init__(self, label, documentation="", parameters=[]):
        """
        Constructs a new SignatureInformation instance.
    # ... (elided)
        :param str documentation: The human-readable doc-comment of this signature. Will be shown in the UI but can be omitted.
        :param ParameterInformation[] parameters: The parameters of this signature.
        """
        self.label = label
        self.documentation = documentation
        self.parameters = [to_type(parameter, ParameterInformation) for parameter in parameters]
class SignatureHelp(object):
    """
    Signature help represents the signature of something
    callable. There can be multiple signatures but only one
    active and only one active parameter.
    """

    def __init__(self, signatures, activeSignature=0, activeParameter=0):
        """
        Constructs a new SignatureHelp instance.
    # ... (elided)
        :param int activeSignature:
        :param int activeParameter:
        """
        self.signatures = [to_type(signature, SignatureInformation) for signature in signatures]
        self.activeSignature = activeSignature
        self.activeParameter = activeParameter
    # ... (elided)

class CompletionContext(object):
    """
    Contains additional information about the context in which a completion request is triggered.
    """

    def __init__(self, triggerKind, triggerCharacter=None):
        """
        Constructs a new CompletionContext instance.
    # ... (elided)

class TextEdit(object):
    """
    """

    def __init__(self, range, newText):
        """
        :param Range range: The range of the text document to be manipulated. To insert
            text into a document create a range where start === end.
        :param str newText: The string to be inserted. For delete operations use an empty string.
        """
        self.range = range
        self.newText = newText


class InsertTextFormat(object):
    PlainText = 1
    Snippet = 2
class CompletionItem(object):
    """
    """

    def __init__(self, label,
                 kind=None,
                 detail=None,
                 documentation=None,
                 deprecated=None,
                 preselect=None,
                 sortText=None,
                 filterText=None,
                 insertText=None,
                 insertTextFormat=None,
                 textEdit=None,
                 additionalTextEdits=None,
                 commitCharacters=None,
                 command=None,
                 data=None,
                 score=0.0):
        """
        :param str label: The label of this completion item. By default also the text that is inserted when selecting
            this completion.
        :param int kind: The kind of this completion item. Based on the kind an icon is chosen by the editor.
        :param str detail: A human-readable string with additional information about this item, like type or symbol information.
        :param str documentation: A human-readable string that represents a doc-comment.
        :param bool deprecated: Indicates if this item is deprecated.
        :param bool preselect: Select this item when showing. Note: that only one completion item can be selected and that the
            tool / client decides which item that is. The rule is that the first item of those that match best is selected.
        :param str sortText: A string that should be used when comparing this item with other items. When `falsy` the label is used.
        :param str filterText: A string that should be used when filtering a set of completion items. When `falsy` the label is used.
    # ... (elided)
        :param TextEdit textEdit: An edit which is applied to a document when selecting this completion. When an edit is provided the value of `insertText` is ignored.
            Note:* The range of the edit must be a single line range and it must contain the position at which completion
            has been requested.
        :param TextEdit additionalTextEdits: An optional array of additional text edits that are applied when selecting this completion.
            Edits must not overlap (including the same insert position) with the main edit nor with themselves.
            Additional text edits should be used to change text unrelated to the current cursor position
            (for example adding an import statement at the top of the file if the completion item will
    # ... (elided)
        self.detail = detail
        self.documentation = documentation
        self.deprecated = deprecated
        self.preselect = preselect
        self.sortText = sortText
        self.filterText = filterText
        self.insertText = insertText
class CompletionItemKind(enum.Enum):
    # ... (elided)

class CompletionList(object):
    """
    Represents a collection of [completion items](#CompletionItem) to be presented in the editor.
    """

    def __init__(self, isIncomplete, items):
        """
        Constructs a new CompletionList instance.

        :param bool isIncomplete: This list is not complete. Further typing should result in recomputing this list.
        :param CompletionItem items: The completion items.
        """
class ResponseError(Exception):
    # ... (elided)
        self.code = code
        self.message = message
        if data:
            self.data = data
Phil Goetz - 1 year ago
R Question
R: Why doesn't as.vector give a vector when given the output of read.table?
On the output of read.table, as.vector produces an m x 1 matrix rather than a length-m vector:
# data.txt contains one integer per line and nothing else
dataframe = read.table("data.txt", encoding='UTF-8', header=F)
v = as.vector(dataframe)
is.vector(v)
[1] FALSE
length(v)
[1] 1
dim(v)
[1] 19783 1
Answer
To summarise the above data types:
• Data frame: A tabular object where each column can be a different type. A data frame is really a list.
• Matrix: A tabular object where all values must have the same type.
• Vector: A one dimensional object; all values must have the same type.
Hence it doesn't (in general) make sense to convert from a data frame to a vector.
In your example, you can either
unlist(dataframe)
or convert to a matrix, then use as.vector
as.vector(data.matrix(dataframe))
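For instance, a minimal sketch of both routes (the V1 column name is what read.table assigns by default when header=F; the values here are made up):

df <- data.frame(V1 = c(3, 1, 4))    # stands in for the read.table output
v1 <- unlist(df)                     # named numeric vector of length 3
v2 <- as.vector(data.matrix(df))     # plain numeric vector of length 3
is.vector(v2)
[1] TRUE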
Highlighting
Lets you highlight search results on one or more fields. The implementation uses either the Lucene plain highlighter, the fast vector highlighter (fvh) or the postings highlighter. The following is an example of the search request body:
GET /_search
{
"query" : {
"match": { "content": "kimchy" }
},
"highlight" : {
"fields" : {
"content" : {}
}
}
}
In the above case, the content field will be highlighted for each search hit (there will be another element in each search hit, called highlight, which includes the highlighted fields and the highlighted fragments).
In order to perform highlighting, the actual content of the field is required. If the field in question is stored (has store set to true in the mapping) it will be used, otherwise, the actual _source will be loaded and the relevant field will be extracted from it.
The _all field cannot be extracted from _source, so it can only be used for highlighting if it is mapped to have store set to true.
The field name supports wildcard notation. For example, using comment_* will cause all text and keyword fields (and string from versions before 5.0) that match the expression to be highlighted. Note that all other fields will not be highlighted. If you use a custom mapper and want to highlight on a field anyway, you have to provide the field name explicitly.
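For example, a request along the following lines would highlight every matching field covered by the wildcard (an illustrative sketch; the comment_body field name is hypothetical):

GET /_search
{
    "query" : {
        "match": { "comment_body": "kimchy" }
    },
    "highlight" : {
        "fields" : {
            "comment_*" : {}
        }
    }
}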
Plain highlighter
The default choice of highlighter is of type plain and uses the Lucene highlighter. It tries hard to reflect the query matching logic in terms of understanding word importance and any word positioning criteria in phrase queries.
If you want to highlight a lot of fields in a lot of documents with complex queries this highlighter will not be fast. In its efforts to accurately reflect query logic it creates a tiny in-memory index and re-runs the original query criteria through Lucene’s query execution planner to get access to low-level match information on the current document. This is repeated for every field and every document that needs highlighting. If this presents a performance issue in your system consider using an alternative highlighter.
Postings highlighter
If index_options is set to offsets in the mapping the postings highlighter will be used instead of the plain highlighter. The postings highlighter:
• Is faster since it doesn’t need to reanalyze the text to be highlighted: the larger the documents, the better the performance gain should be
• Requires less disk space than term_vectors, needed for the fast vector highlighter
• Breaks the text into sentences and highlights them. Plays really well with natural languages, not as well with fields containing for instance html markup
• Treats the document as the whole corpus, and scores individual sentences as if they were documents in this corpus, using the BM25 algorithm
Here is an example of setting the content field in the index mapping to allow for highlighting using the postings highlighter on it:
{
"type_name" : {
"content" : {"index_options" : "offsets"}
}
}
Note that the postings highlighter is meant to perform simple query terms highlighting, regardless of their positions. That means that when used for instance in combination with a phrase query, it will highlight all the terms that the query is composed of, regardless of whether they are actually part of a query match, effectively ignoring their positions.
The postings highlighter doesn’t support highlighting some complex queries, like a match query with type set to match_phrase_prefix. No highlighted snippets will be returned in that case.
Fast vector highlighter
If term_vector information is provided by setting term_vector to with_positions_offsets in the mapping then the fast vector highlighter will be used instead of the plain highlighter. The fast vector highlighter:
• Is faster especially for large fields (> 1MB)
• Can be customized with boundary_chars, boundary_max_scan, and fragment_offset (see below)
• Requires setting term_vector to with_positions_offsets which increases the size of the index
• Can combine matches from multiple fields into one result. See matched_fields
• Can assign different weights to matches at different positions allowing for things like phrase matches being sorted above term matches when highlighting a Boosting Query that boosts phrase matches over term matches
Here is an example of setting the content field to allow for highlighting using the fast vector highlighter on it (this will cause the index to be bigger):
{
"type_name" : {
"content" : {"term_vector" : "with_positions_offsets"}
}
}
Unified Highlighter
This functionality is in technical preview and may be changed or removed in a future release. Elastic will apply best effort to fix any issues, but features in technical preview are not subject to the support SLA of official GA features.
The unified highlighter can extract offsets from either postings, term vectors, or via re-analyzing text. Under the hood it uses Lucene UnifiedHighlighter which picks its strategy depending on the field and the query to highlight. Independently of the strategy this highlighter breaks the text into sentences and scores individual sentences as if they were documents in this corpus, using the BM25 algorithm. It supports accurate phrase and multi-term (fuzzy, prefix, regex) highlighting and can be used with the following options:
• force_source
• encoder
• highlight_query
• pre_tags and post_tags
• require_field_match
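Assuming the unified highlighter is selected the same way as the other highlighter types (a sketch; verify against your Elasticsearch version, since this feature is in technical preview):

GET /_search
{
    "query" : {
        "match": { "content": "kimchy" }
    },
    "highlight" : {
        "fields" : {
            "content" : {"type" : "unified"}
        }
    }
}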
Force highlighter type
The type field allows you to force a specific highlighter type. This is useful for instance when needing to use the plain highlighter on a field that has term_vectors enabled. The allowed values are: plain, postings and fvh. The following is an example that forces the use of the plain highlighter:
GET /_search
{
"query" : {
"match": { "user": "kimchy" }
},
"highlight" : {
"fields" : {
"content" : {"type" : "plain"}
}
}
}
Force highlighting on source
Forces the highlighting to highlight fields based on the source even if fields are stored separately. Defaults to false.
GET /_search
{
"query" : {
"match": { "user": "kimchy" }
},
"highlight" : {
"fields" : {
"content" : {"force_source" : true}
}
}
}
Highlighting Tags
By default, the highlighting will wrap highlighted text in <em> and </em>. This can be controlled by setting pre_tags and post_tags, for example:
GET /_search
{
"query" : {
"match": { "user": "kimchy" }
},
"highlight" : {
"pre_tags" : ["<tag1>"],
"post_tags" : ["</tag1>"],
"fields" : {
"_all" : {}
}
}
}
Using the fast vector highlighter there can be more tags, and the "importance" is ordered.
GET /_search
{
"query" : {
"match": { "user": "kimchy" }
},
"highlight" : {
"pre_tags" : ["<tag1>", "<tag2>"],
"post_tags" : ["</tag1>", "</tag2>"],
"fields" : {
"_all" : {}
}
}
}
There are also built-in "tag" schemas; currently there is a single schema called styled with the following pre_tags:
<em class="hlt1">, <em class="hlt2">, <em class="hlt3">,
<em class="hlt4">, <em class="hlt5">, <em class="hlt6">,
<em class="hlt7">, <em class="hlt8">, <em class="hlt9">,
<em class="hlt10">
and </em> as post_tags. If you can think of other nice-to-have built-in tag schemas, just send an email to the mailing list or open an issue. Here is an example of switching tag schemas:
GET /_search
{
"query" : {
"match": { "user": "kimchy" }
},
"highlight" : {
"tags_schema" : "styled",
"fields" : {
"content" : {}
}
}
}
Encoder
An encoder parameter can be used to define how highlighted text will be encoded. It can be either default (no encoding) or html (will escape html, if you use html highlighting tags).
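A minimal sketch, enabling the html encoder at the top level of the highlight request so any markup inside the field is escaped before the highlight tags are inserted:

GET /_search
{
    "query" : {
        "match": { "content": "kimchy" }
    },
    "highlight" : {
        "encoder" : "html",
        "fields" : {
            "content" : {}
        }
    }
}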
Highlighted Fragments
Each field highlighted can control the size of the highlighted fragment in characters (defaults to 100), and the maximum number of fragments to return (defaults to 5). For example:
GET /_search
{
"query" : {
"match": { "user": "kimchy" }
},
"highlight" : {
"fields" : {
"content" : {"fragment_size" : 150, "number_of_fragments" : 3}
}
}
}
The fragment_size is ignored when using the postings highlighter, as it outputs sentences regardless of their length.
On top of this it is possible to specify that highlighted fragments need to be sorted by score:
GET /_search
{
"query" : {
"match": { "user": "kimchy" }
},
"highlight" : {
"order" : "score",
"fields" : {
"content" : {"fragment_size" : 150, "number_of_fragments" : 3}
}
}
}
If the number_of_fragments value is set to 0 then no fragments are produced, instead the whole content of the field is returned, and of course it is highlighted. This can be very handy if short texts (like document title or address) need to be highlighted but no fragmentation is required. Note that fragment_size is ignored in this case.
GET /_search
{
"query" : {
"match": { "user": "kimchy" }
},
"highlight" : {
"fields" : {
"_all" : {},
"bio.title" : {"number_of_fragments" : 0}
}
}
}
When using the fvh one can use the fragment_offset parameter to control the margin from which to start highlighting.
In the case where there is no matching fragment to highlight, the default is to not return anything. Instead, we can return a snippet of text from the beginning of the field by setting no_match_size (default 0) to the length of the text that you want returned. The actual length may be shorter than specified as it tries to break on a word boundary. When using the postings highlighter it is not possible to control the actual size of the snippet, therefore the first sentence gets returned whenever no_match_size is greater than 0.
GET /_search
{
"query" : {
"match": { "user": "kimchy" }
},
"highlight" : {
"fields" : {
"content" : {
"fragment_size" : 150,
"number_of_fragments" : 3,
"no_match_size": 150
}
}
}
}
Fragmenter
The fragmenter controls how text is broken up in highlight snippets. However, this option is applicable only to the plain highlighter. There are two options:
simple
Breaks up text into same sized fragments.
span
Same as the simple fragmenter, but tries not to break up text between highlighted terms (this is applicable when using phrase like queries). This is the default.
GET twitter/tweet/_search
{
"query" : {
"match_phrase": { "message": "number 1" }
},
"highlight" : {
"fields" : {
"message" : {
"fragment_size" : 15,
"number_of_fragments" : 3,
"fragmenter": "simple"
}
}
}
}
Response:
{
...
"hits": {
"total": 1,
"max_score": 1.4818809,
"hits": [
{
"_index": "twitter",
"_type": "tweet",
"_id": "1",
"_score": 1.4818809,
"_source": {
"user": "test",
"message": "some message with the number 1",
"date": "2009-11-15T14:12:12",
"likes": 1
},
"highlight": {
"message": [
" with the <em>number</em>",
" <em>1</em>"
]
}
}
]
}
}
GET twitter/tweet/_search
{
"query" : {
"match_phrase": { "message": "number 1" }
},
"highlight" : {
"fields" : {
"message" : {
"fragment_size" : 15,
"number_of_fragments" : 3,
"fragmenter": "span"
}
}
}
}
Response:
{
...
"hits": {
"total": 1,
"max_score": 1.4818809,
"hits": [
{
"_index": "twitter",
"_type": "tweet",
"_id": "1",
"_score": 1.4818809,
"_source": {
"user": "test",
"message": "some message with the number 1",
"date": "2009-11-15T14:12:12",
"likes": 1
},
"highlight": {
"message": [
"some message with the <em>number</em> <em>1</em>"
]
}
}
]
}
}
If the number_of_fragments option is set to 0, NullFragmenter is used which does not fragment the text at all. This is useful for highlighting the entire content of a document or field.
Highlight query
It is also possible to highlight against a query other than the search query by setting highlight_query. This is especially useful if you use a rescore query because those are not taken into account by highlighting by default. Elasticsearch does not validate that highlight_query contains the search query in any way so it is possible to define it so legitimate query results aren’t highlighted at all. Generally it is better to include the search query in the highlight_query. Here is an example of including both the search query and the rescore query in highlight_query.
GET /_search
{
"stored_fields": [ "_id" ],
"query" : {
"match": {
"content": {
"query": "foo bar"
}
}
},
"rescore": {
"window_size": 50,
"query": {
"rescore_query" : {
"match_phrase": {
"content": {
"query": "foo bar",
"slop": 1
}
}
},
"rescore_query_weight" : 10
}
},
"highlight" : {
"order" : "score",
"fields" : {
"content" : {
"fragment_size" : 150,
"number_of_fragments" : 3,
"highlight_query": {
"bool": {
"must": {
"match": {
"content": {
"query": "foo bar"
}
}
},
"should": {
"match_phrase": {
"content": {
"query": "foo bar",
"slop": 1,
"boost": 10.0
}
}
},
"minimum_should_match": 0
}
}
}
}
}
}
Note that the score of a text fragment in this case is calculated by the Lucene highlighting framework. For implementation details you can check the ScoreOrderFragmentsBuilder.java class. On the other hand, when using the postings highlighter the fragments are scored using, as mentioned above, the BM25 algorithm.
Global Settings
Highlighting settings can be set on a global level and then overridden at the field level.
GET /_search
{
"query" : {
"match": { "user": "kimchy" }
},
"highlight" : {
"number_of_fragments" : 3,
"fragment_size" : 150,
"fields" : {
"_all" : { "pre_tags" : ["<em>"], "post_tags" : ["</em>"] },
"bio.title" : { "number_of_fragments" : 0 },
"bio.author" : { "number_of_fragments" : 0 },
"bio.content" : { "number_of_fragments" : 5, "order" : "score" }
}
}
}
Require Field Match
require_field_match can be set to false, which will cause any field to be highlighted regardless of whether the query matched specifically on it. The default behaviour is true, meaning that only fields that hold a query match will be highlighted.
GET /_search
{
"query" : {
"match": { "user": "kimchy" }
},
"highlight" : {
"require_field_match": false,
"fields": {
"_all" : { "pre_tags" : ["<em>"], "post_tags" : ["</em>"] }
}
}
}
Boundary Characters
When highlighting a field using the fast vector highlighter, boundary_chars can be configured to define what constitutes a boundary for highlighting. It’s a single string with each boundary character defined in it. It defaults to .,!? \t\n.
The boundary_max_scan allows to control how far to look for boundary characters, and defaults to 20.
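A sketch of both settings as per-field options of the fast vector highlighter (the values shown are arbitrary examples):

GET /_search
{
    "query" : {
        "match": { "content": "kimchy" }
    },
    "highlight" : {
        "fields" : {
            "content" : {
                "type" : "fvh",
                "boundary_chars" : ".,!? \t\n",
                "boundary_max_scan" : 10
            }
        }
    }
}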
Matched Fields
The Fast Vector Highlighter can combine matches on multiple fields to highlight a single field using matched_fields. This is most intuitive for multifields that analyze the same string in different ways. All matched_fields must have term_vector set to with_positions_offsets but only the field to which the matches are combined is loaded so only that field would benefit from having store set to yes.
In the following examples content is analyzed by the english analyzer and content.plain is analyzed by the standard analyzer.
GET /_search
{
"query": {
"query_string": {
"query": "content.plain:running scissors",
"fields": ["content"]
}
},
"highlight": {
"order": "score",
"fields": {
"content": {
"matched_fields": ["content", "content.plain"],
"type" : "fvh"
}
}
}
}
The above matches both "run with scissors" and "running with scissors" and would highlight "running" and "scissors" but not "run". If both phrases appear in a large document then "running with scissors" is sorted above "run with scissors" in the fragments list because there are more matches in that fragment.
GET /_search
{
"query": {
"query_string": {
"query": "running scissors",
"fields": ["content", "content.plain^10"]
}
},
"highlight": {
"order": "score",
"fields": {
"content": {
"matched_fields": ["content", "content.plain"],
"type" : "fvh"
}
}
}
}
The above highlights "run" as well as "running" and "scissors" but still sorts "running with scissors" above "run with scissors" because the plain match ("running") is boosted.
GET /_search
{
"query": {
"query_string": {
"query": "running scissors",
"fields": ["content", "content.plain^10"]
}
},
"highlight": {
"order": "score",
"fields": {
"content": {
"matched_fields": ["content.plain"],
"type" : "fvh"
}
}
}
}
The above query wouldn’t highlight "run" or "scissor" but shows that it is just fine not to list the field to which the matches are combined (content) in the matched fields.
Technically it is also fine to add fields to matched_fields that don’t share the same underlying string as the field to which the matches are combined. The results might not make much sense and if one of the matches is off the end of the text then the whole query will fail.
There is a small amount of overhead involved with setting matched_fields to a non-empty array so always prefer
"highlight": {
"fields": {
"content": {}
}
}
to
"highlight": {
"fields": {
"content": {
"matched_fields": ["content"],
"type" : "fvh"
}
}
}
Phrase Limit
The fast vector highlighter has a phrase_limit parameter that prevents it from analyzing too many phrases and eating tons of memory. It defaults to 256, so only the first 256 matching phrases in the document are considered for scoring. You can raise the limit with the phrase_limit parameter, but keep in mind that scoring more phrases consumes more time and memory.
If using matched_fields keep in mind that phrase_limit phrases per matched field are considered.
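For example, raising the limit for a single field (an illustrative value; remember the time/memory trade-off mentioned above):

GET /_search
{
    "query" : {
        "match_phrase": { "content": "running scissors" }
    },
    "highlight" : {
        "fields" : {
            "content" : {
                "type" : "fvh",
                "phrase_limit" : 512
            }
        }
    }
}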
Field Highlight Order
Elasticsearch highlights the fields in the order that they are sent. Per the JSON spec, objects are unordered, but if you need to be explicit about the order in which fields are highlighted you can use an array for fields like this:
"highlight": {
"fields": [
{"title":{ /*params*/ }},
{"text":{ /*params*/ }}
]
}
None of the highlighters built into Elasticsearch care about the order that the fields are highlighted but a plugin may.
Hi all!
I get the following error while trying to compile this code
/usr/lib/gcc/i386-redhat-linux/3.4.6/../../../../include/c++/3.4.6/bits/stl_numeric.h:116: error: must use .* or ->* to call pointer-to-member function in `__binary_op (...)'
I am quite stunned because I've used it before in a similar way.
Can anybody give me a hint?
Thank you in advance!
int main(void)
{
// things happening
std::vector< std::vector<double> > values(3, std::vector<double>());
// fill values in values vector
doIt(values);
}
void doIt(const std::vector< std::vector<double> > &values )
{
// things happening
double e = sqrt(std::accumulate(values[0].begin(), values[0].end(), 0.0, accumSq))/values[0].size();
// things happening
}
double accumSq(double sum_so_far, double x)
{
return sum_so_far + x*x;
}
I have just run it on Mac, didn't get any compilation error, what platform you are using?
#include <iostream>
#include <complex>
#include <functional>
#include <numeric>
#include <vector>
using namespace std;
double accumSq(double sum_so_far, double x)
{
return sum_so_far + x*x;
}
void doIt(const std::vector< std::vector<double> > &values )
{
// things happening
double e = sqrt(std::accumulate(values[0].begin(), values[0].end(), 0.0, accumSq))/values[0].size();
// things happening
}
int main(int argc, char *args[])
{
// things happening
std::vector< std::vector<double> > values(3, std::vector<double>());
// fill values in values vector
doIt(values);
return 0;
}
If I put in declarations for each of the functions ( doIt and accumSq ) above main the example compiles just fine for me.
Is there a reason why you are using such an old compiler? I'm sure that Red Hat has a more recent GCC package available. And yes, you need the function accumSq to be declared sometime before you use it.
I have just run it on Mac, didn't get any compilation error, what platform you are using?
In this red hat works like a charm
Linux hostname 2.6.18-238 #1 SMP Fri Jan 14 17:17:13 CET 2011 x86_64 x86_64 x86_64 GNU/Linux
gcc version 4.1.2 20080704 (Red Hat 4.1.2-50)
Though in this one I get the compilation error
Linux hostname 2.6.9-89 #1 SMP Wed Jun 3 13:16:24 CEST 2009 i686 i686 i386 GNU/Linux
gcc version 3.4.6 20060404 (Red Hat 3.4.6-11)
If I put in declarations for each of the functions ( doIt and accumSq ) above main the example compiles just fine for me.
Getting the same error using for_each.
I am confused: the only differences are the GCC version and the platform, 64-bit vs 32-bit.
Is there a reason why you are using such an old compiler? I'm sure that red-hat has some more recent package of GCC available. And yes, you need the function AccumSq to be declared sometime before you use it.
Actually it's a matter of compatibility with other stuff; in any case, I can't change the compiler.
I think I will end up using a loop with an iterator even though it is not as neat.
Thank you!
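For reference, a minimal sketch of the declaration fix the replies describe (the same code as above, with forward declarations added so both names are known before first use):

#include <cmath>
#include <numeric>
#include <vector>

// Forward declarations so the compiler knows accumSq is a free function
// by the time std::accumulate sees it (the fix suggested in the replies).
double accumSq(double sum_so_far, double x);
void doIt(const std::vector< std::vector<double> > &values);

int main()
{
    std::vector< std::vector<double> > values(3, std::vector<double>());
    // fill values in values vector
    doIt(values);
    return 0;
}

void doIt(const std::vector< std::vector<double> > &values)
{
    double e = std::sqrt(std::accumulate(values[0].begin(), values[0].end(), 0.0, accumSq)) / values[0].size();
    (void)e; // things happening
}

double accumSq(double sum_so_far, double x)
{
    return sum_so_far + x*x;
}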
Security 101: Using a Proxy for Downloads
When you visit any website, you are sharing an incredible amount of information. Any click can let others know your physical location, the type of device you used to access the Internet, what site you visited just prior, and even what type of browser you used. Proxies are a great way to protect your online identity.
What Are Proxies?
A proxy server acts as a protective shell around any information you send or receive online. Proxies work by providing an inaccurate depiction of your location, essentially masking your identifying features. Proxies have a primary practical purpose: to keep Internet users’ identities safe. For many, a proxy is a way to ensure confidential connections, such as when accessing and transferring sensitive business files.
A proxy server provides a tunnel from one computer to another. When a user connects to a proxy server, the user’s actual IP address, a unique identifier showing the location of an online device, is hidden. When a user requests information from another server, the proxy server acts as an intermediary, retrieving the sought-after information on the user’s behalf.
Why Does Using a Proxy Matter?
If you want to keep your web activity private, especially when downloading, proxies are an effective way to protect yourself. On an open connection without proxy protection, you share more information than you might realize.
Software programs let website owners learn the locations of the devices used via your IP address. These programs also reveal the kind of devices you used, what service providers you used, and which sites you came from. If you visited a particular site after clicking on an email link, a page link, a banner ad, or you typed the URL into your address bar, the site owner knows that information too.
Even worse, visiting some sites installs cookies on your device. Granted, cookies can be helpful, but they can also be used to capture even more information about you, possibly opening you up to attacks by malware. By using a proxy connection, those issues are virtually wiped out.
Why Do People Use Proxy Servers?
There are all kinds of reasons for using a proxy. Maybe you want to keep your digital footprint small and not give sites information about your browsing and computing habits. Maybe you want to download information, music, or videos without running up against ISP limits on downloaded data. Whatever the reason, using a proxy is an important tool in maintaining your privacy while staying within the guidelines.
How to Use a Proxy for Safer Internet Surfing
There are a number of free proxy sites available, but download speeds can be slow. Be sure to find sites that have a good reputation. Some of those free sites, and many others, offer a paid option that promises faster download times and extra features.
Once on a proxy website, simply enter the name of the website you wish to visit. Your browser window will show the proxy site address with an add-on and an encrypted alphanumeric link to the site you’re visiting. Within the browser window, most proxy browsers include an address bar that lets you surf from site to site without returning to the proxy home page.
If you regularly need to download large files from the Internet, using a proxy server is an effective and easy way to get the files you need while protecting your identity from those who may wish to do you harm.
Pass LINQ expression as parameter to where clause
.net c# entity-framework linq linq-expressions
Question
Please read the question carefully before voting to close it. That is not a duplicate.
I am trying to build a generic method that returns a list of entities of type T joined to logs of type AuditLog. Here is a LEFT JOIN interpretation in LINQ that I use:
var result = from entity in entitySet
from auditLog in auditLogSet.Where(joinExpression).DefaultIfEmpty()
select new { entity, auditLog };
return result.GroupBy(item => item.entity)
.Select(group => new
{
Entity = group.Key,
Logs = group.Where(i => i.auditLog != null).Select(i => i.auditLog)
});
The problem is in joinExpression. I want to pass it to the WHERE clause, but it is different for different concrete types T (it is dependent on the entity variable), for example for a specific entity it could be
joinExpression = l => l.TableName == "SomeTable" && l.EntityId == entity.SomeTableId;
Notice entity.SomeTableId above. This is the reason I can't initialize joinExpression before the beginning of the query. How can I pass joinExpression as a parameter if it is actually dependent on "entity" variable, which is part of the query itself?
Accepted Answer
Your method may read something like this:
IQueryable<dynamic> GetEntities<T>(IDbSet<T> entitySet, Expression<Func<T, IEnumerable<AuditLog>>> joinExpression) where T : class
{
var result = entitySet.SelectMany(joinExpression,(entity, auditLog) => new {entity, auditLog});
return result.GroupBy(item => item.entity)
.Select(group => new
{
Entity = group.Key,
Logs = group.Where(i => i.auditLog != null).Select(i => i.auditLog)
});
}
And then you call it like this:
Expression<Func<SomeEntity, IEnumerable<AuditLog>>> ddd = entity => auditLogSet.Where(a => a.TableName == "SomeEntity" && entity.Id == a.EntityId).DefaultIfEmpty();
var result = GetEntities(entitySet, ddd).ToList();
I do not really see how this is different from the duplicate that I linked, in both cases you pass your query as an expression. Obviously you need to pass the query with all the dependencies, so you need your entity value be a part of it.
Here is a self-contained working example:
using System;
using System.Collections.Generic;
using System.ComponentModel.DataAnnotations.Schema;
using System.Data.Entity;
using System.Data.Entity.ModelConfiguration;
using System.Linq;
using System.Linq.Expressions;
namespace SO24542133
{
public class AuditLog
{
public int Id { get; set; }
public string TableName { get; set; }
public int? EntityId { get; set; }
public string Text { get; set; }
}
public class SomeEntity
{
public int Id { get; set; }
public string Something { get; set; }
}
internal class AuditLogConfiguration : EntityTypeConfiguration<AuditLog>
{
public AuditLogConfiguration()
{
ToTable("dbo.AuditLog");
HasKey(x => x.Id);
Property(x => x.Id).HasColumnName("Id").IsRequired().HasDatabaseGeneratedOption(DatabaseGeneratedOption.Identity);
Property(x => x.TableName).HasColumnName("TableName").IsOptional().HasMaxLength(50);
Property(x => x.EntityId).HasColumnName("EntityId").IsOptional();
Property(x => x.Text).HasColumnName("Text").IsOptional();
}
}
internal class SomeEntityConfiguration : EntityTypeConfiguration<SomeEntity>
{
public SomeEntityConfiguration()
{
ToTable("dbo.SomeEntity");
HasKey(x => x.Id);
Property(x => x.Id).HasColumnName("Id").IsRequired().HasDatabaseGeneratedOption(DatabaseGeneratedOption.Identity);
Property(x => x.Something).HasColumnName("Something").IsOptional();
}
}
public interface IMyDbContext : IDisposable
{
IDbSet<AuditLog> AuditLogSet { get; set; }
IDbSet<SomeEntity> SomeEntitySet { get; set; }
int SaveChanges();
}
public class MyDbContext : DbContext, IMyDbContext
{
public IDbSet<AuditLog> AuditLogSet { get; set; }
public IDbSet<SomeEntity> SomeEntitySet { get; set; }
static MyDbContext()
{
Database.SetInitializer(new DropCreateDatabaseAlways<MyDbContext>());
}
public MyDbContext(string connectionString) : base(connectionString)
{
}
protected override void OnModelCreating(DbModelBuilder modelBuilder)
{
base.OnModelCreating(modelBuilder);
modelBuilder.Configurations.Add(new AuditLogConfiguration());
modelBuilder.Configurations.Add(new SomeEntityConfiguration());
}
}
class Program
{
private static void CreateTestData(MyDbContext context)
{
SomeEntity e1 = new SomeEntity { Something = "bla" };
SomeEntity e2 = new SomeEntity { Something = "another bla" };
SomeEntity e3 = new SomeEntity { Something = "third bla" };
context.SomeEntitySet.Add(e1);
context.SomeEntitySet.Add(e2);
context.SomeEntitySet.Add(e3);
context.SaveChanges();
AuditLog a1 = new AuditLog { EntityId = e1.Id, TableName = "SomeEntity", Text = "abc" };
AuditLog a2 = new AuditLog { EntityId = e1.Id, TableName = "AnotherTable", Text = "def" };
AuditLog a3 = new AuditLog { EntityId = e1.Id, TableName = "SomeEntity", Text = "ghi" };
AuditLog a4 = new AuditLog { EntityId = e2.Id, TableName = "SomeEntity", Text = "jkl" };
context.AuditLogSet.Add(a1);
context.AuditLogSet.Add(a2);
context.AuditLogSet.Add(a3);
context.AuditLogSet.Add(a4);
context.SaveChanges();
}
static IQueryable<dynamic> GetEntities<T>(IDbSet<T> entitySet, Expression<Func<T, IEnumerable<AuditLog>>> joinExpression) where T : class
{
var result = entitySet.SelectMany(joinExpression,(entity, auditLog) => new {entity, auditLog});
return result.GroupBy(item => item.entity)
.Select(group => new
{
Entity = group.Key,
Logs = group.Where(i => i.auditLog != null).Select(i => i.auditLog)
});
}
static void Main()
{
MyDbContext context = new MyDbContext("Data Source=(local);Initial Catalog=SO24542133;Integrated Security=True;");
CreateTestData(context);
Expression<Func<SomeEntity, IEnumerable<AuditLog>>> ddd = entity => context.AuditLogSet.Where(a => a.TableName == "SomeEntity" && entity.Id == a.EntityId).DefaultIfEmpty();
var result = GetEntities(context.SomeEntitySet, ddd).ToList();
// Examine results here
result.ToString();
}
}
}
And to address a point that is raised in another answer regarding DefaultIfEmpty. The call to DefaultIfEmpty is just a node on expression tree that you end up with in the ddd variable. You don't have to include it in this expression tree and instead add it dynamically in your GetEntites method to the expression tree that you receive as a parameter.
EDIT:
To touch on other issues with the code, it is correct, that the sql generated by this query is less then optimal. Particularly bad thing about it is that we first flatten the join with SelectMany and then un-flatten it back again with GroupBy. This does not make much sense. Let's see how we can improve that. First, let's get rid of this dynamic nonsense. Our result set item can be defined like this:
class QueryResultItem<T>
{
public T Entity { get; set; }
public IEnumerable<AuditLog> Logs { get; set; }
}
Good. Now let's rewrite our EF query so that it does not flatten and then groups by. Let's start simple and come up with a non-generic implementation, we will improve that later. Our query can look something like this:
static IQueryable<QueryResultItem<SomeEntity>> GetEntities(IDbSet<SomeEntity> entitySet, IDbSet<AuditLog> auditLogSet)
{
return entitySet.Select(entity =>
new QueryResultItem<SomeEntity>
{
Entity = entity,
Logs = auditLogSet.Where(a => a.TableName == "SomeEntity" && entity.Id == a.EntityId)
});
}
Nice and clean. Now let's see what we need to do to make it work with any entity. First of all let's make the expression itself easier to manipulate by pulling it in a separate variable like this:
static IQueryable<QueryResultItem<SomeEntity>> GetEntities(IDbSet<SomeEntity> entitySet, IDbSet<AuditLog> auditLogSet)
{
Expression<Func<SomeEntity, QueryResultItem<SomeEntity>>> entityExpression = entity =>
new QueryResultItem<SomeEntity>
{
Entity = entity,
Logs = auditLogSet.Where(a => a.TableName == "SomeEntity" && entity.Id == a.EntityId)
};
return entitySet.Select(entityExpression);
}
We obviously need to be able to pass the where expression from somewhere, so let's separate this part to a variable as well:
static IQueryable<QueryResultItem<T>> GetEntities<T>(IDbSet<T> entitySet, IDbSet<AuditLog> auditLogSet, Expression<Func<AuditLog, T, bool>> whereTemplate) where T : class
{
Expression<Func<AuditLog, bool>> whereExpression = null;
Expression<Func<T, QueryResultItem<T>>> entityExpression = entity =>
new QueryResultItem<T>
{
Entity = entity,
Logs = auditLogSet.Where(whereExpression)
};
whereExpression = SubstituteSecondParameter(whereTemplate, entityExpression.Parameters[0]);
return entitySet.Select(entityExpression);
}
So now where expression is in a separate variable, but we also took a chance to do a few other changes as well. Our method now is generic again, so it can accept any entity. Also note that we are passing a where template in but it has an extra generic parameter, which substitutes for the entity variable that we are dependent on. Since type is different we can't use this template directly in our expression, so we need some way to translate it into where expression that we can use: the mysterious SubstituteSecondParameter method represents this. The last thing to note about this piece of code, that we are assigning the result of the substitution back to the variable that we used above in our expression. Will this work? Well, yes. The expression represents an anonymous method and by the merit of it lifts local variables and parameters to form a closure. If you have ReSharper you will notice that it warns you that the whereExpression variable gets modified after it was lifted. In most cases this is unintentional, but in our case this is exactly what we want to do, substitute the temporary whereExpression to the real one.
Next step is to consider what we are going to pass to our method. This is simple:
Expression<Func<AuditLog, SomeEntity, bool>> whereExpression2 = (l, entityParam) => l.TableName == "SomeEntity" && l.EntityId == entityParam.Id;
This will work out nicely. Now the last piece of the puzzle, how do we convert this expression with an extra parameter to the expression which has this parameter inside it. Well the bad news is you can't modify expression trees you have to re-build them from scratch. The good news, that Marc can help us here. First, let's define a simple Expression Visitor class it is based on what is already implemented in the BCL and looks simple:
class ExpressionSubstitute : ExpressionVisitor
{
private readonly Expression _from;
private readonly Expression _to;
public ExpressionSubstitute(Expression from, Expression to)
{
_from = from;
_to = to;
}
public override Expression Visit(Expression node)
{
return node == _from ? _to : base.Visit(node);
}
}
All we have is a constructor that tells us what node to substitute with what node, and an override to do the check / substitution. The SubstituteSecondParameter is also is not very complicated, it's a two liner:
static Expression<Func<AuditLog, bool>> SubstituteSecondParameter<T>(Expression<Func<AuditLog, T, bool>> expression, ParameterExpression parameter)
{
ExpressionSubstitute swapParam = new ExpressionSubstitute(expression.Parameters[1], parameter);
return Expression.Lambda<Func<AuditLog, bool>>(swapParam.Visit(expression.Body), expression.Parameters[0]);
}
Look at the signature, we take an expression with two parameters and a parameter and return an expression with only one parameter. To do this we create out visitor passing it our second parameter as "to" and the method parameter argument as "from", and then construct a new Lambda Expression that only has a single parameter, which we take from the original expression. And that concludes it. To put our changes together these are the new classes/methods:
class QueryResultItem<T>
{
public T Entity { get; set; }
public IEnumerable<AuditLog> Logs { get; set; }
}
class ExpressionSubstitute : ExpressionVisitor
{
private readonly Expression _from;
private readonly Expression _to;
public ExpressionSubstitute(Expression from, Expression to)
{
_from = from;
_to = to;
}
public override Expression Visit(Expression node)
{
return node == _from ? _to : base.Visit(node);
}
}
static Expression<Func<AuditLog, bool>> SubstituteSecondParameter<T>(Expression<Func<AuditLog, T, bool>> expression, ParameterExpression parameter)
{
ExpressionSubstitute swapParam = new ExpressionSubstitute(expression.Parameters[1], parameter);
return Expression.Lambda<Func<AuditLog, bool>>(swapParam.Visit(expression.Body), expression.Parameters[0]);
}
static IQueryable<QueryResultItem<T>> GetEntities2<T>(IDbSet<T> entitySet, IDbSet<AuditLog> auditLogSet, Expression<Func<AuditLog, T, bool>> whereTemplate) where T : class
{
Expression<Func<AuditLog, bool>> whereExpression = null;
Expression<Func<T, QueryResultItem<T>>> entityExpression = entity =>
new QueryResultItem<T>
{
Entity = entity,
Logs = auditLogSet.Where(whereExpression)
};
whereExpression = SubstituteSecondParameter(whereTemplate, entityExpression.Parameters[0]);
return entitySet.Select(entityExpression);
}
And this is how we call them:
Expression<Func<AuditLog, SomeEntity, bool>> whereExpression2 = (l, entityParam) => l.TableName == "SomeEntity" && l.EntityId == entityParam.Id;
var r2 = GetEntities2(context.SomeEntitySet, context.AuditLogSet, whereExpression2).ToList();
Much better!
And one last thing. This is the SQL generated by EF as the result of this query. As you can see it's very simple and readable (at least as far as EF generated sql goes):
SELECT
[Project1].[Id] AS [Id],
[Project1].[Something] AS [Something],
[Project1].[C1] AS [C1],
[Project1].[Id1] AS [Id1],
[Project1].[TableName] AS [TableName],
[Project1].[EntityId] AS [EntityId],
[Project1].[Text] AS [Text]
FROM ( SELECT
[Extent1].[Id] AS [Id],
[Extent1].[Something] AS [Something],
[Extent2].[Id] AS [Id1],
[Extent2].[TableName] AS [TableName],
[Extent2].[EntityId] AS [EntityId],
[Extent2].[Text] AS [Text],
CASE WHEN ([Extent2].[Id] IS NULL) THEN CAST(NULL AS int) ELSE 1 END AS [C1]
FROM [dbo].[SomeEntity] AS [Extent1]
LEFT OUTER JOIN [dbo].[AuditLog] AS [Extent2] ON (N'SomeEntity' = [Extent2].[TableName]) AND ([Extent2].[EntityId] = [Extent1].[Id])
) AS [Project1]
ORDER BY [Project1].[Id] ASC, [Project1].[C1] ASC
Popular Answer
So what you're trying to do is fake a Join in a way that makes it easy to make generic. It makes sense to use the Join extension method directly instead of trying to fake it with a Where clause. Not just because that's what Join is for, but because you simply can't do it generically the other way.
The Join method in LINQ takes three Expression parameters to do its job: a pair of key selectors (one for each side of the join) and a select expression. You can define two of those (the inner key selector and the select) inside the method, and just pass in the final key selector.
First, you'll need to define a type for your join keys. You can't use anonymous types, because they don't have a name you can refer to in the method's signature. In this case this should do:
public class LogKey
{
public string TableName;
public int EntityId;
}
We're going to cut out the anonymous return - you do know that's a terrible thing to do right? - and return a composed IQueryable that you can enumerate. It will need to know a few things like what connection to use and which list of data it's querying, but can be reduced to a generic.
Here's the method:
public IQueryable<IGrouping<T, LogEntry>> GetLogEntries<T>(
MyDataEntities context,
IQueryable<T> entities,
Expression<Func<T, LogKey>> outerKeySelector
)
{
// Join:
var query =
entities.Join(
context.auditLogSet,
outerKeySelector,
log => new LogKey { TableName = log.TableName, EntityId = log.EntityId },
(ent, log) => new { entity = ent, log = log }
);
// Grouping:
var group =
from pair in query
group pair.log by pair.entity into grp
select grp;
return group;
}
And finally, the invocation:
// get query for fetching logs grouped by entity:
var entLog = GetLogEntries(context, context.myEntities, e => new LogKey { TableName = "MyTableName", EntityId = (int)e.ID });
// get logs for entity with ID #2
var data = entLog.First(grp => grp.Key.ID == 2);
Console.WriteLine("ID {0}, {1} log entries", data.Key.ID, data.Count());
The good part is that it doesn't actually hit the database for anything until you enumerate the query. All of the code up until the First(...) call in the above code is about composing IQueryable operations together.
And that's about as generic as I can figure it. Unfortunately it misses one point: DefaultIfEmpty. Normally I wouldn't worry about it, but there's no simple method I know of for including it here. Perhaps someone else will point one out.
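Regarding that missing DefaultIfEmpty: one possible direction, not tested against the schema above, is to swap Join for GroupJoin, which keeps entities that have no matching logs and so behaves like a left outer join. The EntityWithLogs name below is made up; LogKey and the context members are reused from the code above:

static IQueryable<EntityWithLogs<T>> GetLogEntriesOuter<T>(
    MyDataEntities context,
    IQueryable<T> entities,
    Expression<Func<T, LogKey>> outerKeySelector)
{
    // GroupJoin pairs every entity with the (possibly empty) set of matching logs,
    // so entities without any AuditLog rows are not dropped from the result.
    return entities.GroupJoin(
        context.auditLogSet,
        outerKeySelector,
        log => new LogKey { TableName = log.TableName, EntityId = log.EntityId },
        (ent, logs) => new EntityWithLogs<T> { Entity = ent, Logs = logs });
}

// A named result type, since anonymous types cannot cross the method boundary.
public class EntityWithLogs<T>
{
    public T Entity { get; set; }
    public IEnumerable<LogEntry> Logs { get; set; }
}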
Licensed under: CC-BY-SA with attribution
Not affiliated with Stack Overflow
System of Linear Equations in Two Variables
While studying algebra, we became familiar with linear equations in one variable. A linear equation in one variable can be written in the form ax + b = 0, where a and b are real numbers and a ≠ 0. As the name suggests, a linear equation in one variable contains only one variable. Further examples are 4x − 2x = 13, 2m − 4 = 5m, and so on. So what about linear equations in two variables?
The general form of a linear equation in two variables is ax + by + c = 0, where a, b and c are real numbers and neither a nor b equals zero. Examples of linear equations in two variables are the following.
4x + 3y = 4
−3x + 7 = 5y
x = 4y
y = 2 − 3x
The solution set of a linear equation in two variables is the set of ordered pairs that satisfy the equation. The values x = m and y = n are a solution of the linear equation ax + by + c = 0 if am + bn + c = 0. Look at the example problem below.
(Also read: Definition and Forms of Circle Equations)
Find 4 solutions of 2x + 3y − 12 = 0!
We can rewrite this equation as y = (12 − 2x)/3.
If we substitute x = 0, we get y = (12 − 0)/3 = 4.
If we substitute x = 3, we get y = (12 − 6)/3 = 2.
If we substitute x = 6, we get y = (12 − 12)/3 = 0.
If we substitute x = 9, we get y = (12 − 18)/3 = −2.
Based on this calculation, the four solutions are:
• x = 0, y = 4
• x = 3, y = 2
• x = 6, y = 0
• x = 9, y = −2
We can conclude that a linear equation in two variables has infinitely many solutions.
YAML Selectors
Write resource selectors in YAML, save them with a human-friendly name, and reference them using the --selector flag. Recording selectors in a top-level selectors.yml file gives you:
• Legibility: complex selection criteria are composed of dictionaries and arrays
• Version control: selector definitions are stored in the same git repository as the dbt project
• Reusability: selectors can be referenced in multiple job definitions, and their definitions are extensible (via YAML anchors)
Selectors live in a top-level file named selectors.yml. Each must have a name and a definition, and can optionally define a description and default flag.
selectors.yml
selectors:
- name: nodes_to_joy
definition: ...
- name: nodes_to_a_grecian_urn
description: Attic shape with a fair attitude
default: true
definition: ...
Definitions
Each definition is comprised of one or more arguments, which can be one of the following:
• CLI-style: strings, representing CLI-style arguments
• Key-value: pairs in the form method: value
• Full YAML: fully specified dictionaries with items for method, value, operator-equivalent keywords, and support for exclude
Use union and intersection to organize multiple arguments.
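For instance, a definition that unions an intersection of two criteria with a third one might look as follows (the tag values here are placeholders; the same union/intersection syntax appears in the fuller example later on this page):

definition:
  union:
    - intersection:
      - 'tag:nightly'
      - 'tag:finance'
    - 'tag:adhoc'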
CLI-style
definition:
'tag:nightly'
This simple syntax supports use of the +, @, and * operators. It does not support exclude.
Key-value
definition:
tag: nightly
This simple syntax does not support any operators or exclude.
Full YAML
This is the most thorough syntax, which can include graph and set operators.
Review methods for the available list.
definition:
method: tag
value: nightly
# Optional keywords map to the `+` and `@` operators:
children: true | false
parents: true | false
children_depth: 1 # if children: true, degrees to include
parents_depth: 1 # if parents: true, degrees to include
childrens_parents: true | false # @ operator
indirect_selection: eager | cautious # include all tests selected indirectly? eager by default
The * operator to select all nodes can be written as:
definition:
method: fqn
value: "*"
Exclude
The exclude keyword is only supported by fully-qualified dictionaries. It may be passed as an argument to each dictionary, or as an item in a union. The following are equivalent:
- method: tag
value: nightly
exclude:
- "@tag:daily"
- union:
- method: tag
value: nightly
- exclude:
- method: tag
value: daily
Note: The exclude argument in YAML selectors is subtly different from the --exclude CLI argument. Here, exclude always returns a set difference, and it is always applied last within its scope.
This gets us more intricate subset definitions than what's available on the CLI, where we can only pass one "yeslist" (--select) and one "nolist" (--exclude).
Indirect selection
As a general rule, dbt will indirectly select all tests if they touch any resource that you're selecting directly. We call this "eager" indirect selection. You can optionally switch the indirect selection mode to "cautious" by setting indirect_selection for a specific criterion:
- union:
- method: fqn
value: model_a
indirect_selection: eager # default: will include all tests that touch model_a
- method: fqn
value: model_b
indirect_selection: cautious # will not include tests touching model_b
# if they have other unselected parents
If provided, a yaml selector's indirect_selection value will take precedence over the CLI flag --indirect-selection. Because indirect_selection is defined separately for each selection criterion, it's possible to mix eager/cautious modes within the same definition, to achieve the exact behavior that you need. Remember that you can always test out your criteria with dbt ls --selector.
See test selection examples for more details about indirect selection.
Example
Here are two ways to represent:
$ dbt run --select @source:snowplow,tag:nightly models/export --exclude package:snowplow,config.materialized:incremental export_performance_timing
selectors.yml
selectors:
- name: nightly_diet_snowplow
description: "Non-incremental Snowplow models that power nightly exports"
definition:
union:
- intersection:
- '@source:snowplow'
- 'tag:nightly'
- 'models/export'
- exclude:
- intersection:
- 'package:snowplow'
- 'config.materialized:incremental'
- export_performance_timing
Then in our job definition:
$ dbt run --selector nightly_diet_snowplow
Default
Selectors may define a boolean default property. If a selector has default: true, dbt will use this selector's criteria when tasks do not define their own selection criteria.
Let's say we define a default selector that only selects resources defined in our root project:
selectors:
- name: root_project_only
description: >
Only resources from the root project.
Excludes resources defined in installed packages.
default: true
definition:
method: package
value: <my_root_project_name>
If I run an "unqualified" command, dbt will use the selection criteria defined in root_project_only—that is, dbt will only build / freshness check / generate compiled SQL for resources defined in my root project.
$ dbt build
$ dbt source freshness
$ dbt docs generate
If I run a command that defines its own selection criteria (via --select, --exclude, or --selector), dbt will ignore the default selector and use the flag criteria instead. It will not try to combine the two.
$ dbt run --select model_a
$ dbt run --exclude model_a
Only one selector may set default: true for a given invocation; otherwise, dbt will return an error. You may use a Jinja expression to adjust the value of default depending on the environment, however:
selectors:
- name: default_for_dev
default: "{{ target.name == 'dev' | as_bool }}"
definition: ...
- name: default_for_prod
default: "{{ target.name == 'prod' | as_bool }}"
definition: ...
Comments:

Hi Dan – thank you for archiving this information. As someone who's starting to get interested in computer architecture, it's been hard trying to find thorough informational posts on ARM/x86 and RISC/CISC, outside of reading a textbook.

> [IBM System/3x0] happens to be one in which you can tell the length of the instruction from the first few bits, has a fairly regular instruction decode, has relatively few addressing modes, no indirect addressing. In fact, a big subset of its instructions are actually fairly RISC-like, although another subset is very CISCy.

IIRC, it might be because of the Model 44, which had a simplified decoder (limited instruction forms, fewer direct-memory operations) in exchange for greater integer performance - arguably a kind of ur-RISC.

Reply: Ah, from the days when we believed in sufficiently smart compilers. :)

Reply: Putting our trust in sufficiently smart processors hasn't exactly gone well either, to be fair.

Reply: I think the bigger issue here is that software is usually compiled once per ISA and not per processor, so the compiler never gets the chance to be very smart.
LibreOffice Module vcl (master): BitmapPalette.hxx
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/*
 * This file is part of the LibreOffice project.
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/.
 *
 * This file incorporates work covered by the following license notice:
 *
 *   Licensed to the Apache Software Foundation (ASF) under one or more
 *   contributor license agreements. See the NOTICE file distributed
 *   with this work for additional information regarding copyright
 *   ownership. The ASF licenses this file to you under the Apache
 *   License, Version 2.0 (the "License"); you may not use this file
 *   except in compliance with the License. You may obtain a copy of
 *   the License at http://www.apache.org/licenses/LICENSE-2.0 .
 */

#ifndef INCLUDED_VCL_BITMAPPALETTE_HXX
#define INCLUDED_VCL_BITMAPPALETTE_HXX

#include <vcl/dllapi.h>
#include <vcl/BitmapColor.hxx>
#include <vcl/checksum.hxx>
#include <vector>

class VCL_DLLPUBLIC BitmapPalette
{
    friend class SalBitmap;
    friend class BitmapAccess;

private:

    std::vector<BitmapColor> maBitmapColor;

public:

    SAL_DLLPRIVATE const BitmapColor* ImplGetColorBuffer() const
    {
        return maBitmapColor.data();
    }

    SAL_DLLPRIVATE BitmapColor* ImplGetColorBuffer()
    {
        return maBitmapColor.data();
    }

    BitmapChecksum GetChecksum() const
    {
        return vcl_get_checksum(0, maBitmapColor.data(), maBitmapColor.size() * sizeof(BitmapColor));
    }

public:

    BitmapPalette()
    {
    }

    BitmapPalette(sal_uInt16 nCount)
        : maBitmapColor(nCount)
    {
    }

    bool operator==( const BitmapPalette& rBitmapPalette ) const
    {
        return maBitmapColor == rBitmapPalette.maBitmapColor;
    }

    bool operator!=(const BitmapPalette& rBitmapPalette) const
    {
        return !( *this == rBitmapPalette );
    }

    bool operator!()
    {
        return maBitmapColor.empty();
    }

    sal_uInt16 GetEntryCount() const
    {
        return maBitmapColor.size();
    }

    void SetEntryCount(sal_uInt16 nCount)
    {
        maBitmapColor.resize(nCount);
    }

    const BitmapColor& operator[](sal_uInt16 nIndex) const
    {
        assert(nIndex < maBitmapColor.size() && "Palette index is out of range");
        return maBitmapColor[nIndex];
    }

    BitmapColor& operator[](sal_uInt16 nIndex)
    {
        assert(nIndex < maBitmapColor.size() && "Palette index is out of range");
        return maBitmapColor[nIndex];
    }

    sal_uInt16 GetBestIndex(const BitmapColor& rCol) const
    {
        sal_uInt16 nRetIndex = 0;

        if (!maBitmapColor.empty())
        {
            for (size_t j = 0; j < maBitmapColor.size(); ++j)
            {
                if (rCol == maBitmapColor[j])
                {
                    return j;
                }
            }

            sal_uInt16 nLastErr = SAL_MAX_UINT16;
            for (size_t i = 0; i < maBitmapColor.size(); ++i)
            {
                const sal_uInt16 nActErr = rCol.GetColorError(maBitmapColor[i]);
                if ( nActErr < nLastErr )
                {
                    nLastErr = nActErr;
                    nRetIndex = i;
                }
            }
        }

        return nRetIndex;
    }

    bool IsGreyPalette() const;
};

#endif // INCLUDED_VCL_BITMAPPALETTE_HXX

/* vim:set shiftwidth=4 softtabstop=4 expandtab: */
lpsconstelm
This tool removes constant process parameters from the LPS. If it can be determined that certain parameters of this LPS remain constant throughout any run of the process, all occurrences of these process parameters are replaced by the initial value and the process parameters are removed from the LPS. After substitution, expressions are simplified using a rewriter. Note that summands whose conditions are false are only removed with the --remove-trivial-summands flag.
If the initial value of a process parameter is a global variable and remains a global variable throughout the run of the process, the process variable is considered constant.
If the initial value of a process parameter is a global variable and is only changed once to a certain value, the process parameter is constant and the specific value is used for substitution.
A typical example of lpsconstelm is the following. Consider the linear process:
act a:Nat;
proc P(m,n:Nat)=
a(m).P(m,n+1) +
(m>0) -> a(m).P(m+1,n);
init P(0,0);
It is determined that m can only have the value 0, and the second summand can never take place. The result is:
act a: Nat;
proc P(n: Nat) =
a(0) .
P(n+1);
init P(0);
Note that lpsconstelm is very useful in simplifying linear processes. Its application does not reduce the size of generated state spaces. But its application can enable other tools, such as lpsparelm to become more effective.
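A typical pipeline might look like this (a sketch; file names are placeholders):

mcrl22lps spec.mcrl2 spec.lps     # linearise the specification
lpsconstelm spec.lps tmp.lps      # substitute and remove constant parameters
lpsparelm tmp.lps out.lps         # then remove parameters that became unused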
In some cases lpsconstelm can reduce the number of summands quite dramatically. For instance when dealing with similar communicating processes, such as in:
proc P(id:Nat,...)= ....;
init allow({...},comm({...},P(0,...)||P(1,...)||...||P(10,...)));
the variable id of each process is replaced by its concrete value. When processes send messages to other processes indexed by numbers (e.g., send(message, sender_id, receiver_id)), summands whose communication is impossible due to mismatching ids are removed from the LPS by lpsconstelm. For a typical example such as Milner's scheduler, this reduces the number of summands from quadratic to linear in the number of participating processes.
Manual page for lpsconstelm
Usage
lpsconstelm [OPTION]... [INFILE [OUTFILE]]
Description
Remove constant process parameters from the LPS in INFILE and write the result to OUTFILE. If INFILE is not present, standard input is used. If OUTFILE is not present, standard output is used.
Command line options
-c , --ignore-conditions
ignore conditions by assuming they evaluate to true
-f , --instantiate-free-variables
allow free variables to be instantiated as a side effect of the algorithm. This functionality is untested!
-QNUM , --qlimit=NUM
limit enumeration of quantifiers to NUM iterations. (Default NUM=1000, NUM=0 for unlimited).
-s , --remove-singleton-sorts
remove parameters with single element sorts
-t , --remove-trivial-summands
remove summands with condition false
-rNAME , --rewriter=NAME
use rewrite strategy NAME:
jitty
jitty rewriting
jittyc
compiled jitty rewriting
jittyp
jitty rewriting with prover
--timings[=FILE]
append timing measurements to FILE. Measurements are written to standard error if no FILE is provided
Standard options
-q , --quiet
do not display warning messages
-v , --verbose
display short intermediate messages
-d , --debug
display detailed intermediate messages
--log-level=LEVEL
display intermediate messages up to and including level
-h , --help
display help information
--version
display version information
--help-all
display help information, including hidden and experimental options
Author
Wieger Wesselink; Frank Stappers
HTML
<body>
<div class="container text-center">
<a class="btn btn-primary" id="turnX">I Chose X</a>
<a class="btn btn-primary" id="turnO">I Chose O</a>
<div class="row">
<div class="col-xs-4 square" id="0"><span hidden>#</span></div>
<div class="col-xs-4 square" id="1"><span hidden>#</span></div>
<div class="col-xs-4 square" id="2"><span hidden>#</span></div>
</div>
<div class="row">
<div class="col-xs-4 square" id="3"><span hidden>#</span></div>
<div class="col-xs-4 square" id="4"><span hidden>#</span></div>
<div class="col-xs-4 square" id="5"><span hidden>#</span></div>
</div>
<div class="row">
<div class="col-xs-4 square" id="6"><span hidden>#</span></div>
<div class="col-xs-4 square" id="7"><span hidden>#</span></div>
<div class="col-xs-4 square" id="8"><span hidden>#</span></div>
</div>
</div>
<div class="text-center" id="Win"></div>
<div class="container text-center">
<div class = "btn btn-primary" id="reset">Reset</div>
</div>
</body>
CSS
body{
background:no-repeat center fixed;
background-image:url("https://image.shutterstock.com/z/stock-vector-vector-seamless-abstract-pattern-with-cross-marks-and-circles-hand-painted-background-304485251.jpg");
-webkit-background-size:cover;
-o-background-size:cover;
background-size:cover;
}
#Win{
font-size:30px;
text-shadow:1px 1px 2px black, 0 0 1em blue,0 0 0.2em blue;
background:rgba(255,255,255,0.9);
margin:0px auto;
padding:0;
width:800px;
}
.boks{
float:none;
margin:0 auto;
margin-top:5%;
width:38%;
}
.square span{
text-align:center;
vertical-align:middle;
}
.square{
width:100px;
height:100px;
border:1px solid black;
font-size:600%;
background:rgba(255,255,255,0.9);
cursor:pointer;
}
JS
$(document).ready(function(){
var turn = "X";
var marker = ["#","#","#","#","#","#","#","#","#"];
var computersChoice = 0;
var runGame = false; // True once someone has won, so no further moves are processed.
var count = 0; // Counts the player's moves so that neither player nor computer makes too many.
$("#turnX").one("click",function(){
$("#turnO").off(); // Once X is chosen, the O button is made inactive.
turn = "X";
computersChoice = "O";
$("#turnO").css("opacity",0.23);
$('.square').click(function(){
var slot = $(this).attr('id');
playerTurn(turn,slot);
});
});
$("#turnO").one("click",function(){
$("#turnX").off("click");
turn = "O";
computersChoice = "X";
$("#turnX").css("opacity",0.23);
$('.square').click(function(){
var slot = $(this).attr('id');
playerTurn(turn,slot);
});
});
function computerTurn(){
var taken = false;
while(taken === false && count !== 5){
var computersCalc = Math.floor(Math.random() * 9); // random board index 0-8 (the old (Math.random()*10).toFixed() could yield 10, which is off the board)
var move = $("#" + computersCalc).text();
if(move === "#"){
$("#" + computersCalc).text(computersChoice);
taken = true;
marker[computersCalc] = computersChoice;
}
}
}
function playerTurn(whosTurn,id){
var blockInUse = $("#" + id).text();
if(blockInUse === "#"){
count++; // Track how many moves have been made between the player and the computer on the board.
marker[id] = whosTurn; // Store the X or O at the clicked index, e.g. marker = ["X","#","#","#","#","#","#","#","#"] once the top-left cell (index 0) is chosen.
$("#" + id).text(turn);
whoWins(marker,whosTurn);
if(runGame === false){
computerTurn();
whoWins(marker,computersChoice);
}
}
}
function whoWins(turnArray,playerMovement){
if(turnArray[0] === playerMovement && turnArray[1] === playerMovement && turnArray[2] === playerMovement){
runGame = true; // Player or computer has won, so the game loop no longer needs to run; the win condition is decided.
$("#Win").text("Player " + playerMovement + " wins! (Top row across 0,1 and 2 spots)");
setTimeout(function(){
location.reload(true);
},2000);
}else if(turnArray[2] === playerMovement && turnArray[4] === playerMovement && turnArray[6] === playerMovement){
runGame = true;
$("#Win").text("Player " + playerMovement + " Wins (Diagonal across 2, 4 and 6 spots)");
setTimeout(function(){
location.reload(true);
},2000);
}
else if(turnArray[0] === playerMovement && turnArray[3] === playerMovement && turnArray[6] === playerMovement){
runGame = true;
$("#Win").text("Player " + playerMovement + " Wins (Left column down 0, 3 and 6 spots)");
setTimeout(function(){
location.reload(true);
},2000);
}
else if(turnArray[0] === playerMovement && turnArray[4] === playerMovement && turnArray[8] === playerMovement){
runGame = true;
$("#Win").text("Player " + playerMovement + " Wins (Diagonal across 0, 4 and 8 spots)");
setTimeout(function(){
location.reload(true);
},2000);
}
else if(turnArray[3] === playerMovement && turnArray[4] === playerMovement && turnArray[5] === playerMovement){
runGame = true;
$("#Win").text("Player " + playerMovement + " Wins (Middle row across 3,4 and 5 spots)");
setTimeout(function(){
location.reload(true);
},2000);
}
else if(turnArray[6] === playerMovement && turnArray[7] === playerMovement && turnArray[8] === playerMovement){
runGame = true;
$("#Win").text("Player " + playerMovement + " Wins (Bottom row across 6,7 and 8 spots)");
setTimeout(function(){
location.reload(true);
},1000);
}
else if(turnArray[2] === playerMovement && turnArray[5] === playerMovement && turnArray[8] === playerMovement){
runGame = true;
$("#Win").text("Player " + playerMovement + " Wins (Right column down 2, 5 and 8 spots)");
setTimeout(function(){
location.reload(true);
},2000);
}
else if(turnArray[1] === playerMovement && turnArray[4] === playerMovement && turnArray[7] === playerMovement){
runGame = true;
$("#Win").text("Player " + playerMovement + " Wins (Middle column down 1, 4 and 7 spots)");
setTimeout(function(){
location.reload(true);
},2000);
}
else{
runGame = false; // Nobody has won yet; the game continues for now.
}
}
$("#reset").click(function(){
location.reload(true);
});
});
[Basics] Scalar Multiples of Vector Sums
Here we look at calculations that combine scalar multiples and sums of vectors, such as a scalar multiple of a sum of vectors.
Properties of scalar multiples of vectors
Multiplying a vector $\vec{a}$ by a constant k gives $k\vec{a}$, whose length is $|k|$ times that of $\vec{a}$ and whose direction flips according to the sign (see: [Basics] Scalar Multiples of Vectors). From this, the following is immediate:\[ k(l\vec{a})=(kl)\vec{a} \]That is, a "scalar multiple of a scalar-multiplied vector" can be found by first multiplying the scalars. Thinking about lengths and directions makes it clear that this holds.
The following also holds, again by thinking about lengths and directions:\[ (k+l)\vec{a}=k\vec{a}+l\vec{a} \]Both sides have $|k+l|$ times the length of $\vec{a}$, and whether they point the same way as $\vec{a}$ or the opposite way depends on the sign of $(k+l)$.
Scalar multiples of vector sums
Next, consider the vector\[ k(\vec{a}+\vec{b}), \]a scalar multiple of a sum of vectors.
Take points O, A, B so that $\overrightarrow{ \mathrm{ OA } }=\vec{a}$ and $\overrightarrow{ \mathrm{ AB } }=\vec{b}$.
Then we only need to work out $k\overrightarrow{ \mathrm{ OB } }$. For $k\gt 0$ the picture is as follows.
Take the point $\mathrm{ B }'$ with $\overrightarrow{ \mathrm{ OB' } }=k\overrightarrow{ \mathrm{ OB } }$, and let $\mathrm{ A }'$ be the intersection of the line through $\mathrm{ B' }$ parallel to AB with the line OA. Triangles OAB and $\mathrm{ OA'B' }$ are then similar, so\[ \overrightarrow{ \mathrm{ OA' } }=k\overrightarrow{ \mathrm{ OA } },\quad \overrightarrow{ \mathrm{ A'B' } }=k\overrightarrow{ \mathrm{ AB } } \]since the directions agree (by extension or parallelism) and the lengths are multiplied by k.
From this we see that\[ \overrightarrow{ \mathrm{ OB' } }=\overrightarrow{ \mathrm{ OA' } }+\overrightarrow{ \mathrm{ A'B' } }=k\overrightarrow{ \mathrm{ OA } }+k\overrightarrow{ \mathrm{ AB } }. \]For $k\lt 0$ the points $\mathrm{ A }', \mathrm{ B }'$ simply end up on the opposite side, and the same argument applies; for $k=0$ it clearly holds as well.
In other words,\[ k(\vec{a}+\vec{b})=k\vec{a}+k\vec{b} \]holds. In words: a scalar multiple of a sum of vectors equals the sum of the scalar multiples.
Sums, differences, and scalar multiples of vectors
We have seen vector computation in [Basics] Vector Addition, [Basics] Vector Subtraction, and [Basics] Scalar Multiples of Vectors, and added more rules above.
What all of this says is that sums, differences, and scalar multiples of vectors can be computed just like calculations with letters.
For example, from\[ 3\vec{x}-2\vec{a}=4\vec{b}+\vec{x} \]let us express $\vec{x}$ in terms of $\vec{a}$ and $\vec{b}$.
Adding $2\vec{a}$ to both sides preserves the equality, since adding the same vector to equal vectors gives equal results. Using $2\vec{a}-2\vec{a}=\vec{0}$ and the fact that adding $\vec{0}$ changes nothing, we get\[ 3\vec{x}=2\vec{a}+4\vec{b}+\vec{x}. \]In other words, terms can be transposed across the equals sign. Hence we can further obtain\[ 2\vec{x}=2\vec{a}+4\vec{b}, \]and dividing both sides by $2$, which by the property of scalar multiples of sums seen above amounts to dividing each term by $2$, gives\[ \vec{x}=\vec{a}+2\vec{b}. \]
In this way, if we define vector computation by:
• two vectors are equal when their directions and magnitudes are equal
• the sum of vectors is formed by chaining them head-to-tail, connecting the start point of the first to the end point of the second (or as the diagonal of a parallelogram)
• the inverse vector has the same magnitude as the original but the opposite direction
• subtracting a vector means adding its inverse (that is, the vector that brings you back to the start)
• a scalar multiple scales the length by the constant and flips the direction according to the sign
then calculations in the world of vectors can be done just like ordinary calculations with letters. Even though vectors were defined using triangles and parallelograms, we can compute without drawing a figure every time.
Conclusion
Here we looked at properties such as scalar multiples of vector sums, and then did some simple vector calculations. Nothing that holds here is particularly novel; the important point is that "we can calculate just as with expressions containing letters." The key is that in the new world (the world of vectors) we can do much the same things as before.
Get last item in array (v3)
Revision 3 of this benchmark
Description
Which is better for getting the last item in array: square brackets or .at()?
Setup
function generateRandomWord(length) {
let result = '';
const characters = 'abcdefghijklmnopqrstuvwxyz';
for (let i = 0; i < length; i++) {
result += characters.charAt(Math.floor(Math.random() * characters.length));
}
return result;
}
const testLen = 1000;
const words = Array.from({ length: testLen }, () => generateRandomWord(5));
Test cases (the runner reports ops/sec for each):
square brackets []
const lastWord = words[words.length-1];
at()
const lastWord = words.at(-1);
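For a rough standalone comparison outside the runner, a minimal sketch reusing the setup above (assumes a browser console or Node with the global performance object; absolute numbers will vary by engine):

function bench(label, fn, iterations = 1e6) {
  const start = performance.now();
  for (let i = 0; i < iterations; i++) fn();
  const ms = performance.now() - start;
  console.log(`${label}: ${Math.round(iterations / ms * 1000)} ops/sec`);
}

bench('square brackets []', () => words[words.length - 1]);
bench('at()', () => words.at(-1));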
BootstrapConfigStorageFactory.php

<?php

namespace Drupal\Core\Config;

use Drupal\Core\Database\Database;
use Drupal\Core\Site\Settings;

/**
 * Defines a factory for retrieving the config storage used pre-kernel.
 */
class BootstrapConfigStorageFactory {

  /**
   * Returns a configuration storage implementation.
   *
   * @param $class_loader
   *   The class loader. Normally Composer's ClassLoader, as included by the
   *   front controller, but may also be decorated; e.g.,
   *   \Symfony\Component\ClassLoader\ApcClassLoader.
   *
   * @return \Drupal\Core\Config\StorageInterface
   *   A configuration storage implementation.
   */
  public static function get($class_loader = NULL) {
    $bootstrap_config_storage = Settings::get('bootstrap_config_storage');
    $storage_backend = FALSE;
    if (!empty($bootstrap_config_storage) && is_callable($bootstrap_config_storage)) {
      $storage_backend = call_user_func($bootstrap_config_storage, $class_loader);
    }
    // Fallback to the DatabaseStorage.
    return $storage_backend ?: self::getDatabaseStorage();
  }

  /**
   * Returns a Database configuration storage implementation.
   *
   * @return \Drupal\Core\Config\DatabaseStorage
   */
  public static function getDatabaseStorage() {
    return new DatabaseStorage(Database::getConnection(), 'config');
  }

  /**
   * Returns a File-based configuration storage implementation.
   *
   * If there is no active configuration directory calling this method will
   * result in an error.
   *
   * @return \Drupal\Core\Config\FileStorage
   *
   * @deprecated in Drupal 8.0.x and will be removed before 9.0.0. Drupal core
   *   no longer creates an active directory.
   *
   * @throws \Exception
   */
  public static function getFileStorage() {
    return new FileStorage(config_get_config_directory(CONFIG_ACTIVE_DIRECTORY));
  }

}
Check which collider collides
I'm trying to make a hook which can pick up and knock over other objects. The problem is that it has both a mesh collider and a box collider, so how do I check whether the hook is colliding with its box collider and not with its mesh collider?
Hello Iron,
Well in javascript you could do:
var Item : Transform;

function OnCollisionEnter (hit : Collision){
    var boxColl = Item.GetComponent(BoxCollider);
    if(hit.collider == boxColl){
        Grab();
    }
}
The Grab function is your code for picking the item up.
If you want the code for that too, just ask.
Seems like I had time for this answer today.
This code is just a bit different.
function OnCollisionEnter (hit : Collision){
    var boxColl = hit.gameObject.GetComponent(BoxCollider); // Collision itself has no GetComponent; go through gameObject
    if(hit.collider == boxColl){
        Grab(); //or call another script
    }
}
//Apply this to the object you want to pick up
//I didn't test this one, please tell me if it works or not.
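If what you actually need is to know which of the hook's own colliders took the hit, one possible (untested) approach is to inspect the contact points; ContactPoint.thisCollider reports the collider on the object receiving the message, and Grab is the same placeholder as above:

function OnCollisionEnter (hit : Collision){
    var myBox : BoxCollider = GetComponent(BoxCollider); // the hook's own box collider
    for (var contact : ContactPoint in hit.contacts) {
        if (contact.thisCollider == myBox) {
            Grab(); // the contact landed on the box collider, not the mesh collider
            break;
        }
    }
}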
Don’t forget: don’t just feed the scripts in unity answers, read them, change them, use them, LEARN
70-483 Programming in C#
$25.00
• Passing Score: 800
• Time Limit: 120 min
• File Version: 20.1
• Q&A: 280
• Updated December 2022
Description
Exam Demo
70-483 Programming in C#
QUESTION 1
You are developing an application that includes a class named Order. The application will store a collection of Order objects.
The collection must meet the following requirements:
Use strongly typed members.
Process Order objects in first-in-first-out order.
Store values for each Order object.
Use zero-based indices.
You need to use a collection type that meets the requirements.
Which collection type should you use?
A. Queue<T>
B. SortedList
C. LinkedList<T>
D. HashTable
E. Array<T>
Correct Answer: A
Explanation:
Queues are useful for storing messages in the order they were received for sequential processing. Objects stored in a Queue<T> are inserted at one
end and removed from the other.
References: http://msdn.microsoft.com/en-us/library/7977ey2c.aspx
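As a quick illustration of the FIFO behaviour described above (a minimal sketch, not part of the original exam material):

using System;
using System.Collections.Generic;

class QueueDemo
{
    static void Main()
    {
        var orders = new Queue<string>();    // strongly typed members
        orders.Enqueue("order-1");           // inserted at one end
        orders.Enqueue("order-2");
        Console.WriteLine(orders.Dequeue()); // prints "order-1": first in, first out
    }
}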
QUESTION 2
You are developing an application. The application calls a method that returns an array of integers named employeeIds. You define an integer variable
named employeeIdToRemove and assign a value to it. You declare an array named filteredEmployeeIds.
You have the following requirements:
Remove duplicate integers from the employeeIds array.
Sort the array in order from the highest value to the lowest value.
Remove the integer value stored in the employeeIdToRemove variable from the employeeIds array.
You need to create a LINQ query to meet the requirements.
Which code segment should you use?
[Exhibit image: 70-483-1]
A. Option A
B. Option B
C. Option C
D. Option D
Correct Answer: C
QUESTION 3
You are developing an application that includes the following code segment. (Line numbers are included for reference only.)
[Exhibit image: 70-483-2]
The GetAnimals() method must meet the following requirements:
Connect to a Microsoft SQL Server database.
Create Animal objects and populate them with data from the database.
Return a sequence of populated Animal objects.
You need to meet the requirements.
Which two actions should you perform? Each correct answer presents part of the solution.
NOTE: Each correct selection is worth one point.
A. Insert the following code segment at line 16:
while(sqlDataReader.NextResult())
B. Insert the following code segment at line 13:
sqlConnection.Open();
C. Insert the following code segment at line 13:
sqlConnection.BeginTransaction();
D. Insert the following code segment at line 16:
while(sqlDataReader.Read())
E. Insert the following code segment at line 16:
while(sqlDataReader.GetValues())
Correct Answer: BD
Explanation:
SqlConnection.Open – Opens a database connection with the property settings specified by the ConnectionString.
SqlDataReader.Read – Advances the SqlDataReader to the next record.
References:
http://msdn.microsoft.com/en-us/library/system.data.sqlclient.sqlconnection.open.aspx
http://msdn.microsoft.com/en-us/library/system.data.sqlclient.sqldatareader.read.aspx
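A minimal sketch of how the two inserted lines fit together; the Animal table and column names here are invented for illustration:

// Requires: using System.Collections.Generic; using System.Data.SqlClient;
private static IEnumerable<Animal> GetAnimals(string connectionString)
{
    using (var sqlConnection = new SqlConnection(connectionString))
    using (var sqlCommand = new SqlCommand("SELECT Id, Name FROM Animal", sqlConnection))
    {
        sqlConnection.Open();                        // line 13: open the connection first
        using (var sqlDataReader = sqlCommand.ExecuteReader())
        {
            while (sqlDataReader.Read())             // line 16: advance row by row
            {
                yield return new Animal
                {
                    Id = sqlDataReader.GetInt32(0),
                    Name = sqlDataReader.GetString(1)
                };
            }
        }
    }
}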
QUESTION 4
DRAG DROP
You are developing a custom collection named LoanCollection for a class named Loan class.
You need to ensure that you can process each Loan object in the LoanCollection collection by using a foreach loop.
How should you complete the relevant code? (To answer, drag the appropriate code segments to the correct locations in the answer area. Each code
segment may be used once, more than once, or not at all. You may need to drag the split bar between panes or scroll to view content.)
Select and Place:
[Exhibit image: 70-483-3]
Correct Answer:
[Exhibit image: 70-483-4]
QUESTION 5
You are developing an application that uses the Microsoft ADO.NET Entity Framework to retrieve order information from a Microsoft SQL Server
database. The application includes the following code. (Line numbers are included for reference only.)
[Exhibit image: 70-483-5]
The application must meet the following requirements:
Return only orders that have an OrderDate value other than null.
Return only orders that were placed in the year specified in the OrderDate property or in a later year.
You need to ensure that the application meets the requirements.
Which code segment should you insert at line 08?
A. Where order.OrderDate.Value != null && order.OrderDate.Value.Year >= year
B. Where order.OrderDate.Value == null && order.OrderDate.Value.Year == year
C. Where order.OrderDate.HasValue && order.OrderDate.Value.Year == year
D. Where order.OrderDate.Value.Year == year
Correct Answer: A
Explanation:
For the requirement to use an OrderDate value other than null use:
OrderDate.Value != null
For the requirement to use an OrderDate value for this year or a later year use:
OrderDate.Value>= year
cpp::CParserNumeric
This class implements a parser for integer and floating-point values written in decimal form, following the C++ language standard for decimal literals.
More information can be found in the comments below. Overridden methods are described in the corresponding base class or interface.
class CParserNumeric :public virtual IParser, public CReleasable
{
public:
// IParser methods
bool Parse(size_t &index, Token &token) override;
CParserNumeric(IReader *reader, ILexic *lexic);
};
Namespace: nitisa::script::cpp
Include: Nitisa/Modules/Script/Parsers/C++/ParserNumeric.h
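A hypothetical usage sketch (the reader and lexic objects, and the Token type's exact namespace, are assumptions; consult the NTL documentation for the real setup):

// Assumes "reader" and "lexic" are existing IReader*/ILexic* instances.
CParserNumeric *parser{ new CParserNumeric{ reader, lexic } };
size_t index{ 0 };
Token token;
if (parser->Parse(index, token))
{
    // "token" now describes the numeric literal found at "index"
}
parser->Release(); // CReleasable objects are released, not deleted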
How do I get an empty array of any size in Python?
Basically I want the Python equivalent of this in C:
int a[x];
but in Python I declare an array as:
a = []
and the problem is that I want to assign arbitrary slots values like:
a[4] = 1
but I can't do that with Python, since the array is empty.
If by "array" you mean a Python list, you can use
a = [0] * 10
or
a = [None] * 10
You can't do exactly what you want in Python (if I read you correctly). You have to enter values for each element of the list (or, as you called it, the array).
But try this:
a = [0 for x in range(N)]  # N = size of list you want
a[i] = 5                   # as long as i < N, you're okay
For lists of other types, use something other than 0. None is often a good choice too.
You can use numpy:
import numpy as np
Example of an empty array:
np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
       [ 2.13182611e-314, 3.06959433e-309]])
You can also extend a list with the extend method.
a = []
a.extend([None]*10)
a.extend([None]*20)
Just declare the list and append each element. For example:
a = []
a.append('first item')
a.append('second item')
February 22, 2024
The Importance of Gender Equality in Computer Science Education
Gender equality is a pressing issue in many fields, and computer science education is no exception. Despite the progress made in recent years, a significant gender gap remains. It is crucial to address this gap and encourage more girls and women to pursue computer science education, for several reasons.
Firstly, promoting gender equality in computer science education is essential for creating a diverse and inclusive industry. Different perspectives and experiences lead to innovation and better problem-solving. By encouraging more girls to participate in computer science education, we can ensure that the tech industry reflects the diversity of our society.
Secondly, closing the gender gap in computer science education can help bridge the gender wage gap. The tech industry offers high-paying jobs, and by encouraging more women to pursue careers in this field, we can empower them economically and contribute to reducing gender inequality.
Furthermore, addressing gender issues in computer science education allows us to challenge stereotypes and biases. Many girls are discouraged from pursuing computer science due to societal expectations and stereotypes that associate tech with masculinity. By breaking these stereotypes and promoting inclusivity, we can create a more equitable society.
Challenges Faced by Girls in Computer Science Education
There are several challenges that contribute to the gender gap in computer science education. One of the primary challenges is the lack of representation. When girls do not see many women in the field, they may feel like computer science is not for them. This lack of representation can lead to a lack of confidence and interest in pursuing computer science.
Another challenge is the gender bias that exists within educational systems. Research has shown that girls are often discouraged from pursuing STEM subjects, including computer science, due to biased teaching practices or subtle messaging that suggests these fields are more suitable for boys. Addressing these biases and creating an inclusive learning environment is crucial for encouraging girls to engage in computer science education.
Strategies to Promote Gender Equality in Computer Science Education
There are several strategies that can be implemented to promote gender equality in computer science education. Firstly, it is important to provide early exposure to computer science concepts and opportunities. Introducing coding and computational thinking at an early age can help dispel gender stereotypes and create a strong foundation for future learning.
Additionally, mentorship programs and role models play a vital role in encouraging girls to pursue computer science. By connecting girls with successful women in the tech industry, they can see firsthand that they belong in this field and have the potential to succeed.
Furthermore, creating inclusive learning environments where girls feel supported and valued is crucial. This can involve diverse teaching methods, collaborative projects, and promoting a growth mindset that emphasizes effort and perseverance over innate abilities.
The Way Forward: Building a More Inclusive Future
Addressing gender issues in computer science education requires collective efforts from educators, policymakers, and society as a whole. It is essential to challenge stereotypes, provide equal opportunities, and create supportive environments that encourage girls to pursue computer science.
By closing the gender gap in computer science education, we can unlock the potential of countless talented individuals and create a more diverse and innovative tech industry. Let us work together to build a future where gender equality is the norm in computer science education and beyond.
Java Collections Framework Analysis (5): HashSet
This article analyses the Set part of the Java collections framework, namely HashSet. The source analysis is based on JDK 1.8, using Android Studio; if anything in the analysis falls short, corrections are welcome!
HashSet overview
Class structure
public class HashSet<E>
    extends AbstractSet<E>
    implements Set<E>, Cloneable, java.io.Serializable
HashSet is a collection with no duplicate elements. It is implemented on top of HashMap, does not guarantee element order, and allows a null element. HashSet is not synchronized. Like the List interface, HashSet extends AbstractSet while implementing the Set interface. It implements Cloneable, overriding the clone() function so it can be cloned, and it implements java.io.Serializable, which means HashSet supports serialization and can be transmitted in serialized form.
Fields
Next we analyse the HashSet source in detail, starting with its fields.
static final long serialVersionUID = -5024744406713321676L;
// The underlying HashMap stores the HashSet's elements
private transient HashMap<E,Object> map;
// Since the Set only uses the HashMap's keys, a static constant Object is
// defined here to act as the HashMap's value
private static final Object PRESENT = new Object();
Strange, isn't it? HashSet internally uses HashMap as its data structure. That is interesting: we all know that a Set does not allow duplicates, so next we will find out exactly how duplicates are prevented.
Constructors
public HashSet() {
    map = new HashMap<>();
}
public HashSet(Collection<? extends E> c) {
    map = new HashMap<>(Math.max((int) (c.size()/.75f) + 1, 16));
    addAll(c);
}
public HashSet(int initialCapacity, float loadFactor) {
    map = new HashMap<>(initialCapacity, loadFactor);
}
public HashSet(int initialCapacity) {
    map = new HashMap<>(initialCapacity);
}
HashSet(int initialCapacity, float loadFactor, boolean dummy) {
    map = new LinkedHashMap<>(initialCapacity, loadFactor);
}
These are HashSet's constructors, and they are very simple: since the underlying structure is a HashMap, each constructor just initializes a HashMap. Only the last one differs, constructing a LinkedHashMap. That constructor is not public; it actually exists for LinkedHashSet, and its third parameter dummy carries no meaning; it only distinguishes this overload from the other constructors.
Other methods
Let's analyse HashSet's add method.
public boolean add(E e) {
    return map.put(e, PRESENT)==null;
}
See that? Very simple: it is just HashMap's put method. But notice that the value is PRESENT, the Object we declared at the beginning.
private static final Object PRESENT = new Object();
Here we really need to stop and ask: why do it this way?
Looking at private static final Object PRESENT = new Object(); you may wonder: a static constant Object serves as the HashMap's value, but since the map's values are meaningless here, why not just use null, as in private final Object PRESENT = null;? As we know, Java allocates the variable PRESENT on the stack and the object created by new on the heap, so new Object() occupies heap memory (an empty Object takes 8 bytes), whereas null allocates no heap memory at all. So why not use null? Think of one exception class: java.lang.NullPointerException. It is an absolute nightmare for Java programmers, an exception every Java developer runs into. You may think it is easy to fix when you see it, but sometimes it is not so easy. Java claims to have no pointers, yet you hit NullPointerException everywhere. So, to rule out NullPointerException at the source, what does wasting 8 bytes matter? In the code below you never again have to write if (xxx == null) { ... } else { ... }. Wonderful.
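The no-duplicates behaviour falls straight out of map.put's return value, as a quick sketch shows:

import java.util.HashSet;

public class HashSetDemo {
    public static void main(String[] args) {
        HashSet<String> set = new HashSet<>();
        System.out.println(set.add("a")); // true  - put returned null, the key was new
        System.out.println(set.add("a")); // false - key existed, put returned the old PRESENT
        System.out.println(set.size());   // 1
    }
}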
Next we analyse the other methods.
public boolean remove(Object o) {
    return map.remove(o)==PRESENT;
}
Removal likewise just calls HashMap's remove; nothing more to say. Readers familiar with the HashMap source can refer to the series of articles listed at the start.
Since the HashSet source really is quite simple, here are the remaining methods with brief comments.
Iterator
// iterator
public Iterator<E> iterator() {
    return map.keySet().iterator();
}
Size
public int size() {
    return map.size();
}
Is the set empty
// is the set empty
public boolean isEmpty() {
    return map.isEmpty();
}
Does it contain an element
// does it contain an element
public boolean contains(Object o) {
    return map.containsKey(o);
}
That is roughly the HashSet source. It is quite simple and is implemented mainly via HashMap. Let's wrap up with a summary of HashSet.
Summary
Since HashMap is implemented on a hash table, and the most important property of hash-table-based containers is fast access, HashSet's contains method, which uses HashMap's containsKey, is extremely fast.
HashSet is a collection with no duplicate elements. It is implemented by HashMap, does not guarantee element order, allows a null element, and is not synchronized.
About the author
I have focused on Android development for many years and like writing blog posts to summarize what I learn. The blog is also synced to my public account; you are welcome to follow it and learn together.
🔰 Manage components appearance with useToggle hook
Let's understand why you may need the useToggle hook
Usually when creating components like Dialogs, Menus, Modals, and Alerts, you need a flag that determines whether something should be displayed or not. With this flag, you need to provide 3 functions: open, close, and toggle. It will probably look like the following code:
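The embedded snippet did not survive extraction; it presumably showed the usual useState boilerplate that the paragraph describes. A reconstruction of that pattern (component and variable names are illustrative):

import { useState } from 'react';

const Modal = () => {
  const [opened, setOpened] = useState(false);

  const open = () => setOpened(true);
  const close = () => setOpened(false);
  const toggle = () => setOpened(prev => !prev);

  // ...render something when "opened" is true
  return null;
};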
Imagine that you have tons of components like mentioned one and in addition in your application logic, you need to determine whether to show something or hide.
The next case may require assigning some data when you want to display UI parts - for example, you want to edit user data, so you need to store somewhere the current user object, update this object, close the form, and reset the object to initial value or just null.
This code is easy to understand but hard to maintain - the same repetitive logic, hard coded in different places. You need to write repeated tests to cover these cases for every component that needs to show/hide UI or for components that store some data during toggling.
This is the moment when the lights turn on and we kick in with an easy-to-use and type-safe hook. This hook will be called useToggle - it was really hard for me to find the appropriate name for this hook.
How will we use the useToggle hook?
We want to make this hook as "flat" as possible to reduce the number of lines. So it will be used in the following way:
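The usage snippet is also missing; based on the API described in the sections below, the usage presumably looked something like this (property and argument names are my assumption):

const modal = useToggle();            // flag only
const userForm = useToggle<User>();   // flag plus assigned data

// somewhere in handlers / JSX:
userForm.open(user);  // show the form and remember which user is edited
userForm.close();     // hide the form again
modal.toggle();       // flip visibility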
It's insane how we reduced the code from the previous example. The full API of this hook is captured by the type definitions in the next section.
Creating type definitions in TypeScript
Contracts in terms of type-safety are required to achieve protection from typos, invalid usage of functions, and invalid usage of returned data. Let's create them in a separate file:
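The definitions file itself is missing, but given the description (a generic T with null as the default), it plausibly contained something along these lines (exact names in the article may differ):

// useToggle.models.ts - a sketch, not the article's verbatim code.
export interface UseToggleConfig<T = null> {
  opened?: boolean; // initial visibility
  data?: T;         // initially assigned data
}

export interface UseToggleReturn<T = null> {
  opened: boolean;
  data: T | null;
  open: (data?: T) => void;
  close: (data?: T) => void;
  toggle: (data?: T) => void;
}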
We've used generics T and we assigned the initial type to null. It means if we skip providing the generic type to hook, the default data type will be null.
Implementation process of the useToggle hook
Now it's time for implementation. With the before-created type definitions, we'll be safe during implementation. The accompanying gif (not captured here) showed the order and process of implementing against the defined contracts.
Ok, we know how it was implemented so let's explain each part of this implementation.
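The implementation snippet is missing as well; a minimal sketch that satisfies the assumed contracts above:

// useToggle.ts - a sketch under the assumed contracts above.
import { useState } from 'react';
import type { UseToggleConfig, UseToggleReturn } from './useToggle.models';

export const useToggle = <T = null>(
  config: UseToggleConfig<T> = {}
): UseToggleReturn<T> => {
  const [opened, setOpened] = useState(config.opened ?? false);
  const [data, setData] = useState<T | null>(config.data ?? null);

  // Assign new data only when it is explicitly provided.
  const assign = (next?: T) => {
    if (next !== undefined) setData(next);
  };

  return {
    opened,
    data,
    open: (next?: T) => { assign(next); setOpened(true); },
    close: (next?: T) => { assign(next); setOpened(false); },
    toggle: (next?: T) => { assign(next); setOpened(prev => !prev); },
  };
};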
Now is the time for unit tests!
Unit tests for useToggle hook
We'll use the react-testing-library module that allows us to test hooks. We'll try to go through the typical use case of this hook and we'll check if the state changed correctly.
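The test file did not survive extraction either; judging by the walkthrough in the next paragraph, it went something like this (assuming renderHook and act from the testing library):

import { renderHook, act } from '@testing-library/react';
import { useToggle } from './useToggle';

it('toggles visibility and carries data', () => {
  const { result } = renderHook(() => useToggle<number>({ opened: true, data: 1 }));

  act(() => result.current.close());
  expect(result.current.opened).toBe(false);

  act(() => result.current.open(2));
  expect(result.current.opened).toBe(true);
  expect(result.current.data).toBe(2);

  act(() => result.current.toggle());
  expect(result.current.opened).toBe(false);
});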
We started by assigning the initial state to our hook via configuration and then we closed, opened, toggled, and reassigned data. After each interaction, we checked the state.
Final result
There is a repository to play with the implemented code. The original post also repeated the full code here so you could copy and paste it and just use the useToggle hook.
That final listing covered the type definitions file, then the implementation, and, in the end, the really important part: the tests. Those snippets did not survive extraction, but they correspond to the sketches given in the sections above.
What did we learn in this lesson?
As you have probably seen, sometimes simple logic can be frustrating to work with if you need to repeat such logic in different places. This simple useToggle hook shows how you can wrap such logic inside a reusable, independent being.
What scenarios can be handled with this utility? It may be useful in creating any components like Modal, Menu, in simple words - to create something that appears and disappears.
In addition, we can assign additional data that may be useful when dealing with real business requirements or more complex components.
Everything is tested in a single place, so next time when you'll test your components you need to focus only on checking - is the useToggle hook used? If yes, you can skip testing these flags and data setup in your application/presentation-related components.
Changes between Version 9 and Version 10 of MscgenPlugin

Timestamp: Feb 14, 2022, 5:43:41 AM
Author: figaro
Comment: Cosmetic changes, move example to functional description

Modified: MscgenPlugin (v9 → v10)

@@ -7,7 +7,36 @@
 This plugin renders [http://www.mcternan.me.uk/mscgen/ mscgen] message sequence chart diagrams within a Trac wiki page:
 
-[[Image(msc1.png)]]
+[[Image(msc1.png, border=2)]]
 
 Mscgen is a small program that parses Message Sequence Chart descriptions and produces PNG, SVG, EPS or server side image maps as the output. Message Sequence Charts (MSCs) are a way of representing entities and interactions over some time period. Mscgen is a simplification of Graphviz. It is GPLv2 licensed.
+
+An example of the syntax used:
+
+{{{
+{{{
+#!mscgen
+msc {
+  hscale = "1";
+
+  a,b,c;
+
+  a->b [ label = "ab()"];
+  b->c [ label = "bc(TRUE)"];
+  c=>c [ label = "process(1)"];
+  c=>c [ label = "process(2)"];
+  ...;
+  c=>c [ label = "process(n)"];
+  c=>c [ label = "process(END)"];
+  a<<=c [ label = "callback()"];
+  --- [ label = "If more to run", ID="*" ];
+  a->a [ label = "next()"];
+  a->c [ label = "ac1()\nac2()"];
+  b<-c [ label = "cb(TRUE)"];
+  b->b [ label = "stalled(...)"];
+  a<-b [ label = "ab() = FALSE"];
+}
+
+}}}
+}}}
 
 == Bugs/Feature Requests

@@ -39,33 +68,4 @@
 General instructions on installing Trac plugins can be found on the [TracPlugins#InstallingaTracplugin TracPlugins] page.
 
-== Example
-
-{{{
-{{{
-#!mscgen
-msc {
-  hscale = "1";
-
-  a,b,c;
-
-  a->b [ label = "ab()"];
-  b->c [ label = "bc(TRUE)"];
-  c=>c [ label = "process(1)"];
-  c=>c [ label = "process(2)"];
-  ...;
-  c=>c [ label = "process(n)"];
-  c=>c [ label = "process(END)"];
-  a<<=c [ label = "callback()"];
-  --- [ label = "If more to run", ID="*" ];
-  a->a [ label = "next()"];
-  a->c [ label = "ac1()\nac2()"];
-  b<-c [ label = "cb(TRUE)"];
-  b->b [ label = "stalled(...)"];
-  a<-b [ label = "ab() = FALSE"];
-}
-
-}}}
-}}}
 
 == Recent Changes
Struct std::sync::Mutex
pub struct Mutex<T: ?Sized> { /* fields omitted */ }
A mutual exclusion primitive useful for protecting shared data
This mutex will block threads waiting for the lock to become available. The mutex can also be statically initialized or created via a new constructor. Each mutex has a type parameter which represents the data that it is protecting. The data can only be accessed through the RAII guards returned from lock and try_lock, which guarantees that the data is only ever accessed when the mutex is locked.
Poisoning
The mutexes in this module implement a strategy called "poisoning" where a mutex is considered poisoned whenever a thread panics while holding the mutex. Once a mutex is poisoned, all other threads are unable to access the data by default as it is likely tainted (some invariant is not being upheld).
For a mutex, this means that the lock and try_lock methods return a Result which indicates whether a mutex has been poisoned or not. Most usage of a mutex will simply unwrap() these results, propagating panics among threads to ensure that a possibly invalid invariant is not witnessed.
A poisoned mutex, however, does not prevent all access to the underlying data. The PoisonError type has an into_inner method which will return the guard that would have otherwise been returned on a successful lock. This allows access to the data, despite the lock being poisoned.
Examples
use std::sync::{Arc, Mutex};
use std::thread;
use std::sync::mpsc::channel;
const N: usize = 10;
// Spawn a few threads to increment a shared variable (non-atomically), and
// let the main thread know once all increments are done.
//
// Here we're using an Arc to share memory among threads, and the data inside
// the Arc is protected with a mutex.
let data = Arc::new(Mutex::new(0));
let (tx, rx) = channel();
for _ in 0..N {
let (data, tx) = (Arc::clone(&data), tx.clone());
thread::spawn(move || {
// The shared state can only be accessed once the lock is held.
// Our non-atomic increment is safe because we're the only thread
// which can access the shared state when the lock is held.
//
// We unwrap() the return value to assert that we are not expecting
// threads to ever fail while holding the lock.
let mut data = data.lock().unwrap();
*data += 1;
if *data == N {
tx.send(()).unwrap();
}
// the lock is unlocked here when `data` goes out of scope.
});
}
rx.recv().unwrap();
To recover from a poisoned mutex:
use std::sync::{Arc, Mutex};
use std::thread;
let lock = Arc::new(Mutex::new(0_u32));
let lock2 = lock.clone();
let _ = thread::spawn(move || -> () {
// This thread will acquire the mutex first, unwrapping the result of
// `lock` because the lock has not been poisoned.
let _guard = lock2.lock().unwrap();
// This panic while holding the lock (`_guard` is in scope) will poison
// the mutex.
panic!();
}).join();
// The lock is poisoned by this point, but the returned result can be
// pattern matched on to return the underlying guard on both branches.
let mut guard = match lock.lock() {
Ok(guard) => guard,
Err(poisoned) => poisoned.into_inner(),
};
*guard += 1;
It is sometimes necessary to manually drop the mutex guard to unlock it sooner than the end of the enclosing scope.
use std::sync::{Arc, Mutex};
use std::thread;
const N: usize = 3;
let data_mutex = Arc::new(Mutex::new(vec![1, 2, 3, 4]));
let res_mutex = Arc::new(Mutex::new(0));
let mut threads = Vec::with_capacity(N);
(0..N).for_each(|_| {
let data_mutex_clone = Arc::clone(&data_mutex);
let res_mutex_clone = Arc::clone(&res_mutex);
threads.push(thread::spawn(move || {
let mut data = data_mutex_clone.lock().unwrap();
// This is the result of some important and long-ish work.
let result = data.iter().fold(0, |acc, x| acc + x * 2);
data.push(result);
drop(data);
*res_mutex_clone.lock().unwrap() += result;
}));
});
let mut data = data_mutex.lock().unwrap();
// This is the result of some important and long-ish work.
let result = data.iter().fold(0, |acc, x| acc + x * 2);
data.push(result);
// We drop the `data` explicitly because it's not necessary anymore and the
// thread still has work to do. This allow other threads to start working on
// the data immediately, without waiting for the rest of the unrelated work
// to be done here.
//
// It's even more important here than in the threads because we `.join` the
// threads after that. If we had not dropped the mutex guard, a thread could
// be waiting forever for it, causing a deadlock.
drop(data);
// Here the mutex guard is not assigned to a variable and so, even if the
// scope does not end after this line, the mutex is still released: there is
// no deadlock.
*res_mutex.lock().unwrap() += result;
threads.into_iter().for_each(|thread| {
thread
.join()
.expect("The thread creating or execution failed !")
});
assert_eq!(*res_mutex.lock().unwrap(), 800);
Implementations
impl<T> Mutex<T>
pub fn new(t: T) -> Mutex<T>
Creates a new mutex in an unlocked state ready for use.
Examples
use std::sync::Mutex;
let mutex = Mutex::new(0);
impl<T: ?Sized> Mutex<T>
pub fn lock(&self) -> LockResult<MutexGuard<'_, T>>
Acquires a mutex, blocking the current thread until it is able to do so.
This function will block the local thread until it is available to acquire the mutex. Upon returning, the thread is the only thread with the lock held. An RAII guard is returned to allow scoped unlock of the lock. When the guard goes out of scope, the mutex will be unlocked.
The exact behavior on locking a mutex in the thread which already holds the lock is left unspecified. However, this function will not return on the second call (it might panic or deadlock, for example).
Errors
If another user of this mutex panicked while holding the mutex, then this call will return an error once the mutex is acquired.
Panics
This function might panic when called if the lock is already held by the current thread.
Examples
use std::sync::{Arc, Mutex};
use std::thread;
let mutex = Arc::new(Mutex::new(0));
let c_mutex = mutex.clone();
thread::spawn(move || {
*c_mutex.lock().unwrap() = 10;
}).join().expect("thread::spawn failed");
assert_eq!(*mutex.lock().unwrap(), 10);
pub fn try_lock(&self) -> TryLockResult<MutexGuard<'_, T>>[src]
Attempts to acquire this lock.
If the lock could not be acquired at this time, then Err is returned. Otherwise, an RAII guard is returned. The lock will be unlocked when the guard is dropped.
This function does not block.
Errors
If another user of this mutex panicked while holding the mutex, then this call will return failure if the mutex would otherwise be acquired.
Examples
use std::sync::{Arc, Mutex};
use std::thread;
let mutex = Arc::new(Mutex::new(0));
let c_mutex = mutex.clone();
thread::spawn(move || {
let mut lock = c_mutex.try_lock();
if let Ok(ref mut mutex) = lock {
**mutex = 10;
} else {
println!("try_lock failed");
}
}).join().expect("thread::spawn failed");
assert_eq!(*mutex.lock().unwrap(), 10);
pub fn is_poisoned(&self) -> bool[src]1.2.0
Determines whether the mutex is poisoned.
If another thread is active, the mutex can still become poisoned at any time. You should not trust a false value for program correctness without additional synchronization.
Examples
use std::sync::{Arc, Mutex};
use std::thread;
let mutex = Arc::new(Mutex::new(0));
let c_mutex = mutex.clone();
let _ = thread::spawn(move || {
let _lock = c_mutex.lock().unwrap();
panic!(); // the mutex gets poisoned
}).join();
assert_eq!(mutex.is_poisoned(), true);
pub fn into_inner(self) -> LockResult<T> where
T: Sized
[src]1.6.0
Consumes this mutex, returning the underlying data.
Errors
If another user of this mutex panicked while holding the mutex, then this call will return an error instead.
Examples
use std::sync::Mutex;
let mutex = Mutex::new(0);
assert_eq!(mutex.into_inner().unwrap(), 0);
pub fn get_mut(&mut self) -> LockResult<&mut T>[src]1.6.0
Returns a mutable reference to the underlying data.
Since this call borrows the Mutex mutably, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist.
Errors
If another user of this mutex panicked while holding the mutex, then this call will return an error instead.
Examples
use std::sync::Mutex;
let mut mutex = Mutex::new(0);
*mutex.get_mut().unwrap() = 10;
assert_eq!(*mutex.lock().unwrap(), 10);
Trait Implementations
impl<T: ?Sized + Debug> Debug for Mutex<T>[src]
impl<T: ?Sized + Default> Default for Mutex<T>[src]1.10.0
fn default() -> Mutex<T>[src]
Creates a Mutex<T>, with the Default value for T.
impl<T: ?Sized> Drop for Mutex<T>[src]
impl<T> From<T> for Mutex<T>[src]1.24.0
fn from(t: T) -> Self[src]
Creates a new mutex in an unlocked state ready for use. This is equivalent to Mutex::new.
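A minimal illustration (not from the original docs) — the conversion behaves exactly like calling Mutex::new:
use std::sync::Mutex;
let mutex = Mutex::from(42);
assert_eq!(mutex.into_inner().unwrap(), 42);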
impl<T: ?Sized> RefUnwindSafe for Mutex<T>[src]1.12.0
impl<T: ?Sized + Send> Send for Mutex<T>[src]
impl<T: ?Sized + Send> Sync for Mutex<T>[src]
impl<T: ?Sized> UnwindSafe for Mutex<T>[src]1.9.0
Auto Trait Implementations
impl<T: ?Sized> Unpin for Mutex<T> where
T: Unpin
Blanket Implementations
impl<T> Any for T where
T: 'static + ?Sized
[src]
impl<T> Borrow<T> for T where
T: ?Sized
[src]
impl<T> BorrowMut<T> for T where
T: ?Sized
[src]
impl<T> From<!> for T[src]
impl<T> From<T> for T[src]
impl<T, U> Into<U> for T where
U: From<T>,
[src]
impl<T, U> TryFrom<U> for T where
U: Into<T>,
[src]
type Error = Infallible
The type returned in the event of a conversion error.
impl<T, U> TryInto<U> for T where
U: TryFrom<T>,
[src]
type Error = <U as TryFrom<T>>::Error
The type returned in the event of a conversion error.
© 2010 The Rust Project Developers
Licensed under the Apache License, Version 2.0 or the MIT license, at your option.
https://doc.rust-lang.org/std/sync/struct.Mutex.html
Plausible deniability for node operators
There is no PLAN regarding censorship. It’s a community driven project.
And while DFINITY still holds the reins for now (thanks to liquid democracy and neurons defaulting to following DFINITY) the plan (small “p”) is to not be in this situation for long and actually let the community drive it entirely. At that point, if the community wants moderation, we’ll have moderation. If they want anarchy, we’ll have anarchy. But for the time being I’m actually comforted by DFINITY still having enough control to keep the thing afloat.
5 Likes
Thanks, yeah — exactly, DFINITY still holds the reins and the community can't really take it in whatever direction at this point. That's why I need to know whether I should invest any more of my time into this.
This wouldn’t apply to the case of DMCA/copyright situations, but I actually wonder if there’s a very large lawsuit from investors waiting for DFINITY in here, depending on how things play out.
– You have the CEO of the Foundation running around making repeated statements that now appear like they could end up being very invalid and essentially take the teeth out of the entire value proposition.
Or from Dapp developers who chose to build on the Internet Computer because of explicit claims about lack of “platform risk”, built Dapps that were at the very least legal at the time they deployed, and then end up getting censored.
These are pretty serious things, when you start talking about misleading statements. The FOUNDATION’s already got a bit of a reputation for what happened during the Genesis unlock.
2 Likes
Actually, I was also trapped into thinking the same way you do, but after reviewing the web, it appears that Dominic Williams's statements were targeting private censorship, and NEVER government censorship:
Still, it's disappointing not to see him here while he's posting on Twitter all the time, and not always wisely.
Let's imagine that 10 years from now the IC becomes what it ought to be:
Since all ICP owners are supposed to have been KYCed, it would be a highway for governments anywhere in the world to force ICP/neuron owners to vote for removing this or that. The more ICP/neuron owners a government has among its citizens, the more it can control what is published on the worldwide internet :slight_smile: .
1 Like
Thanks for the links,
I predict government censorship initiatives over the next decade (and new cryptocurrency regulations) will seriously hamper the original vision of the Internet Computer, but I can understand the strong case to say “governments having to explicitly pass new laws in order to make content on the IC censorable is a very high bar”.
Regardless, what I'd really like to hear from Foundation members is how the fact that countries around the world have very different laws is going to work out in practice with this…
Thus, I can see the most resilient scenario as enacting the ideas around a free market for node providers.
If not, then a lot of DeFi initiatives and other things in crypto land that are currently not outlawed are basically dead in the water…
From a public relations perspective, I think the lower bound on the case that DFINITY has to effectively make here is that the censorship on the Internet Computer will be less than the vulnerability of other blockchains to a similar fate – as Dom likes to point out about Ethereum.
I just see a vision where…without the ability for node operators to decide which content they are willing to serve through some kind of market system, the whole thing is going to get very dystopian very fast.
A lot of the innovations (especially the IID) can snap back into the opposite of their stated purpose very quickly.
I say give the hardest core libertarians a way to exercise their beliefs at the same time as giving people who rely on government to determine what is acceptable behavior and what isn’t.
Some people want to get as censorship happy as governments are going to…fine, as long as some people who oppose that have the right to take on that risk in IC-Land as well.
The thing is, who will take the risk of hosting illegal content if a government wants to censor it? In France, for instance, they effectively switched off BitTorrent by fining you when you upload copyrighted content.
If you host for everybody, then you can certainly be spotted. The only way I can see would be to exchange hosting: you host what is forbidden abroad, and foreigners host what is forbidden in your jurisdiction.
– People who are in a situation where:
They live in a country where content that is illegal in another jurisdiction is not illegal in their home country
AND
They believe strongly that the controversial content should not be censored
Point is that by giving people the option to do so, you let every node operator decide how much risk they believe is worth it for various content pieces.
The network has been consistently marketed by Dom as resistant to the kind of regulation that Uniswap faced when it had to delist coins though, that narrative seems to be falling apart quite quickly.
3 Likes
People understand what they are dealing with, and people are smart enough to know that 100% censorship resistance is impossible for a blockchain that wants real mass adoption. And I believe that is not what the IC is aiming to be. People who prefer anarchy can use other protocols, such as the dark web, the Tor network, etc. These kinds of anarchy protocols, IMO, are never going to get real mass adoption; they only serve a niche market.
In its current state, the IC already makes censorship harder than it is on web 2.0. As the IC grows and evolves in the future, it will become even harder to censor (but censorship remains possible if the community decides to vote for it).
1 Like
Actually, one of the use cases of IC Dom brought up in his seminal 20-year plan Medium article is a decentralized form of Uber:
If rapid adoption resulted, powerful competitors might be irked and try to slow things down with lawsuits, but here Open Rides would have an advantage: open internet services run autonomously as part of the fabric of the internet, here in the mode of an advanced P2P protocol that connects drivers with riders, and code cannot easily be stopped. As autonomous code on the internet, Open Rides might be made instantly available in all territories around the world, without expensive negotiations with regional governments who are doing the bidding of local taxi monopolies wishing to protect their turf
Not sure how that would work here… where do we draw the line between submitting a canister takedown proposal and not?
Side question, since I saw it brought up in the other thread:
Can’t governments treat the NNS (and other DAOs) as legal entities, just like they treat corporations as legal entities?
Then, they could sue them and extract penalties, I guess? How would that work? Who would represent the NNS in court? Is there even a concept of “management” for DAOs? If so, isn’t a DAO basically just a more digitalized corporation at that point?
Multi-national corporations, multi-jurisdictional DAOs… is there a legal difference?
1 Like
I suggest rebrand to: Internet Corporation
and hire an army of lawyers with the NNS community fund :upside_down_face:
“You can send out a DMCA notice, not just for infringing material, but also for any indices, references, or pointers that lead to infringing material.”
Does this apply to boundary nodes at all?
I agree this will set an important precedent, and ideally the nodes should be not in a position to deal with any legal claims. Just how to get there?
Through technical improvements? Will this actually solve the problem as to nodes getting served?
Through courts? Does this even make sense?
Could doing nothing result in nodes simply getting blacklisted/blocked by service providers?
Could a node’s decision to have a canister blocked/removed be seen as admitting responsibility?
I feel like we've reached a crossroads here, especially on where to draw the line of who is accountable in the end. Does the NNS vote on every canister that might be seen as infringing on others' rights? Does that mean the NNS is responsible for all content on the IC?
I’d like to add to the list of proposals - Dfinity could look at separating the data layer from the compute layer, and distributing the data across many laptop-grade nodes. Using something like erasure encoding could help ensure that data is split across many nodes and not be lost. It will increase latency and also increase the bandwidth requirements but may have lower bandwidth requirements than shuffling.
Erasure encoding data and making having stateless clients was one of the directions considered for ETH 2.0. @lastmjs
In addition to plausible deniability for replica node operators, another key piece of the solution to the censorship problem may be making boundary node operators the actual censors. Please explore this idea with us: Boundary Nodes as Censors
2 Likes
So whatever happened with all of this?
We goin’ IC Orwell, or did the things brought up in this thread get advanced in some capacity / resolved for the moment through some kind of vote or anything?
1 Like
A few ideas discussed were added to the roadmap IC Roadmap Milestones for 2022 (Sneak Preview)
|
__label__pos
| 0.636931 |
Notes from a Development Run with ASP.NET Core WebApi 5.0 + Dapper + MySQL + Redis + Docker
Preface
I hadn't touched C# for about three years — I currently work full-time on front-end development. Recently I found some time to look at ASP.NET Core again; the Core version number has reached 5.0 and it keeps getting better. Below I'll record a development run from the past few days using ASP.NET Core WebApi + Dapper + MySQL + Redis + Docker.
Project Structure
The final project structure is shown below: CodeUin.Dapper is the data access layer and CodeUin.WebApi is the application layer; concrete business logic goes straight into the Controllers without further layering. CodeUin.Helpers holds the project-wide helper classes; helpers that only concern a single layer live in that layer's own Helpers folder.
Project structure
Environment Setup
MySQL
# Pull the image
docker pull mysql
# Run it
docker run -itd --name <container-name> -p 3306:3306 -e MYSQL_ROOT_PASSWORD=<your-password> mysql
If the client tool you use to connect to MySQL reports error 1251, it is because the client does not support the new authentication method. The fix is as follows.
Error 1251
# List the running containers
docker ps
# Enter the container
docker exec -it <container-name> bash
# Log in to MySQL
mysql -u root -p
# Check the authentication plugin in use
select host,user,plugin,authentication_string from mysql.user;
# Grant remote access
GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
# Switch the password back to the native authentication plugin
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY '<your-password>';
# Flush privileges
flush privileges;
Finally, test the connection with a MySQL client tool; I use Navicat Premium.
MySQL
Redis
# Pull the image
docker pull redis
# Run it
docker run -itd -p 6379:6379 redis
Test the connection with a Redis client tool; I use Another Redis DeskTop Manager.
Redis
.NET Environment
The server runs CentOS 8 with .NET SDK version 5.0. Here is how I installed the .NET SDK and the .NET runtime on CentOS 8.
# Install the SDK
sudo dnf install dotnet-sdk-5.0
# Install the runtime
sudo dnf install aspnetcore-runtime-5.0
Verify the installation by checking the output of the dotnet --info command.
SDK
Creating the Project
Below we'll implement a small feature set: user registration, login, and fetching user information.
Data Access Layer
The design of this layer borrows from 玉龙雪山's architecture. I like this structure a lot — one look at it tells you what each piece is for; simple and clear.
First, create a new project named CodeUin.Dapper. It only provides interfaces, serving the business layer.
• Entities
• Holds the entity classes
• IRepository
• Holds the repository interfaces
• Repository
• Holds the repository interface implementations
• BaseModel
• Base class for the entities, holding the common fields
• DataBaseConfig
• Data access configuration class
• IRepositoryBase
• Holds the most basic repository interfaces — insert, delete, update, query, etc.
• RepositoryBase
• Concrete implementation of the basic repository interfaces
Dapper
Creating the BaseModel base class
This class lives in the project root; its purpose is to pull the fields shared by all database entities into one place.
using System;
namespace CodeUin.Dapper
{
/// <summary>
/// Base entity class
/// </summary>
public class BaseModel
{
/// <summary>
/// Primary key Id
/// </summary>
public int Id { get; set; }
/// <summary>
/// Creation time
/// </summary>
public DateTime CreateTime { get; set; }
}
}
Creating the DataBaseConfig class
This class lives in the project root. I'm using MySQL here, so the following dependency packages need to be installed; if you use a different database, install the corresponding packages instead.
Dependencies
The class code is as follows:
using MySql.Data.MySqlClient;
using System.Data;
namespace CodeUin.Dapper
{
public class DataBaseConfig
{
private static string MySqlConnectionString = @"Data Source=<database-host>;Initial Catalog=codeuin;Charset=utf8mb4;User ID=root;Password=<database-password>;";
public static IDbConnection GetMySqlConnection(string sqlConnectionString = null)
{
if (string.IsNullOrWhiteSpace(sqlConnectionString))
{
sqlConnectionString = MySqlConnectionString;
}
IDbConnection conn = new MySqlConnection(sqlConnectionString);
conn.Open();
return conn;
}
}
}
Creating the IRepositoryBase interface
This interface lives in the project root and holds the common repository operations.
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
namespace CodeUin.Dapper
{
public interface IRepositoryBase<T>
{
Task<int> Insert(T entity, string insertSql);
Task Update(T entity, string updateSql);
Task Delete(int Id, string deleteSql);
Task<List<T>> Select(string selectSql);
Task<T> Detail(int Id, string detailSql);
}
}
Creating the RepositoryBase class
This class lives in the project root and is the concrete implementation of the IRepositoryBase interface.
using Dapper;
using System.Collections.Generic;
using System.Data;
using System.Linq;
using System.Threading.Tasks;
namespace CodeUin.Dapper
{
public class RepositoryBase<T> : IRepositoryBase<T>
{
public async Task Delete(int Id, string deleteSql)
{
using (IDbConnection conn = DataBaseConfig.GetMySqlConnection())
{
await conn.ExecuteAsync(deleteSql, new { Id });
}
}
public async Task<T> Detail(int Id, string detailSql)
{
using (IDbConnection conn = DataBaseConfig.GetMySqlConnection())
{
return await conn.QueryFirstOrDefaultAsync<T>(detailSql, new { Id });
}
}
public async Task<List<T>> ExecQuerySP(string SPName)
{
using (IDbConnection conn = DataBaseConfig.GetMySqlConnection())
{
return await Task.Run(() => conn.Query<T>(SPName, null, null, true, null, CommandType.StoredProcedure).ToList());
}
}
public async Task<int> Insert(T entity, string insertSql)
{
using (IDbConnection conn = DataBaseConfig.GetMySqlConnection())
{
return await conn.ExecuteAsync(insertSql, entity);
}
}
public async Task<List<T>> Select(string selectSql)
{
using (IDbConnection conn = DataBaseConfig.GetMySqlConnection())
{
return await Task.Run(() => conn.Query<T>(selectSql).ToList());
}
}
public async Task Update(T entity, string updateSql)
{
using (IDbConnection conn = DataBaseConfig.GetMySqlConnection())
{
await conn.ExecuteAsync(updateSql, entity);
}
}
}
}
Good — the base classes are essentially done. Next, create a Users class and define a few common operations.
Creating the Users entity
This class lives in the Entities folder and inherits from BaseModel.
namespace CodeUin.Dapper.Entities
{
/// <summary>
/// Users table
/// </summary>
public class Users : BaseModel
{
/// <summary>
/// User name
/// </summary>
public string UserName { get; set; }
/// <summary>
/// Password
/// </summary>
public string Password { get; set; }
/// <summary>
/// Password salt
/// </summary>
public string Salt { get; set; }
/// <summary>
/// Email
/// </summary>
public string Email { get; set; }
/// <summary>
/// Mobile number
/// </summary>
public string Mobile { get; set; }
/// <summary>
/// Gender
/// </summary>
public int Gender { get; set; }
/// <summary>
/// Age
/// </summary>
public int Age { get; set; }
/// <summary>
/// Avatar
/// </summary>
public string Avatar { get; set; }
/// <summary>
/// Soft-delete flag
/// </summary>
public int IsDelete { get; set; }
}
}
Creating the IUserRepository interface
This interface lives in the IRepository folder, inherits IRepositoryBase, and defines some additional operations.
using CodeUin.Dapper.Entities;
using System;
using System.Collections.Generic;
using System.Threading.Tasks;
namespace CodeUin.Dapper.IRepository
{
public interface IUserRepository : IRepositoryBase<Users>
{
Task<List<Users>> GetUsers();
Task<int> AddUser(Users entity);
Task DeleteUser(int id);
Task<Users> GetUserDetail(int id);
Task<Users> GetUserDetailByEmail(string email);
}
}
Creating the UserRepository class
This class lives in the Repository folder, inherits RepositoryBase and implements IUserRepository — it is the concrete implementation of the IUserRepository interface.
using CodeUin.Dapper.Entities;
using CodeUin.Dapper.IRepository;
using Dapper;
using System.Collections.Generic;
using System.Data;
using System.Threading.Tasks;
namespace CodeUin.Dapper.Repository
{
public class UserRepository : RepositoryBase<Users>, IUserRepository
{
public async Task DeleteUser(int id)
{
string deleteSql = "DELETE FROM Users WHERE Id=@Id";
await Delete(id, deleteSql);
}
public async Task<Users> GetUserDetail(int id)
{
string detailSql = @"SELECT Id, Email, UserName, Mobile, Password, Age, Gender, CreateTime,Salt, IsDelete FROM Users WHERE Id=@Id";
return await Detail(id, detailSql);
}
public async Task<Users> GetUserDetailByEmail(string email)
{
string detailSql = @"SELECT Id, Email, UserName, Mobile, Password, Age, Gender, CreateTime, Salt, IsDelete FROM Users WHERE Email=@email";
using (IDbConnection conn = DataBaseConfig.GetMySqlConnection())
{
return await conn.QueryFirstOrDefaultAsync<Users>(detailSql, new { email });
}
}
public async Task<List<Users>> GetUsers()
{
string selectSql = @"SELECT * FROM Users";
return await Select(selectSql);
}
public async Task<int> AddUser(Users entity)
{
string insertSql = @"INSERT INTO Users (UserName, Gender, Avatar, Mobile, CreateTime, Password, Salt, IsDelete, Email) VALUES (@UserName, @Gender, @Avatar, @Mobile, now(),@Password, @Salt, @IsDelete,@Email);SELECT LAST_INSERT_ID();";
return await Insert(entity, insertSql);
}
}
}
Done! Next we need to create the database and tables by hand — they are not generated automatically as with EF. With Dapper you essentially write raw SQL; if you want EF-like usage, you need to install the extra Dapper.Contrib extension.
The database table schema is simple, as follows.
DROP TABLE IF EXISTS `Users`;
CREATE TABLE `Users` (
`Id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'Primary key',
`Email` varchar(255) DEFAULT NULL COMMENT 'Email',
`UserName` varchar(20) DEFAULT NULL COMMENT 'User name',
`Mobile` varchar(11) DEFAULT NULL COMMENT 'Mobile number',
`Age` int(11) DEFAULT NULL COMMENT 'Age',
`Gender` int(1) DEFAULT '0' COMMENT 'Gender',
`Avatar` varchar(255) DEFAULT NULL COMMENT 'Avatar',
`Salt` varchar(255) DEFAULT NULL COMMENT 'Password salt',
`Password` varchar(255) DEFAULT NULL COMMENT 'Password',
`IsDelete` int(2) DEFAULT '0' COMMENT '0 = active, 1 = deleted',
`CreateTime` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'Creation time',
PRIMARY KEY (`Id`),
UNIQUE KEY `USER_MOBILE_INDEX` (`Mobile`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=10000 DEFAULT CHARSET=utf8mb4 COMMENT='User information table';
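As a rough sketch of that Dapper.Contrib style (hypothetical — not part of this project's code; the attributes and extension methods below come from Dapper.Contrib's documented API, and the sample values are placeholders):
using Dapper.Contrib.Extensions;

// Decorating the entity lets Dapper.Contrib generate the SQL for you;
// a property named Id is treated as the key by convention.
[Table("Users")]
public class Users : BaseModel
{
    public string UserName { get; set; }
    public string Email { get; set; }
    // ...remaining properties as above
}

using (var conn = DataBaseConfig.GetMySqlConnection())
{
    // Insert returns the generated key
    var id = conn.Insert(new Users { UserName = "demo", Email = "demo@codeuin.com" });
    var user = conn.Get<Users>(id);
    user.UserName = "demo2";
    conn.Update(user);
    conn.Delete(user);
}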
Good — that's roughly it for the data access layer. Now let's look at the concrete implementation of the application layer.
Application Layer
Create a WebApi project that mainly exposes the API services to the outside; the structure is as follows.
• Autofac
• Holds the IoC dependency-injection configuration
• AutoMapper
• Holds the configuration of entity-to-entity mappings
• Controllers
• Controllers; the concrete business logic is also written here
• Filters
• Holds the custom filters
• Helpers
• Holds some helper classes used in this layer
• Models
• Holds input/output/DTO entity classes
WebApi
Good, that's roughly the structure. Errors first — let's handle application exceptions and integrate logging.
Custom exception handling
Create an ErrorHandlingMiddleware middleware in the Helpers folder, plus an ErrorHandlingExtensions extension method that will be used in Startup.
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Http;
using Microsoft.Extensions.Logging;
using Newtonsoft.Json;
using System;
using System.Threading.Tasks;
namespace CodeUin.WebApi.Helpers
{
public class ErrorHandlingMiddleware
{
private readonly RequestDelegate next;
private readonly ILogger<ErrorHandlingMiddleware> _logger;
public ErrorHandlingMiddleware(RequestDelegate next, ILogger<ErrorHandlingMiddleware> logger)
{
this.next = next;
_logger = logger;
}
public async Task Invoke(HttpContext context)
{
try
{
await next(context);
}
catch (Exception ex)
{
_logger.LogError(ex.Message);
var statusCode = 500;
await HandleExceptionAsync(context, statusCode, ex.Message);
}
finally
{
var statusCode = context.Response.StatusCode;
var msg = "";
if (statusCode == 401)
{
msg = "未授权";
}
else if (statusCode == 404)
{
msg = "未找到服务";
}
else if (statusCode == 502)
{
msg = "请求错误";
}
else if (statusCode != 200)
{
msg = "未知错误";
}
if (!string.IsNullOrWhiteSpace(msg))
{
await HandleExceptionAsync(context, statusCode, msg);
}
}
}
// Catch the exception details and return the error message as JSON
private static Task HandleExceptionAsync(HttpContext context, int statusCode, string msg)
{
var result = JsonConvert.SerializeObject(new { Msg = msg, Code = statusCode });
context.Response.ContentType = "application/json;charset=utf-8";
return context.Response.WriteAsync(result);
}
}
// Extension method
public static class ErrorHandlingExtensions
{
public static IApplicationBuilder UseErrorHandling(this IApplicationBuilder builder)
{
return builder.UseMiddleware<ErrorHandlingMiddleware>();
}
}
}
Then add app.UseErrorHandling() in the Configure method of Startup; when the application throws an exception, our custom exception handling takes over.
public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
{
if (env.IsDevelopment())
{
app.UseDeveloperExceptionPage();
}
app.UseHttpsRedirection();
// Custom error-handling middleware
app.UseErrorHandling();
app.UseRouting();
app.UseAuthorization();
app.UseEndpoints(endpoints =>
{
endpoints.MapControllers();
});
}
Logging
I'm using NLog here; the dependency package needs to be installed in the project first.
Nlog
First, create an nlog.config configuration file in the project root with the following content.
<?xml version="1.0" encoding="utf-8" ?>
<nlog xmlns="http://www.nlog-project.org/schemas/NLog.xsd"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
autoReload="true"
internalLogLevel="Info"
internalLogFile="c:\temp\internal-nlog.txt">
<!-- enable asp.net core layout renderers -->
<extensions>
<add assembly="NLog.Web.AspNetCore"/>
</extensions>
<!-- the targets to write to -->
<targets>
<target xsi:type="File" name="allfile" fileName="${currentdir}\logs\nlog-all-${shortdate}.log"
layout="${longdate}|${event-properties:item=EventId_Id}|${uppercase:${level}}|${aspnet-request-ip}|${logger}|${message} ${exception:format=tostring}" />
<target xsi:type="Console" name="ownFile-web"
layout="${longdate}|${event-properties:item=EventId_Id}|${uppercase:${level}}|${logger}|${aspnet-request-ip}|${message} ${exception:format=tostring}|url: ${aspnet-request-url}|action: ${aspnet-mvc-action}" />
</targets>
<!-- rules to map from logger name to target -->
<rules>
<!--All logs, including from Microsoft-->
<logger name="*" minlevel="Info" writeTo="allfile" />
<!--Skip non-critical Microsoft logs and so log only own logs-->
<logger name="Microsoft.*" maxlevel="Info" final="true" />
<!-- BlackHole without writeTo -->
<logger name="*" minlevel="Info" writeTo="ownFile-web" />
</rules>
</nlog>
More configuration options are documented on the official site: nlog-project.org
Finally, integrate NLog in the Program entry point.
using Autofac.Extensions.DependencyInjection;
using Microsoft.AspNetCore.Hosting;
using Microsoft.Extensions.Hosting;
using Microsoft.Extensions.Logging;
using NLog.Web;
namespace CodeUin.WebApi
{
public class Program
{
public static void Main(string[] args)
{
NLogBuilder.ConfigureNLog("nlog.config");
CreateHostBuilder(args).Build().Run();
}
public static IHostBuilder CreateHostBuilder(string[] args) =>
Host.CreateDefaultBuilder(args)
.UseServiceProviderFactory(new AutofacServiceProviderFactory())
.ConfigureLogging(logging =>
{
logging.ClearProviders();
logging.AddConsole();
})
.ConfigureWebHostDefaults(webBuilder =>
{
webBuilder.UseStartup<Startup>();
})
.UseNLog();
}
}
Now we can use NLog directly; for an example of how, see the ErrorHandlingMiddleware class above.
Dependency Injection
We'll use Autofac to manage the dependencies between classes — Autofac is a superb .NET IoC container. First we need to install the dependency package.
Autofac
In the Autofac folder in the project root, create a CustomAutofacModule class to manage the dependencies between our classes.
using Autofac;
using CodeUin.Dapper.IRepository;
using CodeUin.Dapper.Repository;
namespace CodeUin.WebApi.Autofac
{
public class CustomAutofacModule:Module
{
protected override void Load(ContainerBuilder builder)
{
builder.RegisterType<UserRepository>().As<IUserRepository>();
}
}
}
Add the following method to the Startup class:
public void ConfigureContainer(ContainerBuilder builder)
{
// Dependency injection
builder.RegisterModule(new CustomAutofacModule());
}
Entity Mapping
We'll use AutoMapper to handle mapping one object onto another — think of code like this:
// With dozens of properties this becomes quite terrifying
var users = new Users
{
Email = user.Email,
Password = user.Password,
UserName = user.UserName
};
// With AutoMapper it's much easier
var model = _mapper.Map<Users>(user);
Install the dependency package first.
Automapper
In the AutoMapper folder in the project root, create an AutoMapperConfig class to manage our mapping relationships.
using AutoMapper;
using CodeUin.Dapper.Entities;
using CodeUin.WebApi.Models;
namespace CodeUin.WebApi.AutoMapper
{
public class AutoMapperConfig : Profile
{
public AutoMapperConfig()
{
CreateMap<UserRegisterModel, Users>().ReverseMap();
CreateMap<UserLoginModel, Users>().ReverseMap();
CreateMap<UserLoginModel, UserModel>().ReverseMap();
CreateMap<UserModel, Users>().ReverseMap();
}
}
}
Then just add services.AddAutoMapper(AppDomain.CurrentDomain.GetAssemblies()) in the ConfigureServices method of Startup.
Using JWT
Next we integrate JWT to handle authorization and related concerns. First, install the dependency package.
JWT
Edit the appsettings.json file and add the JWT configuration.
{
"Logging": {
"LogLevel": {
"Default": "Information",
"Microsoft": "Warning",
"Microsoft.Hosting.Lifetime": "Information"
}
},
"AllowedHosts": "*",
"Jwt": {
"Key": "e816f4e9d7a7be785a", // 这个key必须大于16位数,非常生成的时候会报错
"Issuer": "codeuin.com"
}
}
Finally, enable JWT in the ConfigureServices method of the Startup class.
services.AddAuthentication(JwtBearerDefaults.AuthenticationScheme)
.AddJwtBearer(options =>
{
options.TokenValidationParameters = new TokenValidationParameters
{
ValidateIssuer = true,
ValidateAudience = true,
ValidateLifetime = true,
ClockSkew = TimeSpan.FromMinutes(5), // clock-skew tolerance for expiry, 5 minutes by default
ValidateIssuerSigningKey = true,
ValidIssuer = Configuration["Jwt:Issuer"],
ValidAudience = Configuration["Jwt:Issuer"],
IssuerSigningKey = new SymmetricSecurityKey(Encoding.UTF8.GetBytes(Configuration["Jwt:Key"]))
};
});
Good — our final Startup class ends up looking like this; the custom parameter validation is covered below.
using Autofac;
using AutoMapper;
using CodeUin.WebApi.Autofac;
using CodeUin.WebApi.Filters;
using CodeUin.WebApi.Helpers;
using Microsoft.AspNetCore.Authentication.JwtBearer;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.Hosting;
using Microsoft.IdentityModel.Tokens;
using System;
using System.Text;
namespace CodeUin.WebApi
{
public class Startup
{
public Startup(IConfiguration configuration)
{
Configuration = configuration;
}
public IConfiguration Configuration { get; }
public void ConfigureContainer(ContainerBuilder builder)
{
// Dependency injection
builder.RegisterModule(new CustomAutofacModule());
}
// This method gets called by the runtime. Use this method to add services to the container.
public void ConfigureServices(IServiceCollection services)
{
services.AddAuthentication(JwtBearerDefaults.AuthenticationScheme)
.AddJwtBearer(options =>
{
options.TokenValidationParameters = new TokenValidationParameters
{
ValidateIssuer = true,
ValidateAudience = true,
ValidateLifetime = true,
ClockSkew = TimeSpan.FromMinutes(5), // clock-skew tolerance for expiry, 5 minutes by default
ValidateIssuerSigningKey = true,
ValidIssuer = Configuration["Jwt:Issuer"],
ValidAudience = Configuration["Jwt:Issuer"],
IssuerSigningKey = new SymmetricSecurityKey(Encoding.UTF8.GetBytes(Configuration["Jwt:Key"]))
};
});
services.AddHttpContextAccessor();
// Use AutoMapper
services.AddAutoMapper(AppDomain.CurrentDomain.GetAssemblies());
// Disable automatic model-state validation
services.Configure<ApiBehaviorOptions>((options) =>
{
options.SuppressModelStateInvalidFilter = true;
});
// Use the custom validator
services.AddControllers(options =>
{
options.Filters.Add<ValidateModelAttribute>();
}).
AddJsonOptions(options =>
{
// Ignore null values
options.JsonSerializerOptions.IgnoreNullValues = true;
});
}
// This method gets called by the runtime. Use this method to configure the HTTP request pipeline.
public void Configure(IApplicationBuilder app, IWebHostEnvironment env)
{
if (env.IsDevelopment())
{
app.UseDeveloperExceptionPage();
}
app.UseHttpsRedirection();
// Custom error-handling middleware
app.UseErrorHandling();
// Authentication
app.UseAuthentication();
app.UseRouting();
app.UseAuthorization();
app.UseEndpoints(endpoints =>
{
endpoints.MapControllers();
});
}
}
}
Creating the model classes
I'll create three model classes: UserLoginModel for user login, UserRegisterModel for user registration, and UserModel for basic user information.
UserLoginModel and UserRegisterModel are validated automatically from the attributes configured on their properties, so there is no need to write separate validation logic in the controllers — a huge amount of work saved.
using System;
using System.ComponentModel.DataAnnotations;
namespace CodeUin.WebApi.Models
{
/// <summary>
/// User model
/// </summary>
public class UserModel
{
public int Id { get; set; }
public string Email { get; set; }
public string UserName { get; set; }
public string Mobile { get; set; }
public int Gender { get; set; }
public int Age { get; set; }
public string Avatar { get; set; }
}
public class UserLoginModel
{
[Required(ErrorMessage = "Please enter an email address")]
public string Email { get; set; }
[Required(ErrorMessage = "Please enter a password")]
public string Password { get; set; }
}
public class UserRegisterModel
{
[Required(ErrorMessage = "Please enter an email address")]
[EmailAddress(ErrorMessage = "Please enter a valid email address")]
public string Email { get; set; }
[Required(ErrorMessage = "Please enter a user name")]
[MaxLength(length: 12, ErrorMessage = "The user name cannot be longer than 12 characters")]
[MinLength(length: 2, ErrorMessage = "The user name cannot be shorter than 2 characters")]
public string UserName { get; set; }
[Required(ErrorMessage = "Please enter a password")]
[MaxLength(length: 20, ErrorMessage = "The password cannot be longer than 20 characters")]
[MinLength(length: 6, ErrorMessage = "The password cannot be shorter than 6 characters")]
public string Password { get; set; }
}
}
Validator
In the Filters folder in the project root, add a ValidateModelAttribute class. Action requests pass through this filter first; if the request does not satisfy the rules we defined, the error is returned directly.
The code is as follows.
using Microsoft.AspNetCore.Mvc;
using Microsoft.AspNetCore.Mvc.Filters;
using System.Linq;
namespace CodeUin.WebApi.Filters
{
public class ValidateModelAttribute : ActionFilterAttribute
{
public override void OnActionExecuting(ActionExecutingContext context)
{
if (!context.ModelState.IsValid)
{
var item = context.ModelState.Keys.ToList().FirstOrDefault();
//Return the message of the first failed validation parameter
context.Result = new BadRequestObjectResult(new
{
Code = 400,
Msg = context.ModelState[item].Errors[0].ErrorMessage
});
}
}
}
}
Adding a custom validation attribute
Sometimes we need to extend the rules with our own: just inherit from the ValidationAttribute class and implement the IsValid method. For example, here I validate Chinese mobile-phone numbers.
using System.ComponentModel.DataAnnotations;
using System.Text.RegularExpressions;
namespace CodeUin.WebApi.Filters
{
public class ChineMobileAttribute : ValidationAttribute
{
public override bool IsValid(object value)
{
if (!(value is string)) return false;
var val = (string)value;
return Regex.IsMatch(val, @"^[1]{1}[2,3,4,5,6,7,8,9]{1}\d{9}$");
}
}
}
Implementing login and registration
Let's implement a simple business requirement: user registration, login, and fetching user information. Other features are much the same — it's all just CRUD!
The data interfaces were already written in the data access layer; now it's time to handle the business logic, written directly in the Controllers.
Create a UsersController controller. The business logic is simple, so without further ado, the code is as follows.
using System;
using System.IdentityModel.Tokens.Jwt;
using System.Security.Claims;
using System.Text;
using System.Threading.Tasks;
using AutoMapper;
using CodeUin.Dapper.Entities;
using CodeUin.Dapper.IRepository;
using CodeUin.Helpers;
using CodeUin.WebApi.Models;
using Microsoft.AspNetCore.Authorization;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Configuration;
using Microsoft.Extensions.Logging;
using Microsoft.IdentityModel.Tokens;
namespace CodeUin.WebApi.Controllers
{
[Route("api/[controller]/[action]")]
[ApiController]
[Authorize]
public class UsersController : Controller
{
private readonly ILogger<UsersController> _logger;
private readonly IUserRepository _userRepository;
private readonly IMapper _mapper;
private readonly IConfiguration _config;
private readonly IHttpContextAccessor _httpContextAccessor;
public UsersController(ILogger<UsersController> logger, IUserRepository userRepository, IMapper mapper, IConfiguration config, IHttpContextAccessor httpContextAccessor)
{
_logger = logger;
_userRepository = userRepository;
_mapper = mapper;
_config = config;
_httpContextAccessor = httpContextAccessor;
}
[HttpGet]
public async Task<JsonResult> Get()
{
var userId = int.Parse(_httpContextAccessor.HttpContext.User.FindFirst(ClaimTypes.NameIdentifier).Value);
var userInfo = await _userRepository.GetUserDetail(userId);
if (userInfo == null)
{
return Json(new { Code = 200, Msg = "未找到该用户的信息" });
}
var outputModel = _mapper.Map<UserModel>(userInfo);
return Json(new { Code = 200, Data = outputModel }); ;
}
[HttpPost]
[AllowAnonymous]
public async Task<JsonResult> Login([FromBody] UserLoginModel user)
{
// Look up the user
var data = await _userRepository.GetUserDetailByEmail(user.Email);
// The account does not exist
if (data == null)
{
return Json(new { Code = 200, Msg = "Wrong account or password" });
}
user.Password = Encrypt.Md5(data.Salt + user.Password);
// The passwords do not match
if (!user.Password.Equals(data.Password))
{
return Json(new { Code = 200, Msg = "Wrong account or password" });
}
var userModel = _mapper.Map<UserModel>(data);
// Generate the token
var token = GenerateJwtToken(userModel);
// Store it in Redis
await new RedisHelper().StringSetAsync($"token:{data.Id}", token);
return Json(new
{
Code = 200,
Msg = "Login succeeded",
Data = userModel,
Token = token
});
}
[HttpPost]
[AllowAnonymous]
public async Task<JsonResult> Register([FromBody] UserRegisterModel user)
{
// Look up the user
var data = await _userRepository.GetUserDetailByEmail(user.Email);
if (data != null)
{
return Json(new { Code = 200, Msg = "This email address is already registered" });
}
var salt = Guid.NewGuid().ToString("N");
user.Password = Encrypt.Md5(salt + user.Password);
var users = new Users
{
Email = user.Email,
Password = user.Password,
UserName = user.UserName
};
var model = _mapper.Map<Users>(user);
model.Salt = salt;
await _userRepository.AddUser(model);
return Json(new { Code = 200, Msg = "注册成功" });
}
/// <summary>
/// Generate a JWT token
/// </summary>
/// <param name="user">User information</param>
/// <returns></returns>
private string GenerateJwtToken(UserModel user)
{
var securityKey = new SymmetricSecurityKey(Encoding.UTF8.GetBytes(_config["Jwt:Key"]));
var credentials = new SigningCredentials(securityKey, SecurityAlgorithms.HmacSha256);
var claims = new[] {
new Claim(JwtRegisteredClaimNames.Email, user.Email),
new Claim(JwtRegisteredClaimNames.Gender, user.Gender.ToString()),
new Claim(ClaimTypes.NameIdentifier,user.Id.ToString()),
new Claim(ClaimTypes.Name,user.UserName),
new Claim(ClaimTypes.MobilePhone,user.Mobile??""),
};
var token = new JwtSecurityToken(_config["Jwt:Issuer"],
_config["Jwt:Issuer"],
claims,
expires: DateTime.Now.AddMinutes(120),
signingCredentials: credentials);
return new JwtSecurityTokenHandler().WriteToken(token);
}
}
}
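The controller above uses Encrypt.Md5 and RedisHelper from CodeUin.Helpers, which the post never shows. A minimal sketch of what they might look like (hypothetical — based on System.Security.Cryptography and StackExchange.Redis with an assumed localhost address; the real CodeUin.Helpers implementation may differ):
using System;
using System.Security.Cryptography;
using System.Text;
using System.Threading.Tasks;
using StackExchange.Redis;
namespace CodeUin.Helpers
{
    public static class Encrypt
    {
        // Hash the input with MD5 and return it as a lowercase hex string
        public static string Md5(string input)
        {
            using var md5 = MD5.Create();
            var bytes = md5.ComputeHash(Encoding.UTF8.GetBytes(input));
            return BitConverter.ToString(bytes).Replace("-", "").ToLowerInvariant();
        }
    }
    public class RedisHelper
    {
        // One shared multiplexer per process; "localhost:6379" is an assumed address
        private static readonly Lazy<ConnectionMultiplexer> Connection =
            new(() => ConnectionMultiplexer.Connect("localhost:6379"));
        public Task StringSetAsync(string key, string value) =>
            Connection.Value.GetDatabase().StringSetAsync(key, value);
    }
}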
Finally, let's test the functionality, starting with registration.
First, verify that the incoming parameters are validated against the rules we defined.
Try entering an invalid email address!
Registration with an invalid email
OK, no problem — the result matches the validation attributes we added to UserRegisterModel. Next, test the fully valid case.
Registration succeeded
Registration succeeded, and querying the database confirms the row exists.
user
Now for the login API. Before calling it, let's first check that the configured authorization actually works by calling the get-user-info API without logging in.
Unauthorized
Calling it directly returns unauthorized, because we are not logged in and therefore have no token. So far so good — but we still need to check whether a correct token passes the authorization check.
Now we call the login API; on success it returns a token that all subsequent requests need, otherwise they are rejected as unauthorized.
First, test the wrong-password case.
Wrong password
The response is correct and matches our expectations. Now log in with the correct password and see whether we get the expected result.
Login succeeded
Login succeeded and the API returned the expected result. Finally, check whether the generated token was also written to Redis, as the code intends.
redis
No problem there either — exactly as expected.
Next, request the get-user-info API with a valid token and see whether it returns correctly.
The get-user-info API takes no parameters; we only add an Authorization header to the request with the valid token.
User info
We can fetch the user info correctly, which means the authorization part works end to end as well. Next, let's package and deploy to a Linux server with Docker.
Packaging and Deployment
Add a Dockerfile in the project root with the following content.
#See https://aka.ms/containerfastmode to understand how Visual Studio uses this Dockerfile to build your images for faster debugging.
FROM mcr.microsoft.com/dotnet/aspnet:5.0-buster-slim AS base
WORKDIR /app
EXPOSE 80
EXPOSE 443
FROM mcr.microsoft.com/dotnet/sdk:5.0-buster-slim AS build
WORKDIR /src
COPY ["CodeUin.WebApi/CodeUin.WebApi.csproj", "CodeUin.WebApi/"]
COPY ["CodeUin.Helpers/CodeUin.Helpers.csproj", "CodeUin.Helpers/"]
COPY ["CodeUin.Dapper/CodeUin.Dapper.csproj", "CodeUin.Dapper/"]
RUN dotnet restore "CodeUin.WebApi/CodeUin.WebApi.csproj"
COPY . .
WORKDIR "/src/CodeUin.WebApi"
RUN dotnet build "CodeUin.WebApi.csproj" -c Release -o /app/build
FROM build AS publish
RUN dotnet publish "CodeUin.WebApi.csproj" -c Release -o /app/publish
FROM base AS final
WORKDIR /app
COPY --from=publish /app/publish .
ENTRYPOINT ["dotnet", "CodeUin.WebApi.dll"]
Run the build command in the directory containing the Dockerfile:
# Look for the Dockerfile in the current folder (note the trailing dot)
docker build -t codeuin-api .
# List the images
docker images
# Save the image locally
docker save -o codeuin-api.tar codeuin-api
Finally, upload the saved image to the server and import it there.
Connect to the server over ssh and run the import commands in the directory where the package was uploaded.
# Load the image
docker load -i codeuin-api.tar
# Run the image
docker run -itd -p 8888:80 --name codeuin-api codeuin-api
# Check the running status
docker stats
At this point the whole deployment is done. Finally, hit the API on the server to check that everything is OK.
Request against the server
The final result is OK as well. That completes all the groundwork. All the code lives at github.com/xiazanzhang… — if it helps, feel free to use it as a reference.
Case Study: Setting Up an Unmanaged VPS Server with Hostever
Introduction: In this case study, we’ll walk through the process of setting up an unmanaged VPS (Virtual Private Server) with Hostever, Bangladesh’s leading BDIX web hosting company. From basic configurations to advanced optimizations, we’ll explore the steps involved in deploying a VPS server and provide solutions for various challenges along the way.
Scenario: ABC Corporation, a growing e-commerce startup based in Dhaka, Bangladesh, has decided to migrate its website to a VPS server for improved performance and scalability. With Hostever’s reputation for reliability and expertise in BDIX hosting, ABC Corporation has chosen Hostever to fulfill its hosting needs.
Solution:
1. Basic Setup:
• Selecting a Plan: ABC Corporation chooses a suitable VPS plan from Hostever’s offerings based on its resource requirements and budget.
• Provisioning the VPS: After selecting the plan, Hostever provisions the VPS server and provides ABC Corporation with login credentials to access the server.
2. Operating System Installation:
• Choosing an OS: ABC Corporation selects the operating system (OS) for its VPS server, such as CentOS, Ubuntu, or Debian.
• Installation: Using the server management panel provided by Hostever, ABC Corporation installs the chosen OS on the VPS server.
3. Initial Server Configuration:
• SSH Access: ABC Corporation establishes SSH access to the server using the provided credentials, allowing secure remote management.
• Network Configuration: Configuring network settings, including IP addresses, DNS configuration, and firewall rules, to ensure connectivity and security.
4. Software Installation and Setup of Unmanaged VPS:
• Web Server Installation: Installing a web server software such as Apache, Nginx, or LiteSpeed to serve web content.
• Database Installation: Setting up a database server, such as MySQL or PostgreSQL, to store website data.
• PHP Installation: Installing PHP for server-side scripting support, essential for dynamic web applications.
5. Security Hardening:
• Updating Software: Keeping the server’s software packages up to date with the latest security patches and updates.
• Firewall Configuration: Configuring a firewall (e.g., iptables or firewalld) to restrict unauthorized access to the server.
• SSH Key Authentication: Enabling SSH key authentication and disabling password-based authentication for enhanced security (see the sketch after this list).
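A minimal, illustrative hardening sketch (assuming a CentOS/firewalld host; the commands and service names are examples, not part of Hostever's documented procedure):
# Apply the latest security patches
sudo dnf update -y
# Allow only the services you need, then reload the firewall
sudo firewall-cmd --permanent --add-service=ssh
sudo firewall-cmd --permanent --add-service=http
sudo firewall-cmd --permanent --add-service=https
sudo firewall-cmd --reload
# After confirming key-based login works, disable password logins in /etc/ssh/sshd_config:
#   PasswordAuthentication no
#   PubkeyAuthentication yes
sudo systemctl restart sshd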
6. Performance Optimization of Unmanaged VPS:
• Caching Solutions: Implementing caching solutions like Varnish or Redis to improve website performance and reduce server load.
• Content Delivery Network (CDN): Integrating a CDN to distribute website content globally and improve page load times.
• Monitoring and Tuning: Monitoring server performance metrics and tuning configurations for optimal resource utilization.
7. Backup and Disaster Recovery:
• Regular Backups: Setting up automated backups of website files, databases, and server configurations to a remote location.
• Disaster Recovery Plan: Developing a disaster recovery plan outlining procedures for restoring the server in case of hardware failure or data loss.
8. SSL/TLS Certificate Installation:
• Securing website traffic with SSL/TLS encryption by installing an SSL certificate issued by a trusted Certificate Authority (CA) like Let's Encrypt or purchasing a commercial SSL certificate (see the example after this list).
• Configuring the web server to enforce HTTPS protocol for all website connections, ensuring data privacy and protection against eavesdropping.
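As an illustrative sketch, issuing a free Let's Encrypt certificate with certbot on an Nginx host might look like this (the package names and example.com domain are placeholders):
sudo dnf install certbot python3-certbot-nginx
sudo certbot --nginx -d example.com -d www.example.com
# Certbot rewrites the Nginx config to redirect HTTP to HTTPS and installs a renewal timer
sudo certbot renew --dry-run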
9. Domain Configuration:
• Pointing the domain name to the VPS server’s IP address by updating DNS records with the domain registrar.
• Configuring virtual hosts on the web server to host multiple websites or subdomains on the same VPS server.
10. Email Server Setup:
• Installing and configuring email server software such as Postfix, Dovecot, and Roundcube to enable email services for the domain.
• Setting up SPF, DKIM, and DMARC records to authenticate outgoing emails and prevent spoofing and phishing attacks (example records below).
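Illustrative DNS TXT records for the email authentication described above (example.com, the DKIM selector, and the policy values are placeholders to adapt):
example.com.                    TXT  "v=spf1 mx -all"
default._domainkey.example.com. TXT  "v=DKIM1; k=rsa; p=<public-key-from-your-mail-server>"
_dmarc.example.com.             TXT  "v=DMARC1; p=quarantine; rua=mailto:postmaster@example.com"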
11. Continuous Monitoring and Maintenance:
• Implementing monitoring tools like Nagios, Zabbix, or Prometheus to monitor server health, resource usage, and service availability.
• Performing routine maintenance tasks such as software updates, security audits, and disk space management to ensure the server’s reliability and security.
12. Scalability and High Availability:
• Implementing load balancing solutions using tools like HAProxy or Nginx to distribute incoming traffic across multiple VPS servers for improved performance and fault tolerance (see the sketch after this list).
• Designing a scalable architecture with redundant components and failover mechanisms to minimize downtime and ensure uninterrupted service availability.
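A minimal sketch of the Nginx load-balancing idea above (the backend addresses are placeholders):
upstream app_backend {
    least_conn;
    server 10.0.0.11:8080;
    server 10.0.0.12:8080;
    server 10.0.0.13:8080 backup;
}
server {
    listen 80;
    location / {
        proxy_pass http://app_backend;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}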
13. Advanced Security Measures:
• Implementing intrusion detection and prevention systems (IDS/IPS) to monitor and block malicious network activity in real-time.
• Utilizing security hardening techniques such as SELinux/AppArmor, chroot jails, and file system encryption to fortify the server against potential security threats.
14. Disaster Recovery Testing:
• Regularly testing the disaster recovery plan by simulating various failure scenarios and validating the effectiveness of backup and restoration procedures.
• Documenting any lessons learned from the testing process and refining the disaster recovery plan accordingly to improve resilience and readiness for unforeseen events.
15. Containerization and Orchestration:
• Implementing containerization technology such as Docker to encapsulate applications and dependencies into lightweight, portable containers.
• Utilizing orchestration platforms like Kubernetes or Docker Swarm to automate deployment, scaling, and management of containerized applications across multiple VPS nodes for improved efficiency and resource utilization.
16. Advanced Networking Configuration:
• Configuring virtual private networks (VPNs) for secure remote access to the server and private communication between VPS instances.
• Setting up VLANs (Virtual Local Area Networks) to logically segregate network traffic and enhance security and performance in multi-tenant environments.
17. Compliance and Regulatory Considerations:
• Ensuring compliance with industry-specific regulations and standards such as GDPR (General Data Protection Regulation) for data privacy and HIPAA (Health Insurance Portability and Accountability Act) for healthcare data security.
• Implementing security controls and audit trails to demonstrate compliance with regulatory requirements and mitigate legal risks.
18. Container Security and Isolation:
• Implementing security best practices for container environments, such as running containers with minimal privileges and limiting their access to host resources.
• Utilizing container security tools like Docker Security Scanning or vulnerability scanners to identify and remediate security vulnerabilities in container images.
19. Auto-Scaling and Elasticity:
• Implementing auto-scaling policies to dynamically adjust VPS resources based on workload demands, ensuring optimal performance and cost-efficiency during peak traffic periods.
• Leveraging cloud platforms like AWS, Google Cloud Platform, or Microsoft Azure for elastic scaling capabilities and seamless integration with VPS infrastructure.
20. Performance Tuning and Optimization:
• Fine-tuning server configurations, kernel parameters, and database settings to optimize performance and responsiveness for specific workloads.
• Conducting load testing and benchmarking to identify performance bottlenecks and optimize resource allocation for maximum efficiency.
By incorporating these additional points into the setup and management of the unmanaged VPS server, ABC Corporation can further enhance its hosting environment’s security, performance, and scalability. With Hostever’s expertise and support, ABC Corporation can leverage advanced technologies and best practices to build a robust and resilient infrastructure that meets its evolving business needs in the dynamic digital landscape of Bangladesh.
Why is Python the Most Widely Used Web Scraping Language?
Today, web scraping is one of the best, most effective data harvesting methods. It allows you to quickly browse the web, target top content across multiple web pages, and extract data to save in a preferable format.
Businesses of all sizes rely on web scraping to gather valuable data to beat their competitors, adjust their pricing, and expand operations into new markets. Since technology is constantly advancing, web scraping tools are evolving with it.
The internet offers several programming languages for developing the most advanced web scraping tools. You can use them to accomplish any scraping goal and extract any type of data.
Even if a target website is in a restricted area, you can combine your scraping tools with various proxies, such as proxy Mexico, to bypass geo-restrictions and access the data you need. Python is one of the best languages for web scraping. Today, we'll discuss what Python is and how it can help with web scraping.
What is Python?
Python is one of the most popular programming languages on the web. It’s a high-level, general-purpose coding language that you can apply to a range of operations online, such as:
• Data scraping
• Web browsing
• Web page targeting
• Content crawling
• Data harvesting
Python excels at streamlining web scraping operations at any scale. It makes scraping bots virtually undetectable and can bypass any restriction and anti-scraping measures. Because of that, Python is the most popular choice for launching large-scale automated data scraping operations, especially when combined with other tactics, like in our proxy Mexico example.
What makes Python so unique and valuable is access to framework libraries such as Scrapy and Beautiful Soup. These top-grade libraries can execute almost any web scraping and data extraction process quickly and efficiently.
Python web scraping
Python is perfect for web scraping and data extraction because it offers increased flexibility and more effective database management. Since it provides unprecedented levels of scraping and crawling efficiency, you can use it to avoid detecting and blocking mechanisms while extracting data from the web.
Although it comes with a relatively easy learning curve, most coders use Python to upgrade their coding skills. Python allows for automating complex web scraping processes related to crawling the web, targeting web pages, filtering content, and replicating data.
It allows you to develop different types of scraping bots to perform various digitized operations. However, the true strength of Python in web scraping is in extracting data from HTML pages. It does so by targeting and copying HTML code from the web pages.
However, many businesses choose Python for web scraping due to the option to store the extracted data in a preferred database format. Since it can replicate website content, Python allows you to scrape the needed data across several locations on the web.
That’s why companies prefer Python for web scraping, as it allows them to conduct thorough market research, auto-fetch the information they need, and monitor competitors.
Advantages of data scraping with Python
Let’s review some of the advantages of using Python in web scraping.
Diverse libraries
Python provides access to extremely powerful libraries such as Scrapy, Beautiful Soup, LXML, and Selenium. Since you need coding scripts to extract data from the web, Selenium automates repetitive script processes such as scrolling, clicking, browsing, and targeting content on web pages.
LXML does almost the same thing as Selenium with one difference – it automates processes related to scraping HTML and XML files. On the other hand, Beautiful Soup is an excellent option for XML and HTML parsing. It accesses XML and HTML files and parses them to make the extraction process easy and more time-efficient.
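As a minimal illustration of the Beautiful Soup parsing flow described above (the URL and CSS selector are placeholders; the requests and beautifulsoup4 packages must be installed):
import requests
from bs4 import BeautifulSoup

# Fetch a page and parse its HTML
response = requests.get("https://example.com")
soup = BeautifulSoup(response.text, "html.parser")

# Target elements with a CSS selector and extract text and attributes
for link in soup.select("a"):
    print(link.get_text(strip=True), link.get("href"))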
Easy to use
Since Python has an easy learning curve, it makes coding easy. Compared to C++ or PHP, Python code doesn’t need curly brackets or semicolons to work. More importantly, it’s much easier to navigate Python syntax and locate different blocks in the code.
Saves time
Python simplifies the web scraping process by automating most of the repetitive tasks. It helps you save time on harvesting vast amounts of data.
Large Python community
One of the best things about Python is its large and active online community. You can talk to other developers, get the latest updates, learn new scraping techniques, and receive valuable insights on the best code writing practices.
Conclusion
Python makes web scraping as simple as possible. It provides top features and framework libraries for automating web scraping processes. While you can use it for virtually every web scraping process, Python excels at extracting data in the desired format from HTML and XML pages.
You can use it to create your database, replicate website content across multiple locations on the web, gather data from several sources, and more. Its easy learning curve makes it simple to write in, clear to read, and easy to navigate.
Codest’s good practice for building software: CircleCI
CircleCI is a very simple tool that, once properly configured, works well as a supervisor of our projects. But is the configuration itself simple? That, of course, depends on the complexity of the project. In our case (a mono repo) it turned out to be more difficult than expected.
The configuration for Ruby on Rails projects is not complicated, and the documentation accurately describes each element of config.yml. However, I would like to focus on the CircleCI tools that help us keep the code clean and ensure good practice.
RUBOCOP
RuboCop probably needs no introduction; however, for those who are not familiar with it, it is a static Ruby code analyzer and formatter. If you already use RuboCop in your project, simply add this step to the CircleCI configuration file:
- run:
name: Rubocop
command: bundle exec rubocop
ESLINT
ESLint is a tool for identifying and reporting patterns found in the ECMAScript or JavaScript code, in order to make the code more consistent and to avoid errors.
- run:
name: Eslint
command: npm run eslint
RSPEC
In RSpec, tests are not only scripts that verify the application code, they are also detailed explanations of how the application should behave, expressed in simple English:
- run:
name: RSpec
command: |
mkdir /tmp/test-results
TEST_FILES="$(circleci tests glob "spec/**/*_spec.rb" | \
circleci tests split --split-by=timings)"
bundle exec rspec \
--format progress \
--format RspecJunitFormatter \
--out /tmp/test-results/rspec.xml \
--format progress \
$TEST_FILES
- store_test_results:
path: /tmp/test-results
In the case of RSpec, we save the test result in a previously created catalog /tmp/test-results in the rspec.xml file, and then using the store_test_results key we store a given catalog. Now the Insights tab will give us access to information such as the median compilation time, the time of the last compilation or the success rate. You can read more about the Insights tab here. If we want to store the rspec.xml file as an “artifact” we need to add the store_artifacts key in our configuration file.
- store_artifacts:
path: /tmp/test-results
BRAKEMAN
Brakeman is a static analysis tool that checks Ruby on Rails applications for security vulnerabilities. By default, Brakeman will return a non-zero exit code if security warnings are detected or scan errors are encountered. Therefore, we focused only on critical errors, and the warnings were turned off.
- run:
name: Brakeman
command: bundle exec brakeman --no-exit-on-warn
If we also want to store the scanning result in the same way as RSpec, our configuration will look like this, and we will have access to our file in the Artifacts tab.
- run:
name: Brakeman
command: |
mkdir /tmp/brakeman
bundle exec brakeman --no-exit-on-warn -o /tmp/brakeman/output.json
- store_artifacts:
path: /tmp/brakeman
RUBYCRITIC
RubyCritic is a gem that uses static-analysis gems such as Reek, Flay and Flog to provide a report about the quality of your code. The report contains an A/B/C/D/F rating for every file in our project that we want scanned, points to the exact places that need improvement, and links documentation for each alert (e.g. TooManyMethods). This tool acts as a consultant on the project: based on the report, the final decision on whether the code actually needs to be corrected is up to the developer. In our CircleCI configuration, a separate job is responsible for preparing the report and posting a comment with the result on GitHub.
The basic configuration of rubycritic is no different from the previous ones.
- run:
name: Rubycritic
command: bundle exec rubycritic ./app -p /tmp/rubycritic -f json -f html --no-browser
- store_artifacts:
path: /tmp/rubycritic
As standard, we run it through bundler, telling it which directory to scan (./app), where to save the result (-p /tmp/rubycritic — rubycritic automatically creates the directory in which the report is stored), in which formats (-f json -f html), and the --no-browser option. We also use the circleci-coverage_reporter gem, which, after the scan, posts a comment on GitHub in our pull request with a link to the report and a percentage rating of the scanned files.
Ruby
In order for the above gem to work properly together with circleci, we must add it to our project and generate two keys (one of them is circleci, the second is github).
Standard installation:
• Gemfile
gem 'circleci-coverage_reporter'
• Rakefile
require 'circleci/coverage_reporter/rake_task' if ENV['CIRCLECI']
• .config.yml
- run:
name: Run Rubycritic
command: bundle exec rubycritic ./app -p /tmp/rubycritic -f json -f html --no-browser
- store_artifacts:
path: /tmp/rubycritic
- run:
name: Rubycritic notification
command: bundle exec rake circleci:report_coverage
Now we have to generate two keys:
• COVERAGE_REPORTER_CIRCLECI_TOKEN: a CircleCI API token. In the 'Settings' section of our project, choose 'Create Token', change the scope to 'all' and fill in the token label; the API token is generated after clicking (screenshots omitted).
• COVERAGE_REPORTER_VCS_TOKEN: a GitHub token whose scope gives access to the repo (screenshots omitted).
After generating the keys, we must add them to our environment variables in Settings (screenshots omitted).
Sample configuration:
jobs:
build:
docker:
- image: circleci/ruby:2.5.1-node-browsers
environment:
BUNDLER_VERSION: 2.0.1
BUNDLE_PATH: /bundle
BUNDLE_JOBS: 4
RAILS_ENV: test
- image: circleci/postgres:10.1-alpine
environment:
POSTGRES_USER: postgres
POSTGRES_DB: example_test
POSTGRES_PASSWORD: example
- image: circleci/redis:4.0.8-alpine
working_directory: ~/insights
environment:
TZ: Europe/Warsaw
steps:
- checkout
- run:
name: Install bundler version 2.0.1
command: gem install bundler -v 2.0.1
- restore_cache:
keys:
- v1-ruby-dependencies-{{ checksum "Gemfile.lock" }}
- v1-ruby-dependencies-
- run:
name: Bundle Install
command: bundle check || bundle install
- save_cache:
key: v1-ruby-dependencies-{{ checksum "Gemfile.lock" }}
paths:
- vendor/bundle
- restore_cache:
keys:
- v1-npm-dependencies-{{ checksum "package.json" }}
- v1-npm-dependencies-
- run:
name: Npm Install
command: npm install
- save_cache:
key: v1-npm-dependencies-{{ checksum "package.json" }}
paths:
- ~/insights/node_modules
- run:
name: Database setup
command: |
bundle exec rake db:create
bundle exec rake db:schema:load
- run:
name: Rubocop
command: bundle exec rubocop
- run:
name: Eslint
command: npm run eslint
- run:
name: RSpec
command: |
mkdir /tmp/test-results
TEST_FILES="$(circleci tests glob "spec/**/*_spec.rb" | \
circleci tests split --split-by=timings)"
bundle exec rspec \
--format progress \
--format RspecJunitFormatter \
--out /tmp/test-results/rspec.xml \
$TEST_FILES
- run:
name: Brakeman
command: bundle exec brakeman --no-exit-on-warn
- store_test_results:
path: /tmp/test-results
rubycritic:
docker:
- image: circleci/ruby:2.5.1-node-browsers
environment:
BUNDLER_VERSION: 2.0.1
BUNDLE_PATH: /bundle
BUNDLE_JOBS: 4
RAILS_ENV: test
CIRCLE_ARTIFACTS: /tmp
working_directory: ~/insights
steps:
- checkout
- run:
name: Install bundler version 2.0.1
command: gem install bundler -v 2.0.1
- restore_cache:
keys:
- v1-rubycritic-dependencies-{{ checksum "Gemfile.lock" }}
- v1-rubycritic-dependencies-
- run:
name: Bundle Install
command: bundle check || bundle install
- save_cache:
key: v1-rubycritic-dependencies-{{ checksum "Gemfile.lock" }}
paths:
- vendor/bundle
- run:
name: Run Rubycritic
command: bundle exec rubycritic ./app -p /tmp/rubycritic -f json -f html --no-browser
- store_artifacts:
path: /tmp/rubycritic
- run:
name: Rubycritic notification
command: bundle exec rake circleci:report_coverage
workflows:
version: 2
build_and_rubycritic:
jobs:
- build
- rubycritic:
requires:
- build
Sources:
• List of available Docker images for CircleCI
• CircleCI
• Used gems or tools
Read more:
– Codest’s good practice for building software: project documentation
– How to write a good and quality code?
– Open-closed principle. Do I ever have to use it?
Introduction to Photoshop CS6
In this Adobe Photoshop CS6 class, students will learn to use Photoshop for sizing, image editing, and color correction. Students will also explore creative effects with brushes and other tools.
Our instructor-led online classes are conducted in the Creative Cloud (CC) version of the software. Your instructor will point out the minor differences between versions where they exist. For our custom instructor-led online and onsite classes, we use the version of the software you use in your office.
Location
Public Classes: Delivered live online via WebEx and guaranteed to run. Join from anywhere!
Private Classes: Delivered at your offices, or any other location of your choice.
Goals
1. Become familiar with the Photoshop workspace.
2. Learn to work with images.
3. Learn selection techniques.
4. Learn how to collage images from various sources.
5. Learn to create layer masks.
6. Understand color models.
7. Learn to use brush tools for painting and retouching.
8. Understand basic color correction principles and techniques.
9. Use the Layers panel for adjustments, clipping, and special effects.
10. Learn to prepare final art for web and print.
11. Learn design techniques for web.
12. Learn to prepare final art.
Outline
1. Exploring the Photoshop Workspace
1. Resetting the Photoshop Preferences
2. Tools and Navigation
3. The Options Bar
4. Panels and the Panel Dock
2. Image Size and Resolution
1. Understanding Raster Images: The Relationship between Size and Resolution
2. Targeting Resolution for Print and Web
3. Using the Crop Tool
4. Saving an Image for the Web
5. Adding Canvas to an Image
3. Selection Tools
1. Simple Selection Techniques
2. The Magic Wand and Quick Selection Tools
3. Layer Masks
4. The Refine Edge Feature
5. Working with the Pen Tool
6. Using Paths to Make Selections
4. Painting
1. RGB and CMYK Color Models
2. Working with the Brush Tool and Brush Panels
3. Introduction to Brush Blending Modes
5. Retouching
1. Retouching and Restoration
2. Using the Clone Source Panel
6. Color Correction
1. Using Blending Modes for Simple Color Correction
2. Working with Adjustment Layers
7. Advanced Layer Techniques
1. Expanding on the Basics
2. The Black & White Adjustment Layer
3. Clipping Masks
4. Blending Images with a Gradient Mask
5. Smart Objects
8. Working with Type and Vector Shapes
1. Vector Shapes
2. Working with Type
9. Designing for Web
1. Guides and Organization
2. Slice Tools
3. Save for Web
4. Combine Images with Design
10. Preparing Final Art
1. Preparing an Image for Print Production
2. File Formats
Class Materials
Each student in our Live Online and our Onsite classes receives a comprehensive set of materials, including course notes and all the class examples.
Class Prerequisites
Experience in the following is required for this Photoshop class:
• Experience with basic software programs such as Microsoft Word
Experience in the following would be useful for this Photoshop class:
• Although not required, using two monitors for this class is strongly recommended.
Follow-on Courses
Training for Yourself
$1,335.00 or 3 vouchers
Training for your Team
Length: 3 Days
• Private Class for your Team
• Online or On-location
• Customizable
• Expert Instructors
What people say about our training
A quick and efficient way to jump right into using a completely new software.
Jonathan Ridley
OnQ
This class had a lot of information. The teacher was great in pacing/helping me understand the tools for practical application. I would definitely recommend this class.
Loria Heerensperger
Washington Dairy Products Commission
Webucator allowed me the convenience of taking the course from home while providing the class live so that it was just like being in the classroom. Great class, great instructor-led course, and overall great experience.
Lisa Keen
Flight Systems Electronics Group
Excellent.
Boris Shimonov
Loews Corporation
Georg Brandl avatar Georg Brandl committed 7bbb34c
Add makefile target for easy test coverage checking.
Files changed (2)
test:
@cd tests; $(PYTHON) run.py -d -m '^[tT]est' $(TEST)
+
+covertest:
+ @cd tests; $(PYTHON) run.py -d -m '^[tT]est' --with-coverage --cover-package=sphinx $(TEST)
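With the new target in place, running "make covertest" executes the test suite under coverage and reports on the sphinx package; as with the existing "test" target, a particular test can presumably still be selected through the TEST variable, for example "make covertest TEST=test_build.py" (the file name here is only an illustration).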
tests/coverage.py
+#!/usr/bin/python
+#
+# Perforce Defect Tracking Integration Project
+# <http://www.ravenbrook.com/project/p4dti/>
+#
+# COVERAGE.PY -- COVERAGE TESTING
+#
+# Gareth Rees, Ravenbrook Limited, 2001-12-04
+# Ned Batchelder, 2004-12-12
+# http://nedbatchelder.com/code/modules/coverage.html
+#
+#
+# 1. INTRODUCTION
+#
+# This module provides coverage testing for Python code.
+#
+# The intended readership is all Python developers.
+#
+# This document is not confidential.
+#
+# See [GDR 2001-12-04a] for the command-line interface, programmatic
+# interface and limitations. See [GDR 2001-12-04b] for requirements and
+# design.
+
+r"""Usage:
+
+coverage.py -x [-p] MODULE.py [ARG1 ARG2 ...]
+ Execute module, passing the given command-line arguments, collecting
+ coverage data. With the -p option, write to a temporary file containing
+ the machine name and process ID.
+
+coverage.py -e
+ Erase collected coverage data.
+
+coverage.py -c
+ Collect data from multiple coverage files (as created by -p option above)
+ and store it into a single file representing the union of the coverage.
+
+coverage.py -r [-m] [-o dir1,dir2,...] FILE1 FILE2 ...
+ Report on the statement coverage for the given files. With the -m
+ option, show line numbers of the statements that weren't executed.
+
+coverage.py -a [-d dir] [-o dir1,dir2,...] FILE1 FILE2 ...
+ Make annotated copies of the given files, marking statements that
+ are executed with > and statements that are missed with !. With
+ the -d option, make the copies in that directory. Without the -d
+ option, make each copy in the same directory as the original.
+
+-o dir,dir2,...
+ Omit reporting or annotating files when their filename path starts with
+ a directory listed in the omit list.
+ e.g. python coverage.py -i -r -o c:\python23,lib\enthought\traits
+
+Coverage data is saved in the file .coverage by default. Set the
+COVERAGE_FILE environment variable to save it somewhere else."""
+
+__version__ = "2.85.20080914" # see detailed history at the end of this file.
+
+import compiler
+import compiler.visitor
+import glob
+import os
+import re
+import string
+import symbol
+import sys
+import threading
+import token
+import types
+import zipimport
+from socket import gethostname
+
+# Python version compatibility
+try:
+ strclass = basestring # new to 2.3
+except:
+ strclass = str
+
+# 2. IMPLEMENTATION
+#
+# This uses the "singleton" pattern.
+#
+# The word "morf" means a module object (from which the source file can
+# be deduced by suitable manipulation of the __file__ attribute) or a
+# filename.
+#
+# When we generate a coverage report we have to canonicalize every
+# filename in the coverage dictionary just in case it refers to the
+# module we are reporting on. It seems a shame to throw away this
+# information so the data in the coverage dictionary is transferred to
+# the 'cexecuted' dictionary under the canonical filenames.
+#
+# The coverage dictionary is called "c" and the trace function "t". The
+# reason for these short names is that Python looks up variables by name
+# at runtime and so execution time depends on the length of variables!
+# In the bottleneck of this application it's appropriate to abbreviate
+# names to increase speed.
+
+class StatementFindingAstVisitor(compiler.visitor.ASTVisitor):
+ """ A visitor for a parsed Abstract Syntax Tree which finds executable
+ statements.
+ """
+ def __init__(self, statements, excluded, suite_spots):
+ compiler.visitor.ASTVisitor.__init__(self)
+ self.statements = statements
+ self.excluded = excluded
+ self.suite_spots = suite_spots
+ self.excluding_suite = 0
+
+ def doRecursive(self, node):
+ for n in node.getChildNodes():
+ self.dispatch(n)
+
+ visitStmt = visitModule = doRecursive
+
+ def doCode(self, node):
+ if hasattr(node, 'decorators') and node.decorators:
+ self.dispatch(node.decorators)
+ self.recordAndDispatch(node.code)
+ else:
+ self.doSuite(node, node.code)
+
+ visitFunction = visitClass = doCode
+
+ def getFirstLine(self, node):
+ # Find the first line in the tree node.
+ lineno = node.lineno
+ for n in node.getChildNodes():
+ f = self.getFirstLine(n)
+ if lineno and f:
+ lineno = min(lineno, f)
+ else:
+ lineno = lineno or f
+ return lineno
+
+ def getLastLine(self, node):
+ # Find the last line in the tree node.
+ lineno = node.lineno
+ for n in node.getChildNodes():
+ lineno = max(lineno, self.getLastLine(n))
+ return lineno
+
+ def doStatement(self, node):
+ self.recordLine(self.getFirstLine(node))
+
+ visitAssert = visitAssign = visitAssTuple = visitPrint = \
+ visitPrintnl = visitRaise = visitSubscript = visitDecorators = \
+ doStatement
+
+ def visitPass(self, node):
+ # Pass statements have weird interactions with docstrings. If this
+ # pass statement is part of one of those pairs, claim that the statement
+ # is on the later of the two lines.
+ l = node.lineno
+ if l:
+ lines = self.suite_spots.get(l, [l,l])
+ self.statements[lines[1]] = 1
+
+ def visitDiscard(self, node):
+ # Discard nodes are statements that execute an expression, but then
+ # discard the results. This includes function calls, so we can't
+ # ignore them all. But if the expression is a constant, the statement
+ # won't be "executed", so don't count it now.
+ if node.expr.__class__.__name__ != 'Const':
+ self.doStatement(node)
+
+ def recordNodeLine(self, node):
+ # Stmt nodes often have None, but shouldn't claim the first line of
+ # their children (because the first child might be an ignorable line
+ # like "global a").
+ if node.__class__.__name__ != 'Stmt':
+ return self.recordLine(self.getFirstLine(node))
+ else:
+ return 0
+
+ def recordLine(self, lineno):
+ # Returns a bool, whether the line is included or excluded.
+ if lineno:
+ # Multi-line tests introducing suites have to get charged to their
+ # keyword.
+ if lineno in self.suite_spots:
+ lineno = self.suite_spots[lineno][0]
+ # If we're inside an excluded suite, record that this line was
+ # excluded.
+ if self.excluding_suite:
+ self.excluded[lineno] = 1
+ return 0
+ # If this line is excluded, or suite_spots maps this line to
+ # another line that is excluded, then we're excluded.
+ elif self.excluded.has_key(lineno) or \
+ self.suite_spots.has_key(lineno) and \
+ self.excluded.has_key(self.suite_spots[lineno][1]):
+ return 0
+ # Otherwise, this is an executable line.
+ else:
+ self.statements[lineno] = 1
+ return 1
+ return 0
+
+ default = recordNodeLine
+
+ def recordAndDispatch(self, node):
+ self.recordNodeLine(node)
+ self.dispatch(node)
+
+ def doSuite(self, intro, body, exclude=0):
+ exsuite = self.excluding_suite
+ if exclude or (intro and not self.recordNodeLine(intro)):
+ self.excluding_suite = 1
+ self.recordAndDispatch(body)
+ self.excluding_suite = exsuite
+
+ def doPlainWordSuite(self, prevsuite, suite):
+ # Finding the exclude lines for else's is tricky, because they aren't
+ # present in the compiler parse tree. Look at the previous suite,
+ # and find its last line. If any line between there and the else's
+ # first line are excluded, then we exclude the else.
+ lastprev = self.getLastLine(prevsuite)
+ firstelse = self.getFirstLine(suite)
+ for l in range(lastprev+1, firstelse):
+ if self.suite_spots.has_key(l):
+ self.doSuite(None, suite, exclude=self.excluded.has_key(l))
+ break
+ else:
+ self.doSuite(None, suite)
+
+ def doElse(self, prevsuite, node):
+ if node.else_:
+ self.doPlainWordSuite(prevsuite, node.else_)
+
+ def visitFor(self, node):
+ self.doSuite(node, node.body)
+ self.doElse(node.body, node)
+
+ visitWhile = visitFor
+
+ def visitIf(self, node):
+ # The first test has to be handled separately from the rest.
+ # The first test is credited to the line with the "if", but the others
+ # are credited to the line with the test for the elif.
+ self.doSuite(node, node.tests[0][1])
+ for t, n in node.tests[1:]:
+ self.doSuite(t, n)
+ self.doElse(node.tests[-1][1], node)
+
+ def visitTryExcept(self, node):
+ self.doSuite(node, node.body)
+ for i in range(len(node.handlers)):
+ a, b, h = node.handlers[i]
+ if not a:
+ # It's a plain "except:". Find the previous suite.
+ if i > 0:
+ prev = node.handlers[i-1][2]
+ else:
+ prev = node.body
+ self.doPlainWordSuite(prev, h)
+ else:
+ self.doSuite(a, h)
+ self.doElse(node.handlers[-1][2], node)
+
+ def visitTryFinally(self, node):
+ self.doSuite(node, node.body)
+ self.doPlainWordSuite(node.body, node.final)
+
+ def visitWith(self, node):
+ self.doSuite(node, node.body)
+
+ def visitGlobal(self, node):
+ # "global" statements don't execute like others (they don't call the
+ # trace function), so don't record their line numbers.
+ pass
+
+the_coverage = None
+
+class CoverageException(Exception):
+ pass
+
+class coverage:
+ # Name of the cache file (unless environment variable is set).
+ cache_default = ".coverage"
+
+ # Environment variable naming the cache file.
+ cache_env = "COVERAGE_FILE"
+
+ # A dictionary with an entry for (Python source file name, line number
+ # in that file) if that line has been executed.
+ c = {}
+
+ # A map from canonical Python source file name to a dictionary in
+ # which there's an entry for each line number that has been
+ # executed.
+ cexecuted = {}
+
+ # Cache of results of calling the analysis2() method, so that you can
+ # specify both -r and -a without doing double work.
+ analysis_cache = {}
+
+ # Cache of results of calling the canonical_filename() method, to
+ # avoid duplicating work.
+ canonical_filename_cache = {}
+
+ def __init__(self):
+ global the_coverage
+ if the_coverage:
+ raise CoverageException("Only one coverage object allowed.")
+ self.usecache = 1
+ self.cache = None
+ self.parallel_mode = False
+ self.exclude_re = ''
+ self.nesting = 0
+ self.cstack = []
+ self.xstack = []
+ self.relative_dir = self.abs_file(os.curdir)+os.sep
+ self.exclude('# *pragma[: ]*[nN][oO] *[cC][oO][vV][eE][rR]')
+
+ # t(f, x, y). This method is passed to sys.settrace as a trace function.
+ # See [van Rossum 2001-07-20b, 9.2] for an explanation of sys.settrace and
+ # the arguments and return value of the trace function.
+ # See [van Rossum 2001-07-20a, 3.2] for a description of frame and code
+ # objects.
+
+ def t(self, f, w, unused): #pragma: no cover
+ if w == 'line':
+ self.c[(f.f_code.co_filename, f.f_lineno)] = 1
+ #-for c in self.cstack:
+ #- c[(f.f_code.co_filename, f.f_lineno)] = 1
+ return self.t
+
+ def help(self, error=None): #pragma: no cover
+ if error:
+ print error
+ print
+ print __doc__
+ sys.exit(1)
+
+ def command_line(self, argv, help_fn=None):
+ import getopt
+ help_fn = help_fn or self.help
+ settings = {}
+ optmap = {
+ '-a': 'annotate',
+ '-c': 'collect',
+ '-d:': 'directory=',
+ '-e': 'erase',
+ '-h': 'help',
+ '-i': 'ignore-errors',
+ '-m': 'show-missing',
+ '-p': 'parallel-mode',
+ '-r': 'report',
+ '-x': 'execute',
+ '-o:': 'omit=',
+ }
+ short_opts = string.join(map(lambda o: o[1:], optmap.keys()), '')
+ long_opts = optmap.values()
+ options, args = getopt.getopt(argv, short_opts, long_opts)
+ for o, a in options:
+ if optmap.has_key(o):
+ settings[optmap[o]] = 1
+ elif optmap.has_key(o + ':'):
+ settings[optmap[o + ':']] = a
+ elif o[2:] in long_opts:
+ settings[o[2:]] = 1
+ elif o[2:] + '=' in long_opts:
+ settings[o[2:]+'='] = a
+ else: #pragma: no cover
+ pass # Can't get here, because getopt won't return anything unknown.
+
+ if settings.get('help'):
+ help_fn()
+
+ for i in ['erase', 'execute']:
+ for j in ['annotate', 'report', 'collect']:
+ if settings.get(i) and settings.get(j):
+ help_fn("You can't specify the '%s' and '%s' "
+ "options at the same time." % (i, j))
+
+ args_needed = (settings.get('execute')
+ or settings.get('annotate')
+ or settings.get('report'))
+ action = (settings.get('erase')
+ or settings.get('collect')
+ or args_needed)
+ if not action:
+ help_fn("You must specify at least one of -e, -x, -c, -r, or -a.")
+ if not args_needed and args:
+ help_fn("Unexpected arguments: %s" % " ".join(args))
+
+ self.parallel_mode = settings.get('parallel-mode')
+ self.get_ready()
+
+ if settings.get('erase'):
+ self.erase()
+ if settings.get('execute'):
+ if not args:
+ help_fn("Nothing to do.")
+ sys.argv = args
+ self.start()
+ import __main__
+ sys.path[0] = os.path.dirname(sys.argv[0])
+ execfile(sys.argv[0], __main__.__dict__)
+ if settings.get('collect'):
+ self.collect()
+ if not args:
+ args = self.cexecuted.keys()
+
+ ignore_errors = settings.get('ignore-errors')
+ show_missing = settings.get('show-missing')
+ directory = settings.get('directory=')
+
+ omit = settings.get('omit=')
+ if omit is not None:
+ omit = [self.abs_file(p) for p in omit.split(',')]
+ else:
+ omit = []
+
+ if settings.get('report'):
+ self.report(args, show_missing, ignore_errors, omit_prefixes=omit)
+ if settings.get('annotate'):
+ self.annotate(args, directory, ignore_errors, omit_prefixes=omit)
+
+ def use_cache(self, usecache, cache_file=None):
+ self.usecache = usecache
+ if cache_file and not self.cache:
+ self.cache_default = cache_file
+
+ def get_ready(self, parallel_mode=False):
+ if self.usecache and not self.cache:
+ self.cache = os.environ.get(self.cache_env, self.cache_default)
+ if self.parallel_mode:
+ self.cache += "." + gethostname() + "." + str(os.getpid())
+ self.restore()
+ self.analysis_cache = {}
+
+ def start(self, parallel_mode=False):
+ self.get_ready()
+ if self.nesting == 0: #pragma: no cover
+ sys.settrace(self.t)
+ if hasattr(threading, 'settrace'):
+ threading.settrace(self.t)
+ self.nesting += 1
+
+ def stop(self):
+ self.nesting -= 1
+ if self.nesting == 0: #pragma: no cover
+ sys.settrace(None)
+ if hasattr(threading, 'settrace'):
+ threading.settrace(None)
+
+ def erase(self):
+ self.get_ready()
+ self.c = {}
+ self.analysis_cache = {}
+ self.cexecuted = {}
+ if self.cache and os.path.exists(self.cache):
+ os.remove(self.cache)
+
+ def exclude(self, re):
+ if self.exclude_re:
+ self.exclude_re += "|"
+ self.exclude_re += "(" + re + ")"
+
+ def begin_recursive(self):
+ self.cstack.append(self.c)
+ self.xstack.append(self.exclude_re)
+
+ def end_recursive(self):
+ self.c = self.cstack.pop()
+ self.exclude_re = self.xstack.pop()
+
+ # save(). Save coverage data to the coverage cache.
+
+ def save(self):
+ if self.usecache and self.cache:
+ self.canonicalize_filenames()
+ cache = open(self.cache, 'wb')
+ import marshal
+ marshal.dump(self.cexecuted, cache)
+ cache.close()
+
+ # restore(). Restore coverage data from the coverage cache (if it exists).
+
+ def restore(self):
+ self.c = {}
+ self.cexecuted = {}
+ assert self.usecache
+ if os.path.exists(self.cache):
+ self.cexecuted = self.restore_file(self.cache)
+
+ def restore_file(self, file_name):
+ try:
+ cache = open(file_name, 'rb')
+ import marshal
+ cexecuted = marshal.load(cache)
+ cache.close()
+ if isinstance(cexecuted, types.DictType):
+ return cexecuted
+ else:
+ return {}
+ except:
+ return {}
+
+ # collect(). Collect data in multiple files produced by parallel mode
+
+ def collect(self):
+ cache_dir, local = os.path.split(self.cache)
+ for f in os.listdir(cache_dir or '.'):
+ if not f.startswith(local):
+ continue
+
+ full_path = os.path.join(cache_dir, f)
+ cexecuted = self.restore_file(full_path)
+ self.merge_data(cexecuted)
+
+ def merge_data(self, new_data):
+ for file_name, file_data in new_data.items():
+ if self.cexecuted.has_key(file_name):
+ self.merge_file_data(self.cexecuted[file_name], file_data)
+ else:
+ self.cexecuted[file_name] = file_data
+
+ def merge_file_data(self, cache_data, new_data):
+ for line_number in new_data.keys():
+ if not cache_data.has_key(line_number):
+ cache_data[line_number] = new_data[line_number]
+
+ def abs_file(self, filename):
+ """ Helper function to turn a filename into an absolute normalized
+ filename.
+ """
+ return os.path.normcase(os.path.abspath(os.path.realpath(filename)))
+
+ def get_zip_data(self, filename):
+ """ Get data from `filename` if it is a zip file path, or return None
+ if it is not.
+ """
+ markers = ['.zip'+os.sep, '.egg'+os.sep]
+ for marker in markers:
+ if marker in filename:
+ parts = filename.split(marker)
+ try:
+ zi = zipimport.zipimporter(parts[0]+marker[:-1])
+ except zipimport.ZipImportError:
+ continue
+ try:
+ data = zi.get_data(parts[1])
+ except IOError:
+ continue
+ return data
+ return None
+
+ # canonical_filename(filename). Return a canonical filename for the
+ # file (that is, an absolute path with no redundant components and
+ # normalized case). See [GDR 2001-12-04b, 3.3].
+
+ def canonical_filename(self, filename):
+ if not self.canonical_filename_cache.has_key(filename):
+ f = filename
+ if os.path.isabs(f) and not os.path.exists(f):
+ if not self.get_zip_data(f):
+ f = os.path.basename(f)
+ if not os.path.isabs(f):
+ for path in [os.curdir] + sys.path:
+ g = os.path.join(path, f)
+ if os.path.exists(g):
+ f = g
+ break
+ cf = self.abs_file(f)
+ self.canonical_filename_cache[filename] = cf
+ return self.canonical_filename_cache[filename]
+
+ # canonicalize_filenames(). Copy results from "c" to "cexecuted",
+ # canonicalizing filenames on the way. Clear the "c" map.
+
+ def canonicalize_filenames(self):
+ for filename, lineno in self.c.keys():
+ if filename == '<string>':
+ # Can't do anything useful with exec'd strings, so skip them.
+ continue
+ f = self.canonical_filename(filename)
+ if not self.cexecuted.has_key(f):
+ self.cexecuted[f] = {}
+ self.cexecuted[f][lineno] = 1
+ self.c = {}
+
+ # morf_filename(morf). Return the filename for a module or file.
+
+ def morf_filename(self, morf):
+ if hasattr(morf, '__file__'):
+ f = morf.__file__
+ else:
+ f = morf
+ return self.canonical_filename(f)
+
+ # analyze_morf(morf). Analyze the module or filename passed as
+ # the argument. If the source code can't be found, raise an error.
+ # Otherwise, return a tuple of (1) the canonical filename of the
+ # source code for the module, (2) a list of lines of statements
+ # in the source code, (3) a list of lines of excluded statements,
+ # and (4), a map of line numbers to multi-line line number ranges, for
+ # statements that cross lines.
+
+ def analyze_morf(self, morf):
+ if self.analysis_cache.has_key(morf):
+ return self.analysis_cache[morf]
+ filename = self.morf_filename(morf)
+ ext = os.path.splitext(filename)[1]
+ source, sourcef = None, None
+ if ext == '.pyc':
+ if not os.path.exists(filename[:-1]):
+ source = self.get_zip_data(filename[:-1])
+ if not source:
+ raise CoverageException(
+ "No source for compiled code '%s'." % filename
+ )
+ filename = filename[:-1]
+ if not source:
+ sourcef = open(filename, 'rU')
+ source = sourcef.read()
+ try:
+ lines, excluded_lines, line_map = self.find_executable_statements(
+ source, exclude=self.exclude_re
+ )
+ except SyntaxError, synerr:
+ raise CoverageException(
+ "Couldn't parse '%s' as Python source: '%s' at line %d" %
+ (filename, synerr.msg, synerr.lineno)
+ )
+ if sourcef:
+ sourcef.close()
+ result = filename, lines, excluded_lines, line_map
+ self.analysis_cache[morf] = result
+ return result
+
+ def first_line_of_tree(self, tree):
+ while True:
+ if len(tree) == 3 and type(tree[2]) == type(1):
+ return tree[2]
+ tree = tree[1]
+
+ def last_line_of_tree(self, tree):
+ while True:
+ if len(tree) == 3 and type(tree[2]) == type(1):
+ return tree[2]
+ tree = tree[-1]
+
+ def find_docstring_pass_pair(self, tree, spots):
+ for i in range(1, len(tree)):
+ if self.is_string_constant(tree[i]) and self.is_pass_stmt(tree[i+1]):
+ first_line = self.first_line_of_tree(tree[i])
+ last_line = self.last_line_of_tree(tree[i+1])
+ self.record_multiline(spots, first_line, last_line)
+
+ def is_string_constant(self, tree):
+ try:
+ return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.expr_stmt
+ except:
+ return False
+
+ def is_pass_stmt(self, tree):
+ try:
+ return tree[0] == symbol.stmt and tree[1][1][1][0] == symbol.pass_stmt
+ except:
+ return False
+
+ def record_multiline(self, spots, i, j):
+ for l in range(i, j+1):
+ spots[l] = (i, j)
+
+ def get_suite_spots(self, tree, spots):
+ """ Analyze a parse tree to find suite introducers which span a number
+ of lines.
+ """
+ for i in range(1, len(tree)):
+ if type(tree[i]) == type(()):
+ if tree[i][0] == symbol.suite:
+ # Found a suite, look back for the colon and keyword.
+ lineno_colon = lineno_word = None
+ for j in range(i-1, 0, -1):
+ if tree[j][0] == token.COLON:
+ # Colons are never executed themselves: we want the
+ # line number of the last token before the colon.
+ lineno_colon = self.last_line_of_tree(tree[j-1])
+ elif tree[j][0] == token.NAME:
+ if tree[j][1] == 'elif':
+ # Find the line number of the first non-terminal
+ # after the keyword.
+ t = tree[j+1]
+ while t and token.ISNONTERMINAL(t[0]):
+ t = t[1]
+ if t:
+ lineno_word = t[2]
+ else:
+ lineno_word = tree[j][2]
+ break
+ elif tree[j][0] == symbol.except_clause:
+ # "except" clauses look like:
+ # ('except_clause', ('NAME', 'except', lineno), ...)
+ if tree[j][1][0] == token.NAME:
+ lineno_word = tree[j][1][2]
+ break
+ if lineno_colon and lineno_word:
+ # Found colon and keyword, mark all the lines
+ # between the two with the two line numbers.
+ self.record_multiline(spots, lineno_word, lineno_colon)
+
+ # "pass" statements are tricky: different versions of Python
+ # treat them differently, especially in the common case of a
+ # function with a doc string and a single pass statement.
+ self.find_docstring_pass_pair(tree[i], spots)
+
+ elif tree[i][0] == symbol.simple_stmt:
+ first_line = self.first_line_of_tree(tree[i])
+ last_line = self.last_line_of_tree(tree[i])
+ if first_line != last_line:
+ self.record_multiline(spots, first_line, last_line)
+ self.get_suite_spots(tree[i], spots)
+
+ def find_executable_statements(self, text, exclude=None):
+ # Find lines which match an exclusion pattern.
+ excluded = {}
+ suite_spots = {}
+ if exclude:
+ reExclude = re.compile(exclude)
+ lines = text.split('\n')
+ for i in range(len(lines)):
+ if reExclude.search(lines[i]):
+ excluded[i+1] = 1
+
+ # Parse the code and analyze the parse tree to find out which statements
+ # are multiline, and where suites begin and end.
+ import parser
+ tree = parser.suite(text+'\n\n').totuple(1)
+ self.get_suite_spots(tree, suite_spots)
+ #print "Suite spots:", suite_spots
+
+ # Use the compiler module to parse the text and find the executable
+ # statements. We add newlines to be impervious to final partial lines.
+ statements = {}
+ ast = compiler.parse(text+'\n\n')
+ visitor = StatementFindingAstVisitor(statements, excluded, suite_spots)
+ compiler.walk(ast, visitor, walker=visitor)
+
+ lines = statements.keys()
+ lines.sort()
+ excluded_lines = excluded.keys()
+ excluded_lines.sort()
+ return lines, excluded_lines, suite_spots
+
+ # format_lines(statements, lines). Format a list of line numbers
+ # for printing by coalescing groups of lines as long as the lines
+ # represent consecutive statements. This will coalesce even if
+ # there are gaps between statements, so if statements =
+ # [1,2,3,4,5,10,11,12,13,14] and lines = [1,2,5,10,11,13,14] then
+ # format_lines will return "1-2, 5-11, 13-14".
+
+ def format_lines(self, statements, lines):
+ pairs = []
+ i = 0
+ j = 0
+ start = None
+ pairs = []
+ while i < len(statements) and j < len(lines):
+ if statements[i] == lines[j]:
+ if start == None:
+ start = lines[j]
+ end = lines[j]
+ j = j + 1
+ elif start:
+ pairs.append((start, end))
+ start = None
+ i = i + 1
+ if start:
+ pairs.append((start, end))
+ def stringify(pair):
+ start, end = pair
+ if start == end:
+ return "%d" % start
+ else:
+ return "%d-%d" % (start, end)
+ ret = string.join(map(stringify, pairs), ", ")
+ return ret
+
+ # Backward compatibility with version 1.
+ def analysis(self, morf):
+ f, s, _, m, mf = self.analysis2(morf)
+ return f, s, m, mf
+
+ def analysis2(self, morf):
+ filename, statements, excluded, line_map = self.analyze_morf(morf)
+ self.canonicalize_filenames()
+ if not self.cexecuted.has_key(filename):
+ self.cexecuted[filename] = {}
+ missing = []
+ for line in statements:
+ lines = line_map.get(line, [line, line])
+ for l in range(lines[0], lines[1]+1):
+ if self.cexecuted[filename].has_key(l):
+ break
+ else:
+ missing.append(line)
+ return (filename, statements, excluded, missing,
+ self.format_lines(statements, missing))
+
+ def relative_filename(self, filename):
+ """ Convert filename to relative filename from self.relative_dir.
+ """
+ return filename.replace(self.relative_dir, "")
+
+ def morf_name(self, morf):
+ """ Return the name of morf as used in report.
+ """
+ if hasattr(morf, '__name__'):
+ return morf.__name__
+ else:
+ return self.relative_filename(os.path.splitext(morf)[0])
+
+ def filter_by_prefix(self, morfs, omit_prefixes):
+ """ Return list of morfs where the morf name does not begin
+ with any one of the omit_prefixes.
+ """
+ filtered_morfs = []
+ for morf in morfs:
+ for prefix in omit_prefixes:
+ if self.morf_name(morf).startswith(prefix):
+ break
+ else:
+ filtered_morfs.append(morf)
+
+ return filtered_morfs
+
+ def morf_name_compare(self, x, y):
+ return cmp(self.morf_name(x), self.morf_name(y))
+
+ def report(self, morfs, show_missing=1, ignore_errors=0, file=None, omit_prefixes=[]):
+ if not isinstance(morfs, types.ListType):
+ morfs = [morfs]
+ # On windows, the shell doesn't expand wildcards. Do it here.
+ globbed = []
+ for morf in morfs:
+ if isinstance(morf, strclass):
+ globbed.extend(glob.glob(morf))
+ else:
+ globbed.append(morf)
+ morfs = globbed
+
+ morfs = self.filter_by_prefix(morfs, omit_prefixes)
+ morfs.sort(self.morf_name_compare)
+
+ max_name = max([5,] + map(len, map(self.morf_name, morfs)))
+ fmt_name = "%%- %ds " % max_name
+ fmt_err = fmt_name + "%s: %s"
+ header = fmt_name % "Name" + " Stmts Exec Cover"
+ fmt_coverage = fmt_name + "% 6d % 6d % 5d%%"
+ if show_missing:
+ header = header + " Missing"
+ fmt_coverage = fmt_coverage + " %s"
+ if not file:
+ file = sys.stdout
+ print >>file, header
+ print >>file, "-" * len(header)
+ total_statements = 0
+ total_executed = 0
+ for morf in morfs:
+ name = self.morf_name(morf)
+ try:
+ _, statements, _, missing, readable = self.analysis2(morf)
+ n = len(statements)
+ m = n - len(missing)
+ if n > 0:
+ pc = 100.0 * m / n
+ else:
+ pc = 100.0
+ args = (name, n, m, pc)
+ if show_missing:
+ args = args + (readable,)
+ print >>file, fmt_coverage % args
+ total_statements = total_statements + n
+ total_executed = total_executed + m
+ except KeyboardInterrupt: #pragma: no cover
+ raise
+ except:
+ if not ignore_errors:
+ typ, msg = sys.exc_info()[:2]
+ print >>file, fmt_err % (name, typ, msg)
+ if len(morfs) > 1:
+ print >>file, "-" * len(header)
+ if total_statements > 0:
+ pc = 100.0 * total_executed / total_statements
+ else:
+ pc = 100.0
+ args = ("TOTAL", total_statements, total_executed, pc)
+ if show_missing:
+ args = args + ("",)
+ print >>file, fmt_coverage % args
+
+ # annotate(morfs, ignore_errors).
+
+ blank_re = re.compile(r"\s*(#|$)")
+ else_re = re.compile(r"\s*else\s*:\s*(#|$)")
+
+ def annotate(self, morfs, directory=None, ignore_errors=0, omit_prefixes=[]):
+ morfs = self.filter_by_prefix(morfs, omit_prefixes)
+ for morf in morfs:
+ try:
+ filename, statements, excluded, missing, _ = self.analysis2(morf)
+ self.annotate_file(filename, statements, excluded, missing, directory)
+ except KeyboardInterrupt:
+ raise
+ except:
+ if not ignore_errors:
+ raise
+
+ def annotate_file(self, filename, statements, excluded, missing, directory=None):
+ source = open(filename, 'r')
+ if directory:
+ dest_file = os.path.join(directory,
+ os.path.basename(filename)
+ + ',cover')
+ else:
+ dest_file = filename + ',cover'
+ dest = open(dest_file, 'w')
+ lineno = 0
+ i = 0
+ j = 0
+ covered = 1
+ while 1:
+ line = source.readline()
+ if line == '':
+ break
+ lineno = lineno + 1
+ while i < len(statements) and statements[i] < lineno:
+ i = i + 1
+ while j < len(missing) and missing[j] < lineno:
+ j = j + 1
+ if i < len(statements) and statements[i] == lineno:
+ covered = j >= len(missing) or missing[j] > lineno
+ if self.blank_re.match(line):
+ dest.write(' ')
+ elif self.else_re.match(line):
+ # Special logic for lines containing only 'else:'.
+ # See [GDR 2001-12-04b, 3.2].
+ if i >= len(statements) and j >= len(missing):
+ dest.write('! ')
+ elif i >= len(statements) or j >= len(missing):
+ dest.write('> ')
+ elif statements[i] == missing[j]:
+ dest.write('! ')
+ else:
+ dest.write('> ')
+ elif lineno in excluded:
+ dest.write('- ')
+ elif covered:
+ dest.write('> ')
+ else:
+ dest.write('! ')
+ dest.write(line)
+ source.close()
+ dest.close()
+
+# Singleton object.
+the_coverage = coverage()
+
+# Module functions call methods in the singleton object.
+def use_cache(*args, **kw):
+ return the_coverage.use_cache(*args, **kw)
+
+def start(*args, **kw):
+ return the_coverage.start(*args, **kw)
+
+def stop(*args, **kw):
+ return the_coverage.stop(*args, **kw)
+
+def erase(*args, **kw):
+ return the_coverage.erase(*args, **kw)
+
+def begin_recursive(*args, **kw):
+ return the_coverage.begin_recursive(*args, **kw)
+
+def end_recursive(*args, **kw):
+ return the_coverage.end_recursive(*args, **kw)
+
+def exclude(*args, **kw):
+ return the_coverage.exclude(*args, **kw)
+
+def analysis(*args, **kw):
+ return the_coverage.analysis(*args, **kw)
+
+def analysis2(*args, **kw):
+ return the_coverage.analysis2(*args, **kw)
+
+def report(*args, **kw):
+ return the_coverage.report(*args, **kw)
+
+def annotate(*args, **kw):
+ return the_coverage.annotate(*args, **kw)
+
+def annotate_file(*args, **kw):
+ return the_coverage.annotate_file(*args, **kw)
+
+# Save coverage data when Python exits. (The atexit module wasn't
+# introduced until Python 2.0, so use sys.exitfunc when it's not
+# available.)
+try:
+ import atexit
+ atexit.register(the_coverage.save)
+except ImportError:
+ sys.exitfunc = the_coverage.save
+
+def main():
+ the_coverage.command_line(sys.argv[1:])
+
+# Command-line interface.
+if __name__ == '__main__':
+ main()
+
+
+# A. REFERENCES
+#
+# [GDR 2001-12-04a] "Statement coverage for Python"; Gareth Rees;
+# Ravenbrook Limited; 2001-12-04;
+# <http://www.nedbatchelder.com/code/modules/rees-coverage.html>.
+#
+# [GDR 2001-12-04b] "Statement coverage for Python: design and
+# analysis"; Gareth Rees; Ravenbrook Limited; 2001-12-04;
+# <http://www.nedbatchelder.com/code/modules/rees-design.html>.
+#
+# [van Rossum 2001-07-20a] "Python Reference Manual (release 2.1.1)";
+# Guido van Rossum; 2001-07-20;
+# <http://www.python.org/doc/2.1.1/ref/ref.html>.
+#
+# [van Rossum 2001-07-20b] "Python Library Reference"; Guido van Rossum;
+# 2001-07-20; <http://www.python.org/doc/2.1.1/lib/lib.html>.
+#
+#
+# B. DOCUMENT HISTORY
+#
+# 2001-12-04 GDR Created.
+#
+# 2001-12-06 GDR Added command-line interface and source code
+# annotation.
+#
+# 2001-12-09 GDR Moved design and interface to separate documents.
+#
+# 2001-12-10 GDR Open cache file as binary on Windows. Allow
+# simultaneous -e and -x, or -a and -r.
+#
+# 2001-12-12 GDR Added command-line help. Cache analysis so that it
+# only needs to be done once when you specify -a and -r.
+#
+# 2001-12-13 GDR Improved speed while recording. Portable between
+# Python 1.5.2 and 2.1.1.
+#
+# 2002-01-03 GDR Module-level functions work correctly.
+#
+# 2002-01-07 GDR Update sys.path when running a file with the -x option,
+# so that it matches the value the program would get if it were run on
+# its own.
+#
+# 2004-12-12 NMB Significant code changes.
+# - Finding executable statements has been rewritten so that docstrings and
+# other quirks of Python execution aren't mistakenly identified as missing
+# lines.
+# - Lines can be excluded from consideration, even entire suites of lines.
+# - The filesystem cache of covered lines can be disabled programmatically.
+# - Modernized the code.
+#
+# 2004-12-14 NMB Minor tweaks. Return 'analysis' to its original behavior
+# and add 'analysis2'. Add a global for 'annotate', and factor it, adding
+# 'annotate_file'.
+#
+# 2004-12-31 NMB Allow for keyword arguments in the module global functions.
+# Thanks, Allen.
+#
+# 2005-12-02 NMB Call threading.settrace so that all threads are measured.
+# Thanks Martin Fuzzey. Add a file argument to report so that reports can be
+# captured to a different destination.
+#
+# 2005-12-03 NMB coverage.py can now measure itself.
+#
+# 2005-12-04 NMB Adapted Greg Rogers' patch for using relative filenames,
+# and sorting and omitting files to report on.
+#
+# 2006-07-23 NMB Applied Joseph Tate's patch for function decorators.
+#
+# 2006-08-21 NMB Applied Sigve Tjora and Mark van der Wal's fixes for argument
+# handling.
+#
+# 2006-08-22 NMB Applied Geoff Bache's parallel mode patch.
+#
+# 2006-08-23 NMB Refactorings to improve testability. Fixes to command-line
+# logic for parallel mode and collect.
+#
+# 2006-08-25 NMB "#pragma: nocover" is excluded by default.
+#
+# 2006-09-10 NMB Properly ignore docstrings and other constant expressions that
+# appear in the middle of a function, a problem reported by Tim Leslie.
+# Minor changes to avoid lint warnings.
+#
+# 2006-09-17 NMB coverage.erase() shouldn't clobber the exclude regex.
+# Change how parallel mode is invoked, and fix erase() so that it erases the
+# cache when called programmatically.
+#
+# 2007-07-21 NMB In reports, ignore code executed from strings, since we can't
+# do anything useful with it anyway.
+# Better file handling on Linux, thanks Guillaume Chazarain.
+# Better shell support on Windows, thanks Noel O'Boyle.
+# Python 2.2 support maintained, thanks Catherine Proulx.
+#
+# 2007-07-22 NMB Python 2.5 now fully supported. The method of dealing with
+# multi-line statements is now less sensitive to the exact line that Python
+# reports during execution. Pass statements are handled specially so that their
+# disappearance during execution won't throw off the measurement.
+#
+# 2007-07-23 NMB Now Python 2.5 is *really* fully supported: the body of the
+# new with statement is counted as executable.
+#
+# 2007-07-29 NMB Better packaging.
+#
+# 2007-09-30 NMB Don't try to predict whether a file is Python source based on
+# the extension. Extensionless files are often Python scripts. Instead, simply
+# parse the file and catch the syntax errors. Hat tip to Ben Finney.
+#
+# 2008-05-25 NMB Open files in rU mode to avoid line ending craziness.
+# Thanks, Edward Loper.
+#
+# 2008-09-14 NMB Add support for finding source files in eggs.
+# Don't check for morf's being instances of ModuleType, instead use duck typing
+# so that pseudo-modules can participate. Thanks, Imri Goldberg.
+# Use os.realpath as part of the fixing of filenames so that symlinks won't
+# confuse things. Thanks, Patrick Mezard.
+#
+#
+# C. COPYRIGHT AND LICENCE
+#
+# Copyright 2001 Gareth Rees. All rights reserved.
+# Copyright 2004-2008 Ned Batchelder. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# HOLDERS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+#
+# $Id: coverage.py 100 2008-10-12 12:08:22Z nedbat $
Data representation in computer systems and its architecture and components
1. Data representation in computer systems and its architecture and components
2. 11,625(10)=…………..(2)
First step: repeated division of the integer part (the slide's figure, with the direction of recording marked, is omitted)
Second step: repeated multiplication of the fractional part (figure omitted)
Answer: 11,625(10)= 1011,101(2)
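A reconstruction of the steps the slide's figures most likely showed.
Integer part (repeated division by 2): 11 ÷ 2 = 5 r 1; 5 ÷ 2 = 2 r 1; 2 ÷ 2 = 1 r 0; 1 ÷ 2 = 0 r 1; reading the remainders upwards gives 1011.
Fractional part (repeated multiplication by 2): 0,625 × 2 = 1,25 → 1; 0,25 × 2 = 0,5 → 0; 0,5 × 2 = 1,0 → 1; reading the integer parts downwards gives 101.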
3. Rules of conversion from binary to decimal number system
Example 1 and Example 2 (worked figures omitted)
4. Rules of conversion from decimal to binary number system
Example: conversion of 345 to the binary number system (worked figure omitted)
5. Find 110011+1101
Logical addition of numbers:
0+0=0
1+0=1
0+1=1
1+1=10 (write 0, carry 1)
1+1+1=11 (write 1, carry 1)

carries: 1 1 1 1 1
 110011
+  1101
-------
1000000
6. 122,6(10)=…………..(8)
First step: repeated division of the integer part (the slide's figure, with the direction of recording marked, is omitted)
Second step: repeated multiplication of the fractional part (figure omitted)
Answer: 122,6(10)= 172,463…(8)
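A reconstruction of the steps behind this answer.
Integer part: 122 ÷ 8 = 15 r 2; 15 ÷ 8 = 1 r 7; 1 ÷ 8 = 0 r 1; reading the remainders upwards gives 172.
Fractional part: 0,6 × 8 = 4,8 → 4; 0,8 × 8 = 6,4 → 6; 0,4 × 8 = 3,2 → 3; and so on, giving 0,463…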
7. 500,7(10)=…………..(16)
First step: repeated division of the integer part (the slide's figure, with the direction of recording marked, is omitted)
Second step: repeated multiplication of the fractional part (figure omitted)
Answer: 500,7(10)= 1F4,B333…(16)
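A reconstruction of the steps behind this answer.
Integer part: 500 ÷ 16 = 31 r 4; 31 ÷ 16 = 1 r 15 (F); 1 ÷ 16 = 0 r 1; reading the remainders upwards gives 1F4.
Fractional part: 0,7 × 16 = 11,2 → B; 0,2 × 16 = 3,2 → 3; 0,2 × 16 = 3,2 → 3; and so on, giving 0,B333…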
8.
1. Converting binary to decimal
Answer: 101,11(2)= 5,75(10)
2. Converting octal to decimal
Answer: 57,24(8)= 47,3125(10)
3. Converting hexadecimal to decimal
Answer: 7A,84(16)= 122,515625(10)
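The positional expansions behind these answers (reconstructed):
101,11(2) = 1·4 + 0·2 + 1·1 + 1·(1/2) + 1·(1/4) = 5,75
57,24(8) = 5·8 + 7·1 + 2·(1/8) + 4·(1/64) = 47,3125
7A,84(16) = 7·16 + 10·1 + 8·(1/16) + 4·(1/256) = 122,515625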
9. Addition of two numbers in octal
Answer: 6354(8) + 705(8) = 7261(8)
Answer: 215,4(8) + 73,6(8) = 311,2(8)
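Reconstructed column work for the first sum, right to left: 4+5 = 11(8), write 1 and carry 1; 5+0+1 = 6; 3+7 = 12(8), write 2 and carry 1; 6+1 = 7, giving 7261(8). The second sum is carried out the same way, aligning the octal points.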
10. Addition of two numbers in hexadecimal
Answer: 1C52(16) + 891(16)= 24E3(16)
Answer: 8D,8(16) + 3B,C(16)= C9,4(16)
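Reconstructed column work for the first sum, right to left: 2+1 = 3; 5+9 = E; C+8 = 14(16), write 4 and carry 1; 1+1 = 2, giving 24E3(16). In the second sum the fractional column 8+C = 14(16) produces the carry into the integer part.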
As software systems become increasingly complex, the importance of comprehensive testing methodologies becomes evident. This subsection introduces the concept of End-to-End testing, highlighting its unique role in validating the entire application workflow. It explores how E2E testing differs from other testing types, emphasizing its ability to uncover issues that may go undetected in unit or integration testing phases.
While manual testing has its merits, modern software development's growing complexity and rapid release cycles necessitate a more efficient approach. This part outlines the challenges associated with manual testing, such as time constraints, human error, and scalability issues. It then underscores the importance of automation in testing, setting the stage for the subsequent exploration of automated End-to-End testing as a solution to these challenges.
Understanding Automated End-to-End Testing
Definition and Purpose of End-to-End Testing
This section defines End-to-End testing and explores its primary purpose in the software testing landscape. It highlights how E2E testing verifies the seamless interaction of various components within an application, ensuring that the entire system functions as intended from start to finish. By scrutinizing the user journey, E2E testing aims to identify potential bottlenecks, integration issues, and user experience discrepancies.
Critical Advantages of Automated E2E Testing
Automating the E2E testing process offers numerous benefits, contributing to faster and more reliable software delivery. This subsection explores the advantages of incorporating automation into E2E testing, including:
1. Efficiency and Speed: Automated tests can be executed quickly and repeatedly, accelerating the testing process.
2. Comprehensive Test Coverage: Automation enables testing multiple scenarios, ensuring thorough coverage of all functionalities.
3. Early Detection of Defects: Automated tests identify issues in the early stages of development, reducing the cost and effort required for bug fixing.
4. Regression Testing: Automated E2E tests facilitate swift regression testing, verifying that new code changes do not adversely affect existing functionalities.
5. Resource Optimization: Automated testing frees human resources from repetitive tasks, allowing teams to focus on more complex and creative aspects of testing.
Common Challenges in Manual Testing and How Automation Addresses Them
Manual testing, while valuable, comes with its share of challenges that can impede the software development process. This part outlines some common challenges in manual testing, such as:
1. Time-Consuming: Manual testing is often time-intensive, especially for repetitive and regression test cases.
2. Human Error: Manual testing is susceptible to human error, leading to inconsistent results and overlooking potential issues.
3. Limited Scalability: Manual testing becomes challenging to scale efficiently as software projects grow.
4. Costs: Manual testing incurs higher costs in terms of time and resources.
Automation addresses these challenges through fast, repeatable execution, consistent results, and test suites that scale with the application, as the following sections explore in detail.
Choosing the Right Tools
Overview of Popular E2E Testing Tools
This section provides a comprehensive overview of widely used End-to-End (E2E) testing tools, including but not limited to Selenium, Cypress, and TestCafe. It explores each tool's key features and strengths, highlighting their suitability for different testing scenarios.
A brief history and community support for each tool are also discussed to provide context for potential users; a minimal code sketch follows the feature lists below.
Selenium:
1. Open-source framework
2. Cross-browser compatibility
3. Extensive programming language support
4. Robust community and extensive documentation
Cypress:
1. JavaScript-based
2. Real-time interactive testing
3. Simplified syntax and architecture
4. Built-in time-travel feature for debugging
TestCafe:
1. Cross-browser testing without browser plugins
2. Supports multiple operating systems
3. Cloud-based testing
4. Built-in smart test recorder
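To make the comparison concrete, here is a minimal sketch of an E2E check using Selenium's Python bindings. It assumes Selenium 4 and a Chrome driver are installed; the URL and element locators are hypothetical placeholders rather than any real application's:

from selenium import webdriver
from selenium.webdriver.common.by import By

driver = webdriver.Chrome()
try:
    driver.get("https://example.com/login")  # hypothetical application URL
    driver.find_element(By.ID, "username").send_keys("demo")
    driver.find_element(By.ID, "password").send_keys("secret")
    driver.find_element(By.CSS_SELECTOR, "button[type=submit]").click()
    assert "Dashboard" in driver.title  # the user journey reached its goal
finally:
    driver.quit()  # always release the browser session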
Factors to Consider When Selecting an Automated Testing Tool
Choosing the right E2E testing tool is crucial for the success of automated testing efforts. This subsection outlines vital factors to consider when making this decision, guiding testers and developers through the selection process. These factors include:
1. Compatibility and Platform Support: Ensure the tool supports the browsers and platforms relevant to your application.
2. Ease of Use and Learning Curve: Evaluate the tool's learning curve and how quickly the team can adapt.
3. Integration with Development Workflow: Consider how well the tool integrates with version control systems, continuous integration (CI) pipelines, and other development tools.
4. Community Support and Documentation: Assess the tool's community support, active development, and availability of documentation.
5. Scalability and Parallel Execution: Evaluate the tool's ability to scale with growing test suites and support parallel test execution.
Comparative Analysis of Different Tools and Their Features
This part presents a comparative analysis of the selected E2E testing tools, focusing on various features such as:
1. Ease of Setup and Configuration: How quickly can the tool be configured for testing?
2. Scripting Language Support: The languages supported for scripting test scenarios.
3. Execution Speed: The efficiency and speed of test execution.
4. Reporting and Debugging Capabilities: Tools' capabilities for generating detailed reports and aiding debugging.
5. Community and Support: The strength and responsiveness of the tools' user communities.
Creating Effective Test Cases
Understanding the Application's Architecture for Test Case Design
Before crafting test cases, a deep understanding of the application's architecture is essential. This section guides testers through:
1. System Architecture Overview: Understanding the high-level architecture of the application, including frontend, backend, and database components.
2. Component Interaction: Identifying how different components interact to deliver specific functionalities.
3. API Endpoints and Integrations: Recognizing key API endpoints and external integrations that impact the overall application behaviour.
Identifying Critical Paths and User Journeys
Effective test cases should focus on critical paths and user journeys within the application. This subsection explores:
1. Critical Path Analysis: Identifying the most critical workflows and functionalities central to the application's success.
2. User Journey Mapping: Creating user journey maps to understand how users interact with the application and defining test scenarios based on these journeys.
3. Edge Cases and Boundary Testing: Considering edge cases and boundary conditions to ensure robustness in handling various inputs.
Writing Scalable and Maintainable Test Scripts
To maximize the benefits of automated testing, this part offers guidance on crafting test scripts that are scalable and easy to maintain:
1. Page Object Model (POM): Implementing the Page Object Model design pattern for better script organization and maintenance.
2. Reusable Components: Designing reusable components and functions to avoid redundancy in test scripts.
3. Parameterization and Data-Driven Testing: Using parameterization and data-driven testing techniques to increase the versatility of test scenarios.
4. Test Hooks and Fixtures: Incorporating test hooks and fixtures to consistently set up and tear down test environments.
5. Documentation and Comments: Document test scripts comprehensively and add comments to enhance script readability.
By adhering to these principles, testers can create test cases that not only cover critical aspects of the application but also contribute to the maintainability and scalability of the automated test suite.
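As an illustration of the first two practices above, here is a minimal Page Object Model sketch in Python with Selenium; the page URL and locators are hypothetical placeholders:

from selenium.webdriver.common.by import By

class LoginPage:
    """Encapsulates the locators and actions of the login page."""
    URL = "https://example.com/login"  # hypothetical

    def __init__(self, driver):
        self.driver = driver

    def open(self):
        self.driver.get(self.URL)
        return self

    def log_in(self, user, password):
        self.driver.find_element(By.ID, "username").send_keys(user)
        self.driver.find_element(By.ID, "password").send_keys(password)
        self.driver.find_element(By.CSS_SELECTOR, "button[type=submit]").click()

A test then reads as a user journey, for example LoginPage(driver).open().log_in("demo", "secret"), and a locator change touches exactly one class rather than every script that uses the page.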
Best Practices for Automated E2E Testing
Test Data Management and Isolation
Effectively managing and isolating test data is crucial for maintaining consistent and reliable test results. This section covers best practices for test data:
1. Data Independence: Ensuring test cases are independent of each other by managing and isolating test data for each scenario.
2. Data Generation: Generating dynamic test data when necessary to simulate real-world scenarios and improve test coverage.
3. Data Privacy and Security: Handling sensitive data securely, especially in scenarios where the test environment mirrors the production environment.
Test Environment Setup and Teardown
Properly setting up and tearing down the test environment ensures that each test case starts with a clean slate, avoiding interference between scenarios. This part focuses on best practices for managing test environments:
1. Isolation of Environments: Ensuring isolation between test environments to prevent contamination of data and settings.
2. Automated Setup and Teardown: Automating the environment setup and teardown process to streamline testing workflows.
3. Parallel Execution Considerations: Adapting environment setup and teardown processes for parallel test execution to optimize efficiency.
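As a sketch of automated setup and teardown, a pytest fixture can guarantee that every test starts with a fresh, isolated browser session; the URL is a hypothetical placeholder:

import pytest
from selenium import webdriver

@pytest.fixture
def driver():
    drv = webdriver.Chrome()  # setup: a clean browser per test
    yield drv
    drv.quit()  # teardown: always runs, so state never leaks between tests

def test_homepage_has_title(driver):
    driver.get("https://example.com")  # hypothetical URL
    assert driver.title  # the page loaded and exposed a title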
Handling Asynchronous Operations and Waits Effectively
Automated E2E testing often involves waiting for asynchronous operations to complete. This subsection provides best practices for handling asynchronous operations:
1. Smart Waiting Strategies: Implementing intelligent waiting strategies, such as dynamic waits, to avoid unnecessary delays and improve test execution speed.
2. Explicit Waits: Using explicit waits to pause the execution until a specific condition is met, ensuring synchronization with the application state.
3. Timeout Configuration: Setting appropriate timeouts for different scenarios to balance responsiveness and stability.
4. Handling AJAX Calls: Addressing challenges related to AJAX calls and other asynchronous activities in the application.
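For example, a minimal explicit-wait sketch with Selenium's Python bindings blocks until a condition is met instead of sleeping for a fixed time; the locator and timeout are hypothetical:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_results(driver, timeout=10):
    # Returns the element once it is visible, or raises TimeoutException
    # after `timeout` seconds.
    return WebDriverWait(driver, timeout).until(
        EC.visibility_of_element_located((By.ID, "results"))
    )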
Running and Analyzing Tests
Executing Automated E2E Tests Locally and in a CI/CD Environment
This section outlines the steps and considerations for running automated End-to-End (E2E) tests both locally and within a Continuous Integration/Continuous Deployment (CI/CD) environment:
Local Execution:
1. Configuring test execution on local development environments.
2. Running tests in various browsers for compatibility testing.
3. Verifying that tests can be executed individually and as part of the entire test suite.
CI/CD Integration:
1. Setting up integration with CI/CD pipelines for automatic test execution.
2. Defining triggers and dependencies to ensure tests run after code changes.
3. Optimizing parallel execution for faster feedback in CI/CD workflows.
Generating and Interpreting Test Reports
Generating comprehensive and understandable test reports is crucial for practical analysis. This subsection covers:
Report Generation:
1. Configuring test frameworks to create detailed reports after each test run.
2. Choosing appropriate report formats (e.g., HTML, JSON, XML) for better readability.
Incorporating Screenshots and Videos:
1. Enhancing test reports with screenshots or videos for visual validation.
2. Ensuring that reports capture relevant information for debugging purposes.
Interpreting Test Results:
1. Understanding and interpreting test results, including pass/fail status and error messages.
2. Identifying trends and patterns in test failures to address root causes.
Strategies for Debugging and Troubleshooting Test Failures
Test failures are inevitable, and effective debugging is crucial for maintaining a robust automated testing suite. This part focuses on strategies for debugging and troubleshooting:
Logging and Debugging Statements:
1. Incorporating logging statements within test scripts to capture intermediate information during test execution.
2. Adding breakpoints and debugging tools for step-by-step analysis.
Failure Analysis:
1. Investigating test failures systematically, starting with identifying the root cause.
2. Utilizing test reports, logs, and screenshots to pinpoint issues.
Collaboration with Development Teams:
1. Establishing effective communication channels between testing and development teams.
2. Collaborating to address complex problems and improve overall test stability.
By mastering the execution and analysis phases of automated E2E testing, teams can streamline their testing processes, obtain valuable insights, and foster continuous improvement in the software development lifecycle.
Continuous Improvement and Maintenance
Implementing a Feedback Loop for Test Results
Continuous improvement relies on an effective feedback loop that facilitates communication and collaboration among team members. This section focuses on establishing a feedback loop for test results:
Automated Notification Systems:
1. Implementing automated notification systems to alert teams about test results.
2. Utilizing email notifications, messaging platforms, or integration with project management tools.
Severity Levels and Prioritization:
1. Assigning severity levels to test failures and prioritizing issues based on impact.
2. Streamlining the feedback process for critical failures that require immediate attention.
Feedback Sessions and Retrospectives:
1. Conducting regular feedback sessions and retrospectives to discuss test results and identify areas for improvement.
2. Encouraging open communication and collaboration to address challenges.
Incorporating Test Automation into the Development Lifecycle
Integration into the entire development lifecycle is essential to maximize the benefits of automated testing. This subsection covers strategies for seamless integration:
Shift-Left Testing:
1. Emphasizing early testing in the development process to catch issues as soon as possible.
2. Integrating automated E2E tests into the development workflow for continuous feedback.
Integration with Version Control:
1. Ensuring automated tests are tightly integrated with version control systems to trigger tests on code changes.
2. Automating test runs as part of the pre-commit and post-commit processes.
Collaborative Code Reviews:
1. Incorporating automated test results into code review processes to identify potential issues early.
2. Promoting collaboration between developers and testers during code reviews.
Strategies for Updating Tests as the Application Evolves
As the application evolves, test scripts must adapt to changes. This part outlines strategies for maintaining and updating tests:
Regular Test Maintenance:
1. Establishing a regular test maintenance schedule to address deprecated features or changes in application behaviour.
2. Updating test scripts to align with the latest application changes.
Versioning Test Scripts:
1. Implementing version control for test scripts to track changes over time.
2. Ensuring that test scripts are compatible with different versions of the application.
Automated Test Refactoring:
1. Implementing automated tools and practices for test refactoring to streamline updates.
2. Utilizing IDE features and refactoring tools to maintain script readability.
By focusing on continuous improvement, integrating testing into the development lifecycle, and adopting proactive strategies for test maintenance, teams can establish a robust and sustainable automated testing process.
Overcoming Common Challenges
Dealing with Dynamic UI Elements
Due to their changing nature, dynamic User Interface (UI) elements pose a challenge in automated E2E testing. This section addresses strategies to handle dynamic UI elements effectively:
Dynamic Waits:
1. Implementing dynamic waits that allow tests to wait for elements to appear or stabilize before proceeding.
2. Utilizing strategies such as polling to handle dynamic content loading.
XPath and CSS Selectors:
1. Choosing robust XPath or CSS selectors that are less prone to breaking when the DOM structure changes.
2. Avoiding reliance on absolute paths and using relative paths for better adaptability.
Element Identification Strategies:
1. Utilizing unique attributes like IDs or data attributes for element identification (see the sketch after this list).
2. Employing multiple locators for critical elements to enhance test stability.
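A minimal sketch of point 2 (multiple locators per critical element), assuming Selenium + Python; the locators target a hypothetical checkout button.
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException

# Ordered from most to least stable; all values are illustrative.
CHECKOUT_LOCATORS = [
    (By.CSS_SELECTOR, "[data-testid='checkout']"),   # dedicated test attribute
    (By.ID, "checkout"),                             # unique id
    (By.XPATH, "//button[contains(., 'Checkout')]"), # relative, text-based fallback
]


def find_checkout(driver):
    for by, value in CHECKOUT_LOCATORS:
        try:
            return driver.find_element(by, value)
        except NoSuchElementException:
            continue
    raise NoSuchElementException("checkout button not found with any locator")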
Handling Cross-Browser and Cross-Device Testing
Ensuring consistent functionality across various browsers and devices is vital. This subsection provides insights into practical strategies for cross-browser and cross-device testing:
Browser and Device Matrix:
1. Creating a comprehensive matrix of supported browsers and devices.
2. Prioritizing testing on browsers and devices commonly used by the target audience.
Cloud-Based Testing Platforms:
1. Leveraging cloud-based testing platforms for efficient cross-browser testing.
2. Automating tests to run on multiple browsers in parallel for faster feedback.
Responsive Design Testing:
1. Incorporating tests that validate the application's responsiveness across different screen sizes.
2. Using emulators and real devices to simulate diverse user experiences.
Ensuring Test Stability in Agile Development Environments
Agile development environments demand agility in testing processes. This part addresses strategies for maintaining test stability in agile settings:
Parallel Test Execution:
1. Leveraging parallel test execution to expedite test cycles and accommodate faster development iterations.
2. Integrating parallel testing with continuous integration for seamless automation.
Incremental Testing:
1. Implementing incremental testing by testing specific features or user stories in each iteration.
2. Ensuring that changes in one area do not negatively impact other application parts.
Collaboration and Communication:
1. Fostering cooperation between development and testing teams to align on changes and updates.
2. Establishing clear communication channels to address issues that arise during development sprints promptly.
By addressing these common challenges associated with dynamic UI elements, cross-browser and cross-device testing, and maintaining stability in agile environments, testing teams can enhance the effectiveness and reliability of their automated E2E testing processes.
Level Up
This guide has emphasized the fundamental role of testing in delivering high-quality, reliable software. It provided an overview of popular E2E testing tools, the factors to consider when selecting a tool, and a comparative analysis of the different options, and it outlined the prerequisites for automated E2E testing, the installation and configuration of testing frameworks, and integration with version control systems and CI/CD pipelines.
As teams move forward, adopting these future trends will be crucial in staying at the forefront of E2E testing practices and ensuring ongoing success in delivering high-quality software.
You may also be interested in Best 8 Cross-Browser Testing Tools for 2024
Book a Demo and experience ContextQA testing tool in action with a complimentary, no-obligation session tailored to your business needs.
We make it easy to start with the ContextQA tool: Start Free Trial.
How do I access and set a variable of a class written in C++ and exposed to Lua?
// C++
#ifndef SCRIPTSTORAGE_H
#define SCRIPTSTORAGE_H
#include "HTest.h"
#include <luajit/lua.hpp>
#include <iostream>
struct HObject {
unsigned int id;
std::string name;
float x, y, z;
float sx, sy, sz;
float u, v;
HObject()
{
id = 1;
}
};
static bool checkFunctionArgs(lua_State* ls, const char* fname, unsigned int nargs)
{
int fnargs = lua_gettop(ls) - 1;
if(fnargs < (int)nargs)
{
std::cout << "LuaScriptError: " << fname << "() needs at least " << nargs << " parameter(s)" << std::endl;
return true;
}
if(fnargs > (int)nargs)
{
std::cout << "LuaScriptError: " << fname << "() takes " << nargs << " positional arguments but " << fnargs << " were given" << std::endl;
return true;
}
return false;
}
HObject* HObject_check(lua_State* ls, int index)
{
void* ud = 0;
luaL_checktype(ls, index, LUA_TTABLE);
lua_getfield(ls, index, "__self");
ud = luaL_checkudata(ls, index, "HObject:new");
luaL_argcheck(ls, ud != 0, 0, "'HObject:new' expected");
return *((HObject**)ud);
}
static int HObject_newHObject(lua_State* ls)
{
if(checkFunctionArgs(ls, "HObject:new", 0)){
return 0;
}
luaL_checktype(ls, 1, LUA_TTABLE);
lua_newtable(ls);
lua_pushvalue(ls, 1);
lua_setmetatable(ls, -2);
lua_pushvalue(ls, 1);
lua_setfield(ls, 1, "__index");
HObject** obj = (HObject**)lua_newuserdata(ls, sizeof(HObject*));
*obj = new HObject();
luaL_getmetatable(ls, "HObject:new");
lua_setmetatable(ls, -2);
lua_setfield(ls, -2, "__self");
return 1;
}
static int HObject_destructor(lua_State* ls)
{
HObject* obj = HObject_check(ls, 1);
delete obj;
return 1;
}
void HTest_register(lua_State* ls)
{
static const luaL_Reg hobjec_funcs[] = {
{"new", HObject_newHObject},
{"__gc", HObject_destructor},
{NULL, NULL}
};
luaL_newmetatable(ls, "HObject:new");
luaL_register(ls, 0, hobjec_funcs);
lua_pushvalue(ls, -1);
lua_setfield(ls, -2, "__index");
luaL_register(ls, "HObject", hobjec_funcs);
}
#endif // SCRIPTSTORAGE_H
-- Lua
local obj = HObject:new() -- OK
obj.variavel = 10 -- example of writing to the variable
print(obj.variavel) -- example of reading the variable
I want to read and write the variable, but I don't know how to bind that in Lua. I have already managed to bind the HObject class along with its functions; only the variables are missing.
1 Answer
There is no standard way to expose variable access in Lua. You have two options to work around this:
Getters and Setters
The first is easier, but it can look a bit ugly. You can create getters and setters. You would have to create a get/set pair for each variable you want accessible, for example:
static int getVariavel(lua_State* ls)
{
// retrieve your object here
lua_pushinteger(ls, obj->variavel);
return 1;
}
static int setVariavel(lua_State* ls)
{
// retrieve your object here
obj->variavel = lua_tointeger(ls, -1);
lua_pop(ls, 1);
return 0;
}
You have to create one such pair for each variable, but this can be simplified with the help of macros and/or templates. With this model you can read and change the variables as follows:
local obj = HObject:new() -- OK
obj:setVariavel(10) -- example of writing to the variable
print(obj:getVariavel()) -- example of reading the variable
Filtering via __index and __newindex
The other way is to install handlers for the __index and __newindex events in the objects' metatable. That way you can filter the variable names and return their values:
static int index(lua_State *ls)
{
// retrieve your object here
std::string s = lua_tostring(ls, 2);
if(s == "variavel")
lua_pushinteger(ls, obj->variavel);
else if(s == "variavel2")
lua_pushinteger(ls, obj->variavel2);
// other variables here.
else
{
// fall back to the table with the functions.
}
return 1;
}
static int newIndex(lua_State *ls)
{
// retrieve your object here
std::string s = lua_tostring(ls, 2);
if(s == "variavel")
obj->variavel = lua_tointeger(ls, -1);
else if(s == "variavel2")
obj->variavel2 = lua_tointeger(ls, -1);
// ... other variables here.
else
{
// show an error message saying the variable does not exist.
}
return 0;
}
This model needs only these two functions, but comparing the strings one by one can be slow if your class has a very large number of attributes. On the other hand, the access from Lua looks nicer, in my opinion:
-- Lua
local obj = HObject:new() -- OK
obj.variavel = 10 -- example of writing to the variable
print(obj.variavel) -- example of reading the variable
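One detail the snippets above leave implicit is how these handlers get attached. A minimal registration sketch in the style of the question's HTest_register (the function name is illustrative):
void HObject_registerAccessors(lua_State* ls)
{
    luaL_getmetatable(ls, "HObject:new"); // push the metatable created earlier
    lua_pushcfunction(ls, index);
    lua_setfield(ls, -2, "__index");      // reads now go through index()
    lua_pushcfunction(ls, newIndex);
    lua_setfield(ls, -2, "__newindex");   // writes now go through newIndex()
    lua_pop(ls, 1);                       // pop the metatable
}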
• TalesM, thanks for the answer. I ended up going with getters and setters after all :/. But how is this done in LuaBind, for example? 30/03/2014 at 17:11
• I don't know much about LuaBind, but I know it automates a lot of this using templates. Underneath it all it calls the Lua API, so it must implement one of these techniques behind the scenes.
– TalesM
31/03/2014 at 2:41
Jobs, Events & Listeners
Posted 3 years ago by cristian9509
I am trying to wrap my head around these concepts. I understand a bit the workflow but I would love some input, as to when, which and why to use.
So here is an example: User registers and then will need to create a Stripe Subscription.
User registration: I will need to create a user (CreateUser) here and then send them a welcome email (SendWelcomeEmail)
StripeSubscription: Here I will need to do multiple things. When a user first signs up with my app I would need to create a new subscription with Stripe (CreateNewSubscription). Later if they want to change something to their subscription they can cancel (CancelSubscription), resume a cancelled but on grace period subscription (ResumeSubscription) or reactivate a cancelled and expired subscription (ReactivateSubscription). All these actions done by the user would also require that I send them emails based on the action they took.
What should be done by a Job and what should be done by an Event?
Jobs: UserCreate, SubscriptionCreate, SubscriptionCancel, SubscriptionResume, SubscriptionReactivate? Events: emailUserCreate, emailSubscriptionCreate, etc.?
Do you need to make a copy of token data before sending it along with an event?
I just fixed a bug in my game. I found that the token data sent along with an event was not right, and I had to make a copy of it before sending to fix the bug. I assume it's because the data is changed immediately after the event-sending code runs, while the actual send has not happened yet; when Bolt finally sends the event, the new data is sent.
Example code:
class DataClassA : IProtocolToken, ICloneable {
    int a;
    // IProtocolToken Read/Write members omitted for brevity
    public object Clone()
    {
        return (DataClassA)MemberwiseClone();
    }
}

void SendEventFunction_BuggedVersion(){
    var dataToSend = new DataClassA(){ a = 1 };
    var evnt = EventA.Create();
    evnt.tokenData = dataToSend;
    evnt.Send();
    dataToSend.a = 2; // change data in the same frame
}

void SendEventFunction_RightVersion(){
    var dataToSend = new DataClassA(){ a = 1 };
    var evnt = EventA.Create();
    evnt.tokenData = dataToSend.Clone() as DataClassA;
    evnt.Send();
    dataToSend.a = 2; // change data in the same frame
}
Comments
• Hello @SPF ,
When you pass a reference to your Token to the Event, it's just a reference, so yes, if you change it right after assigning the reference, you will be changing the same data.
Can you describe a situation in which you need to change a Token right after assigning it to an Event?
We don't make a copy of your Token when you assign it, as you may want to reuse it in a future call.
--
Ramon Melo
Photon Bolt Team
• SPF
SPF
edited May 2021
Thanks @ramonmelo , then I'll copy them. They change quite frequently in my project because my main data types all extend `IProtocolToken`, so it's handy to pass them as tokens in events.
Example situation:
My game is a card game, and each `CardData` extends `IProtocolToken`. It's quite common for a `CardData` to be passed as a token along with events. The instances of `CardData` are all saved in the server's data model, meaning they exist throughout the whole game. And as we know, cards are buffed frequently during a game. That's why, after being passed as tokens, they can still be changed quite often.
Export Data to Excel from Web API in ASP.NET Core using Kendo Grid
So far, we have learned how to display data in a Grid from a Web API in ASP.NET Core web applications using the Kendo UI Grid. If you are new to Web API in ASP.NET or don't know how to work with the Kendo UI Grid in ASP.NET Core, please check these articles.
Now, we will learn more about Kendo Grid features for exporting data in Excel and PDF format.
Kendo UI Grid supports exporting to Excel or PDF document. In this article, we will learn how to use the Excel Export feature of the Kendo UI Grid.
Let's assume that we have already created a Web API in ASP.NET Core and the URL of the API is /api/StudentAPI/GetAllStudents
Code Snippet of API
// GET: api/StudentAPI/GetAllStudents
//[HttpPost]
[Route("api/StudentAPI/GetAllStudents")]
public IEnumerable<PersonalDetail> GetAllStudents()
{
    List<PersonalDetail> students = new List<PersonalDetail>
    {
        new PersonalDetail{
            RegNo = "2017-0001",
            Name = "Nishan",
            Address = "Kathmandu",
            PhoneNo = "9849845061",
            AdmissionDate = DateTime.Now
        },
        new PersonalDetail{
            RegNo = "2017-0002",
            Name = "Namrata Rai",
            Address = "Bhaktapur",
            PhoneNo = "9849845062",
            AdmissionDate = DateTime.Now
        },
        new PersonalDetail{
            RegNo = "2017-0003",
            Name = "Junge Rai",
            Address = "Pokhara",
            PhoneNo = "9849845063",
            AdmissionDate = DateTime.Now
        },
        new PersonalDetail{
            RegNo = "2017-0004",
            Name = "Sunita Ghimire",
            Address = "Kathmandu",
            PhoneNo = "9849845064",
            AdmissionDate = DateTime.Now
        },
        new PersonalDetail{
            RegNo = "2017-0005",
            Name = "John",
            Address = "Bhaktapur",
            PhoneNo = "9849845065",
            AdmissionDate = DateTime.Now
        },
        new PersonalDetail{
            RegNo = "2017-0006",
            Name = "Jenny Moktan",
            Address = "Kathmandu",
            PhoneNo = "9849845066",
            AdmissionDate = DateTime.Now
        },
        new PersonalDetail{
            RegNo = "2017-0007",
            Name = "Kalpana Ghimire",
            Address = "Pokhara",
            PhoneNo = "9849845067",
            AdmissionDate = DateTime.Now
        },
        new PersonalDetail{
            RegNo = "2017-0008",
            Name = "Krishna Manadal",
            Address = "Kathmandu",
            PhoneNo = "9849845067",
            AdmissionDate = DateTime.Now
        },
        new PersonalDetail{
            RegNo = "2017-0009",
            Name = "Karishman Luitel",
            Address = "Bhaktapur",
            PhoneNo = "9849845067",
            AdmissionDate = DateTime.Now
        },
        new PersonalDetail{
            RegNo = "2017-0010",
            Name = "Hari Simkhada",
            Address = "Bhaktapur",
            PhoneNo = "9849845067",
            AdmissionDate = DateTime.Now
        },
    };
    return students;
}
The API when called will return the data shown below.
Now, we will perform the following tasks.
• Call the above API(/api/StudentAPI/GetAllStudents ) using Kendo Grid
• Enable Export to Excel using Kendo Grid.
• Make the columns Filterable.
• Make Columns Groupable.
• Make Columns Reorderable.
• Make columns Sortable.
• Enable Columns Show/Hide
• Add additional functions to Column (Search and Multi Check Filter)
Let's add the following lines of code to parse data received from the Web API into the Kendo Grid.
<link rel="stylesheet" href="http://kendo.cdn.telerik.com/2017.1.118/styles/kendo.common.min.css" />
<link rel="stylesheet" href="http://kendo.cdn.telerik.com/2017.1.118/styles/kendo.rtl.min.css" />
<link rel="stylesheet" href="http://kendo.cdn.telerik.com/2017.1.118/styles/kendo.silver.min.css" />
<link rel="stylesheet" href="http://kendo.cdn.telerik.com/2017.1.118/styles/kendo.mobile.all.min.css" />
<script src="http://code.jquery.com/jquery-1.12.4.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/jszip/2.4.0/jszip.min.js"></script>

<script src="http://kendo.cdn.telerik.com/2017.1.118/js/kendo.all.min.js"></script>

<div class="panel panel-primary">
    <div class="panel-heading">Test Data from API</div>
    <div class="panel-body">
        <div id="Grid"></div> <!--end of grid-->
    </div> <!--end of panel-body-->
</div> <!--end of panel-primary-->
Now, let's add the script section:
• to receive data from the ASP.NET Core Web API
• to parse the data into the Kendo Grid
• to add an "Export to Excel" button to the Kendo Grid
<script>
    $(document).ready(function () {
        $("#Grid").kendoGrid({
            toolbar: ["excel"],
            excel: {
                fileName: "Demo Excel From Kendo.xlsx",
                filterable: true, // Allow filtering
                allPages: true
            },
            dataSource: {
                type: "json",
                transport: {
                    contentType: "application/json; charset=utf-8",
                    type: "GET", // type: "GET", "POST"
                    dataType: "json", // dataType: "json", "odata"
                    read: "/api/StudentAPI/GetAllStudents" // API URL
                },
                pageSize: 5,
                schema: {
                    model: {
                        fields: {
                            RegNo: {
                                type: "string"
                            },
                            Name: {
                                type: "string"
                            },
                            Address: {
                                type: "string"
                            },
                            PhoneNo: {
                                type: "string"
                            },
                            admissionDate: {
                                type: "date"
                            }
                        }
                    }
                }
            },
            filterable: true, // Allow filtering
            sortable: true, // Allow sorting of columns
            groupable: true, // Allow grouping
            columnMenu: true, // Show/hide columns
            reorderable: true, // Allow column reordering
            resizable: true,
            pageable: {
                refresh: true,
                pageSizes: true,
                buttonCount: 5
            },
            columns: [{
                field: "regNo",
                title: "Regd No",
                filterable: { multi: true, search: true } // Allow multi-select filter and search
            }, {
                field: "name",
                title: "Student Name",
                filterable: { multi: true, search: true } // Allow multi-select filter and search
            }, {
                field: "address",
                title: "Address",
                filterable: { multi: true, search: true } // Allow multi-select filter and search
            }, {
                field: "phoneNo",
                title: "Phone No",
                filterable: { multi: true, search: true } // Allow multi-select filter and search
            }, {
                field: "admissionDate",
                title: "Admission Date",
                format: "{0:MM-dd-yyyy}", // Date formatting, e.g. 03-27-2017
                filterable: { multi: true, search: true } // Allow multi-select filter and search
            }]
        });
    });
</script>
Now, run the application and navigate to the corresponding page to see output.
Export to Excel using Kendo UI in ASP.NET Core by Nishan
The toolbar: ["excel"] option will add an "Export to Excel" button on the grid, as shown in the figure above. Now, let's export the above data to Excel. The exported Excel file will look like the following.
Export to Excel using Kendo UI in ASP.NET Core by Nishan
You can also group the data by columns, reorder and filter columns, and show/hide columns as per your requirements. When you export to Excel, the data will be exported as per the selection.
Sample grouped and ungrouped data, before and after exporting, is shown in the figures below.
Ungrouped data
Export to Excel using Kendo UI in ASP.NET Core by Nishan
Grouped data
Export to Excel using Kendo UI in ASP.NET Core by Nishan
Note that Excel Export relies on a JavaScript library called jszip. Let's reference this jszip JavaScript library.
<script src="https://cdnjs.cloudflare.com/ajax/libs/jszip/2.4.0/jszip.min.js"></script>
Kendo UI provides an awesome and easy way to configure the export button.
$(document).ready(function () {
    $("#Grid").kendoGrid({
        toolbar: ["excel"],
        excel: {
            fileName: "Demo Excel From Kendo.xlsx",
            filterable: true, // Allow filtering
            allPages: true
        },
        ............. // other code
    }); // End of kendoGrid
}); // End of ready function
What if I want to place an Export to Excel button outside the Grid?
Well, in this case, you can complete the task easily. You just need to write a few lines of JavaScript.
Kendo UI has a saveAsExcel() method that exports data to Excel easily. Let's assume we have a button labeled Export to Excel just outside of the Kendo Grid.
Now, we will tell the browser to download the Excel file when the button is clicked.
<script>
    $("#btnExportToExcel").kendoButton({
        // invoke saveAsExcel() when button is clicked
        click: function()
        {
            $("#Grid").data("kendoGrid").saveAsExcel()
        }
    });
    // convert the div into a Kendo Grid
    $("#Grid").kendoGrid({
        toolbar: ["excel"],
        excel: {
            fileName: "Data Export to Excel.xlsx",
            filterable: true, // Allow filtering
            allPages: false // Export only the current page
        },
        ............
        // other code omitted for now.
    });
</script>
We learned how to:
• Convert API data into a Kendo UI Grid
• Add functionality to the Kendo UI Grid like filtering, pagination, sorting, column menu, etc.
• Export data to Excel.
• Export data to Excel with a custom button created outside the Kendo Grid.
Up Next
Ebook Download
View all
Learn
View all
|
__label__pos
| 0.98141 |
Why do I need a setter, if I am using _ to not have the property changed by others?
Question
If I am using _ to make sure other developers know to not actively change the value of a property, why do I need a setter?
Answer
When we decide to implement setters, it is because, even though we used the underscore to visually alert other developers that the property must not be changed directly, we still plan to use that property dynamically (aka, we want its value to change) through the setter, modifying it indirectly and under our own conditions. That way we can make sure there is always a value being assigned (not an empty value like null or an empty string), and/or that it is of the type we need (i.e. string). In short, we give "privatized" properties (private only by developers' code of honor, not enforced by JavaScript, as you may recall from here) a setter when we want them to be modified in a way that prevents unnecessary or unexpected errors.
To clarify, let’s adjust the robot object as an example:
const robot = {
  _model: '1E78V2',
  _energyLevel: 100,
  // let's give it an energy level getter
  get energyLevel () {
    return this._energyLevel;
  },
  // and since we want the energy level to be modified as the robot
  // performs different tasks, we will give it a setter:
  set energyLevel(numLevel) {
    if( numLevel && typeof numLevel === "number" && numLevel > 0 ){
      // we check first if the argument is truthy (aka if a value has been given), if its type is number, and if the value is greater than 0. If all that is true, then:
      this._energyLevel = numLevel;
    } else {
      console.log( 'Error: Value does not exist, is not a number, or not greater than zero' );
    }
  }
};
With that setter we can interact with the energy level while making sure any change meets our conditions. So, as we can see, some of the properties that we don't want changed directly are still meant to be mutable (their values change), but we use setters as a safety measure so they can only be modified under the conditions we want.
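To see the guard in action, a short usage sketch with the robot object above:
robot.energyLevel = 80;          // passes every check, so _energyLevel becomes 80
console.log(robot.energyLevel);  // 80
robot.energyLevel = -5;          // fails the checks, so the error message is logged
console.log(robot.energyLevel);  // still 80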
When numLevel is zero, the first operand will yield false.
Thanks for catching it! Edited.
It will still be false when numLevel is zero. It would appear that (first) operand is not needed.
Hey there!
if( numLevel && typeof numLevel === "number" && numLevel > 0 )
As far as I understand if we pass 0 as the numLevel it will be false since it zero also means a boolean false right? So we don’t need to use the first operand.
Also if we don’t give a value to the numLevel it will return undefined and we check that with :
typeof numLevel === "number"
Right?
Yes, it is redundant given the final operand checks for positive, non-zero.
Will be false if undefined.
Hi Alex, thanks for the simplistic explanation. It really clears the concept of how setters can help manipulate values based upon certain conditions.
Why can't normal methods be used for this purpose? What is their deficiency, and why is it necessary to create setter (and getter) methods?
let a=0;
console.log(typeof a);
This is giving result as number instead of boolean.
Why would we expect a boolean?
ADVANCED OBJECTS > Setters exercise
const robot = {
_model: '1E78V2',
_energyLevel: 100,
_numOfSensors: 15,
get numOfSensors(){
if(typeof this._numOfSensors === 'number'){
return this._numOfSensors;
} else {
return 'Sensors are currently down.'
}
},
set numOfSensors (num) {
if(typeof num === 'Number' && num >= 10){
//num is set to greater than 10
return this._numOfSensors = num;
} else {
return 'Pass in a number that is greater than or equal to 0'
}
}
};
robot.numOfSensors = 7;
console.log(robot.numOfSensors);
//gives out 15 from _numOfSensors
In the setter (the if-statement check), I've put the condition that the number should be greater than or equal to 10. I had the impression that if I set the value to less than 10, it would return the else statement (from the setter method) since the condition is false.
But it turns out it doesn't take the value and outputs the value from _numOfSensors.
Can anybody provide an insight into what I am missing?
The nomenclature is specific.
num instanceof Number
vs.
typeof num === 'number'
Thank you for replying.
But I’m sorry, I’m not so clear still - May be because I’m new to all these and still learning.
How can I set it to show else statement when the number less than 10 is passed?
Is your setter throwing an error?
My understanding is that typeof returns specific literal strings (i.e. "string", "number", "boolean" and "undefined"), and the case of the string matters, so JavaScript will only act on the check if you write it in lowercase. By using 'Number' you are comparing against a string that typeof will never return. So, as mtf hinted at (I guess he wanted to enter a Socratic dialogue and have you figure it out), you simply needed to change the check to a lowercase 'n' (i.e. 'number'), and your code would then treat numbers less than 10 as the values that trigger the else return text. Here is a link to an explanation at MDN (the Mozilla folks) that will hopefully help in understanding this: Click here for an explanation of typeof. I am only answering because the conversation looked like it died and I wanted to help future readers of this forum thread.
C#: Random Text Generator
Whenever I need to generate random text I generally just Google ‘Random Text’ and pull up the first or second site in the list. This works great when you can copy and paste the text from your browser, but I ran across a situation where I needed to generate some random text from within one of my applications. So, I whipped up a small method that generated a random string of a given length and continued on my way. But, soon thereafter the inner programmer got the better of me and I just had to turn the simple method into an entire class that aided in random text generation.
The RandomText class that I created allows you to generate random strings (words), sentences, and paragraphs. Each method is overloaded with parameters that allow you to specify things like the possible word lengths that make up each sentence and the possible sentence lengths that make up each paragraph. Additionally there are methods that take no parameters and will generate a random string (word), sentence, or paragraph using default possible lengths. There are also options for adding punctuation to the sentences include middle of the sentence punctuation like commas and end of sentence punctuation like periods.
I have included the public properties and methods along with their documentation below. The actual source code can be found on codeplex here.
public class RandomText
{
/// <summary>
/// Indicate whether or not to include random middle of the sentance punctuation
/// marks in generated sentances
/// </summary>
public bool AddMiddleOfSentancePunctuationMarks = false;
/// <summary>
/// Indicates whether or not to add an end of sentance punctuation mark
/// </summary>
public bool AddEndOfSentancePunctuation = true;
public RandomText()
/// <summary>
/// Generates a random string of a specific length.
/// </summary>
/// <returns>Returns a randomly generated string (lower case) of a specific length.</returns>
public string String()
/// <summary>
/// Generates a random string of a specific length.
/// </summary>
/// <param name="length">The length of the random string to generate.</param>
/// <returns>Returns a randomly generated string (lower case) of a specific length.</returns>
public string String(int length)
/// <summary>
/// Generates a random string of a specific length.
/// </summary>
/// <param name="length">The length of the random string to generate.</param>
/// <param name="randomCharacterCase">If true, each character in the string will have
/// an equal chance of being either upper case or lower case. If false, the generated
/// string will be all lower case.
/// </param>
/// <returns>Returns a randomly generated string of a specific length.</returns>
public string String(int length, bool randomCharacterCase)
/// <summary>
/// Returns a random number within a specified range.
/// </summary>
/// <param name="minValue">The inclusive lower bound of the random number returned.</param>
/// <param name="maxValue">The exclusive upper bound of the random number returned. maxValue must be
/// greater than or equal to minValue.</param>
/// <returns>A 32-bit signed integer greater than or equal to minValue and less than maxValue;
/// that is, the range of return values includes minValue but not maxValue. If
/// minValue equals maxValue, minValue is returned.</returns>
private int Number(int minValue, int maxValue)
/// <summary>
/// Generates a random sentance.
/// </summary>
/// <returns>Returns a random sentance of random length and words from the default sentance and word lengths.</returns>
public string Sentance()
/// <summary>
/// Generates a random sentance of a given number of words.
/// </summary>
/// <param name="numberOfWords">The number of words in the sentance</param>
/// /// <returns>Returns a random sentance of the specified length.</returns>
public string Sentance(int numberOfWords)
/// <summary>
/// Generates a random sentance of a given number of words and possible word lengths.
/// </summary>
/// <param name="numberOfWords">The number of words in the sentance</param>
/// <param name="possibleWordLengths">An array of integers representing the possible number of characters in each word</param>
/// <returns>Returns a string containing a specified number of random words composed of random characters</returns>
public string Sentance(int numberOfWords, int[] possibleWordLengths)
/// <summary>
/// Generates a random paragraph.
/// </summary>
public string Paragraph()
/// <summary>
/// Generates a random paragraph of a given number of sentances.
/// </summary>
/// <param name="numberOfSentances">The number of sentances in the paragraph.</param>
public string Paragraph(int numberOfSentances)
/// <summary>
/// Generates a random paragraph of a given number of sentances.
/// </summary>
/// <param name="numberOfSentances">The number of sentances in the paragraph.</param>
/// <param name="possibleSentanceLengths">An array of integers representing the possible number of words in each sentance.</param>
public string Paragraph(int numberOfSentances, int[] possibleSentanceLengths)
/// <summary>
/// Generates a random paragraph of a given number of sentances.
/// </summary>
/// <param name="numberOfSentances">The number of sentances in the paragraph.</param>
/// <param name="possibleSentanceLengths">An array of integers representing the possible number of words in each sentance.</param>
/// <param name="possibleWordLengths">An array of integers representing the possible number of characters in each word</param>
/// <returns>Returns a string containing a specified number of random sentances composed of random words and characters</returns>
public string Paragraph(int numberOfSentances, int[] possibleSentanceLengths, int[] possibleWordLengths)
}
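For orientation, here is a minimal usage sketch; it assumes the default constructor and the signatures listed above (output varies per run, since the text is random):
using System;

class RandomTextDemo
{
    static void Main()
    {
        var random = new RandomText();

        string word = random.String(8);          // an 8-character random word
        string sentence = random.Sentance(6);    // a sentence of 6 random words
        string paragraph = random.Paragraph(3);  // a paragraph of 3 random sentences

        random.AddMiddleOfSentancePunctuationMarks = true; // sprinkle in commas etc.

        Console.WriteLine(word);
        Console.WriteLine(sentence);
        Console.WriteLine(paragraph);
    }
}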
6 Responses to “C#: Random Text Generator”
1. C#: Extension Method – Get a Random Element from a Collection « Nick Olsen's Programming Tips Says:
[…] my previous post I created a class to generate random text. Throughout the code I had to get a random element from […]
2. Stephan Says:
Is there a reason you used “Sentance” instead of “Sentence” within your names?
• Nick Olsen Says:
Other than the fact that I apparently can’t spell? No. :)
3. How to: Random String Generator Returning Same String | SevenNet Says:
[…] Here is a blog post that provides a bit more robust class for generating random words, sentences and paragraphs. […]
4. Fixed Random String Generator Returning Same String #dev #it #asnwer | Good Answer Says:
[…] Here is a blog post that provides a bit more robust class for generating random words, sentences and paragraphs. […]
5. Fix: Random String Generator Returning Same String #answer #dev #computers | IT Info Says:
[…] Here is a blog post that provides a bit more robust class for generating random words, sentences and paragraphs. […]
Wahlin on .NET
Requiring SSL for MVC Controllers
ASP.NET MVC provides a lot of flexibility when it comes to changing how controllers, actions, routes and more work within an application. Having flexibility is good especially when you need to make a modification quickly without having to write a lot of code to get the change in place.
My company is in the process of rolling out an ASP.NET MVC site for a client and specific actions and controllers need to have SSL running to ensure that no sensitive information gets out, such as passwords and credit cards. If a user visits the site using http:// I need to switch them to https:// in certain parts of the Web site. Fortunately, ASP.NET MVC is quite extensible so it only took about four to five minutes to get a simple solution in place.
The easiest way I know of to switch to SSL for specific controllers or actions is to create an ActionFilterAttribute that handles redirecting them to an https:// address. Classes that derive from ActionFilterAttribute can be placed immediately above actions or even controllers in cases where the filter needs to apply to all actions in the controller. For my situation I needed entire controllers to be SSL-enabled so I placed the attribute above the controller class name.
Here's an example of the simple RequiresSSL attribute class:
using System;
using System.Web;
using System.Web.Mvc;
namespace Helpers
{
public class RequiresSSL : ActionFilterAttribute
{
public override void OnActionExecuting(
ActionExecutingContext filterContext)
{
HttpRequestBase req = filterContext.HttpContext.Request;
HttpResponseBase res = filterContext.HttpContext.Response;
//Check if we're secure or not and if we're on the local box
if (!req.IsSecureConnection && !req.IsLocal)
{
string url = req.Url.ToString().ToLower()
.Replace("http:", "https:");
res.Redirect(url);
}
base.OnActionExecuting(filterContext);
}
}
}
Some may note that the ToLower() call shown previously could cause problems with QueryString data. I could take ToLower() out but some people may type "HTTP" instead of "http", which would mess up the replace call. For the current application I'm working on, I only have integers being passed around on the QueryString so it didn't affect me at all, but it definitely could affect string data being passed. The solution is to use the UriBuilder class, which works much better in this scenario. Here's a different version of the RequiresSSL class that uses the UriBuilder class to create the SSL redirect URL.
using System;
using System.Web;
using System.Web.Mvc;
namespace Helpers
{
public class RequiresSSL : ActionFilterAttribute
{
public override void OnActionExecuting(
ActionExecutingContext filterContext)
{
HttpRequestBase req = filterContext.HttpContext.Request;
HttpResponseBase res = filterContext.HttpContext.Response;
//Check if we're secure or not and if we're on the local box
if (!req.IsSecureConnection && !req.IsLocal)
{
var builder = new UriBuilder(req.Url)
{
Scheme = Uri.UriSchemeHttps,
Port = 443
};
res.Redirect(builder.Uri.ToString());
}
base.OnActionExecuting(filterContext);
}
}
}
The RequiresSSL attribute can then be placed above the appropriate action or controller:
[HandleError]
[RequiresSSL]
public class AccountController : Controller
{
...
}
If you're running IIS7 and want to get a test SSL certificate setup for testing purposes, check out my good friend Rob Bagby's excellent post on the subject. He walks through the process of creating self-signed certificates and using them on your server.
About the Author
Dan Wahlin (Microsoft MVP for ASP.NET and XML Web Services) is the founder of The Wahlin Group which specializes in .NET and SharePoint onsite, online and video training and consulting solutions. Dan also founded the XML for ASP.NET Developers Web site, which focuses on using ASP.NET, XML, AJAX, Silverlight and Web Services in Microsoft's .NET platform. He's also on the INETA Speaker's Bureau and speaks at conferences and user groups around the world. Dan has written several books on .NET including "Professional Silverlight 2 for ASP.NET Developers," "Professional ASP.NET 3.5 AJAX, ASP.NET 2.0 MVP Hacks and Tips," and "XML for ASP.NET Developers." Read Dan's blog here.
Getting Started with Kubernetes: Minikube
Getting Started with Kubernetes: A Comprehensive Minikube Guide
Embarking on your Kubernetes journey may seem daunting, but fear not! This step-by-step guide will walk you through getting started with Kubernetes using Minikube, a popular and beginner-friendly tool that allows you to run Kubernetes locally. By the end of this tutorial, you’ll have a solid foundation in Kubernetes fundamentals and be ready to tackle more advanced concepts.
Step 1: Installing Minikube
Before diving into Kubernetes, you’ll first need to install Minikube on your local machine. Minikube is a tool that creates a single-node Kubernetes cluster, perfect for learning and testing purposes. You can follow the official installation guide for your respective operating system here: Minikube Installation.
Step 2: Installing kubectl
To interact with your Kubernetes cluster, you’ll need the kubectl command-line tool. This utility allows you to manage your cluster and deploy applications. Follow the official guide for installing kubectl here: Install and Set Up kubectl.
Step 3: Starting Minikube
With both Minikube and kubectl installed, it’s time to start your local Kubernetes cluster. Open a terminal and run the following command:
minikube start
This command initializes a new single-node Kubernetes cluster on your machine. Upon successful initialization, you’ll receive a confirmation message in your terminal.
Step 4: Verifying Cluster Status
To ensure your cluster is up and running, use the kubectl command to check its status:
kubectl get nodes
You should see one node listed with a Ready status, indicating that your Kubernetes cluster is operational.
Step 5: Deploying Your First Application
Now that your Kubernetes cluster is active, you can deploy a sample application. For this tutorial, we’ll use a simple nginx web server. Run the following command:
kubectl create deployment nginx --image=nginx
This command deploys the nginx container image from the Docker Hub registry and creates a new deployment named nginx.
Step 6: Exposing Your Application
To access your newly deployed application, you’ll need to expose it to the outside world. Use the kubectl command to create a NodePort service:
kubectl expose deployment nginx --type=NodePort --port=80
This command maps port 80 on the nginx container to a port on the Minikube node.
Step 7: Accessing Your Application
To view your application, you’ll need to find the URL associated with the NodePort service. Run the following command:
minikube service nginx --url
This command returns the URL where your nginx service is accessible. Open the URL in your browser to see the default nginx welcome page.
Step 8: Scaling Your Application
To see Kubernetes in action, you can scale your application by increasing the number of replicas. Run the following command:
kubectl scale deployment nginx --replicas=3
This command increases the number of nginx replicas to three, demonstrating the power of Kubernetes in managing containerized applications.
Step 9: Checking Deployment Status
To monitor your deployment’s status and view the running replicas, use the following command:
kubectl get deployments
You should see the nginx deployment with three replicas and a Ready status for each.
Step 10: Updating Your Application
Kubernetes makes updating your application a breeze. Let’s say you want to update the nginx image to a newer version. Run the following command:
kubectl set image deployment/nginx nginx=nginx:1.21.1
This command updates the nginx deployment to use the specified version (1.21.1) of the nginx image.
Step 11: Cleaning Up
Once you’ve completed your experiments, it’s crucial to clean up your resources. Start by deleting the nginx service:
kubectl delete service nginx
Next, delete the nginx deployment:
kubectl delete deployment nginx
Finally, stop the Minikube cluster:
minikube stop
Step 12: Further Learning
Congratulations! You’ve successfully navigated the basics of Kubernetes using Minikube. As you continue your Kubernetes journey, consider exploring the following resources on our blog:
1. Deep Dive into Kubernetes Components
2. Understanding Kubernetes Pod
3. Advanced Deployment Strategies in Kubernetes
4. Managing Kubernetes Storage: A Comprehensive Guide
5. Monitoring and Logging in Kubernetes
These articles will further deepen your understanding of Kubernetes and help you master container orchestration. Happy learning!
core.helpers
Submodule of khiops.core
Helper functions for specific and/or advanced treatments
Functions
build_multi_table_dictionary_domain: Builds a multi-table dictionary domain from a dictionary with a key
deploy_coclustering: Deploys an individual-variable coclustering on a data table
deploy_predictor_for_metrics: Deploys the necessary data to estimate the performance metrics of a predictor
khiops.core.helpers.build_multi_table_dictionary_domain(dictionary_domain, root_dictionary_name, secondary_table_variable_name)
Builds a multi-table dictionary domain from a dictionary with a key
Parameters:
dictionary_domainDictionaryDomain
DictionaryDomain object. Its root dictionary must have its key set.
root_dictionary_namestr
Name for the new root dictionary
secondary_table_variable_namestr
Name, in the root dictionary, for the “table” variable of the secondary table.
Raises:
TypeError
Invalid type of an argument
ValueError
Invalid values of an argument:
• the dictionary domain doesn't contain at least one dictionary
• the dictionary domain's root dictionary doesn't have a key set
khiops.core.helpers.deploy_coclustering(dictionary_file_path_or_domain, dictionary_name, data_table_path, coclustering_file_path, key_variable_names, deployed_variable_name, results_dir, detect_format=True, header_line=None, field_separator=None, output_header_line=True, output_field_separator='\t', max_preserved_information=0, max_cells=0, max_part_numbers=None, build_cluster_variable=True, build_distance_variables=False, build_frequency_variables=False, variables_prefix='', results_prefix='', batch_mode=True, log_file_path=None, output_scenario_path=None, task_file_path=None, trace=False)
Deploys an individual-variable coclustering on a data table
This procedure generates the following files in results_dir:
• Coclustering.kdic: A multi-table dictionary file for further deployments of the coclustering with deploy_model
• Keys<data_table_file_name>: A data table file containing only the keys of individuals
• Deployed<data_table_file_name>: A data table file containing the deployed coclustering model
Parameters:
dictionary_file_path_or_domainstr or DictionaryDomain
Path of a Khiops dictionary file or a DictionaryDomain object.
dictionary_namestr
Name of the dictionary to be analyzed.
data_table_pathstr
Path of the data table file.
coclustering_file_pathstr
Path of the coclustering model file (extension .khc or .khcj)
key_variable_nameslist of str
Names of the variables forming the unique keys of the individuals.
deployed_variable_namestr
Name of the coclustering variable to deploy.
results_dirstr
Path of the results directory.
detect_formatbool, default True
If True detects automatically whether the data table file has a header and its field separator. It’s ignored if header_line or field_separator are set.
header_linebool, optional (default True if detect_format is False)
If True it uses the first line of the data as column names. Overrides detect_format if set.
field_separatorstr, optional (default “\t” if detect_format is False)
A field separator character, overrides detect_format if set (”” counts as “\t”).
output_header_linebool, default True
If True writes a header line containing the column names in the output table.
output_field_separatorstr, default “\t”
A field separator character (empty string counts as tab).
max_preserved_informationint, default 0
Maximum information preserve in the simplified coclustering. If equal to 0 there is no limit.
max_cellsint, default 0
Maximum number of cells in the simplified coclustering. If equal to 0 there is no limit.
max_part_numbersdict, optional
Dictionary associating variable names to their maximum number of parts to preserve in the simplified coclustering. For variables not present in max_part_numbers there is no limit.
build_cluster_variablebool, default True
If True includes a cluster id variable in the deployment.
build_distance_variablesbool, default False
If True includes a cluster distance variable in the deployment.
build_frequency_variablesbool, default False
If True includes the frequency variables in the deployment.
variables_prefixstr, default “”
Prefix for the variables in the deployment dictionary.
results_prefixstr, default “”
Prefix of the result files.
Options of the KhiopsRunner.run method from the class KhiopsRunner.
Returns:
tuple
A 2-tuple containing:
• The deployed data table path
• The deployment dictionary file path.
Raises:
TypeError
Invalid type dictionary_file_path_or_domain or key_variable_names
ValueError
If the type of the dictionary key variables is not equal to Categorical
Examples
See the corresponding function in the samples.py documentation script.
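For a quick orientation, a minimal call sketch; every file path and name below is a hypothetical placeholder:
from khiops.core import helpers

deployed_table_path, deploy_dictionary_path = helpers.deploy_coclustering(
    "Customers.kdic",             # dictionary file
    "Customer",                   # dictionary to analyze
    "Customers.txt",              # data table
    "CustomerCoclustering.khc",   # trained coclustering model
    ["CustomerId"],               # key variables identifying an individual
    "ProductPurchased",           # coclustering variable to deploy
    "./coclustering_results",     # results directory
)
print(deployed_table_path, deploy_dictionary_path)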
khiops.core.helpers.deploy_predictor_for_metrics(dictionary_file_path_or_domain, dictionary_name, data_table_path, output_data_table_path, detect_format=True, header_line=None, field_separator=None, sample_percentage=70, sampling_mode='Include sample', additional_data_tables=None, output_header_line=True, output_field_separator='\t', trace=False)
Deploys the necessary data to estimate the performance metrics of a predictor
For each instance it deploys:
• The true value of the target variable
• The predicted value of the target variable
• The probabilities of each value of the target variable (classifier only)
Note
To obtain the data of the default Khiops test dataset use sample_percentage = 70 and sampling_mode = "Exclude sample".
Parameters:
dictionary_file_path_or_domainstr or DictionaryDomain
Path of a Khiops dictionary file or a DictionaryDomain object.
dictionary_namestr
Name of the predictor dictionary.
data_table_pathstr
Path of the data table file.
output_data_table_pathstr
Path of the scores output data file.
detect_formatbool, default True
If True detects automatically whether the data table file has a header and its field separator. It’s ignored if header_line or field_separator are set.
header_linebool, optional (default True if detect_format is False)
If True it uses the first line of the data as column names. Overrides detect_format if set.
field_separatorstr, optional (default “\t” if detect_format is False)
A field separator character, overrides detect_format if set (”” counts as “\t”).
sample_percentageint, default 70
See sampling_mode option below.
sampling_mode“Include sample” or “Exclude sample”
If equal to “Include sample” deploys the predictor on sample_percentage percent of data and if equal to “Exclude sample” on the complementary 100 - sample_percentage percent of data.
additional_data_tablesdict, optional
A dictionary containing the data paths and file paths for a multi-table dictionary file. For more details see Multi-Table Learning Primer documentation.
output_header_linebool, default True
If True writes a header line containing the column names in the output table.
output_field_separatorstr, default “\t”
A field separator character (”” counts as “\t”).
Options of the KhiopsRunner.run method from the class KhiopsRunner.
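A minimal call sketch mirroring the note above (deploying scores on the default Khiops test split); all paths and names are hypothetical placeholders:
from khiops.core import helpers

helpers.deploy_predictor_for_metrics(
    "ModelingDictionary.kdic",  # predictor dictionary file
    "SNB_Customer",             # predictor dictionary name
    "Customers.txt",            # data table
    "TestScores.txt",           # output table with true/predicted values
    sample_percentage=70,
    sampling_mode="Exclude sample",
)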
What I want is to grab just the idCategoria from the object so I can use it in another query, but it throws the following error:
cannot convert value of string to expected argument to int
let objeto = values["idCategoria"]
print(objeto)
Full code
import Foundation
import UIKit
import WebKit
class HomeViewController: UIViewController, UITableViewDelegate, UITableViewDataSource {
@IBOutlet var tableView: UITableView!
//var identificador = ["1","1","1","1","1","1","1","1","1"]
var values:NSArray = []
//var arr = ["http://totalplanning.guiaparatuseventos.com/IOs/categorias.php"]
//let colors = [UIColor.brownColor(),UIColor.purpleColor(), UIColor.yellowColor(), UIColor.greenColor(), UIColor.blueColor(), UIColor.grayColor()]
override func viewDidLoad() {
super.viewDidLoad()
let proxyViewForStatusBar : UIView = UIView(frame: CGRectMake(0, 0,self.view.frame.size.width, 20))
proxyViewForStatusBar.backgroundColor = UIColor(red: 0.48627450980392156, green: 0.070588235294117646, blue: 0.46274509803921571, alpha: 1)
self.view.addSubview(proxyViewForStatusBar)
get();
}
override func didReceiveMemoryWarning() {
super.didReceiveMemoryWarning()
// Dispose of any resources that can be recreated.
}
func hexStringToUIColor (hex:String) -> UIColor {
var cString:String = hex.stringByTrimmingCharactersInSet(NSCharacterSet.whitespaceAndNewlineCharacterSet() as NSCharacterSet).uppercaseString
if (cString.hasPrefix("#")) {
cString = cString.substringFromIndex(cString.startIndex.advancedBy(1))
}
if ((cString.characters.count) != 6) {
return UIColor.grayColor()
}
var rgbValue:UInt32 = 0
NSScanner(string: cString).scanHexInt(&rgbValue)
return UIColor(
red: CGFloat((rgbValue & 0xFF0000) >> 16) / 255.0,
green: CGFloat((rgbValue & 0x00FF00) >> 8) / 255.0,
blue: CGFloat(rgbValue & 0x0000FF) / 255.0,
alpha: CGFloat(1.0)
)
}
func get(){
let url = NSURL(string:"http://totalplanning.guiaparatuseventos.com/IOs/categorias.php")
let data = NSData(contentsOfURL: url!)
values = ((try! NSJSONSerialization.JSONObjectWithData(data!, options: [])) as? NSArray)!
}
func tableView(tableView: UITableView, numberOfRowsInSection section: Int) -> Int {
return values.count
}
func tableView(tableView: UITableView, cellForRowAtIndexPath indexPath: NSIndexPath) -> UITableViewCell{
let cell = tableView.dequeueReusableCellWithIdentifier("cell", forIndexPath: indexPath) as! SpecialCell
let maindata = values[indexPath.row]
//NSLog("La liga es %@ %@", maindata["idCategoria"] as! String, maindata["nombre"] as! String);
cell.nombre.text = maindata["nombre"] as? String
cell.descripcion.text = maindata["descripcion"] as? String
//cell.idCategoria.text = maindata["idCategoria"] as? String
let colors = [hexStringToUIColor("#FF0000"),hexStringToUIColor("#FF8000"),hexStringToUIColor("#FFFF00"),hexStringToUIColor("#80FF00"),hexStringToUIColor("00FFFF"),hexStringToUIColor("#0080FF"),hexStringToUIColor("#A901DB"),hexStringToUIColor("#FF00FF"),hexStringToUIColor("#FF0080")]
cell.accessoryType = .DisclosureIndicator
cell.imagen.backgroundColor = colors[indexPath.row % colors.count]
return cell
}
func tableView(tableView: UITableView, didSelectRowAtIndexPath indexPath: NSIndexPath) {
let objeto = values["idCategoria"]
print(objeto)
}
}
• Where do you assign the values to values[]? Attach your more complete code; that way you'll get better help and avoid having your question closed – JuankGlezz on Aug 3 '16 at 19:04
• Done, I just added the complete code – Brandon Josué Cerezo on Aug 3 '16 at 19:27
0
values is an array of JSON objects, so you first have to get the object and then access its property, that is:
let objeto = values[indexPath.row]["idCategoria"]
print(objeto)
• I guess it was quite easy, I'm very new to iOS. Another question: after accessing that property I want to send it to another view that shows the subcategories depending on the id, can that be done? – Brandon Josué Cerezo on Aug 3 '16 at 21:31
• Sure, the cleanest way would be to send the whole JSON object so you have all the info. Create a property in the destination view called mainData, for example. Once you instantiate it to show it, pass it on to the next view – mhergon on Aug 3 '16 at 22:04
• That part I more or less know; I just want to pass the id so that the next view can use it in a URL to show the corresponding subcategories – Brandon Josué Cerezo on Aug 3 '16 at 22:16
• Then do the same, but with an Int type and that's it. Once you have it in the other view, use viewWillAppear to load whatever data you need – mhergon on Aug 3 '16 at 22:21
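Putting mhergon's suggestion into code, a minimal Swift 2-style sketch (SubcategoriasViewController, its idCategoria property, and the "subcategorias" storyboard identifier are hypothetical names, not from the question):
func tableView(tableView: UITableView, didSelectRowAtIndexPath indexPath: NSIndexPath) {
    // the JSON delivers idCategoria as a String, so convert it to Int
    let maindata = values[indexPath.row]
    let idString = maindata["idCategoria"] as? String ?? ""
    let idCategoria = Int(idString) ?? 0
    // hypothetical destination view controller with an Int property
    if let destino = storyboard?.instantiateViewControllerWithIdentifier("subcategorias") as? SubcategoriasViewController {
        destino.idCategoria = idCategoria
        navigationController?.pushViewController(destino, animated: true)
    }
}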
APFS volume decryption paused/stuck/froze, how to fix and recover lost data?
By Katrina | Posted to Mac Data Recovery on November 7th, 2018 |
A week ago I updated my MacBook Pro to 10.13 and reformatted both internal disks (SSD and HDD) to encrypted APFS. So far the SSD is working fine, but I have a problem with the HDD: I can't mount its APFS volume on the system. I've tried to mount the HDD from Recovery mode and it seems it can't decrypt the volume. According to diskutil apfs list, decryption is stuck at 31%.
While enjoying the advanced features of APFS encryption, many people have suffered from decryption failures, such as APFS volume decryption being paused or stuck, an encrypted APFS drive that can't be unlocked with its password, and data lost after a failed APFS decryption. This article explains why APFS volume decryption gets paused or stuck, and how to recover data after the decryption is interrupted, stuck, frozen, or paused.
Why APFS volume decryption paused/stuck/froze?
There are various reasons why APFS volume decryption can pause or get stuck. Among them, catalog file corruption, an invalid volume header, and a damaged partition table are the most common. These in turn are usually caused by virus attacks, sudden power failures, and improper operations.
How to recover lost data after APFS volume decryption paused/stuck/froze?
If APFS volume decryption pauses or gets stuck, the APFS volume can't be opened and all data stored on the volume becomes inaccessible. So first things first: recover your lost data after the APFS volume decryption paused/stuck/froze.
iBoysoft Data Recovery for Mac is the best APFS data recovery software: it enables you to recover lost data after a failed APFS volume decryption, from an encrypted APFS drive, from a formatted APFS drive, from a corrupted APFS drive, from an unmountable APFS drive, and more. It supports data recovery on different storage devices, including SSDs, hard drives, external HDDs, USB flash drives, SD cards, and memory cards.
Tutorial to recover lost data after APFS volume decryption paused/stuck/froze:
Step 1: Launch iBoysoft Data Recovery for Mac on Mac and click "Find Lost APFS Partition" on the top right corner. Then you will see all connected hard drives.
Step 2: Select the drive which contains your APFS encrypted volume and click "Next" button. The software will list all APFS volumes on the selected drive.
Step 3: Choose the APFS encrypted volume that you want to recover lost data from and click "Next".
Step 4: Enter the correct password and click "OK" to start searching for lost files on the volume.
Step 5: Preview the listed searching results, choose files you need, then click "Recover" to get lost files back.
Step 6: Go through the recovered files to make sure all the lost files are back.
Besides APFS data recovery, iBoysoft Data Recovery for Mac also supports data recovery from HFS+, HFS, FAT32, and exFAT drives on macOS 10.14/10.13/10.12 and Mac OS X 10.11/10.10/10.9/10.8/10.7.
APFS volume decryption paused/stuck/froze, how to fix?
After a successful data recovery, you can fix the paused/stuck/frozen APFS volume decryption without data loss. Here is the most effective solution:
Step 1: Launch Disk Utility from Utilities.
Step 2: In the left part of the window, click the icon of the APFS drive whose decryption paused/stuck/froze.
Step 3: Click the Erase function at the top of the Disk Utility window.
Step 4: Complete the related information (name, format, scheme), then click the Erase button.
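If you prefer Terminal, roughly the same erase can be done with diskutil. The identifier disk2s1 below is only an example, so check your own layout with diskutil apfs list first -- and note that erasing destroys whatever data is still on the volume:
diskutil apfs list
diskutil eraseVolume APFS "NewVolume" /dev/disk2s1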
What is a URL Shortener?
Several Scissors by Nick Demou
With the constant growth of content generation and publishing through digital mediums like the web and apps, the need for easy sharing and tracking of shared articles and pages also increases. A URL shortener is a tool for content publishers and webmasters…
Change hostname or Server name of a Linux Machine
Checking your Linux host name
First, see if your host name is set correctly using the following commands:
#uname -n
#hostname -a
#hostname -s
#hostname -d
#hostname -f
#hostname
If the above commands return correctly with no errors then all may be well; however, you may want to read on to verify that all settings are correct.
Normally we will set the hostname of a system during the installation process.
Change the hostname on a running Linux system
On any Linux system you can change its hostname with the command ‘hostname‘.
#hostname
without any parameter it will output the current hostname of the system.
#hostname --fqd
it will output the fully qualified domain name (or FQDN) of the system.
#hostname NEW_NAME
will set the hostname of the system to NEW_NAME. This takes effect right away and remains in place until the system is rebooted (at boot the hostname is set from specific configuration files -- see below for how to make the change permanent). You will most probably need to exit the current shell in order to see the change in your shell prompt.
Permanent hostname change on Debian based Linux systems
Debian based systems use the file /etc/hostname to read the hostname of the system at boot time and set it up using the init script /etc/init.d/hostname.sh
#cat /etc/hostname
debianadmin
So on a Debian based system we can edit the file /etc/hostname, change the name of the system, and then run
/etc/init.d/hostname.sh start to make the change active. The hostname saved in this file (/etc/hostname) will be preserved on system reboot (and will be set at boot using the same hostname.sh script).
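For example, to rename a Debian machine to the hypothetical name myserver:
#echo myserver > /etc/hostname
#/etc/init.d/hostname.sh start
#hostname
The last command should now print myserver.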
If you want more details check hostname man page
Use sysctl to change the hostname
use sysctl to change the variable kernel.hostname:
Use:
#sysctl kernel.hostname
to read the current hostname, and
#sysctl kernel.hostname=NEW_HOSTNAME
to change it.
If you want more details check sysctl man page
You can use the same process on all Debian based distributions and other Linux machines as well.
26 thoughts on “Change hostname or Server name of a Linux Machine
1. hostname -fqd didn’t work…
I think it should just be hostname -f?
I may be wrong though..
But thanks 😀
JAM
2. Nope. It's "hostname --fqdn" or "hostname -f"
Sorry, couldn’t resist since everyone else was nitpicking. 🙂
Thanks for the excellent article. Worked like a charm!
3. My question is should you put the whole FQDN in the hostname file for a permanent change, or just the hostname itself?
4. The mailname serves a different purpose from that of hostname but in many
cases their values are the same. For /etc/mailname, dpkg-reconfigure exim4-config should do it.
5. By the fully qualified domain name, does that mean like something.com, or whatever you want the local hostname to be?
6. thanks for the info.. i edited my network file and hosts file, and now when i run hostname or hostname -f everything points to the new name.. but when i telnet my server it shows the old name… what would be the problem?
7. to summarize:
#!/bin/sh
sed s:$(hostname):$1:g /etc/hosts > /etc/hosts.new
mv /etc/hosts.new /etc/hosts
hostname $1
hostname > /etc/hostname
dpkg-reconfigure exim4-config
This is a useful script to have on a virtual machine template so you can rename new VMs easily.
8. hmm… the last command is interactive for no reason. the following is a better script:
#!/bin/sh
if [ -z "$1" ] ; then
exit 1
fi
newname=$1
oldname=$(hostname)
for file in /etc/hostname,/etc/hosts,/etc/mailname,/etc/exim4/update-exim4.conf.conf ; do
sed s:${oldname}:${newname}:g ${file} > ${file}.new && mv ${file}.new ${file}
done
hostname ${newname}
9. oops, the filenames should be space separated, or /etc/{hostname,hosts,mailname,exim4/update-exim4.conf.conf}. And "bin" shouldn't be Bin.
10. Hi,
Can two machines in the same network have same hostname with different IPs. If so, how ping is resolved.
11. need to change DOMAIN NAME (not hostname). How can I do this?
Mine reads: hostname..domain.tld (i added ‘two dots’ by mistake during install)
12. Thanks for your help!
will it work fine on next reboot if I just change /etc/host & /etc/hostname without restarting hostname service?
13. i tried to install iredmail on my laptop. obviously, my laptop now works as a server.. i'm green at using unix-based OS… started using it a few months ago… could someone please help me… i really need a very detailed explanation…. TQ guys…
14. This is very good information. Thanks!
@atm: I like your script. I'm just not very confident about how to go about using scripts. I believe there's something missing in your information… I'm a real newbie at this — which file do I save the script to — is the sed command part of the script file? or do I execute the command at the prompt….
Thanks
15. We have a Debian server; sometimes our web (umbyogya.com) is not accessible (not resolved by other DNS).
I think there is something error in own Name Server.
We tried checking: hostname -a
No result (blank)
How can we correct it?
thanks
Version: 2.x
rasa.server
ErrorResponse Objects
class ErrorResponse(Exception)
Common exception to handle failing API requests.
__init__
| __init__(status: Union[int, HTTPStatus], reason: Text, message: Text, details: Any = None, help_url: Optional[Text] = None) -> None
Creates error.
Arguments:
• status - The HTTP status code to return.
• reason - Short summary of the error.
• message - Detailed explanation of the error.
• details - Additional details which describe the error. Must be serializable.
• help_url - URL where users can get further help (e.g. docs).
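A rough usage sketch (illustrative only -- the status, reason and message below are made up, not taken from these docs):
from http import HTTPStatus
from rasa.server import ErrorResponse

# raised inside a request handler to produce a failing API response
raise ErrorResponse(
    HTTPStatus.BAD_REQUEST,
    "BadRequest",
    "The request body could not be parsed.",
    help_url="https://rasa.com/docs/rasa",  # placeholder docs URL
)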
ensure_loaded_agent
ensure_loaded_agent(app: Sanic, require_core_is_ready: bool = False) -> Callable[[Callable], Callable[..., Any]]
Wraps a request handler ensuring there is a loaded and usable agent.
Require the agent to have a loaded Core model if require_core_is_ready is True.
ensure_conversation_exists
ensure_conversation_exists() -> "SanicView"
Wraps a request handler ensuring the conversation exists.
requires_auth
requires_auth(app: Sanic, token: Optional[Text] = None) -> Callable[["SanicView"], "SanicView"]
Wraps a request handler with token authentication.
event_verbosity_parameter
event_verbosity_parameter(request: Request, default_verbosity: EventVerbosity) -> EventVerbosity
Create EventVerbosity object using request params if present.
get_test_stories
get_test_stories(processor: "MessageProcessor", conversation_id: Text, until_time: Optional[float], fetch_all_sessions: bool = False) -> Text
Retrieves test stories from processor for all conversation sessions for conversation_id.
Arguments:
• processor - An instance of MessageProcessor.
• conversation_id - Conversation ID to fetch stories for.
• until_time - Timestamp up to which to include events.
• fetch_all_sessions - Whether to fetch stories for all conversation sessions. If False, only the last conversation session is retrieved.
Returns:
The stories for conversation_id in test format.
update_conversation_with_events
async update_conversation_with_events(conversation_id: Text, processor: "MessageProcessor", domain: Domain, events: List[Event]) -> DialogueStateTracker
Fetches or creates a tracker for conversation_id and appends events to it.
Arguments:
• conversation_id - The ID of the conversation to update the tracker for.
• processor - An instance of MessageProcessor.
• domain - The domain associated with the current Agent.
• events - The events to append to the tracker.
Returns:
The tracker for conversation_id with the updated events.
validate_request_body
validate_request_body(request: Request, error_message: Text) -> None
Check if request has a body.
validate_events_in_request_body
validate_events_in_request_body(request: Request) -> None
Validates events format in request body.
authenticate
async authenticate(_: Request) -> NoReturn
Callback for authentication failed.
create_ssl_context
create_ssl_context(ssl_certificate: Optional[Text], ssl_keyfile: Optional[Text], ssl_ca_file: Optional[Text] = None, ssl_password: Optional[Text] = None) -> Optional["SSLContext"]
Create an SSL context if a proper certificate is passed.
Arguments:
• ssl_certificate - path to the SSL client certificate
• ssl_keyfile - path to the SSL key file
• ssl_ca_file - path to the SSL CA file for verification (optional)
• ssl_password - SSL private key password (optional)
Returns:
SSL context if a valid certificate chain can be loaded, None otherwise.
configure_cors
configure_cors(app: Sanic, cors_origins: Union[Text, List[Text], None] = "") -> None
Configure CORS origins for the given app.
add_root_route
add_root_route(app: Sanic) -> None
Add '/' route to return hello.
async_if_callback_url
async_if_callback_url(f: Callable[..., Coroutine]) -> Callable
Decorator to enable async request handling.
If the incoming HTTP request specified a callback_url query parameter, the request will return immediately with a 204 while the actual request response will be sent to the callback_url. If an error happens, the error payload will also be sent to the callback_url.
Arguments:
• f - The request handler function which should be decorated.
Returns:
The decorated function.
run_in_thread
run_in_thread(f: Callable[..., Coroutine]) -> Callable
Decorator which runs request on a separate thread.
Some requests (e.g. training or cross-validation) are computationally intensive. This means that they will block the event loop and hence the processing of other requests. This decorator can be used to process these requests on a separate thread to avoid blocking the processing of incoming requests.
Arguments:
• f - The request handler function which should be decorated.
Returns:
The decorated function.
inject_temp_dir
inject_temp_dir(f: Callable[..., Coroutine]) -> Callable
Decorator to inject a temporary directory before a request and clean up after.
Arguments:
• f - The request handler function which should be decorated.
Returns:
The decorated function.
create_app
create_app(agent: Optional["Agent"] = None, cors_origins: Union[Text, List[Text], None] = "*", auth_token: Optional[Text] = None, response_timeout: int = DEFAULT_RESPONSE_TIMEOUT, jwt_secret: Optional[Text] = None, jwt_method: Text = "HS256", endpoints: Optional[AvailableEndpoints] = None) -> Sanic
Creates a Sanic application that serves as the Rasa HTTP server.
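A hedged sketch of standing up the server with this function (the arguments below are illustrative; a real deployment would pass a loaded Agent):
from rasa.server import create_app

app = create_app(
    agent=None,                    # normally a loaded rasa Agent
    cors_origins="*",
    auth_token="my-secret-token",  # hypothetical token
)
app.run(host="0.0.0.0", port=5005)  # Sanic's standard run method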
KolesnichenkoDS - 8 months ago
C++ Question
What do these macros do? (LLVM)
I found this code in Sparc LLVM backend:
extern "C" void LLVMInitializeSparcTargetInfo() {
RegisterTarget<Triple::sparc, /*HasJIT=*/true> X(TheSparcTarget, "sparc",
"Sparc");
RegisterTarget<Triple::sparcv9, /*HasJIT=*/true> Y(TheSparcV9Target,
"sparcv9", "Sparc V9");
RegisterTarget<Triple::sparcel, /*HasJIT=*/true> Z(TheSparcelTarget,
"sparcel", "Sparc LE");
}
I don't understand what the X, Y, Z macros do and I cannot find any definition or documentation for them.
Answer
What makes you think that these are macros?
To me, X, Y and Z look like throwaway names for instantiation of registration objects.
You should instead be looking for the documentation of RegisterTarget.
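As a rough sketch of the pattern (not LLVM's actual code): the constructor of such a registration object performs the registration as a side effect, so the variable name is irrelevant.
#include <string>
#include <vector>

// Minimal stand-in for LLVM's RegisterTarget: construction registers.
struct RegisterThing {
    explicit RegisterThing(const std::string &name) {
        registry().push_back(name);  // the side effect is the point
    }
    static std::vector<std::string> &registry() {
        static std::vector<std::string> r;
        return r;
    }
};

void initTargets() {
    RegisterThing X("sparc");    // X, Y, Z are throwaway variable names
    RegisterThing Y("sparcv9");
    RegisterThing Z("sparcel");
}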
Archive
Archive for the ‘graph theory’ Category
Basic implementation of Dijkstra’s algorithm
25 February 2012
Dijkstra’s algorithm as presented in Algorithm 2.5, page 75 of the book Algorithmic Graph Theory is meant to be a general template. Lots of details have been left out, one in particular is how to implement line 6 of the algorithm. This one line of Dijkstra’s algorithm has been the subject of numerous research papers on how to efficiently implement a search technique for Dijkstra’s algorithm. A simple search technique is linear search, where you search some array or list from start to finish. A more efficient search technique for line 6 is a binary heap. To implement infinity as stated on line 1 of Algorithm 2.5, you would simply let a very large number represent infinity. This number should ideally be larger than any weight in the graph you are searching.
Below is a basic implementation of Dijkstra’s algorithm following the general template of Algorithm 2.5. This implementation is meant to be for searching in simple, unweighted, undirected, connected graphs. Because the graph to search is assumed to be unweighted, I simply let each edge have unit weight and represent infinity as the integer 10^9. The implementation below should provide a basis on which to implement Dijkstra’s algorithm for, say, weighted graphs and other types of graphs. To use the implementation below, save it to a Python file, load the file into a Sage session using load(), and call the function dijkstra().
def dijkstra(G, s):
"""
Shortest paths in a graph using Dijkstra's algorithm.
INPUT:
- G -- a simple, unweighted, undirected, connected graph. Thus each edge
has unit weight.
- s -- a vertex in G from which to start the search.
OUTPUT:
A list D of distances such that D[v] is the distance of a shortest path
from s to v. A dictionary P of vertex parents such that P[v] is the
parent of v.
EXAMPLES:
sage: G = graphs.PetersenGraph()
sage: dijkstra(G, 0)
([0, 1, 2, 2, 1, 1, 2, 2, 2, 2], {1: 0, 2: 1, 3: 4, 4: 0, 5: 0, 6: 1, 7: 5, 8: 5, 9: 4})
sage: G = Graph({0:{1:1, 3:1}, 1:{2:1, 3:1, 4:1}, 2:{4:1}})
sage: dijkstra(G, 0)
([0, 1, 2, 1, 2], {1: 0, 2: 1, 3: 0, 4: 1})
"""
n = G.order() # how many vertices
m = G.size() # how many edges
D = [1000000000 for _ in range(n)] # largest weights; represent +infinity
D[s] = 0 # distance from vertex to itself is zero
P = {} # a dictionary for fast look-up
Q = set(G.vertices())
while len(Q) > 0:
v = mindist(D, Q)
Q.remove(v)
Adj = set(G.neighbors(v))
for u in Adj.intersection(Q):
if D[u] > D[v] + 1: # each edge has unit weight, so add 1
D[u] = D[v] + 1
P.setdefault(u, v) # the parent of u is v
return D, P
def mindist(D, Q):
"""
Choose a vertex in Q such that it has minimal distance.
INPUT:
- D -- a list of vertices with corresponding distances. Each distance
D[v] corresponding to a vertex v means that v is that much further away
from a source vertex.
- Q -- all vertices to consider.
OUTPUT:
A vertex with minimum distance.
"""
v = None # start the search here
low = 1000000000 # the running minimum distance; represent +infinity
for u in Q:
if D[u] < low:
v = u
low = D[v]
return v
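For comparison, below is a sketch (my own illustration, using the same Sage graph API) of the search with Python's heapq module replacing the linear scan in mindist():
import heapq

def dijkstra_heap(G, s):
    # same conventions as above: unit edge weights, 10**9 plays infinity
    D = dict((v, 10**9) for v in G.vertices())
    D[s] = 0
    P = {}
    heap = [(0, s)]
    while len(heap) > 0:
        d, v = heapq.heappop(heap)
        if d > D[v]:
            continue  # stale heap entry; v was already settled
        for u in G.neighbors(v):
            if D[u] > D[v] + 1:
                D[u] = D[v] + 1
                P[u] = v
                heapq.heappush(heap, (D[u], u))
    return D, P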
Categories: graph theory, mathematics, Sage
DaMN book now on Softpedia
21 June 2011
My book-in-progress Algorithmic Graph Theory, co-authored with David Joyner and Nathann Cohen, is now listed on Softpedia. These days I rather refer to the book as the DaMN book. The name is taken from the first letter of the first name of each author.
Typeset trees using TikZ/PGF
31 March 2011
Some combinatorial trees typeset using TikZ/PGF. The Linux filesystem hierarchy:
The Linux filesystem hierarchy.
\begin{tikzpicture}
[-,thick]
\footnotesize
\node {\texttt{/}} [edge from parent fork down]
child {node {\texttt{bin}}}
child {node {\texttt{etc}}}
child {node {\texttt{home}}
child {node {\texttt{anne}}}
child {node {\texttt{sam}}}
child {node {$\dots$}}
}
child {node {\texttt{lib}}}
child {node {\texttt{opt}}}
child {node {\texttt{proc}}}
child {node {\texttt{tmp}}}
child {node {\texttt{usr}}
child {node {\texttt{bin}}
child {node {\texttt{acyclic}}}
child {node {\texttt{diff}}}
child {node {\texttt{dot}}}
child {node {\texttt{gc}}}
child {node {\texttt{neato}}}
child {node {$\dots$}}
}
child {node {\texttt{include}}}
child {node {\texttt{local}}}
child {node {\texttt{share}}}
child {node {\texttt{src}}}
child {node {$\dots$}}
}
child {node {$\dots$}};
\end{tikzpicture}
Classification tree of organisms:
Classification tree of organisms.
\begin{tikzpicture}
[sibling distance=6cm,-,thick]
\footnotesize
\node {organism}
child {node {plant}
[sibling distance=2cm]
child {node {tree}
child {node {deciduous}}
child {node {evergreen}}
}
child {node {flower}}
}
child {node {animal}
[sibling distance=2.5cm]
child {node {invertebrate}}
child {node {vetebrate}
[sibling distance=4.7cm]
child {node {bird}
[sibling distance=1.5cm]
child {node {finch}}
child {node {rosella}}
child {node {sparrow}}
}
child {node {mammal}
[sibling distance=1.5cm]
child {node {dolphin}}
child {node {human}}
child {node {whale}}
}
}
};
\end{tikzpicture}
The Bernoulli family tree of mathematicians:
Bernoulli family tree of mathematicians.
\begin{tikzpicture}
[-,thick,%
every node/.style={shape=rectangle,inner sep=3pt,draw,thick}]
\footnotesize
\node {Nikolaus senior} [edge from parent fork down]
[sibling distance=4cm]
child {node {Jacob}}
child {node {Nicolaus}
child {node {Nicolaus I}}
}
child {node {Johann}
[sibling distance=2cm]
child {node {Nicolaus II}}
child {node {Daniel}}
child {node {Johann II}
child {node {Johann III}}
child {node {Daniel II}}
child {node {Jakob II}}
}
};
\end{tikzpicture}
An expression tree:
Expression tree for the perfect square identity.
\begin{tikzpicture}
[-,thick]
\node {$+$}
[sibling distance=2.5cm]
child {node {$\times$}
[sibling distance=1cm]
child {node {$a$}}
child {node {$a$}}
}
child {node {$\times$}
[sibling distance=1cm]
child {node {$2$}}
child {node {$a$}}
child {node {$b$}}
}
child {node {$\times$}
[sibling distance=1cm]
child {node {$b$}}
child {node {$b$}}
};
\end{tikzpicture}
Simple graphs, bridges of Konigsberg and directed graphs
26 March 2011
Some combinatorial graphs drawn using TikZ/PGF. The seven bridges of Konigsberg:
Seven bridges of Konigsberg
\begin{tikzpicture}
[lineDecorate/.style={-,thick},%
nodeDecorate/.style={shape=circle,inner sep=2pt,draw,thick}]
%% nodes or vertices
\foreach \nodename/\x/\y/\direction/\navigate in {
a/0/0/left/west, b/0/2/left/west, c/0/4/left/west, d/4/2/right/east}
{
\node (\nodename) at (\x,\y) [nodeDecorate] {};
\node [\direction] at (\nodename.\navigate) {\footnotesize$\nodename$};
}
%% edges or lines
\path
\foreach \startnode/\endnode in {a/d, b/d, c/d}
{
(\startnode) edge[lineDecorate] node {} (\endnode)
}
\foreach \startnode/\endnode in {a/b, b/c, c/b, b/a}
{
(\startnode) edge[lineDecorate,bend left] node {} (\endnode)
};
\end{tikzpicture}
A house graph:
House graph
\begin{tikzpicture}
[lineDecorate/.style={-,thick},%
nodeDecorate/.style={shape=circle,inner sep=2pt,draw,thick}]
%% nodes or vertices
\foreach \nodename/\x/\y/\direction/\navigate in {
a/0/5/above/north, b/2/3/right/east, e/-2/3/left/west,
c/2/0/right/east, d/-2/0/left/west}
{
\node (\nodename) at (\x,\y) [nodeDecorate] {};
\node [\direction] at (\nodename.\navigate) {\footnotesize$\nodename$};
}
%% edges or lines
\path
\foreach \startnode/\endnode in {a/b, b/c, b/e, c/d, d/e, e/a}
{
(\startnode) edge[lineDecorate] node {} (\endnode)
};
\end{tikzpicture}
A weighted multigraph:
Weighted multigraph
\begin{tikzpicture}
[nodeDecorate/.style={shape=circle,inner sep=1pt,draw,thick}]
%% nodes or vertices
\foreach \nodename/\x/\y in {v_1/4/0, v_2/0/0, v_3/0/3, v_4/4/3, v_5/7/1.5}
{
\node (\nodename) at (\x,\y) [nodeDecorate] {\scriptsize$\nodename$};
}
%% edges or lines
\tikzstyle{EdgeStyle}=[->,>=stealth,thick]
\tikzstyle{LabelStyle}=[fill=white]
\foreach \startnode/\endnode/\bend/\weight in {
v_1/v_2/bend left/1, v_1/v_3/bend left/3, v_2/v_3/bend left/1,
v_2/v_4/bend left/3, v_3/v_1/bend left/1, v_3/v_2/bend left=0/2,
v_3/v_4/bend left/1, v_4/v_1/bend left=0/3, v_4/v_5/bend left/2,
v_5/v_1/bend left=0/3, v_5/v_1/bend left/6, v_5/v_4/bend left/1}
{
\scriptsize
\Edge[label=$\weight$,style=\bend](\startnode)(\endnode)
}
\end{tikzpicture}
Version 0.7 of book “Algorithmic Graph Theory” released
24 February 2011
Here is version 0.7 of the book Algorithmic Graph Theory. The relevant download options are:
Version 0.7 fleshes out the chapter “Random Graphs”. Here is the content of the chapter in brief:
1. Network statistics
2. Binomial random graph model
3. Erdos-Renyi model
4. Small-world networks
5. Scale-free networks
Version 0.6 of book “Algorithmic Graph Theory” released
6 January 2011
Happy new year, folks! As a new year’s gift to you, here is version 0.6 of the book Algorithmic Graph Theory. The relevant download options are:
Version 0.6 adds the new chapter “Tree Data Structures” that discusses priority queues and various efficient implementations of priority queues, including binary heaps and binomial heaps. Here is the content of the new chapter in brief:
1. Priority queues
2. Binary heaps
3. Binomial heaps
4. Binary search trees
Version 0.5 of the book “Algorithmic Graph Theory”
30 November 2010
I’m happy as a clam to announce version 0.5 of the book Algorithmic Graph Theory for your reading pleasure.
The main focus of this release is to flesh out the chapter on trees and forests. Along the way, numerous problems/exercises are added to the introductory chapter “Introduction to Graph Theory” and the chapter “Graph Algorithms”. Needless to say, there are also the multitude of typo fixes throughout the book. We, the authors of the book, gratefully acknowledge contributions from the following people while preparing this release:
• Caroline Melles
• Pravin Paratey
See the section “Acknowledgments” in the book for full details on their contributions. Here is an outline of topics covered in the newly fleshed out chapter “Trees and Forests”:
• Definitions and examples relating to trees and forests.
• Various basic characterizations of trees.
• Techniques for constructing minimum spanning trees: a randomized spanning tree construction algorithm and the usual suspects including Kruskal’s algorithm, Prim’s algorithm, and Boruvka’s algorithm.
• Binary trees and an algorithm to construct a random binary tree. Application topics include coding theory, Gray code, and Huffman code.
• The usual suspects of tree traversal algorithms: level-order, pre-order, post-order, and in-order.
What number is "MMCCXLVII"?
A: 2247
MMCCXLVII = 2247
Your question is, "What is MMCCXLVII in numbers?" The answer is 2247. Here we will explain how to convert, write and read the Roman numeral MMCCXLVII as the correct Arabic number.
How is MMCCXLVII converted to numbers?
To convert MMCCXLVII to numbers the translation involves breaking the numeral into place values (ones, tens, hundreds, thousands), like this:
Place Value | Number | Roman Numeral
Conversion | 2000 + 200 + 40 + 7 | MM + CC + XL + VII
Thousands | 2000 | MM
Hundreds | 200 | CC
Tens | 40 | XL
Ones | 7 | VII
How is MMCCXLVII written in numbers?
To write MMCCXLVII as a number correctly, you combine the converted Roman numerals. The highest numerals must always precede the lower numerals to give the correct written translation, as in the table above.
MMCCXLVII = 2000 + 200 + 40 + 7 = 2247
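The same place-value reading can be written as a short program. The following sketch is an illustration of ours, not part of the conversion table above:
VALUES = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
          (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
          (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I")]

def roman_to_int(numeral):
    total, i = 0, 0
    for value, symbol in VALUES:
        # consume as many copies of this symbol as appear next
        while numeral.startswith(symbol, i):
            total += value
            i += len(symbol)
    return total

print(roman_to_int("MMCCXLVII"))  # prints 2247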
# fleet
w
Hi, I am working on a docker-compose file to quickly run Fleet as a Docker container on my hosts:
version: '3.7'
services:
fleet:
image: fleetdm/fleet:latest
container_name: fleet
depends_on:
- db
- redis
secrets:
- db-password
- server-certificate
- server-key
- jwt-key
environment:
FLEET_MYSQL_ADDRESS: localhost:3306
FLEET_MYSQL_DATABASE: kolide
FLEET_MYSQL_USERNAME: fleet
FLEET_MYSQL_PASSWORD: 1234
FLEET_REDIS_ADDRESS: localhost:6379
FLEET_SERVER_CERT: /run/secrets/server-certificate
FLEET_SERVER_KEY: /run/secrets/server-key
FLEET_AUTH_JWT_KEY: /run/secrets/db-password
restart: always
networks:
- my_network
ports:
- "1337:1337"
command: [ "fleet", "prepare", "db"]
entrypoint:
- /usr/bin/fleet
- serve
db:
image: mysql:5.7
container_name: db
secrets:
- db-password
restart: always
volumes:
- ./db:/var/lib/mysql
environment:
MYSQL_DATABASE: kolide
MYSQL_ROOT_PASSWORD_FILE: /run/secrets/db-password
MYSQL_USER: fleet
MYSQL_PASSWORD: 1234
networks:
- my_network
ports:
- "3306:3306"
redis:
image: redis:latest
container_name: redis
restart: always
networks:
- my_network
ports:
- "6379:6379"
secrets:
db-password:
file: ./password.txt
server-certificate:
file: ./server.cert
server-key:
file: ./server.key
jwt-key:
file: ./jwt.key
networks:
my_network:
driver: bridge
but the Fleet container will not connect to the MySQL instance:
fleet | mysql="could not connect to db: dial tcp 127.0.0.1:3306: connect: connection refused"
I also tried it with mysql:8.0 but the error stays the same. I would be very thankful for any ideas!
z
I'm not familiar with bridge networking on Docker but you probably need a different hostname in your FLEET_MYSQL_ADDRESS.
m
bridge is the default Docker network mode. You need to replace localhost with db, e.g. db:3306. The same applies to redis. There is also no need to expose the mysql and redis ports to the host. I based my setup on the osquery-in-a-box repo
ty 2
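Applying that suggestion, the fleet service's environment block would become (a sketch based on the thread, not an official example):
environment:
  FLEET_MYSQL_ADDRESS: db:3306
  FLEET_MYSQL_DATABASE: kolide
  FLEET_MYSQL_USERNAME: fleet
  FLEET_MYSQL_PASSWORD: 1234
  FLEET_REDIS_ADDRESS: redis:6379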
function namespaces
M.E.Farmer mefjr75 at hotmail.com
Wed Mar 9 01:25:46 CET 2005
Hello Darren,
I am not sure why you are using execfile().
Py> help(execfile)
Help on built-in function execfile:
execfile(...)
execfile(filename[, globals[, locals]])
Read and execute a Python script from a file.
The globals and locals are dictionaries, defaulting to the current
globals and locals. If only globals is given, locals defaults to
it.
Do you realize that execfile actually runs your script?
A simple import would probably be the best way to go.
#contents of myfile.py:
testvar = [1,2,3,4]
# someother.py
import myfile
print myfile.testvar
But to answer the question :
#contents of myfile.py:
testvar = [1,2,3,4]
# someother.py
# the problem is you are executing a script
# then searching in the wrong namespace.
def myfunction(filename):
    execfile(filename, globals())
    print testvar
hth,
M.E.Farmer
Side by Side Diff: Modules/_decimal/libmpdec/io.c
Issue 7652: Merge C version of decimal into py3k.
1 /*
2 * Copyright (c) 2008-2010 Stefan Krah. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28
29 #include "mpdecimal.h"
30 #include <stdio.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <ctype.h>
34 #include <limits.h>
35 #include <assert.h>
36 #include <errno.h>
37 #include <locale.h>
38 #include "bits.h"
39 #include "constants.h"
40 #include "memory.h"
41 #include "typearith.h"
42 #include "io.h"
43
44
45 /* This file contains functions for decimal <-> string conversions, including
46 PEP-3101 formatting for numeric types. */
47
48
49 /*
50 * Work around the behavior of tolower() and strcasecmp() in certain
51 * locales. For example, in tr_TR.utf8:
52 *
53 * tolower((unsigned char)'I') == 'I'
54 *
55 * u is the exact uppercase version of l; n is strlen(l) or strlen(l)+1
56 */
57 static inline int
58 _mpd_strneq(const char *s, const char *l, const char *u, size_t n)
59 {
60 while (--n != SIZE_MAX) {
61 if (*s != *l && *s != *u) {
62 return 0;
63 }
64 s++; u++; l++;
65 }
66
67 return 1;
68 }
69
70 static mpd_ssize_t
71 strtoexp(const char *s)
72 {
73 char *end;
74 mpd_ssize_t retval;
75
76 errno = 0;
77 retval = mpd_strtossize(s, &end, 10);
78 if (errno == 0 && !(*s != '\0' && *end == '\0'))
79 errno = EINVAL;
80
81 return retval;
82 }
83
84 /*
85 * Scan 'len' words. The most significant word contains 'r' digits,
86 * the remaining words are full words. Skip dpoint. The string 's' must
87 * consist of digits and an optional single decimal point at 'dpoint'.
88 */
89 static void
90 string_to_coeff(mpd_uint_t *data, const char *s, const char *dpoint, int r,
91 size_t len)
92 {
93 int j;
94
95 if (r > 0) {
96 data[--len] = 0;
97 for (j = 0; j < r; j++, s++) {
98 if (s == dpoint) s++;
99 data[len] = 10 * data[len] + (*s - '0');
100 }
101 }
102
103 while (--len != SIZE_MAX) {
104 data[len] = 0;
105 for (j = 0; j < MPD_RDIGITS; j++, s++) {
106 if (s == dpoint) s++;
107 data[len] = 10 * data[len] + (*s - '0');
108 }
109 }
110 }
111
112 /*
113 * Partially verify a numeric string of the form:
114 *
115 * [cdigits][.][cdigits][eE][+-][edigits]
116 *
117 * If successful, return a pointer to the location of the first
118 * relevant coefficient digit. This digit is either non-zero or
119 * part of one of the following patterns:
120 *
121 * ["0\x00", "0.\x00", "0.E", "0.e", "0E", "0e"]
122 *
123 * The locations of a single optional dot or indicator are stored
124 * in 'dpoint' and 'exp'.
125 *
126 * The end of the string is stored in 'end'. If an indicator [eE]
127 * occurs without trailing [edigits], the condition is caught
128 * later by strtoexp().
129 */
130 static const char *
131 scan_dpoint_exp(const char *s, const char **dpoint, const char **exp,
132 const char **end)
133 {
134 const char *coeff = NULL;
135
136 *dpoint = NULL;
137 *exp = NULL;
138 for (; *s != '\0'; s++) {
139 switch (*s) {
140 case '.':
141 if (*dpoint != NULL || *exp != NULL)
142 return NULL;
143 *dpoint = s;
144 break;
145 case 'E': case 'e':
146 if (*exp != NULL)
147 return NULL;
148 *exp = s;
149 if (*(s+1) == '+' || *(s+1) == '-')
150 s++;
151 break;
152 default:
153 if (!isdigit((uchar)*s))
154 return NULL;
155 if (coeff == NULL && *exp == NULL) {
156 if (*s == '0') {
157 if (!isdigit((uchar)*(s+1)))
158 if (!(*(s+1) == '.' &&
159 isdigit((uchar)*(s+2))))
160 coeff = s;
161 }
162 else {
163 coeff = s;
164 }
165 }
166 break;
167
168 }
169 }
170
171 *end = s;
172 return coeff;
173 }
174
175 /* scan the payload of a NaN */
176 static const char *
177 scan_payload(const char *s, const char **end)
178 {
179 const char *coeff;
180
181 while (*s == '0')
182 s++;
183 coeff = s;
184
185 while (isdigit((uchar)*s))
186 s++;
187 *end = s;
188
189 return (*s == '\0') ? coeff : NULL;
190 }
191
192 /* convert a character string to a decimal */
193 void
194 mpd_qset_string(mpd_t *dec, const char *s, const mpd_context_t *ctx,
195 uint32_t *status)
196 {
197 mpd_ssize_t q, r, len;
198 const char *coeff, *end;
199 const char *dpoint = NULL, *exp = NULL;
200 size_t digits;
201 uint8_t sign = MPD_POS;
202
203 mpd_set_flags(dec, 0);
204 dec->len = 0;
205 dec->exp = 0;
206
207 /* sign */
208 if (*s == '+') {
209 s++;
210 }
211 else if (*s == '-') {
212 mpd_set_negative(dec);
213 sign = MPD_NEG;
214 s++;
215 }
216
217 if (_mpd_strneq(s, "nan", "NAN", 3)) { /* NaN */
218 s += 3;
219 mpd_setspecial(dec, sign, MPD_NAN);
220 if (*s == '\0')
221 return;
222 /* validate payload: digits only */
223 if ((coeff = scan_payload(s, &end)) == NULL)
224 goto conversion_error;
225 /* payload consists entirely of zeros */
226 if (*coeff == '\0')
227 return;
228 digits = end - coeff;
229 /* prec >= 1, clamp is 0 or 1 */
230 if (digits > (size_t)(ctx->prec-ctx->clamp))
231 goto conversion_error;
232 } /* sNaN */
233 else if (_mpd_strneq(s, "snan", "SNAN", 4)) {
234 s += 4;
235 mpd_setspecial(dec, sign, MPD_SNAN);
236 if (*s == '\0')
237 return;
238 /* validate payload: digits only */
239 if ((coeff = scan_payload(s, &end)) == NULL)
240 goto conversion_error;
241 /* payload consists entirely of zeros */
242 if (*coeff == '\0')
243 return;
244 digits = end - coeff;
245 if (digits > (size_t)(ctx->prec-ctx->clamp))
246 goto conversion_error;
247 }
248 else if (_mpd_strneq(s, "inf", "INF", 3)) {
249 s += 3;
250 if (*s == '\0' || _mpd_strneq(s, "inity", "INITY", 6)) {
251 /* numeric-value: infinity */
252 mpd_setspecial(dec, sign, MPD_INF);
253 return;
254 }
255 goto conversion_error;
256 }
257 else {
258 /* scan for start of coefficient, decimal point, indicator, end */
259 if ((coeff = scan_dpoint_exp(s, &dpoint, &exp, &end)) == NULL)
260 goto conversion_error;
261
262 /* numeric-value: [exponent-part] */
263 if (exp) {
264 /* exponent-part */
265 end = exp; exp++;
266 dec->exp = strtoexp(exp);
267 if (errno) {
268 if (!(errno == ERANGE &&
269 (dec->exp == MPD_SSIZE_MAX ||
270 dec->exp == MPD_SSIZE_MIN)))
271 goto conversion_error;
272 }
273 }
274
275 digits = end - coeff;
276 if (dpoint) {
277 size_t fracdigits = end-dpoint-1;
278 if (dpoint > coeff) digits--;
279
280 if (fracdigits > MPD_MAX_PREC) {
281 goto conversion_error;
282 }
283 if (dec->exp < MPD_SSIZE_MIN+(mpd_ssize_t)fracdigits) {
284 dec->exp = MPD_SSIZE_MIN;
285 }
286 else {
287 dec->exp -= (mpd_ssize_t)fracdigits;
288 }
289 }
290 if (digits > MPD_MAX_PREC) {
291 goto conversion_error;
292 }
293 if (dec->exp > MPD_EXP_INF) {
294 dec->exp = MPD_EXP_INF;
295 }
296 if (dec->exp == MPD_SSIZE_MIN) {
297 dec->exp = MPD_SSIZE_MIN+1;
298 }
299 }
300
301 _mpd_idiv_word(&q, &r, (mpd_ssize_t)digits, MPD_RDIGITS);
302
303 len = (r == 0) ? q : q+1;
304 if (len == 0) {
305 goto conversion_error; /* GCOV_NOT_REACHED */
306 }
307 if (!mpd_qresize(dec, len, status)) {
308 mpd_seterror(dec, MPD_Malloc_error, status);
309 return;
310 }
311 dec->len = len;
312
313 string_to_coeff(dec->data, coeff, dpoint, (int)r, len);
314
315 mpd_setdigits(dec);
316 mpd_qfinalize(dec, ctx, status);
317 return;
318
319 conversion_error:
320 /* standard wants a positive NaN */
321 mpd_seterror(dec, MPD_Conversion_syntax, status);
322 }
323
324 /* Print word x with n decimal digits to string s. dot is either NULL
325 or the location of a decimal point. */
326 #define EXTRACT_DIGIT(s, x, d, dot) \
327 if (s == dot) *s++ = '.'; *s++ = '0' + (char)(x / d); x %= d
328 static inline char *
329 word_to_string(char *s, mpd_uint_t x, int n, char *dot)
330 {
331 switch(n) {
332 #ifdef CONFIG_64
333 case 20: EXTRACT_DIGIT(s, x, 10000000000000000000ULL, dot); /* GCOV_NOT_REACHED */
334 case 19: EXTRACT_DIGIT(s, x, 1000000000000000000ULL, dot);
335 case 18: EXTRACT_DIGIT(s, x, 100000000000000000ULL, dot);
336 case 17: EXTRACT_DIGIT(s, x, 10000000000000000ULL, dot);
337 case 16: EXTRACT_DIGIT(s, x, 1000000000000000ULL, dot);
338 case 15: EXTRACT_DIGIT(s, x, 100000000000000ULL, dot);
339 case 14: EXTRACT_DIGIT(s, x, 10000000000000ULL, dot);
340 case 13: EXTRACT_DIGIT(s, x, 1000000000000ULL, dot);
341 case 12: EXTRACT_DIGIT(s, x, 100000000000ULL, dot);
342 case 11: EXTRACT_DIGIT(s, x, 10000000000ULL, dot);
343 #endif
344 case 10: EXTRACT_DIGIT(s, x, 1000000000UL, dot);
345 case 9: EXTRACT_DIGIT(s, x, 100000000UL, dot);
346 case 8: EXTRACT_DIGIT(s, x, 10000000UL, dot);
347 case 7: EXTRACT_DIGIT(s, x, 1000000UL, dot);
348 case 6: EXTRACT_DIGIT(s, x, 100000UL, dot);
349 case 5: EXTRACT_DIGIT(s, x, 10000UL, dot);
350 case 4: EXTRACT_DIGIT(s, x, 1000UL, dot);
351 case 3: EXTRACT_DIGIT(s, x, 100UL, dot);
352 case 2: EXTRACT_DIGIT(s, x, 10UL, dot);
353 default: if (s == dot) *s++ = '.'; *s++ = '0' + (char)x;
354 }
355
356 *s = '\0';
357 return s;
358 }
359
360 /* Print exponent x to string s. Undefined for MPD_SSIZE_MIN. */
361 static inline char *
362 exp_to_string(char *s, mpd_ssize_t x)
363 {
364 char sign = '+';
365
366 if (x < 0) {
367 sign = '-';
368 x = -x;
369 }
370 *s++ = sign;
371
372 return word_to_string(s, x, mpd_word_digits(x), NULL);
373 }
374
375 /* Print the coefficient of dec to string s. len(dec) > 0. */
376 static inline char *
377 coeff_to_string(char *s, const mpd_t *dec)
378 {
379 mpd_uint_t x;
380 mpd_ssize_t i;
381
382 /* most significant word */
383 x = mpd_msword(dec);
384 s = word_to_string(s, x, mpd_word_digits(x), NULL);
385
386 /* remaining full words */
387 for (i=dec->len-2; i >= 0; --i) {
388 x = dec->data[i];
389 s = word_to_string(s, x, MPD_RDIGITS, NULL);
390 }
391
392 return s;
393 }
394
395 /* Print the coefficient of dec to string s. len(dec) > 0. dot is either
396 NULL or a pointer to the location of a decimal point. */
397 static inline char *
398 coeff_to_string_dot(char *s, char *dot, const mpd_t *dec)
399 {
400 mpd_uint_t x;
401 mpd_ssize_t i;
402
403 /* most significant word */
404 x = mpd_msword(dec);
405 s = word_to_string(s, x, mpd_word_digits(x), dot);
406
407 /* remaining full words */
408 for (i=dec->len-2; i >= 0; --i) {
409 x = dec->data[i];
410 s = word_to_string(s, x, MPD_RDIGITS, dot);
411 }
412
413 return s;
414 }
415
416 /* Format type */
417 #define MPD_FMT_LOWER 0x00000000
418 #define MPD_FMT_UPPER 0x00000001
419 #define MPD_FMT_TOSCI 0x00000002
420 #define MPD_FMT_TOENG 0x00000004
421 #define MPD_FMT_EXP 0x00000008
422 #define MPD_FMT_FIXED 0x00000010
423 #define MPD_FMT_PERCENT 0x00000020
424 #define MPD_FMT_SIGN_SPACE 0x00000040
425 #define MPD_FMT_SIGN_PLUS 0x00000080
426
427 /* Default place of the decimal point for MPD_FMT_TOSCI, MPD_FMT_EXP */
428 #define MPD_DEFAULT_DOTPLACE 1
429
430 /*
431 * Set *result to the string representation of a decimal. Return the length
432 * of *result, not including the terminating '\0' character.
433 *
434 * Formatting is done according to 'flags'. A return value of -1 with *result
435 * set to NULL indicates MPD_Malloc_error.
436 *
437 * 'dplace' is the default place of the decimal point. It is always set to
438 * MPD_DEFAULT_DOTPLACE except for zeros in combination with MPD_FMT_EXP.
439 */
440 static mpd_ssize_t
441 _mpd_to_string(char **result, const mpd_t *dec, int flags, mpd_ssize_t dplace)
442 {
443 char *decstring = NULL, *cp = NULL;
444 mpd_ssize_t ldigits;
445 mpd_ssize_t mem = 0, k;
446
447 if (mpd_isspecial(dec)) {
448
449 mem = sizeof "-Infinity";
450 if (mpd_isnan(dec) && dec->len > 0) {
451 /* diagnostic code */
452 mem += dec->digits;
453 }
454 cp = decstring = mpd_alloc(mem, sizeof *decstring);
455 if (cp == NULL) {
456 *result = NULL;
457 return -1;
458 }
459
460 if (mpd_isnegative(dec)) {
461 *cp++ = '-';
462 }
463 else if (flags&MPD_FMT_SIGN_SPACE) {
464 *cp++ = ' ';
465 }
466 else if (flags&MPD_FMT_SIGN_PLUS) {
467 *cp++ = '+';
468 }
469
470 if (mpd_isnan(dec)) {
471 if (mpd_isqnan(dec)) {
472 strcpy(cp, "NaN");
473 cp += 3;
474 }
475 else {
476 strcpy(cp, "sNaN");
477 cp += 4;
478 }
479 if (dec->len > 0) { /* diagnostic code */
480 cp = coeff_to_string(cp, dec);
481 }
482 }
483 else if (mpd_isinfinite(dec)) {
484 strcpy(cp, "Infinity");
485 cp += 8;
486 }
487 else { /* debug */
488 abort(); /* GCOV_NOT_REACHED */
489 }
490 }
491 else {
492 assert(dec->len > 0);
493
494 /*
495 * For easier manipulation of the decimal point's location
496 * and the exponent that is finally printed, the number is
497 * rescaled to a virtual representation with exp = 0. Here
498 * ldigits denotes the number of decimal digits to the left
499 * of the decimal point and remains constant once initialized.
500 *
501 * dplace is the location of the decimal point relative to
502 * the start of the coefficient. Note that 3) always holds
503 * when dplace is shifted.
504 *
505 * 1) ldigits := dec->digits - dec->exp
506 * 2) dplace := ldigits (initially)
507 * 3) exp := ldigits - dplace (initially exp = 0)
508 *
509 * 0.00000_.____._____000000.
510 * ^ ^ ^ ^
511 * | | | |
512 * | | | `- dplace >= digits
513 * | | `- dplace in the middle of the coefficient
514 * | ` dplace = 1 (after the first coefficient digit)
515 * `- dplace <= 0
516 */
517
518 ldigits = dec->digits + dec->exp;
519
520 if (flags&MPD_FMT_EXP) {
521 ;
522 }
523 else if (flags&MPD_FMT_FIXED || (dec->exp <= 0 && ldigits > -6)) {
524 /* MPD_FMT_FIXED: always use fixed point notation.
525 * MPD_FMT_TOSCI, MPD_FMT_TOENG: for a certain range,
526 * override exponent notation. */
527 dplace = ldigits;
528 }
529 else if (flags&MPD_FMT_TOENG) {
530 if (mpd_iszero(dec)) {
531 /* If the exponent is divisible by three,
532 * dplace = 1. Otherwise, move dplace one
533 * or two places to the left. */
534 dplace = -1 + mod_mpd_ssize_t(dec->exp+2, 3);
535 }
536 else { /* ldigits-1 is the adjusted exponent, which
537 * should be divisible by three. If not, move
538 * dplace one or two places to the right. */
539 dplace += mod_mpd_ssize_t(ldigits-1, 3);
540 }
541 }
542
543 /*
544 * Basic space requirements:
545 *
546 * [-][.][coeffdigits][E][-][expdigits+1][%]['\0']
547 *
548 * If the decimal point lies outside of the coefficient digits,
549 * space is adjusted accordingly.
550 */
551 if (dplace <= 0) {
552 mem = -dplace + dec->digits + 2;
553 }
554 else if (dplace >= dec->digits) {
555 mem = dplace;
556 }
557 else {
558 mem = dec->digits;
559 }
560 mem += (MPD_EXPDIGITS+1+6);
561
562 cp = decstring = mpd_alloc(mem, sizeof *decstring);
563 if (cp == NULL) {
564 *result = NULL;
565 return -1;
566 }
567
568
569 if (mpd_isnegative(dec)) {
570 *cp++ = '-';
571 }
572 else if (flags&MPD_FMT_SIGN_SPACE) {
573 *cp++ = ' ';
574 }
575 else if (flags&MPD_FMT_SIGN_PLUS) {
576 *cp++ = '+';
577 }
578
579 if (dplace <= 0) {
580 /* space: -dplace+dec->digits+2 */
581 *cp++ = '0';
582 *cp++ = '.';
583 for (k = 0; k < -dplace; k++) {
584 *cp++ = '0';
585 }
586 cp = coeff_to_string(cp, dec);
587 }
588 else if (dplace >= dec->digits) {
589 /* space: dplace */
590 cp = coeff_to_string(cp, dec);
591 for (k = 0; k < dplace-dec->digits; k++) {
592 *cp++ = '0';
593 }
594 }
595 else {
596 /* space: dec->digits+1 */
597 cp = coeff_to_string_dot(cp, cp+dplace, dec);
598 }
599
600 /*
601 * Conditions for printing an exponent:
602 *
603 * MPD_FMT_TOSCI, MPD_FMT_TOENG: only if ldigits != dplace
604 * MPD_FMT_FIXED: never (ldigits == dplace)
605 * MPD_FMT_EXP: always
606 */
607 if (ldigits != dplace || flags&MPD_FMT_EXP) {
608 /* space: expdigits+2 */
609 *cp++ = (flags&MPD_FMT_UPPER) ? 'E' : 'e';
610 cp = exp_to_string(cp, ldigits-dplace);
611 }
612
613 if (flags&MPD_FMT_PERCENT) {
614 *cp++ = '%';
615 }
616 }
617
618 assert(cp < decstring+mem);
619 assert(cp-decstring < MPD_SSIZE_MAX);
620
621 *cp = '\0';
622 *result = decstring;
623 return (mpd_ssize_t)(cp-decstring);
624 }
625
626 char *
627 mpd_to_sci(const mpd_t *dec, int fmt)
628 {
629 char *res;
630 int flags = MPD_FMT_TOSCI;
631
632 flags |= fmt ? MPD_FMT_UPPER : MPD_FMT_LOWER;
633 (void)_mpd_to_string(&res, dec, flags, MPD_DEFAULT_DOTPLACE);
634 return res;
635 }
636
637 char *
638 mpd_to_eng(const mpd_t *dec, int fmt)
639 {
640 char *res;
641 int flags = MPD_FMT_TOENG;
642
643 flags |= fmt ? MPD_FMT_UPPER : MPD_FMT_LOWER;
644 (void)_mpd_to_string(&res, dec, flags, MPD_DEFAULT_DOTPLACE);
645 return res;
646 }
647
648 mpd_ssize_t
649 mpd_to_sci_size(char **res, const mpd_t *dec, int fmt)
650 {
651 int flags = MPD_FMT_TOSCI;
652
653 flags |= fmt ? MPD_FMT_UPPER : MPD_FMT_LOWER;
654 return _mpd_to_string(res, dec, flags, MPD_DEFAULT_DOTPLACE);
655 }
656
657 mpd_ssize_t
658 mpd_to_eng_size(char **res, const mpd_t *dec, int fmt)
659 {
660 int flags = MPD_FMT_TOENG;
661
662 flags |= fmt ? MPD_FMT_UPPER : MPD_FMT_LOWER;
663 return _mpd_to_string(res, dec, flags, MPD_DEFAULT_DOTPLACE);
664 }
665
666 /* Copy a single UTF-8 char to dest. See: The Unicode Standard, version 5.2,
667 chapter 3.9: Well-formed UTF-8 byte sequences. */
668 static int
669 _mpd_copy_utf8(char dest[5], const char *s)
670 {
671 const uchar *cp = (const uchar *)s;
672 uchar lb, ub;
673 int count, i;
674
675
676 if (*cp == 0) {
677 /* empty string */
678 dest[0] = '\0';
679 return 0;
680 }
681 else if (*cp <= 0x7f) {
682 /* ascii */
683 dest[0] = *cp;
684 dest[1] = '\0';
685 return 1;
686 }
687 else if (0xc2 <= *cp && *cp <= 0xdf) {
688 lb = 0x80; ub = 0xbf;
689 count = 2;
690 }
691 else if (*cp == 0xe0) {
692 lb = 0xa0; ub = 0xbf;
693 count = 3;
694 }
695 else if (*cp <= 0xec) {
696 lb = 0x80; ub = 0xbf;
697 count = 3;
698 }
699 else if (*cp == 0xed) {
700 lb = 0x80; ub = 0x9f;
701 count = 3;
702 }
703 else if (*cp <= 0xef) {
704 lb = 0x80; ub = 0xbf;
705 count = 3;
706 }
707 else if (*cp == 0xf0) {
708 lb = 0x90; ub = 0xbf;
709 count = 4;
710 }
711 else if (*cp <= 0xf3) {
712 lb = 0x80; ub = 0xbf;
713 count = 4;
714 }
715 else if (*cp == 0xf4) {
716 lb = 0x80; ub = 0x8f;
717 count = 4;
718 }
719 else {
720 /* invalid */
721 goto error;
722 }
723
724 dest[0] = *cp++;
725 if (*cp < lb || ub < *cp) {
726 goto error;
727 }
728 dest[1] = *cp++;
729 for (i = 2; i < count; i++) {
730 if (*cp < 0x80 || 0xbf < *cp) {
731 goto error;
732 }
733 dest[i] = *cp++;
734 }
735 dest[i] = '\0';
736
737 return count;
738
739 error:
740 dest[0] = '\0';
741 return -1;
742 }
743
744 int
745 mpd_validate_lconv(mpd_spec_t *spec)
746 {
747 size_t n;
748 #if CHAR_MAX == SCHAR_MAX
749 const char *cp = spec->grouping;
750 while (*cp != '\0') {
751 if (*cp++ < 0) {
752 return -1;
753 }
754 }
755 #endif
756 n = strlen(spec->dot);
757 if (n == 0 || n > 4) {
758 return -1;
759 }
760 if (strlen(spec->sep) > 4) {
761 return -1;
762 }
763
764 return 0;
765 }
766
767 int
768 mpd_parse_fmt_str(mpd_spec_t *spec, const char *fmt, int caps)
769 {
770 char *cp = (char *)fmt;
771 int have_align = 0, n;
772
773 /* defaults */
774 spec->min_width = 0;
775 spec->prec = -1;
776 spec->type = caps ? 'G' : 'g';
777 spec->align = '>';
778 spec->sign = '-';
779 spec->dot = "";
780 spec->sep = "";
781 spec->grouping = "";
782
783
784 /* presume that the first character is a UTF-8 fill character */
785 if ((n = _mpd_copy_utf8(spec->fill, cp)) < 0) {
786 return 0;
787 }
788
789 /* alignment directive, prefixed by a fill character */
790 if (*cp && (*(cp+n) == '<' || *(cp+n) == '>' ||
791 *(cp+n) == '=' || *(cp+n) == '^')) {
792 cp += n;
793 spec->align = *cp++;
794 have_align = 1;
795 } /* alignment directive */
796 else {
797 /* default fill character */
798 spec->fill[0] = ' ';
799 spec->fill[1] = '\0';
800 if (*cp == '<' || *cp == '>' ||
801 *cp == '=' || *cp == '^') {
802 spec->align = *cp++;
803 have_align = 1;
804 }
805 }
806
807 /* sign formatting */
808 if (*cp == '+' || *cp == '-' || *cp == ' ') {
809 spec->sign = *cp++;
810 }
811
812 /* zero padding */
813 if (*cp == '0') {
814 /* zero padding implies alignment, which should not be
815 * specified twice. */
816 if (have_align) {
817 return 0;
818 }
819 spec->align = 'z';
820 spec->fill[0] = *cp++;
821 spec->fill[1] = '\0';
822 }
823
824 /* minimum width */
825 if (isdigit((uchar)*cp)) {
826 if (*cp == '0') {
827 return 0;
828 }
829 errno = 0;
830 spec->min_width = mpd_strtossize(cp, &cp, 10);
831 if (errno == ERANGE || errno == EINVAL) {
832 return 0;
833 }
834 }
835
836 /* thousands separator */
837 if (*cp == ',') {
838 spec->dot = ".";
839 spec->sep = ",";
840 spec->grouping = "\003\003";
841 cp++;
842 }
843
844 /* fraction digits or significant digits */
845 if (*cp == '.') {
846 cp++;
847 if (!isdigit((uchar)*cp)) {
848 return 0;
849 }
850 errno = 0;
851 spec->prec = mpd_strtossize(cp, &cp, 10);
852 if (errno == ERANGE || errno == EINVAL) {
853 return 0;
854 }
855 }
856
857 /* type */
858 if (*cp == 'E' || *cp == 'e' || *cp == 'F' || *cp == 'f' ||
859 *cp == 'G' || *cp == 'g' || *cp == '%') {
860 spec->type = *cp++;
861 }
862 else if (*cp == 'N' || *cp == 'n') {
863 /* locale specific conversion */
864 struct lconv *lc;
865 /* separator has already been specified */
866 if (*spec->sep) {
867 return 0;
868 }
869 spec->type = *cp++;
870 spec->type = (spec->type == 'N') ? 'G' : 'g';
871 lc = localeconv();
872 spec->dot = lc->decimal_point;
873 spec->sep = lc->thousands_sep;
874 spec->grouping = lc->grouping;
875 if (mpd_validate_lconv(spec) < 0) {
876 return 0;
877 }
878 }
879
880 /* check correctness */
881 if (*cp != '\0') {
882 return 0;
883 }
884
885 return 1;
886 }
887
888 /*
889 * The following functions assume that spec->min_width <= MPD_MAX_PREC, which
890 * is made sure in mpd_qformat_spec. Then, even with a spec that inserts a
891 * four-byte separator after each digit, nbytes in the following struct
892 * cannot overflow.
893 */
894
895 /* Multibyte string */
896 typedef struct {
897 mpd_ssize_t nbytes; /* length in bytes */
898 mpd_ssize_t nchars; /* length in chars */
899 mpd_ssize_t cur; /* current write index */
900 char *data;
901 } mpd_mbstr_t;
902
903 static inline void
904 _mpd_bcopy(char *dest, const char *src, mpd_ssize_t n)
905 {
906 while (--n >= 0) {
907 dest[n] = src[n];
908 }
909 }
910
911 static inline void
912 _mbstr_copy_char(mpd_mbstr_t *dest, const char *src, mpd_ssize_t n)
913 {
914 dest->nbytes += n;
915 dest->nchars += (n > 0 ? 1 : 0);
916 dest->cur -= n;
917
918 if (dest->data != NULL) {
919 _mpd_bcopy(dest->data+dest->cur, src, n);
920 }
921 }
922
923 static inline void
924 _mbstr_copy_ascii(mpd_mbstr_t *dest, const char *src, mpd_ssize_t n)
925 {
926 dest->nbytes += n;
927 dest->nchars += n;
928 dest->cur -= n;
929
930 if (dest->data != NULL) {
931 _mpd_bcopy(dest->data+dest->cur, src, n);
932 }
933 }
934
935 static inline void
936 _mbstr_copy_pad(mpd_mbstr_t *dest, mpd_ssize_t n)
937 {
938 dest->nbytes += n;
939 dest->nchars += n;
940 dest->cur -= n;
941
942 if (dest->data != NULL) {
943 char *cp = dest->data + dest->cur;
944 while (--n >= 0) {
945 cp[n] = '0';
946 }
947 }
948 }
949
950 /*
951 * Copy a numeric string to dest->data, adding separators in the integer
952 * part according to spec->grouping. If leading zero padding is enabled
953 * and the result is smaller than spec->min_width, continue adding zeros
954 * and separators until the minimum width is reached.
955 *
956 * The final length of dest->data is stored in dest->nbytes. The number
957 * of UTF-8 characters is stored in dest->nchars.
958 *
959 * First run (dest->data == NULL): determine the length of the result
960 * string and store it in dest->nbytes.
961 *
962 * Second run (write to dest->data): data is written in chunks and in
963 * reverse order, starting with the rest of the numeric string.
964 */
965 static void
966 _mpd_add_sep_dot(mpd_mbstr_t *dest,
967 const char *sign, /* location of optional sign */
968 const char *src, mpd_ssize_t n_src, /* integer part and length */
969 const char *dot, /* location of optional decimal point */
970 const char *rest, mpd_ssize_t n_rest, /* remaining part and length */
971 const mpd_spec_t *spec)
972 {
973 mpd_ssize_t n_sep, n_sign, consume;
974 const char *g;
975 int pad = 0;
976
977 n_sign = sign ? 1 : 0;
978 n_sep = (mpd_ssize_t)strlen(spec->sep);
979 /* Initial write index: set to location of '\0' in the output string.
980 * Irrelevant for the first run. */
981 dest->cur = dest->nbytes;
982 dest->nbytes = dest->nchars = 0;
983
984 _mbstr_copy_ascii(dest, rest, n_rest);
985
986 if (dot) {
987 _mbstr_copy_char(dest, dot, (mpd_ssize_t)strlen(dot));
988 }
989
990 g = spec->grouping;
991 consume = *g;
992 while (1) {
993 /* If the group length is 0 or CHAR_MAX or greater than the
994 * number of source bytes, consume all remaining bytes. */
995 if (*g == 0 || *g == CHAR_MAX || consume > n_src) {
996 consume = n_src;
997 }
998 n_src -= consume;
999 if (pad) {
1000 _mbstr_copy_pad(dest, consume);
1001 }
1002 else {
1003 _mbstr_copy_ascii(dest, src+n_src, consume);
1004 }
1005
1006 if (n_src == 0) {
1007 /* Either the real source of intpart digits or the virtual
1008 * source of padding zeros is exhausted. */
1009 if (spec->align == 'z' &&
1010 dest->nchars + n_sign < spec->min_width) {
1011 /* Zero padding is set and length < min_width:
1012 * Generate n_src additional characters. */
1013 n_src = spec->min_width - (dest->nchars + n_sign);
1014 /* Next iteration:
1015 * case *g == 0 || *g == CHAR_MAX:
1016 * consume all padding characters
1017 * case consume < *g:
1018 * fill remainder of current group
1019 * case consume == *g
1020 * copying is a no-op */
1021 consume = *g - consume;
1022 /* Switch on virtual source of zeros. */
1023 pad = 1;
1024 continue;
1025 }
1026 break;
1027 }
1028
1029 if (n_sep > 0) {
1030 /* If padding is switched on, separators are counted
1031 * as padding characters. This rule does not apply if
1032 * the separator would be the first character of the
1033 * result string. */
1034 if (pad && n_src > 1) n_src -= 1;
1035 _mbstr_copy_char(dest, spec->sep, n_sep);
1036 }
1037
1038 /* If non-NUL, use the next value for grouping. */
1039 if (*g && *(g+1)) g++;
1040 consume = *g;
1041 }
1042
1043 if (sign) {
1044 _mbstr_copy_ascii(dest, sign, 1);
1045 }
1046
1047 if (dest->data) {
1048 dest->data[dest->nbytes] = '\0';
1049 }
1050 }
1051
1052 /*
1053 * Convert a numeric-string to its locale-specific appearance.
1054 * The string must have one of these forms:
1055 *
1056 * 1) [sign] digits [exponent-part]
1057 * 2) [sign] digits '.' [digits] [exponent-part]
1058 *
1059 * Not allowed, since _mpd_to_string() never returns this form:
1060 *
1061 * 3) [sign] '.' digits [exponent-part]
1062 *
1063 * Input: result->data := original numeric string (ASCII)
1064 * result->bytes := strlen(result->data)
1065 * result->nchars := strlen(result->data)
1066 *
1067 * Output: result->data := modified or original string
1068 * result->bytes := strlen(result->data)
1069 * result->nchars := number of characters (possibly UTF-8)
1070 */
1071 static int
1072 _mpd_apply_lconv(mpd_mbstr_t *result, const mpd_spec_t *spec, uint32_t *status)
1073 {
1074 const char *sign = NULL, *intpart = NULL, *dot = NULL;
1075 const char *rest, *dp;
1076 char *decstring;
1077 mpd_ssize_t n_int, n_rest;
1078
1079 /* original numeric string */
1080 dp = result->data;
1081
1082 /* sign */
1083 if (*dp == '+' || *dp == '-' || *dp == ' ') {
1084 sign = dp++;
1085 }
1086 /* integer part */
1087 assert(isdigit((uchar)*dp));
1088 intpart = dp++;
1089 while (isdigit((uchar)*dp)) {
1090 dp++;
1091 }
1092 n_int = (mpd_ssize_t)(dp-intpart);
1093 /* decimal point */
1094 if (*dp == '.') {
1095 dp++; dot = spec->dot;
1096 }
1097 /* rest */
1098 rest = dp;
1099 n_rest = result->nbytes - (mpd_ssize_t)(dp-result->data);
1100
1101 if (dot == NULL && (*spec->sep == '\0' || *spec->grouping == '\0')) {
1102 /* _mpd_add_sep_dot() would not change anything */
1103 return 1;
1104 }
1105
1106 /* Determine the size of the new decimal string after inserting the
1107 * decimal point, optional separators and optional padding. */
1108 decstring = result->data;
1109 result->data = NULL;
1110 _mpd_add_sep_dot(result, sign, intpart, n_int, dot,
1111 rest, n_rest, spec);
1112
1113 result->data = mpd_alloc(result->nbytes+1, 1);
1114 if (result->data == NULL) {
1115 *status |= MPD_Malloc_error;
1116 mpd_free(decstring);
1117 return 0;
1118 }
1119
1120 /* Perform actual writes. */
1121 _mpd_add_sep_dot(result, sign, intpart, n_int, dot,
1122 rest, n_rest, spec);
1123
1124 mpd_free(decstring);
1125 return 1;
1126 }
1127
1128 /* Add padding to the formatted string if necessary. */
1129 static int
1130 _mpd_add_pad(mpd_mbstr_t *result, const mpd_spec_t *spec, uint32_t *status)
1131 {
1132 if (result->nchars < spec->min_width) {
1133 mpd_ssize_t add_chars, add_bytes;
1134 size_t lpad = 0, rpad = 0;
1135 size_t n_fill, len, i, j;
1136 char align = spec->align;
1137 uint8_t err = 0;
1138 char *cp;
1139
1140 n_fill = strlen(spec->fill);
1141 add_chars = (spec->min_width - result->nchars);
1142 /* max value: MPD_MAX_PREC * 4 */
1143 add_bytes = add_chars * (mpd_ssize_t)n_fill;
1144
1145 cp = result->data = mpd_realloc(result->data,
1146 result->nbytes+add_bytes+1,
1147 sizeof *result->data, &err);
1148 if (err) {
1149 *status |= MPD_Malloc_error;
1150 mpd_free(result->data);
1151 return 0;
1152 }
1153
1154 if (align == 'z') {
1155 align = '=';
1156 }
1157
1158 if (align == '<') {
1159 rpad = add_chars;
1160 }
1161 else if (align == '>' || align == '=') {
1162 lpad = add_chars;
1163 }
1164 else { /* align == '^' */
1165 lpad = add_chars/2;
1166 rpad = add_chars-lpad;
1167 }
1168
1169 len = result->nbytes;
1170 if (align == '=' && (*cp == '-' || *cp == '+' || *cp == ' ')) {
1171 /* leave sign in the leading position */
1172 cp++; len--;
1173 }
1174
1175 memmove(cp+n_fill*lpad, cp, len);
1176 for (i = 0; i < lpad; i++) {
1177 for (j = 0; j < n_fill; j++) {
1178 cp[i*n_fill+j] = spec->fill[j];
1179 }
1180 }
1181 cp += (n_fill*lpad + len);
1182 for (i = 0; i < rpad; i++) {
1183 for (j = 0; j < n_fill; j++) {
1184 cp[i*n_fill+j] = spec->fill[j];
1185 }
1186 }
1187
1188 result->nbytes += add_bytes;
1189 result->nchars += add_chars;
1190 result->data[result->nbytes] = '\0';
1191 }
1192
1193 return 1;
1194 }
1195
1196 /* Round a number to prec digits. The adjusted exponent stays the same
1197 or increases by one if rounding up crosses a power of ten boundary.
1198 If result->digits would exceed MPD_MAX_PREC+1, MPD_Invalid_operation
1199 is set and the result is NaN. */
1200 static inline void
1201 _mpd_round(mpd_t *result, const mpd_t *a, mpd_ssize_t prec,
1202 const mpd_context_t *ctx, uint32_t *status)
1203 {
1204 mpd_ssize_t exp = a->exp + a->digits - prec;
1205
1206 if (prec <= 0) {
1207 mpd_seterror(result, MPD_Invalid_operation, status); /* GCOV_NOT_REACHED */
1208 return; /* GCOV_NOT_REACHED */
1209 }
1210 if (mpd_isspecial(a) || mpd_iszero(a)) {
1211 mpd_qcopy(result, a, status); /* GCOV_NOT_REACHED */
1212 return; /* GCOV_NOT_REACHED */
1213 }
1214
1215 mpd_qrescale_fmt(result, a, exp, ctx, status);
1216 if (result->digits > prec) {
1217 mpd_qrescale_fmt(result, result, exp+1, ctx, status);
1218 }
1219 }
1220
1221 /*
1222 * Return the string representation of an mpd_t, formatted according to 'spec'.
1223 * The format specification is assumed to be valid. Memory errors are indicated
1224 * as usual. This function is quiet.
1225 */
1226 char *
1227 mpd_qformat_spec(const mpd_t *dec, const mpd_spec_t *spec,
1228 const mpd_context_t *ctx, uint32_t *status)
1229 {
1230 mpd_uint_t dt[MPD_MINALLOC_MAX];
1231 mpd_t tmp = {MPD_STATIC|MPD_STATIC_DATA,0,0,0,MPD_MINALLOC_MAX,dt};
1232 mpd_ssize_t dplace = MPD_DEFAULT_DOTPLACE;
1233 mpd_mbstr_t result;
1234 mpd_spec_t stackspec;
1235 char type = spec->type;
1236 int flags = 0;
1237
1238
1239 if (spec->min_width > MPD_MAX_PREC) {
1240 *status |= MPD_Invalid_operation;
1241 return NULL;
1242 }
1243
1244 if (isupper((uchar)type)) {
1245 type = tolower((uchar)type);
1246 flags |= MPD_FMT_UPPER;
1247 }
1248 if (spec->sign == ' ') {
1249 flags |= MPD_FMT_SIGN_SPACE;
1250 }
1251 else if (spec->sign == '+') {
1252 flags |= MPD_FMT_SIGN_PLUS;
1253 }
1254
1255 if (mpd_isspecial(dec)) {
1256 if (spec->align == 'z') {
1257 stackspec = *spec;
1258 stackspec.fill[0] = ' ';
1259 stackspec.fill[1] = '\0';
1260 stackspec.align = '>';
1261 spec = &stackspec;
1262 }
1263 }
1264 else {
1265 uint32_t workstatus = 0;
1266 mpd_ssize_t prec;
1267
1268 switch (type) {
1269 case 'g': flags |= MPD_FMT_TOSCI; break;
1270 case 'e': flags |= MPD_FMT_EXP; break;
1271 case '%': flags |= MPD_FMT_PERCENT;
1272 if (!mpd_qcopy(&tmp, dec, status)) {
1273 return NULL;
1274 }
1275 tmp.exp += 2;
1276 dec = &tmp;
1277 type = 'f'; /* fall through */
1278 case 'f': flags |= MPD_FMT_FIXED; break;
1279 default: abort(); /* debug: GCOV_NOT_REACHED */
1280 }
1281
1282 if (spec->prec >= 0) {
1283 if (spec->prec > MPD_MAX_PREC) {
1284 *status |= MPD_Invalid_operation;
1285 goto error;
1286 }
1287
1288 switch (type) {
1289 case 'g':
1290 prec = (spec->prec == 0) ? 1 : spec->prec;
1291 if (dec->digits > prec) {
1292 _mpd_round(&tmp, dec, prec, ctx,
1293 &workstatus);
1294 dec = &tmp;
1295 }
1296 break;
1297 case 'e':
1298 if (mpd_iszero(dec)) {
1299 dplace = 1-spec->prec;
1300 }
1301 else {
1302 _mpd_round(&tmp, dec, spec->prec+1, ctx,
1303 &workstatus);
1304 dec = &tmp;
1305 }
1306 break;
1307 case 'f':
1308 mpd_qrescale(&tmp, dec, -spec->prec, ctx,
1309 &workstatus);
1310 dec = &tmp;
1311 break;
1312 }
1313 }
1314
1315 if (type == 'f') {
1316 if (mpd_iszero(dec) && dec->exp > 0) {
1317 mpd_qrescale(&tmp, dec, 0, ctx, &workstatus);
1318 dec = &tmp;
1319 }
1320 }
1321
1322 if (workstatus&MPD_Errors) {
1323 *status |= (workstatus&MPD_Errors);
1324 goto error;
1325 }
1326 }
1327
1328 /*
1329 * At this point, for all scaled or non-scaled decimals:
1330 * 1) 1 <= digits <= MAX_PREC+1
1331 * 2) adjexp(scaled) = adjexp(orig) [+1]
1332 * 3) case 'g': MIN_ETINY <= exp <= MAX_EMAX+1
1333 * case 'e': MIN_ETINY-MAX_PREC <= exp <= MAX_EMAX+1
1334 * case 'f': MIN_ETINY <= exp <= MAX_EMAX+1
1335 * 4) max memory alloc in _mpd_to_string:
1336 * case 'g': MAX_PREC+36
1337 * case 'e': MAX_PREC+36
1338 * case 'f': 2*MPD_MAX_PREC+30
1339 */
1340 result.nbytes = _mpd_to_string(&result.data, dec, flags, dplace);
1341 result.nchars = result.nbytes;
1342 if (result.nbytes < 0) {
1343 *status |= MPD_Malloc_error;
1344 goto error;
1345 }
1346
1347 if (*spec->dot != '\0' && !mpd_isspecial(dec)) {
1348 if (result.nchars > MPD_MAX_PREC+36) {
1349 /* Since a group length of one is not explicitly
1350 * disallowed, ensure that it is always possible to
1351 * insert a four byte separator after each digit. */
1352 *status |= MPD_Invalid_operation;
1353 mpd_free(result.data);
1354 goto error;
1355 }
1356 if (!_mpd_apply_lconv(&result, spec, status)) {
1357 goto error;
1358 }
1359 }
1360
1361 if (spec->min_width) {
1362 if (!_mpd_add_pad(&result, spec, status)) {
1363 goto error;
1364 }
1365 }
1366
1367 mpd_del(&tmp);
1368 return result.data;
1369
1370 error:
1371 mpd_del(&tmp);
1372 return NULL;
1373 }
1374
1375 char *
1376 mpd_qformat(const mpd_t *dec, const char *fmt, const mpd_context_t *ctx,
1377 uint32_t *status)
1378 {
1379 mpd_spec_t spec;
1380
1381 if (!mpd_parse_fmt_str(&spec, fmt, 1)) {
1382 *status |= MPD_Invalid_operation;
1383 return NULL;
1384 }
1385
1386 return mpd_qformat_spec(dec, &spec, ctx, status);
1387 }
1388
1389 /*
1390 * The specification has a *condition* called Invalid_operation and an
1391 * IEEE *signal* called Invalid_operation. The former corresponds to
1392 * MPD_Invalid_operation, the latter to MPD_IEEE_Invalid_operation.
1393 * MPD_IEEE_Invalid_operation comprises the following conditions:
1394 *
1395 * [MPD_Conversion_syntax, MPD_Division_impossible, MPD_Division_undefined,
1396 * MPD_Fpu_error, MPD_Invalid_context, MPD_Invalid_operation,
1397 * MPD_Malloc_error]
1398 *
1399 * In the following functions, 'flag' denotes the condition, 'signal'
1400 * denotes the IEEE signal.
1401 */
1402
1403 static const char *mpd_flag_string[MPD_NUM_FLAGS] = {
1404 "Clamped",
1405 "Conversion_syntax",
1406 "Division_by_zero",
1407 "Division_impossible",
1408 "Division_undefined",
1409 "Fpu_error",
1410 "Inexact",
1411 "Invalid_context",
1412 "Invalid_operation",
1413 "Malloc_error",
1414 "Not_implemented",
1415 "Overflow",
1416 "Rounded",
1417 "Subnormal",
1418 "Underflow",
1419 };
1420
1421 static const char *mpd_signal_string[MPD_NUM_FLAGS] = {
1422 "Clamped",
1423 "IEEE_Invalid_operation",
1424 "Division_by_zero",
1425 "IEEE_Invalid_operation",
1426 "IEEE_Invalid_operation",
1427 "IEEE_Invalid_operation",
1428 "Inexact",
1429 "IEEE_Invalid_operation",
1430 "IEEE_Invalid_operation",
1431 "IEEE_Invalid_operation",
1432 "Not_implemented",
1433 "Overflow",
1434 "Rounded",
1435 "Subnormal",
1436 "Underflow",
1437 };
1438
1439 /* print conditions to buffer, separated by spaces */
1440 int
1441 mpd_snprint_flags(char *dest, int nmemb, uint32_t flags)
1442 {
1443 char *cp;
1444 int n, j;
1445
1446 assert(nmemb >= MPD_MAX_FLAG_STRING);
1447
1448 *dest = '\0'; cp = dest;
1449 for (j = 0; j < MPD_NUM_FLAGS; j++) {
1450 if (flags & (1U<<j)) {
1451 n = snprintf(cp, nmemb, "%s ", mpd_flag_string[j]);
1452 if (n < 0 || n >= nmemb) return -1;
1453 cp += n; nmemb -= n;
1454 }
1455 }
1456
1457 if (cp != dest) {
1458 *(--cp) = '\0';
1459 }
1460
1461 return (int)(cp-dest);
1462 }
1463
1464 /* print conditions to buffer, in list form */
1465 int
1466 mpd_lsnprint_flags(char *dest, int nmemb, uint32_t flags, const char *flag_string[])
1467 {
1468 char *cp;
1469 int n, j;
1470
1471 assert(nmemb >= MPD_MAX_FLAG_LIST);
1472 if (flag_string == NULL) {
1473 flag_string = mpd_flag_string;
1474 }
1475
1476 *dest = '[';
1477 *(dest+1) = '\0';
1478 cp = dest+1;
1479 --nmemb;
1480
1481 for (j = 0; j < MPD_NUM_FLAGS; j++) {
1482 if (flags & (1U<<j)) {
1483 n = snprintf(cp, nmemb, "%s, ", flag_string[j]);
1484 if (n < 0 || n >= nmemb) return -1;
1485 cp += n; nmemb -= n;
1486 }
1487 }
1488
1489 /* erase the last ", " */
1490 if (cp != dest+1) {
1491 cp -= 2;
1492 }
1493
1494 *cp++ = ']';
1495 *cp = '\0';
1496
1497 return (int)(cp-dest); /* strlen, without NUL terminator */
1498 }
1499
1500 /* print signals to buffer, in list form */
1501 int
1502 mpd_lsnprint_signals(char *dest, int nmemb, uint32_t flags, const char *signal_string[])
1503 {
1504 char *cp;
1505 int n, j;
1506 int ieee_invalid_done = 0;
1507
1508 assert(nmemb >= MPD_MAX_SIGNAL_LIST);
1509 if (signal_string == NULL) {
1510 signal_string = mpd_signal_string;
1511 }
1512
1513 *dest = '[';
1514 *(dest+1) = '\0';
1515 cp = dest+1;
1516 --nmemb;
1517
1518 for (j = 0; j < MPD_NUM_FLAGS; j++) {
1519 uint32_t f = flags & (1U<<j);
1520 if (f) {
1521 if (f&MPD_IEEE_Invalid_operation) {
1522 if (ieee_invalid_done) {
1523 continue;
1524 }
1525 ieee_invalid_done = 1;
1526 }
1527 n = snprintf(cp, nmemb, "%s, ", signal_string[j]);
1528 if (n < 0 || n >= nmemb) return -1;
1529 cp += n; nmemb -= n;
1530 }
1531 }
1532
1533 /* erase the last ", " */
1534 if (cp != dest+1) {
1535 cp -= 2;
1536 }
1537
1538 *cp++ = ']';
1539 *cp = '\0';
1540
1541 return (int)(cp-dest); /* strlen, without NUL terminator */
1542 }
1543
1544 /* The following two functions are mainly intended for debugging. */
1545 void
1546 mpd_fprint(FILE *file, const mpd_t *dec)
1547 {
1548 char *decstring;
1549
1550 decstring = mpd_to_sci(dec, 1);
1551 if (decstring != NULL) {
1552 fprintf(file, "%s\n", decstring);
1553 mpd_free(decstring);
1554 }
1555 else {
1556 fputs("mpd_fprint: output error\n", file); /* GCOV_NOT_REACHED */
1557 }
1558 }
1559
1560 void
1561 mpd_print(const mpd_t *dec)
1562 {
1563 char *decstring;
1564
1565 decstring = mpd_to_sci(dec, 1);
1566 if (decstring != NULL) {
1567 printf("%s\n", decstring);
1568 mpd_free(decstring);
1569 }
1570 else {
1571 fputs("mpd_fprint: output error\n", stderr); /* GCOV_NOT_REACHED */
1572 }
1573 }
1574
1575
.NET Tutorials, Forums, Interview Questions And Answers
Dynamically add class to div with FileInfo
Posted Date: September 07, 2010 | Category: ASP.Net
Hey all. I'm still really new to this, so I apologize if I explain my problem poorly. I'll do my best. I used this method to add an "active" class to my main navigation on another site. Now I'm using it to change the background image for a div based on the current page. Problem is, it's not working in this case. When I view the source I just see the tag sitting there like it wasn't compiled:

<div class="CategoryDetail <%= background %>" id="ctl00_ctl00_MainContent_uxCategory_cat_detail">

Here's the code behind:

using System;
using System.Collections.Generic;
using System.Data;
using System.Configuration;
using System.Collections;
using System.Web;
using System.Web.Security;
using System.Web.UI;
using System.Web.UI.WebControls;
using System.Web.UI.WebControls.WebParts;
using System.Web.UI.HtmlControls;
using ZNode.Libraries.Framework.Business;
using ZNode.Libraries.ECommerce.Catalog;
using ZNode.Libraries.DataAccess.Custom;

public partial class Themes_Default_Category_Category : System.Web.UI.UserControl
{
    public string background;

    protected void Page_Load(object sender, EventArgs e)
    {
        System.IO.FileInfo fi = new System.IO.FileInfo(System.Web.HttpContext.Current.Request.Url.AbsolutePath);
        switch (fi.Name)
More Related Resource Links
Dynamically Generate a WCF Proxy class at runtime and Callling web methods via reflection
Hi,
I'm busy writing an application where the user will have to specify the location of a ?wsdl file for asmx and/or svc type services.
My application will have to :
1. Deduce from the wsdl what methods (with parameters and return types) are available on the service.
2. Provide a way to generate a Proxy class in memory and execute methods against it with, e.g., Reflection
for instance: genericProxy.Invoke("HelloWorldMethod", parameter1, parameter2);
I want to avoid using something like this : notSoGenericProxy.HelloWorldMethod(parameter1,parameter2); seeing that my application will have no
idea what service it will have to work with at runtime.
Is there a way to achieve this?
Thank you for any help in advance!
Class Diagrams
Class diagrams show the static structure of the systems. Classes define the properties of the objects which belong to them. These include:
Attributes - (second container) the data properties of the classes including type, default value and constraints.
Creating a Simple Class in C#
The second article in the C# Object-Oriented Programming tutorial describes how classes are created in C# and how behaviour, in the form of publicly visible methods and private, hidden methods, can be added. This demonstrates some of the uses of encapsulation.
Static Class for Beginners
In general, the members of a class can be accessed through an instance of that class when they are public, but sometimes you need to access or call members of a class without an instance. This can be achieved by declaring them as static.
Dynamically Create Controls in ASP.NET by Using Visual C# .NET
This article demonstrates how to dynamically create a control for an .aspx page. The sample project does the following:
Creates two TextBox controls.
Verifies that the contents (TextBox.text) and the attributes of the TextBox are saved across posts to the server.
Illustrates handling events that are posted by a dynamically-created control
Dynamically Create Controls in ASP.NET with Visual Basic .NET
This step-by-step article describes how to dynamically create controls for an ASPX Web page.
The sample project does the following:
It creates two TextBox controls.
It verifies that the TextBox contents (TextBox.text) and attributes are saved across posts to the server.
It describes how events that are posted by a dynamically created control are handled.
abstract away the source of the connection string using a class with a static property.
ASP.NET provides a configuration system we can use to keep our applications flexible at runtime. In this article we will examine some tips and best practices for using the configuration system for the best results.
The <appSettings> element of a web.config file is a place to store connection strings, server names, file paths, and other miscellaneous settings needed by an application to perform work. The items inside appSettings are items that need to be configurable depending upon the environment; for instance, any database connection strings will change as you move your application from a testing and staging server into production.
Encapsulation
Let's abstract away the source of the connection string using a class with a static property.
MessageQueue Class
This sample demonstrates two way communications using message queuing between a device application and a desktop application. One application sends book order messages to a message queue and another application receives these book orders from the message queue and responds to the device queue that the orders have been processed.
dynamically adding sql server columns based on number of files in fileuploader
I want to create a new table in a SQL database at runtime. The column names would be "Name", "Date", "Event". I would also like to have one more column named "Image", but the problem is that since the number of images will vary, I want the application to count the number of files in the multiple fileuploader and then add an image column to the table for each of the uploaded files.
For example if i am uploading 3 files in the multiple fileuploader, the names should be "Name", "Date", "Event", "Image1", "image2", Image3"
I also would like that table name to be name dynamically from the textbox Name when the user enters the information.
I have the following code to make a new table, but I don't know how to name the fields at runtime as previously asked. Please help!!!
Dim objConn As New SqlConnection("Server=<servername>;uid=<userid>;pwd=<password>;database=master")
objConn.Open()
How to change the default page base class?
I've tried setting the <pages pageBaseType="DynamicWebPage" /> value in web.config, but when I response.write out the page type, I'm still getting Microsoft.WebPages.WebPage.
I'm simply trying to sub-class WebPage and add some additional functionality such as a dynamic PageData dictionary similar to Phil Haack's dynamic ViewData dictionary.
Thanks,
Adam
Can I replace the EntityObject as base-class in Entity Designer 3.5 sp1?
I'm using EF3.5sp1 for a project, and I've been working around many of its problems... as such, my entity partial classes have a lot of logic in them for change-tracking and whatnot. Obviously, I'd like to factor this logic out into a common base class, but of course all my objects must derive from EntityObject. Is it possible to tell them to derive from some class of my own invention (which would actually likely derive from EntityObject themselves)?
I'd rather avoid editing the EDMX file unless the changes will be safe on entity-model-update, since I'm still making changes to the database schema in this early state of my project.
Thanks
Also, is there any good workaround for the fact that the change isn't tracked when you change EntityReference.EntityKey?
using the linqDataSource control programmatically (dynamically)
Hello,
my scenario is this: I want to use the linqDataSource control to bind data to a gridview for it's efficient built-in paging and sorting features *but* I don't want the data to be bound until the user has input various search parameters, so I need to bind the data from a button click event. Also, the search options will be somewhat complex in that there will be several optional search parameters, via various dropdowns and textboxes, so I can't just declaratively/statically setup all the WHERE parameters on the linqDataSource (at least I'm assuming I can't, or that it may result in poor performance if I do and just assign default values like the everything wildcard '%' for the cases the user doesn't use a particular search parameter). Given that brief description, here is an example of what I'm doing, but with only one WHERE parameter for simplicity, the real code will be adding anywhere from 1 to about 12 WHERE parameters depending on how the user sets the search options in the GUI.
the linqDataSource on the aspx page:
<asp:LinqDataSource ID="ldsMain" runat="server"
ContextTypeName="myProject.myProjectDataContext"
Select="new (field1, field2, field3, etc..)"
TableName="theTableName">
</asp:LinqDat
Search for a string in my datasource dynamically
I am displaying an Rssfeed on my page by using XmlDataSource and Datalist controls.
I want the user to be able to limit the information that is returned to my datalist. To do this I have a textbox on the form that allows the user to enter their search criteria.
I need to be able to dynamically create a query string utilizing the information that the user entered in the textbox. I am also using a drop down list and allowing the user to select whether they want to search in the title, description, or all parts of the xml file.
I am having trouble figuring out how to reference the data contained in my textbox in my query string. Where the query string says 'asp', I want to replace 'asp' with the content from my sSearch variable. Can anyone tell me how to reference my variable in the query string???
string sSearch = txtSearch.Text;
string SQuery = "";
if (DropDownList2.SelectedIndex == 0)
{
    // reference the user's search text instead of the hard-coded 'asp'
    SQuery = "[contains(title, '" + sSearch + "')]";
}
Singleton Pattern and Abstract Class
I know what Singleton Pattern means and Abstract class means.
What I wanted to know was how would this apply to real world.
Could anyone give me any good example or simple explanation.
Say I have a simple website, why would I use any of the above if any.
Why would it simplify my architechture.
Thanks in advace.
How to implement custom HttpContext for each request under class that implements IHTTPHandler
Hello All,
I created an application and implemented IHTTPHandler for all incoming request ending with ".aspx" extension.
Under the "ProcessRequest" method, I am creating an instance of HttpContext (with URL attributes different from my application's URL, i.e. if I am working on localhost then specifying Yahoo.com as its URL) and assigning it to "context" which comes as a method argument.
After redirection, an error is generated. Also, the custom HTTPContext is not passed to the requested page (default.aspx, in my case.)
Code is as follows.
Public Sub ProcessRequest(ByVal context As System.Web.HttpContext) Implements System.Web.IHttpHandler.ProcessRequest
Try
Dim requestedUrl As String
Dim targetUrl As String
Dim urlLength As Integer
'Save settings which will be used while redirecting to appropriate page
requestedUrl = context.Request.RawUrl
If requestedUrl.IndexOf("?") >= 0 Then
targetUrl = requestedUrl.Substring(0, requestedUrl.IndexOf("?"))
Else
targetUrl = requestedUrl
End If
If targetUrl = Nothing Or targetUrl.Length = 0 Then
targetUrl = requestedUrl
End If
urlLengt
Persist data in a dynamically populated ListBox in a user control
Hi All,
I am trying to persist data in a dynamically populated ListBox in a user control. Here are the details
I have a user control which contains a ListBox and a button. On Page_Load of the user control I am populating the ListBox control with some values. On Click of the button, I am trying to fetch the values which I have selected in the ListBox. Though this seems very simple and straightforward, I am unable to fetch values.
My Page_Load Function
void Page_Load(object sender, EventArgs e)
{
    if (!IsPostBack)
    {
        listBox.Items.Add("Item1");
        listBox.Items.Add("item2");
        listBox.Items.Add("item3");
    }
}
My Click Function
protected void Button1_Click(object sender, EventArgs e)
{
    string strValue = listBox.SelectedValue;
}
I have placed breakpoints at Page_Load and on the click function, and I am seeing that the moment the breakpoin
Using a GenericHandler as a WebResource inside a Class Library
Hi,
I hope this is the right place to ask this question.
I'm trying to make a control class library. I want to have a GenericHandler as a WebResource so I could call it from the web project that will use that control.
I need this handler will get parameters from the query string and return JSON data acourding to what it get in the QS.
I know how to create JS and images WebResources, so I tried the same on a generic handler I added to the class library project but all it does is return the source of the handler file and not the result.
Is what I'm trying to do possible? If so, what should I do?
Thanks :-)
How to Convert DOCX to TXT with Java REST API
This tutorial shows how to convert DOCX to TXT with a Java REST API in the cloud. To export DOCX to TXT format, we're using the Aspose.Words for Java Cloud SDK. If you need DOCX to TXT conversion in a Java low-code API, this can be achieved with the steps and code below.
Prerequisite
Steps to Convert DOCX to TXT in Java REST API
1. Set Client ID and Client Secret for the API
2. Create an instance of WordsApi class with client credentials
3. Specify input and output files
4. Read input DOCX file and upload to cloud storage
5. Create an instance of ConvertDocumentRequest with input and output file formats
6. Call convertDocument method to Convert DOCX to TXT using REST API
7. Save the output TXT file on local disk
Code for DOCX to TXT Conversion in Java Low Code API
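The original code listing did not survive in this copy, so here is a minimal sketch of how the steps above map onto the Aspose.Words Cloud SDK for Java. Treat the package paths, the ApiClient/WordsApi constructors and the ConvertDocumentRequest parameter list as assumptions based on typical versions of this SDK; verify them against the SDK version you install. The file names input.docx and output.txt are placeholders.

import java.nio.file.Files;
import java.nio.file.Paths;

import com.aspose.words.cloud.ApiClient;
import com.aspose.words.cloud.api.WordsApi;
import com.aspose.words.cloud.model.requests.ConvertDocumentRequest;

public class ConvertDocxToTxt {
    public static void main(String[] args) throws Exception {
        // Steps 1-2: authenticate with the Client ID / Client Secret from the
        // Aspose Cloud dashboard and create the WordsApi entry point.
        ApiClient apiClient = new ApiClient("MY_CLIENT_ID", "MY_CLIENT_SECRET", null);
        WordsApi wordsApi = new WordsApi(apiClient);

        // Steps 3-4: read the local DOCX file into memory.
        byte[] document = Files.readAllBytes(Paths.get("input.docx"));

        // Step 5: build the conversion request, naming the output format.
        // (Parameter order and count vary between SDK versions.)
        ConvertDocumentRequest request = new ConvertDocumentRequest(
            document, "txt", null, null, null, null);

        // Step 6: call the conversion endpoint; the converted bytes come back.
        byte[] txtBytes = wordsApi.convertDocument(request);

        // Step 7: save the output TXT file on the local disk.
        Files.write(Paths.get("output.txt"), txtBytes);
    }
}

This sketch sends the document bytes along with the request rather than uploading to cloud storage first; the SDK also supports the upload-then-convert flow described in step 4.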
The code snippet above lets you convert DOCX to TXT with a Java REST API. You merely have to provide the DOCX file to the Aspose.Words REST API SDK for Java and download the output TXT file to save it locally, using the Aspose conversion API online.
This DOCX to TXT conversion can be used from any no-code or low-code app on any platform.
Please check out a related feature at the following link: How to Convert DOCX to PDF with Java REST API
Creating a URL Shortener with JavaScript
GoSEO.id - A URL shortener is a technique that makes a page on a site reachable through a short address in addition to its original address. The goal of a URL shortener is to save space and make it easier for visitors to remember the address of a page.
A URL shortener is especially useful for affiliate marketers, so the URLs you share don't look like raw Blogger post URLs.
Here is the code/script for building a URL shortener with JavaScript, by GoSEO:
<script>
//<![CDATA[
var uri = window.location.toString();
if (uri.indexOf("?") > 0) {
var clean_uri = uri.substring(0, uri.indexOf("?"));
window.history.replaceState({}, document.title, clean_uri);
}
// guard: only run the redirect logic on /do/ URLs
var parts = window.location.href.split("do/");
var key = parts[1] ? parts[1].replace("/","") : null;
var urls={
"soalcpns":"https://www.goseo.id/2019/12/ebook-soal-cpns.html"
}
if(key){
if(urls[key]){
window.location.href=urls[key]
}else{
document.write("'"+key+"' not found :(");
}
}
//]]>
</script>
Place the script above just before </head>. Note that the highlighted soalcpns key is the shortener and https://www.goseo.id/2019/12/ebook-soal-cpns.html is the post URL; you only need to replace the shortener key and the post URL with your own.
With the script above we get a shortened URL like this:
goseo.id/do/soalcpns
And that URL will redirect to:
https://www.goseo.id/2019/12/ebook-soal-cpns.html
How about that? Short, right? And what if you want more URLs? Easy, here is the code/script:
<script>
//<![CDATA[
var uri = window.location.toString();
if (uri.indexOf("?") > 0) {
var clean_uri = uri.substring(0, uri.indexOf("?"));
window.history.replaceState({}, document.title, clean_uri);
}
// guard: only run the redirect logic on /do/ URLs
var parts = window.location.href.split("do/");
var key = parts[1] ? parts[1].replace("/","") : null;
var urls={
"soalcpns":"https://www.goseo.id/2019/12/ebook-soal-cpns.html",
"fastloadads":"https://www.goseo.id/2019/12/mempercepat-loading-iklan-adsense-terbaru.html"
}
if(key){
if(urls[key]){
window.location.href=urls[key]
}else{
document.write("'"+key+"' not found :(");
}
}
//]]>
</script>
Pay attention to the , (comma): the last URL entry does not use a trailing comma, and the same applies when you add more URLs.
That wraps up today's tutorial. If anything is unclear, feel free to leave a comment.
Q: How do you get 8 with the numbers 1 2 3 and 4?

Best Answer
Subtract 2 from 3 to get 1, add that to the 1 we already had to get 2, and multiply that by 4. In short form: 4*(1+(3-2)) = 8.
Related questions
How do you make 3 with numbers 1 2 and 4?
4 + 1 - 2 = 3
Is nine a triangle number?
No. The triangular numbers are 1, 1+2, 1+2+3, 1+2+3+4, 1+2+3+4+5, ... (that is, 1, 3, 6, 10, 15, ...), and 9 is not among them.
How do you get the number 3 using the numbers 1 2 3 4?
2 + 4 - 3*1 = 3
How do you get 15 with the numbers 1 2 3 and 4?
It is: 2*(3+4)+1 = 15
How do you get 19 out of the numbers 1 2 3 4?
4 x (3 + 2) - 1.
When you subtract a square number from another the answer is 3 what are the 2 numbers?
1 x 1 = 1 and 2 x 2 = 4, and 4 - 1 = 3. Answer: the numbers are 1 and 2.
What is between -3 and 4?
If it is integers, you have -2, -1, 0, 1, 2 and 3. If rational numbers or irrational numbers or real numbers, there are an infinity of them between -3 and 4.
How many 4 digit numbers can you make out of 1 1 2 and 3?
The digits 1, 1, 2 and 3 contain only three distinct digits, so if digits may repeat the answer is 3*3*3*3 = 81. If each of the four given digits is used exactly once, the answer is 4!/2! = 12, dividing by 2! because the two 1s are interchangeable.
What are all the prime numbers from 1-4?
The prime numbers from 1 to 4 are 2 and 3.
How do you get 29 out of the numbers 1 2 3 4?
32 + 1 - 4 (treating the 3 and 2 as the two-digit number 32), or 4! + (3 x 2) - 1, or 1 x (4! + 3 + 2) all equal 29.
How do you get the number 17 out of the numbers 1 2 3 4?
(4 + 1) x 3 + 2.
What is the median of these numbers 2 3 2 4 1 8 4?
the answer is 3
What is 3 square numbers?
1*1=1 2*2=4 3*3=9
How do you get the numbers 1 to 31 or just 16 and 31 using the numbers -1 2 -3 and 4?
Proof: First, we construct the prime numbers.
1 = 2 - 1
2
3 = 4 - 1
4
5 = 2 - 3*(-1)
7 = 4 - 3*(-1)
11 = 4*2 - 3*(-1)
13 = 4*3 + 2 - 1
Then
6 = 3*2 = (4 - 1)*2
8 = 4*2
9 = (-3)^2
10 = 2*5 = 2*(2 - 3*(-1))
12 = 3*4
14 = 7*2
15 = 5*3
16 = 8*2
31 = 4^2*2 - 1
How do you get 26 with the numbers 1 2 3 and 4?
4! + (1 + 3)/2 = 24 + 4/2 = 24 + 2 = 26
How do you add mixed numbers with like denominators?
To add mixed numbers with like denominators, first add the whole numbers, then add the fractions. For example: 2 2/3 + 4 1/3 = ?? First add the whole numbers. 2 2/3 + 4 1/3 = 6? Then add the fractions. 2 2/3 + 4 1/3 = 6 3/3 Simplify. 3/3 is one. 2 2/3 + 4 1/3 = 6 + 1 = 7.
If the sum of two numbers is 4 and their difference is -2 what are the numbers?
The numbers are 1 and 3: 1 + 3 = 4 and 1 - 3 = -2.
What are fourths in numbers?
1/4, 2/4, 3/4
How do you make 46 with the numbers 1 2 3 and 4?
4! * 2 + 1 - 3 =24*2 + 1 - 3 = 48 + 1 - 3 = 46
How do you get the number 21 out of the numbers 1 2 3 4?
(1 + 2) x (3 + 4) = 3 x 7 = 21
What is the mean of these numbers 1 1 1 1 3 3 4?
2
How can the numbers 1 2 3 4 equal 49?
It could be: 1*(3+4)^2 = 49
How do you get 11 with only the numbers 1 2 3 and 4?
(2 x 4) + (3 x 1)
How do you make the number 7 with only the numbers 1 2 3 and 4?
(4 + 3) * (2 - 1)
How do get 22 only using these numbers once 1 2 3 4?
2*(4*3 - 1)
Introduction to Networking for VMware Admins: Part 1, The Basics
An article by slowe from blog.scottlowe.org
This is the kick-off to a series of posts introducing networking concepts to VMware administrators. Based on the feedback I’ve gotten in speaking to VMware admins, networking is an area in which a lot of VMware-focused folks aren’t particularly comfortable. So, I thought it might be helpful to put up a few blog posts on networking concepts for VMware administrators. (If you’re already familiar with networking concepts, you probably don’t need to read this—unless you just have some free time on your hands.)
In this first article, I’ll cover some important networking basics. This will set the stage for discussions that will take place in future articles. Here are some of the topics that I’m going to cover in this first article:
• Layer 2 versus Layer 3: the OSI and DoD models
• Theory into reality: TCP/IP and Ethernet
• Bridging, switching, and routing
• Spanning Tree Protocol (STP)
• ARP and Flooding
Ready? Let’s get started.
Layer 2 Versus Layer 3: The OSI and DoD Models
In networking, two “models” of how networked systems should communicate drive almost everything. In the beginning, there was the OSI (Open Systems Interconnection) model, a seven layer model that described and defined how two systems might communicate with each other in a standardized fashion across a network. This model is largely theoretical, but it is still important to understand because it shapes much of what is done today in networking. Each of these layers was written with the idea of being as generic and interoperable as possible; the idea was that you could have standards (or protocols) at each layer so that the layers could evolve somewhat independently of one another.
Two of these layers—layer 2, the Data Link layer, and layer 3, the Network layer—are the basis for the “layer 2″ and “layer 3″ discussions that you so frequently hear thrown about when someone is discussing networking. A “sub-layer” of the Data Link layer (layer 2) is also the basis for another term that you’ll hear frequently thrown about in networking: the MAC address. This sublayer is the Media Access Control, or MAC, sublayer, and it’s where MAC addresses are used.
<aside>Note that “MAC” and “Mac” are very different! The first pertains to the Media Access Control sublayer of the OSI model; the second pertains to a line of computers made by Apple, Inc. You shouldn’t call an Apple computer a MAC, and you shouldn’t refer to a Media Access Control address as a Mac address. OK, stepping off the soap box now…</aside>
The third layer of the OSI model, the Network layer, is the “layer 3″ that so often referenced in network discussions. Protocols like Internet Protocol (IP) and IPX operate at this layer.
After the creation of the OSI model, the US Department of Defense started work on what would eventually become the Internet. (No, it wasn’t invented by Al Gore. Sorry, Al.) As part of that work—I won’t cover that here as there have been plenty of other write-ups of that history—they created a four-layer model that became known as the DoD model or the TCP/IP model. The four layers of the DoD model—Link, Internet, Transport, and Application—have a rough correlation to the seven layers of the OSI model, as shown here. Despite this fact, discussions of “layer 2″ and “layer 3″ still refer to the Data Link and Network layers of the OSI model, and not to the DoD-TCP/IP model.
And that leads us to our next section…
Theory Into Reality: Ethernet and TCP/IP
The OSI model, in particular, is highly theoretical. As far as I know, there are no implementations that actually implement all seven layers. (They might implement the functions of all seven layers, but not the actual layers themselves.) However, the abstract nature of the OSI model was beneficial in the early days, when there were a number of different physical media types (Ethernet, Token Ring, ATM, etc.) and different network protocols (NetBEUI/NetBIOS, IPX/SPX, TCP/IP, SNA, etc.). Over time, though, the networking industry has—in the data center, at least, where this discussion is primarily focused—whittled itself down to just two standards that are almost universally deployed: Ethernet and TCP/IP.
Therefore, as I move through this series, I’m going to assume the use of Ethernet and TCP/IP. Yes, other protocols will almost certainly be present, but in the data center I think it is reasonably safe to assume the use of Ethernet and TCP/IP.
So, when I talk about “layer 2,” then, I am talking about Ethernet (and all of its variations: Ethernet, Fast Ethernet, Gigabit Ethernet, 10 Gigabit Ethernet). When I talk about a MAC address, I’m talking about an Ethernet address.
Similarly, when I talk about “layer 3,” I’m talking about Internet Protocol (the IP in TCP/IP).
This is not to say that other standards and other protocols don’t exist, but simply to narrow down the discussion so that I can productively move ahead with what I need to discuss.
Bridging, Switching, and Routing
A bridge is a device that operates at the Data Link layer (layer 2 of the OSI model) and is therefore referred to as a “layer 2 device” or a “Data Link device.” According to most definitions, a bridge has only two ports, and serves to connect two separate networks. A bridge can’t and doesn’t understand anything more than layer 2 stuff—it doesn’t know anything about upper layer protocols. Although there are different types of bridges, for the purposes of our discussion I’m going to assume the use of transparent bridges, meaning that the bridges are transparent to the hosts communicating across them. Cisco has more information on transparent bridging, which is the most common form of bridging in Ethernet-based networks.
A switch is, essentially, a multi-port bridge. It also operates at OSI layer 2 and doesn’t know about or understand anything with regards to upper layer protocols. Like a transparent bridge, a switch is invisible to the hosts communicating across it.
Neither bridges nor switches modify the frames that move across them.
A router is a device that operates at OSI layer 3. Because network protocols exist at layer 3, routers are generally protocol specific. I’ve limited the discussion here to TCP/IP, but routers exist for other network protocols as well. A key difference between bridges/switches and routers is that routers actually modify the packets moving across them, typically by changing the layer 2 addresses in the packet and by decrementing the Time To Live (TTL) counter. The TTL counter is a field that keeps a packet from endlessly circling the network; when the TTL expires (reaches 0), the packet is discarded. Ethernet frames do not have a TTL. (Routers also change the source and destination layer 2 addresses, but I’ll discuss that in more detail in a future post.)
Although you will see references to layer 3 switching, for the purposes of this series I will use switching to refer strictly to layer 2 functions and routing to refer strictly to layer 3 functions.
Now might also be a good time to discuss the idea of a broadcast domain (more information here). A broadcast domain encompasses all the devices and hosts that can reach each other by broadcast at the Data Link layer (layer 2). In other words, a broadcast domain will include all the devices and hosts that are connected by bridges or switches, but it will not include devices or hosts connected by a router. Broadcast domains are separated by layer 3 devices like routers.
Spanning Tree Protocol
Pop quiz time! Here’s your question: what happens if an Ethernet frame enters a switch, is forwarded out a port connected to another switch, which forwards the frame out a port back to the original switch?
This, boys and girls, is what is called a bridging (or switching) loop, and it is a Very Bad Thing. As I stated earlier, Ethernet frames don’t have a TTL, so there’s no notion of ensuring that a frame doesn’t circle the network endlessly.
Contrast that with what’s called a routing loop, where packets are sent to a router, which in turns sends them to another router, where they are then sent back to the first router, etc., etc. In this case, though, the TTL will be decremented each time the packet passes through the router, and eventually the TTL will expire. When the TTL expires, the packet is removed from the network.
To protect networks against bridging loops, the network experts created something called Spanning Tree Protocol (STP). The purpose of STP is to ensure that loops aren’t created as switches are connected to one another in larger networks. (In a single switch network, there’s obviously no need for STP.) This is clearly an admirable goal, but a side effect of STP is that it prevents multiple layer 2 paths between switches. I’ll have more to say about STP in future posts, but for now understand that STP is a necessary component in larger environments in order to prevent bridging loops.
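As a quick illustration (a toy Java sketch of my own, not anything STP itself runs), you can model switches as nodes and ask, before "cabling" a new inter-switch link, whether its two endpoints are already connected. If they are, the new link would close exactly the kind of forwarding loop that STP exists to break:

import java.util.Arrays;

// Toy illustration of the condition STP removes: a redundant link between
// already-connected switches creates a layer-2 loop, and Ethernet frames
// carry no TTL to ever age out of that loop.
public class LoopCheck {
    // Union-find: returns true if linking switches a and b would close a loop.
    public static boolean addsLoop(int[] parent, int a, int b) {
        int rootA = find(parent, a);
        int rootB = find(parent, b);
        if (rootA == rootB) {
            return true;        // a and b are already connected: new link loops
        }
        parent[rootA] = rootB;  // otherwise merge the two switch segments
        return false;
    }

    private static int find(int[] parent, int x) {
        while (parent[x] != x) {
            parent[x] = parent[parent[x]]; // path halving
            x = parent[x];
        }
        return x;
    }

    public static void main(String[] args) {
        int[] parent = {0, 1, 2}; // three switches, initially unconnected
        System.out.println(addsLoop(parent, 0, 1)); // false
        System.out.println(addsLoop(parent, 1, 2)); // false
        System.out.println(addsLoop(parent, 2, 0)); // true: this link closes a loop
        System.out.println(Arrays.toString(parent));
    }
}

STP does something more useful than simply refusing the link, though: the redundant link stays cabled, but one of its ports is put into a blocking state, ready to take over if the active path fails.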
ARP and Flooding
Recall from earlier in this post that addresses are employed at OSI layer 2 (these are called MAC addresses and, in the context of this discussion, are Ethernet addresses that look something like aa:bb:cc:11:22:33). You probably also already know that addresses are also employed at OSI layer 3, where IP resides. IP addresses—specifically IPv4 addresses, no need to discuss IPv6 just yet—are typically expressed in what’s called dotted decimal notation and look something like 192.168.100.123.
There’s a reason I’m mentioning all of this. In order for two systems (I’ll call them Host A and Host B) to communicate with each other, they must know how to get from A to B (and back again). This means they need to know the correct addresses in order to be able to communicate. Generally, the hosts will know the IP address (or hostname, which will resolve to IP address via the Domain Name System [DNS]), such as in the example of your laptop connecting to a web server via a URL like “http://192.168.100.123″ or “http://www.vmware.com”.
Stepping back to our layer 2 vs. layer 3 discussion for a second, recall that IP addresses operate a layer 3. However, in order to communicate across an Ethernet network, the hosts need to know more than just the IP addresses—the hosts also need to know the Ethernet (MAC) addresses that operate at layer 2.
That’s where ARP (Address Resolution Protocol) comes into play. ARP performs the necessary function of associating an IP address with a MAC address. When Host A needs to communicate with Host B and knows the IP address of Host B but not its MAC address, it will send out an ARP query. This ARP query is sent to the Ethernet broadcast address of ff:ff:ff:ff:ff:ff. Because this is a layer 2 broadcast address, it will reach all other hosts in the same broadcast domain (I defined what a broadcast domain is earlier). Assuming Host B is on the same broadcast domain as Host A, then Host B will receive the ARP query and will respond directly to Host A (not via broadcast, but via unicast to Host A’s MAC address). If Host B is not on the the same broadcast domain, then it won’t see the ARP query, won’t respond to Host A, and Host A will fail to connect to Host B.
Flooding is a term used to describe the behavior of a switch under certain conditions. Layer 2 broadcasts, such as ARP queries, have to be sent to all the ports on the switch; after all, a broadcast frame is supposed to be sent to all hosts. A broadcast frame has to be flooded to all ports. However, there are other instances in which flooding occurs. Recall that a switch is a multi-port bridge, operating at layer 2, that directs frames from a source MAC address to a destination MAC address. What happens if the switch doesn’t know which destination MAC address corresponds to which switch port? In this case, the switch must flood the frame out all ports, because it doesn’t have the necessary MAC address-to-port mapping, even though it’s not a broadcast frame. The switch then listens for the response to that frame in order to learn what port should be used next time it needs to direct traffic to that particular MAC address. When it does learn the mapping between port and MAC address, it stores that in an internal data structure so that it can use it for future traffic flows.
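The learn-and-flood behavior is simple enough to sketch in a few lines of Java. This toy model (my own illustration, with simplified types) shows the forwarding decision a transparent switch makes for each frame: learn the source MAC's port, forward known unicast destinations out a single port, and flood everything else:

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Toy model of a transparent switch: learn which port each source MAC
// lives behind, and flood frames whose destination is unknown or broadcast.
public class LearningSwitch {
    private static final String BROADCAST = "ff:ff:ff:ff:ff:ff";
    private final Map<String, Integer> macTable = new HashMap<>();
    private final int portCount;

    public LearningSwitch(int portCount) {
        this.portCount = portCount;
    }

    /** Returns the list of ports the frame should be sent out of. */
    public List<Integer> handleFrame(String srcMac, String dstMac, int inPort) {
        macTable.put(srcMac, inPort); // learn: the sender is reachable via inPort

        Integer outPort = macTable.get(dstMac);
        if (outPort != null && !BROADCAST.equals(dstMac)) {
            return Collections.singletonList(outPort); // known unicast destination
        }

        // Broadcast or unknown unicast: flood out every port except the ingress.
        List<Integer> floodPorts = new ArrayList<>();
        for (int port = 0; port < portCount; port++) {
            if (port != inPort) {
                floodPorts.add(port);
            }
        }
        return floodPorts;
    }
}

Notice that the flood excludes the ingress port. Combined with the loop example earlier, this is also why bridging loops are so destructive: a flooded frame that comes back in on another port just gets flooded again, forever.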
I’ll wrap up this first post here. I’ll build on and expand upon these basics in the next post. In the meantime, feel free to post any questions you might have in the comments below. Networking experts, if I have misrepresented something—keeping in mind that I’ve simplified certain concepts to keep things digestible for newcomers—you are welcome to post corrections or clarifications in the comments. Courteous comments are always welcome.
This article was originally posted on blog.scottlowe.org. Visit the site for more information on virtualization, servers, storage, and other enterprise technologies.
|
__label__pos
| 0.536595 |
What are cookies and how are they used in digital advertising?
The digital advertising industry has been abuzz lately, discussing the [future removal of third-party cookies](https://adthrive.com/tag/death-of-the-cookie) and how online publishers will be affected.

As a publisher, you're probably familiar with the term "cookie" and know that cookies play a role in digital advertising. But while we're quick to toss around the term, how many of us could give a clear description of cookies — what they are and how the
----------------------------------- randint Sat Oct 29, 2011 3:55 pm
Remainder Theorem
-----------------------------------
The remainder theorem: when a polynomial P(x) is divided by a linear divisor d(x), substituting ("plugging") the root of d(x) into P(x) gives the remainder of the division.
----------------------------------- Zren Sat Oct 29, 2011 6:28 pm
RE:Remainder Theorem
-----------------------------------
You might want to consider showing which coefficient your input is for. Eg: 'Enter coefficient n (nx^3): '
~
Also, when a user's input is incorrect, you should not be continuing.

try {
    degree = Integer.parseInt (br.readLine()) + 1;
} catch (NumberFormatException n) {
    System.out.println ("You have not entered a valid integer!");
}

You should either loop input until a valid input has been given, or you should be exiting your logic process when it happens.

[code]
C:\Users\Admin\Desktop>java Remainder_Theorem
Enter the highest degree term: 0
Enter coefficient: a
You have not entered a valid number!
Enter the slope: a
You have not entered a valid number!
0.0x is a factor of this polynomial.
[/code]

I'll also hint that since you're getting a double as input more than once, you should consider writing a function getInputDouble() that'll do the looping/error catching inside of that, and would return a valid number.
----------------------------------- randint Sun Oct 30, 2011 1:00 pm
RE:Remainder Theorem
-----------------------------------
Yes, this is correct; in the newer version, I will add System.exit (0); in the catch blocks.
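The helper Zren suggests might look something like this in Java (a sketch of the idea, not code from the thread):

[code]
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

public class InputUtil {
    // Keep prompting until the user types a valid double, then return it.
    public static double getInputDouble(String prompt) throws IOException {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        while (true) {
            System.out.print(prompt);
            try {
                return Double.parseDouble(br.readLine());
            } catch (NumberFormatException e) {
                System.out.println("You have not entered a valid number!");
            }
        }
    }
}
[/code]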
When building a backend for the TX Text Control Document Editor and Document Viewer, the TX Text Control NuGet packages implement the necessary endpoints and handlers for the communication between the client-side libraries and the backend.
It is recommended that these endpoints be secured using a middleware that checks for an access token. This demo implementation does not create and store access tokens, but illustrates how incoming requests can be checked for an access token. Typically, your actual authorization layer, such as OAuth, creates the access tokens.
The following diagram illustrates the request flow with an integrated custom security middleware.
Security Middleware in ASP.NET Core
The client-side part of the Document Editor or Viewer requests a resource from the backend by sending an access token with the request. The custom security middleware captures this request as part of the request pipeline.
The access token is validated; if it is valid, the request is forwarded to the appropriate TX Text Control middleware, and if not, an UnauthorizedAccessException is thrown.
Custom Security Middleware
The middleware implementation is shown in the following code.
namespace TXTextControl
{
public class TXSecurityMiddleware
{
private RequestDelegate m_next;
// stored access token usually retrieved from any storage
// implemented thought OAuth or any other identity protocol
private const string access_token = "821e2f35-86e3-4917-a963-b0c4228d1315";
public TXSecurityMiddleware(RequestDelegate next)
{
m_next = next;
}
public async Task Invoke(HttpContext context)
{
// Check if the request is a TX Text Control request
if (context.WebSockets.IsWebSocketRequest &&
context.WebSockets.WebSocketRequestedProtocols.Contains("TXTextControl.Web") ||
(context.Request.Query.ContainsKey("access_token") &&
context.GetEndpoint()?.DisplayName?.Contains("TXTextControl.Web.MVC.DocumentViewer") == true))
{
// Retrieve access token from the query string
var accessToken = context.Request.Query["access_token"];
// Showcase only: Easy comparison of tokens
if (accessToken != access_token)
{
throw new UnauthorizedAccessException();
}
else
{
await m_next.Invoke(context);
}
}
else if (m_next != null)
{
await m_next.Invoke(context);
}
}
}
}
Access Token
The access token in this example is hard-coded and would normally be generated and validated by the authorization strategy you have in place.
Registering the Middleware
The middleware is registered in the Program.cs request pipeline. The following entries must be added after the app.UseRouting() entry, assuming you have created a backend based on this tutorial.
app.UseWebSockets();
// Add the TX Security Middleware to the request pipeline
app.UseMiddleware<TXTextControl.TXSecurityMiddleware>();
// TX Text Control specific middleware
app.UseTXWebSocketMiddleware();
app.UseTXDocumentViewer();
The order in which requests get processed is very important. You need to add the custom security middleware first, followed by the TX Text Control middleware entries.
Passing the Access Tokens
When the client-side part of the Document Editor or Viewer sends a request to the backend, the access token must be passed with the request. The following code shows how to pass the access token within either the WebSocketURL or BasePath properties.
Document Editor
@using TXTextControl.Web.MVC
@{
// use an access token (for example returned by OAuth)
var sAccessToken = "821e2f35-86e3-4917-a963-b0c4228d1315";
// build WebSocketURL including access token in query string
var sProtocol = (Context.Request.IsHttps) ? "wss://" : "ws://";
var sWebSocketURL = sProtocol + Context.Request.Host
+ "/TXWebSocket?access_token=" + sAccessToken;
}
@Html.TXTextControl().TextControl(settings =>
{
settings.WebSocketURL = sWebSocketURL; // pass built WebSocketURL
}).Render()
Document Viewer
@using TXTextControl.Web.MVC.DocumentViewer
@{
// use an access token (for example returned by OAuth)
var sAccessToken = "821e2f35-86e3-4917-a963-b0c4228d1315";
// build BasePath including access token in query string
var sProtocol = (Context.Request.IsHttps) ? "https://" : "http://";
var sBasePathURL = sProtocol + Context.Request.Host
+ "?access_token=" + sAccessToken;
}
<div style="width: 800px; height: 600px;">
@Html.TXTextControl().DocumentViewer(settings =>
{
settings.BasePath = sBasePathURL; // pass the base path
settings.Dock = DocumentViewerSettings.DockStyle.Fill;
}).Render()
</div>
Conclusion
Securing document editing and viewing endpoints within web applications is critical to preventing unauthorized access. This article provides a comprehensive guide on integrating security middleware into ASP.NET Core to fortify these endpoints effectively.
Django imgix
============
A simple Django application for creating [Imgix](https://www.imgix.com/ "Imgix") formatted image links in your templates
Installation
------------
Dependencies:
This app requires Django > 1.4 and imgix > 0.1
1. Run ``` pip install django-imgix ```
2. Add ``` 'django_imgix' ``` to your ``` INSTALLED_APPS ```:
```
INSTALLED_APPS = (
...
'django_imgix',
)
```
----------
Configuration
-------------
There are a few settings you can use to configure how django-imgix works:
**IMGIX_DOMAINS** (*required*)
Give the domain name, or list of domain names, that you have registered with Imgix:
```
IMGIX_DOMAINS = 'my-domain.imgix.net'
...
or
...
IMGIX_DOMAINS = [
'my-domain-1.imgix.net',
'my-domain-2.imgix.net',
'my-domain-3.imgix.net',
]
```
**IMGIX_HTTPS**
Boolean value, defaults to `False` if not specified. If set to `True` it enables HTTPS support.
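For example, to generate HTTPS image URLs, add this to your settings:
```
IMGIX_HTTPS = True
```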
**IMGIX_SIGN_KEY**
If you want to produce signed URLs you need to enable secure URLs in the 'Source' tab in your Imgix.com account. This will generate a secret key that you need to specify here, e.g.
```
IMGIX_SIGN_KEY = 'jUIrLPuMEm2aCRj'
```
This will make a hash from the image url and all parameters that you have supplied, which will be appended as a url parameter `s=hash` to the image, e.g.
`https://my-domain.imgix.net/media/images/dsc_0001.jpg?fm=jpg&h=720&w=1280&s=976ae7332b279147ac0812c1770db07f`
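The signing itself is handled for you by the underlying imgix library, but conceptually the `s` parameter is an MD5 digest of the sign key, the image path, and the query string. A rough Python sketch of the idea (an approximation for illustration, not the library's actual code):
```
import hashlib

def sign_imgix_path(sign_key, path, query=""):
    # e.g. sign_imgix_path('jUIrLPuMEm2aCRj',
    #                      '/media/images/dsc_0001.jpg',
    #                      'fm=jpg&h=720&w=1280')
    base = sign_key + path
    if query:
        base += "?" + query
    return hashlib.md5(base.encode("utf-8")).hexdigest()
```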
**IMGIX_DETECT_FORMAT**
Boolean value, defaults to `False` if not specified. If set to `True` django-imgix will automatically detect popular image extensions and apply the `fm=image_extension` attribute to the image url, where `image_extension` is one of the formats listed [here](https://www.imgix.com/docs/reference/format#param-fm "Imgix fm parameter")
Example:
```
{% load imgix_tags %}
{% get_imgix '/media/images/dsc_0001.jpg' w=1280 h=720 %}
```
will produce
`https://my-domain.imgix.net/media/images/dsc_0001.jpg?fm=jpg&h=720&w=1280`
Currently supported image formats for IMGIX_DETECT_FORMAT are jpg, jpeg, png, gif, jp2, jxr and webp.
**IMGIX_ALIASES**
Read about aliases in the **Usage** section below.
----------
Usage
-----
Django-imgix's functionality comes in the form of a template tag, `get_imgix`, which takes an image URL as its first argument, followed by any number of optional arguments:
```
{% load imgix_tags %}
<img src="{% get_imgix 'image_url' key=value ... %}"/>
```
Your `'image_url'` should be a relative URL, as it will be appended to a domain specified in `IMGIX_DOMAINS`, to form an absolute URL.
You can add as many `key=value` pairs as you want. Each `key=value` pair results in a url parameter
that Imgix can recognise and use to generate your thumbnail.
For a full list of supported parameters, see [here](https://www.imgix.com/docs/reference/ "Imgix API reference")
There is a special argument, `wh=WIDTHxHEIGHT`, which exists specifically to ease the transition from other image-processing libraries such as **easy_thumbnails**.
For example,
`{% get_imgix '/media/images/dsc_0001.jpg' wh='1280x720' %}`
is the same as saying
`{% get_imgix '/media/images/dsc_0001.jpg' w=1280 h=720 %}`
which resolves to
`http://my-domain.imgix.net/media/images/dsc_0001.jpg?h=720&w=1280`
`wh` will take precedence over `w` and `h` arguments, unless you use a 0 as one of the values in `wh`, e.g.
`{% get_imgix '/media/images/dsc_0001.jpg' wh='1280x0' w='777' h='555' %}`
will result in
`http://my-domain.imgix.net/media/images/dsc_0001.jpg?h=555&w=1280`
#### **Aliases**
If you don't want to list all your `key=value` parameters inline all the time, you can group them into aliases.
To do that, first specify the aliases in your settings file:
```
IMGIX_ALIASES = {
'alias_one': {'w': 200, 'h': 300, 'lossless': 1, 'auto': 'format'},
'alias_two': {'w': 450, 'h': 160, 'fm':'jpg', 'q': 70 },
}
```
Then, in your template, either simply provide the alias name as the first unnamed argument, or use `alias='alias_name'`:
```
{% load imgix_tags %}
<img src="{% get_imgix 'image_url' 'alias_one' %}"/>
... or ...
<img src="{% get_imgix 'image_url' alias='alias_one' %}"/>
```
Providing an alias means that any other arguments will be ignored.
Android OpenGL ES Part 2 — Shaders
Explore VertexShader and FragmentShader with OpenGL, plus insights into optional shaders.
May 27 2021 · 5 min read
Background
In the previous article we learned how to draw our first triangle: the minimal code needed to draw a shape using a vertex buffer and simple shaders. We started from GLSurfaceView and went through the basic steps required to draw a mesh on screen. I recommend having a look at the previous story.
Now it's time to learn shaders in depth. In this article we are going to look at the VertexShader and FragmentShader in detail. OpenGL also allows us to use several other optional shaders.
Before we start, we need to understand the OpenGL graphics pipeline. It consists of the following steps:
[Figure: the OpenGL graphics pipeline]
1. We pass our vertex points in for processing.
2. The vertex processor transforms the vertices and assembles the 3D object.
3. Rasterization breaks each element down into pixel-sized pieces called fragments.
4. Each fragment passes through the fragment shader, which fills it with a color.
5. Finally, the merged output is ready to display and is sent to the screen as pixels.
Pretty simple!!
Let's Get Started!
The complete shader program contains a small sub-program for every stage. Each mini-program (shader) is compiled, and the whole set is linked together to form the executable shader program, which OpenGL simply calls a program.
Each shader resembles a small C program and can be stored in a C string or in a plain text file.
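For the plain-text-file case, here is a minimal sketch of loading shader source from the app's assets directory; the asset path shown is hypothetical and not part of the original sample.
import android.content.Context

// Reads a shader source file (e.g. "shaders/triangle.vert") from assets.
fun loadShaderSource(context: Context, assetPath: String): String =
    context.assets.open(assetPath).bufferedReader().use { it.readText() }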
Shader Structure
in <type> <in variable name>;
in <type> <in variable name>;
out <type> <out variable name>;
uniform <type> <uniform name>;
void main() {
// Process input and/or do some graphics stuff
...
// Output processed stuff to output variable
<out variable here> = stuff here;
}
Vertex Shader
The vertex shader is responsible for transforming vertex positions into clip space. It can also be used to send data from the vertex buffer to fragment shaders. Vertex shaders perform operations on each vertex, and the results of these operations are used in the fragment shaders which do additional calculations per pixel.
private val vertexShaderCode =
"attribute vec4 vPosition;" +
"void main() {" +
" gl_Position = vPosition;" +
"}"
We can see the in keyword, used for input to the program from the previous stage. GLSL also has an out keyword for sending a variable to the next stage. The input to a vertex shader (the in variables) comes from blocks of memory called vertex buffers. We usually copy our vertex positions into vertex buffers before running our main loop.
Fragment Shader
Once all of the vertex shaders have computed the position of every vertex, then the fragment shader runs once for every fragment between vertices. The fragment shader is responsible for setting the color of each fragment.
private val fragmentShaderCode =
"precision mediump float;" +
"uniform vec4 vColor;" +
"void main() {" +
" gl_FragColor = vColor;" +
"}"
The uniform keyword indicates that we are sending a variable into the shader program from the CPU. The colors are RGBA: red, green, blue, alpha. Each component is a float between 0.0 and 1.0.
Now we are going to create a nice multicolor triangle. We will only modify the Triangle class created in the previous article.
Defining Triangle coordinates
// Define the points of the triangle.
val triangleVertices = floatArrayOf(
0.0f, 0.5f, 0.0f, // TOP
0.5f, -0.5f, 0.0f, // BOTTOM-RIGHT
-0.5f, -0.5f, 0.0f // BOTTOM-LEFT
)
Defining vertex and fragment shader
private val vertexShader = """
attribute vec4 aPosition;
uniform vec4 aColor;
varying vec4 vColor;
void main()
{
vColor = aColor;
gl_Position = aPosition;
}"""
First, we have one attribute for the position; its value comes from the vertex buffer. The second value, a uniform, is our vertex color. We then declare a varying vec4, which passes the color value from the vertex shader to the fragment shader. (Note that in GLSL ES 1.00, which GLES20 uses, this link is declared with the varying keyword rather than out/in.)
private val fragmentShader = """
precision mediump float;
varying vec4 vColor;
void main()
{
gl_FragColor = vColor;
}"""
The fragment shader, on the other hand, holds the color value for each fragment; it receives that value from the vertex shader.
Define vertex buffer
private var vertexBuffer: FloatBuffer =
ByteBuffer.allocateDirect(triangleVertices.size * mBytesPerFloat).run {
// use the device hardware's native byte order
order(ByteOrder.nativeOrder())
// create a floating point buffer from the ByteBuffer
asFloatBuffer().apply {
// add the coordinates to the FloatBuffer
put(triangleVertices)
// set the buffer to read the first coordinate
position(0)
}
}
Load shader in to shader handler/program.
private var mProgram: Int
/** How many bytes per float. */
private val mBytesPerFloat = 4
/** How many elements per vertex. */
private val mStrideBytes = 3 * mBytesPerFloat
/** This will be used to pass in model position information. */
private var mPositionHandle = 0
/** This will be used to pass in model color information. */
private var mColorHandle = 0
private fun loadShader(type: Int, shaderCode: String): Int {
// create a vertex shader type (GLES20.GL_VERTEX_SHADER)
// or a fragment shader type (GLES20.GL_FRAGMENT_SHADER)
return GLES20.glCreateShader(type).also { shader ->
// add the source code to the shader and compile it
GLES20.glShaderSource(shader, shaderCode)
GLES20.glCompileShader(shader)
}
}
init {
val vertexShader: Int = loadShader(GLES20.GL_VERTEX_SHADER, vertexShader)
val fragmentShader: Int =loadShader(GLES20.GL_FRAGMENT_SHADER, fragmentShader)
mProgram = GLES20.glCreateProgram().also {
GLES20.glAttachShader(it, vertexShader)
GLES20.glAttachShader(it, fragmentShader)
// creates OpenGL ES program executables
GLES20.glLinkProgram(it)
}
// Set program handles. These will later be used to pass in values to the program.
mPositionHandle = GLES20.glGetAttribLocation(mProgram, "aPosition")
mColorHandle = GLES20.glGetUniformLocation(mProgram, "aColor")
// Add program to OpenGL ES environment
GLES20.glUseProgram(mProgram)
}
First we create our shaders, then we create our program by attaching the shaders to it, and finally we link the program to make it executable.
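The code above assumes compilation and linking always succeed. As an optional addition (a sketch, not part of the original sample), you can query the compile and link status and log the info log on failure, which makes shader bugs much easier to track down:
// requires: import android.util.Log

// Optional: verify that a shader compiled successfully.
private fun checkCompileStatus(shader: Int) {
    val status = IntArray(1)
    GLES20.glGetShaderiv(shader, GLES20.GL_COMPILE_STATUS, status, 0)
    if (status[0] == 0) {
        Log.e("Triangle", "Shader compile failed: " + GLES20.glGetShaderInfoLog(shader))
        GLES20.glDeleteShader(shader)
    }
}

// Optional: verify that the program linked successfully (call after glLinkProgram).
private fun checkLinkStatus(program: Int) {
    val status = IntArray(1)
    GLES20.glGetProgramiv(program, GLES20.GL_LINK_STATUS, status, 0)
    if (status[0] == 0) {
        Log.e("Triangle", "Program link failed: " + GLES20.glGetProgramInfoLog(program))
        GLES20.glDeleteProgram(program)
    }
}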
Now it's time to do the actual drawing. Here's our draw method, which we are going to call from the Renderer's onDrawFrame().
fun draw() {
GLES20.glClear(GLES20.GL_DEPTH_BUFFER_BIT or GLES20.GL_COLOR_BUFFER_BIT)
GLES20.glVertexAttribPointer(
mPositionHandle, 3, GLES20.GL_FLOAT, false,
mStrideBytes, vertexBuffer
)
GLES20.glEnableVertexAttribArray(mPositionHandle)
// Set color for drawing the triangle
val time = System.currentTimeMillis().toDouble()
val blueColor = ((sin(time) / 2f) + 0.5f).toFloat()
val redColor = ((cos(time) / 2f) + 0.5f).toFloat()
val color = floatArrayOf(redColor, 0.0f, blueColor, 1.0f)
GLES20.glUniform4fv(mColorHandle, 1, color, 0)
// Draw the triangle
GLES20.glDrawArrays(GLES20.GL_TRIANGLES, 0, 3)
}
This is where things are actually drawn on screen. We first clear the screen.
We then tell OpenGL how to interpret the vertex data using glVertexAttribPointer; OpenGL feeds the specified data into the vertex shader through our position attribute.
Then we calculate our blue and red color values from the current system time. Using the color handle, we assign the calculated color to the color uniform with glUniform4fv().
Finally, we tell OpenGL which type of primitive to draw, specifying the starting vertex and the number of vertices, in glDrawArrays().
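For completeness, here is a minimal sketch of the renderer that drives this draw call every frame; the class and field names are assumptions, since the renderer itself was set up in the previous article.
import android.opengl.GLES20
import android.opengl.GLSurfaceView
import javax.microedition.khronos.egl.EGLConfig
import javax.microedition.khronos.opengles.GL10

class TriangleRenderer : GLSurfaceView.Renderer {
    private lateinit var triangle: Triangle

    override fun onSurfaceCreated(gl: GL10?, config: EGLConfig?) {
        GLES20.glClearColor(0.0f, 0.0f, 0.0f, 1.0f)
        triangle = Triangle() // compiles and links the shader program
    }

    override fun onSurfaceChanged(gl: GL10?, width: Int, height: Int) {
        GLES20.glViewport(0, 0, width, height)
    }

    override fun onDrawFrame(gl: GL10?) {
        triangle.draw() // issues the draw call every frame
    }
}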
Hurray! After you run your application, you will have a nice animated triangle on your emulator or phone's screen. It looks something like this. You can find the complete source code of the sample app HERE.
Multicolor-animated Triangle
Radhika Saliya
Android developer | Sharing knowledge of Jetpack Compose & Android development
GLSL and Environment Mapping
I attempted to write a GLSL shader for environment mapping. It works, though with some graphical artifacts. When I turn around, I see a seam in the environment map (the texture is tileable), and when I look up, I see a strange artifact that is hard to describe: it looks like what you get when you apply the "circular" UV mapping type to a mesh in 3D Studio Max. Anyway, it looks weird. Here is the fragment shader (where all the magic is done). If anyone sees anything out of the ordinary, let me know.
const vec3 Xunit = vec3(1.0, 0.0, 0.0);
const vec3 Yunit = vec3(0.0, 1.0, 0.0);
uniform sampler2D TexMap1;
uniform sampler2D TexMap2;
uniform int UseFog;
uniform float MixRatio;
varying vec3 normal;
varying vec4 ecPos;
void main()
{
// Calculate global ambience.
vec4 ambientGlobal = gl_LightModel.ambient * gl_FrontMaterial.ambient;
// Calculate normal.
vec3 n = normalize(normal);
// Initiate color to global ambience.
vec4 color = ambientGlobal;
// Compute reflection vector.
vec3 reflectDir = reflect(vec3(ecPos), normal);
// Compute altitude and azimuth angles.
vec2 index;
index.y = dot(normalize(reflectDir), Yunit);
reflectDir.y = 0.0;
index.x = dot(normalize(reflectDir), Xunit);
// Translate index values into proper range.
if (reflectDir.z > 0.0)
{
index = (index + 1.0) * 0.5;
}
else
{
index.t = (index.t + 1.0) * 0.5;
index.s = (-index.s) * 0.5 + 1.0;
}
// For each of the 4 lights affecting this object, calculate their color addition.
for (int i = 1; i < 5; i++)
{
if (gl_LightSource[i].linearAttenuation > 0.0)
{
// Calculate light's direction.
vec3 vec = vec3(gl_LightSource[i].position - ecPos);
vec3 lightDir = normalize(vec);
// Calculate light distance.
float dist = length(vec);
// Calculate half vector.
vec3 halfVector = normalize(gl_LightSource[i].halfVector.xyz);
// Calculate diffuse component.
vec4 diffuse = gl_FrontMaterial.diffuse * gl_LightSource[i].diffuse;
// Calculate ambient component.
vec4 ambient = gl_FrontMaterial.ambient * gl_LightSource[i].ambient;
// Calculate dot product between normal and light.
float NdotL = max(dot(n, normalize(lightDir)), 0.0);
// Calculate attenuation.
float att = 1.0 / (gl_LightSource[i].constantAttenuation +
gl_LightSource[i].linearAttenuation * dist +
gl_LightSource[i].quadraticAttenuation * dist * dist);
// Add attenuation and diffuse and ambient components.
color += att * (diffuse * NdotL + ambient);
// Calculate dot product between normal and half vector.
vec3 halfV = normalize(halfVector);
float NdotHV = max(dot(n, halfV), 0.0);
// Add specular component and calculate highlight.
color += att * gl_FrontMaterial.specular * gl_LightSource[i].specular *
pow(NdotHV, gl_FrontMaterial.shininess);
}
}
// Get fragment from envmap.
vec3 envColor = vec3(texture2D(TexMap2, index));
// Multiply the average color with texture color.
color = color * texture2D(TexMap1, gl_TexCoord[0].st);
// Mix lighting, texture map and environment map.
vec3 finalColor = mix(envColor, vec3(color), MixRatio);
// Calculate exp2 fog.
if (UseFog == 1)
{
const float LOG2E = 1.442695;
float fog = exp2(-gl_Fog.density * gl_Fog.density *
gl_FogFragCoord * gl_FogFragCoord * LOG2E);
fog = clamp(fog, 0.0, 1.0);
finalColor = mix(vec3(gl_Fog.color), color.rgb, fog);
}
// Set fragment color.
gl_FragColor = vec4(finalColor, color.a);
}