code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
/*
 * jQuery placeholder polyfill (minified vendor file; bundled with Foundation).
 * Feature-detects native support via `"placeholder" in document.createElement("input")`
 * (and the same for <textarea>); when absent, it emulates the attribute:
 * shows/hides placeholder text on focus/blur, swaps in a cloned text input for
 * password fields, installs jQuery `valHooks` so .val() never returns the
 * placeholder text, and clears placeholder values on form submit and
 * beforeunload so they are not submitted as real data.
 * NOTE(review): appears to be the mathiasbynens/jquery-placeholder plugin —
 * confirm exact upstream version before making any change; prefer updating
 * the unminified upstream source rather than editing this file.
 */
!function(a,b,c){function d(a){var b={},d=/^jQuery\d+$/;return c.each(a.attributes,function(a,c){c.specified&&!d.test(c.name)&&(b[c.name]=c.value)}),b}function e(a,d){var e=this,f=c(e);if(e.value==f.attr("placeholder")&&f.hasClass("placeholder"))if(f.data("placeholder-password")){if(f=f.hide().next().show().attr("id",f.removeAttr("id").data("placeholder-id")),a===!0)return f[0].value=d;f.focus()}else e.value="",f.removeClass("placeholder"),e==b.activeElement&&e.select()}function f(){var a,b=this,f=c(b),g=this.id;if(""==b.value){if("password"==b.type){if(!f.data("placeholder-textinput")){try{a=f.clone().attr({type:"text"})}catch(h){a=c("<input>").attr(c.extend(d(this),{type:"text"}))}a.removeAttr("name").data({"placeholder-password":!0,"placeholder-id":g}).bind("focus.placeholder",e),f.data({"placeholder-textinput":a,"placeholder-id":g}).before(a)}f=f.removeAttr("id").hide().prev().attr("id",g).show()}f.addClass("placeholder"),f[0].value=f.attr("placeholder")}else f.removeClass("placeholder")}var g,h,i="placeholder"in b.createElement("input"),j="placeholder"in b.createElement("textarea"),k=c.fn,l=c.valHooks;i&&j?(h=k.placeholder=function(){return this},h.input=h.textarea=!0):(h=k.placeholder=function(){var a=this;return a.filter((i?"textarea":":input")+"[placeholder]").not(".placeholder").bind({"focus.placeholder":e,"blur.placeholder":f}).data("placeholder-enabled",!0).trigger("blur.placeholder"),a},h.input=i,h.textarea=j,g={get:function(a){var b=c(a);return b.data("placeholder-enabled")&&b.hasClass("placeholder")?"":a.value},set:function(a,d){var g=c(a);return g.data("placeholder-enabled")?(""==d?(a.value=d,a!=b.activeElement&&f.call(a)):g.hasClass("placeholder")?e.call(a,!0,d)||(a.value=d):a.value=d,g):a.value=d}},i||(l.input=g),j||(l.textarea=g),c(function(){c(b).delegate("form","submit.placeholder",function(){var 
a=c(".placeholder",this).each(e);setTimeout(function(){a.each(f)},10)})}),c(a).bind("beforeunload.placeholder",function(){c(".placeholder").each(function(){this.value=""})}))}(this,document,jQuery);
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/data/static/foundation/js/vendor/placeholder.js
|
placeholder.js
|
/*
 * FastClick (minified vendor file; bundled with Foundation).
 * Attaches touch listeners to a layer element and synthesizes immediate
 * mouse/click events from touch sequences (see `sendClick`, which builds a
 * MouseEvent from `changedTouches[0]`), suppressing the browser's later
 * native click via `onMouse`/`onClick` capture handlers. Contains extensive
 * per-platform special cases keyed off `navigator.userAgent` (Android,
 * iOS, iOS4, iOS>=6 "bad target" workaround), plus `notNeeded()` which
 * bails out entirely when no touch support is present or when a
 * `user-scalable=no` viewport on Chrome/Android already removes the delay.
 * Exposed as an AMD module, CommonJS export, or `window.FastClick`.
 * NOTE(review): this is the ftlabs/fastclick library — confirm exact
 * upstream version before editing; prefer patching upstream source.
 */
function FastClick(a){"use strict";var b,c=this;if(this.trackingClick=!1,this.trackingClickStart=0,this.targetElement=null,this.touchStartX=0,this.touchStartY=0,this.lastTouchIdentifier=0,this.touchBoundary=10,this.layer=a,!a||!a.nodeType)throw new TypeError("Layer must be a document node");this.onClick=function(){return FastClick.prototype.onClick.apply(c,arguments)},this.onMouse=function(){return FastClick.prototype.onMouse.apply(c,arguments)},this.onTouchStart=function(){return FastClick.prototype.onTouchStart.apply(c,arguments)},this.onTouchMove=function(){return FastClick.prototype.onTouchMove.apply(c,arguments)},this.onTouchEnd=function(){return FastClick.prototype.onTouchEnd.apply(c,arguments)},this.onTouchCancel=function(){return FastClick.prototype.onTouchCancel.apply(c,arguments)},FastClick.notNeeded(a)||(this.deviceIsAndroid&&(a.addEventListener("mouseover",this.onMouse,!0),a.addEventListener("mousedown",this.onMouse,!0),a.addEventListener("mouseup",this.onMouse,!0)),a.addEventListener("click",this.onClick,!0),a.addEventListener("touchstart",this.onTouchStart,!1),a.addEventListener("touchmove",this.onTouchMove,!1),a.addEventListener("touchend",this.onTouchEnd,!1),a.addEventListener("touchcancel",this.onTouchCancel,!1),Event.prototype.stopImmediatePropagation||(a.removeEventListener=function(b,c,d){var e=Node.prototype.removeEventListener;"click"===b?e.call(a,b,c.hijacked||c,d):e.call(a,b,c,d)},a.addEventListener=function(b,c,d){var e=Node.prototype.addEventListener;"click"===b?e.call(a,b,c.hijacked||(c.hijacked=function(a){a.propagationStopped||c(a)}),d):e.call(a,b,c,d)}),"function"==typeof a.onclick&&(b=a.onclick,a.addEventListener("click",function(a){b(a)},!1),a.onclick=null))}FastClick.prototype.deviceIsAndroid=navigator.userAgent.indexOf("Android")>0,FastClick.prototype.deviceIsIOS=/iP(ad|hone|od)/.test(navigator.userAgent),FastClick.prototype.deviceIsIOS4=FastClick.prototype.deviceIsIOS&&/OS 
4_\d(_\d)?/.test(navigator.userAgent),FastClick.prototype.deviceIsIOSWithBadTarget=FastClick.prototype.deviceIsIOS&&/OS ([6-9]|\d{2})_\d/.test(navigator.userAgent),FastClick.prototype.needsClick=function(a){"use strict";switch(a.nodeName.toLowerCase()){case"button":case"select":case"textarea":if(a.disabled)return!0;break;case"input":if(this.deviceIsIOS&&"file"===a.type||a.disabled)return!0;break;case"label":case"video":return!0}return/\bneedsclick\b/.test(a.className)},FastClick.prototype.needsFocus=function(a){"use strict";switch(a.nodeName.toLowerCase()){case"textarea":return!0;case"select":return!this.deviceIsAndroid;case"input":switch(a.type){case"button":case"checkbox":case"file":case"image":case"radio":case"submit":return!1}return!a.disabled&&!a.readOnly;default:return/\bneedsfocus\b/.test(a.className)}},FastClick.prototype.sendClick=function(a,b){"use strict";var c,d;document.activeElement&&document.activeElement!==a&&document.activeElement.blur(),d=b.changedTouches[0],c=document.createEvent("MouseEvents"),c.initMouseEvent(this.determineEventType(a),!0,!0,window,1,d.screenX,d.screenY,d.clientX,d.clientY,!1,!1,!1,!1,0,null),c.forwardedTouchEvent=!0,a.dispatchEvent(c)},FastClick.prototype.determineEventType=function(a){"use strict";return this.deviceIsAndroid&&"select"===a.tagName.toLowerCase()?"mousedown":"click"},FastClick.prototype.focus=function(a){"use strict";var b;this.deviceIsIOS&&a.setSelectionRange&&0!==a.type.indexOf("date")&&"time"!==a.type?(b=a.value.length,a.setSelectionRange(b,b)):a.focus()},FastClick.prototype.updateScrollParent=function(a){"use strict";var b,c;if(b=a.fastClickScrollParent,!b||!b.contains(a)){c=a;do{if(c.scrollHeight>c.offsetHeight){b=c,a.fastClickScrollParent=c;break}c=c.parentElement}while(c)}b&&(b.fastClickLastScrollTop=b.scrollTop)},FastClick.prototype.getTargetElementFromEventTarget=function(a){"use strict";return a.nodeType===Node.TEXT_NODE?a.parentNode:a},FastClick.prototype.onTouchStart=function(a){"use strict";var 
b,c,d;if(a.targetTouches.length>1)return!0;if(b=this.getTargetElementFromEventTarget(a.target),c=a.targetTouches[0],this.deviceIsIOS){if(d=window.getSelection(),d.rangeCount&&!d.isCollapsed)return!0;if(!this.deviceIsIOS4){if(c.identifier===this.lastTouchIdentifier)return a.preventDefault(),!1;this.lastTouchIdentifier=c.identifier,this.updateScrollParent(b)}}return this.trackingClick=!0,this.trackingClickStart=a.timeStamp,this.targetElement=b,this.touchStartX=c.pageX,this.touchStartY=c.pageY,a.timeStamp-this.lastClickTime<200&&a.preventDefault(),!0},FastClick.prototype.touchHasMoved=function(a){"use strict";var b=a.changedTouches[0],c=this.touchBoundary;return Math.abs(b.pageX-this.touchStartX)>c||Math.abs(b.pageY-this.touchStartY)>c?!0:!1},FastClick.prototype.onTouchMove=function(a){"use strict";return this.trackingClick?((this.targetElement!==this.getTargetElementFromEventTarget(a.target)||this.touchHasMoved(a))&&(this.trackingClick=!1,this.targetElement=null),!0):!0},FastClick.prototype.findControl=function(a){"use strict";return void 0!==a.control?a.control:a.htmlFor?document.getElementById(a.htmlFor):a.querySelector("button, input:not([type=hidden]), keygen, meter, output, progress, select, textarea")},FastClick.prototype.onTouchEnd=function(a){"use strict";var b,c,d,e,f,g=this.targetElement;if(!this.trackingClick)return!0;if(a.timeStamp-this.lastClickTime<200)return this.cancelNextClick=!0,!0;if(this.cancelNextClick=!1,this.lastClickTime=a.timeStamp,c=this.trackingClickStart,this.trackingClick=!1,this.trackingClickStart=0,this.deviceIsIOSWithBadTarget&&(f=a.changedTouches[0],g=document.elementFromPoint(f.pageX-window.pageXOffset,f.pageY-window.pageYOffset)||g,g.fastClickScrollParent=this.targetElement.fastClickScrollParent),d=g.tagName.toLowerCase(),"label"===d){if(b=this.findControl(g)){if(this.focus(g),this.deviceIsAndroid)return!1;g=b}}else if(this.needsFocus(g))return 
a.timeStamp-c>100||this.deviceIsIOS&&window.top!==window&&"input"===d?(this.targetElement=null,!1):(this.focus(g),this.sendClick(g,a),this.deviceIsIOS4&&"select"===d||(this.targetElement=null,a.preventDefault()),!1);return this.deviceIsIOS&&!this.deviceIsIOS4&&(e=g.fastClickScrollParent,e&&e.fastClickLastScrollTop!==e.scrollTop)?!0:(this.needsClick(g)||(a.preventDefault(),this.sendClick(g,a)),!1)},FastClick.prototype.onTouchCancel=function(){"use strict";this.trackingClick=!1,this.targetElement=null},FastClick.prototype.onMouse=function(a){"use strict";return this.targetElement?a.forwardedTouchEvent?!0:a.cancelable&&(!this.needsClick(this.targetElement)||this.cancelNextClick)?(a.stopImmediatePropagation?a.stopImmediatePropagation():a.propagationStopped=!0,a.stopPropagation(),a.preventDefault(),!1):!0:!0},FastClick.prototype.onClick=function(a){"use strict";var b;return this.trackingClick?(this.targetElement=null,this.trackingClick=!1,!0):"submit"===a.target.type&&0===a.detail?!0:(b=this.onMouse(a),b||(this.targetElement=null),b)},FastClick.prototype.destroy=function(){"use strict";var a=this.layer;this.deviceIsAndroid&&(a.removeEventListener("mouseover",this.onMouse,!0),a.removeEventListener("mousedown",this.onMouse,!0),a.removeEventListener("mouseup",this.onMouse,!0)),a.removeEventListener("click",this.onClick,!0),a.removeEventListener("touchstart",this.onTouchStart,!1),a.removeEventListener("touchmove",this.onTouchMove,!1),a.removeEventListener("touchend",this.onTouchEnd,!1),a.removeEventListener("touchcancel",this.onTouchCancel,!1)},FastClick.notNeeded=function(a){"use strict";var b,c;if("undefined"==typeof 
window.ontouchstart)return!0;if(c=+(/Chrome\/([0-9]+)/.exec(navigator.userAgent)||[,0])[1]){if(!FastClick.prototype.deviceIsAndroid)return!0;if(b=document.querySelector("meta[name=viewport]")){if(-1!==b.content.indexOf("user-scalable=no"))return!0;if(c>31&&window.innerWidth<=window.screen.width)return!0}}return"none"===a.style.msTouchAction?!0:!1},FastClick.attach=function(a){"use strict";return new FastClick(a)},"undefined"!=typeof define&&define.amd?define(function(){"use strict";return FastClick}):"undefined"!=typeof module&&module.exports?(module.exports=FastClick.attach,module.exports.FastClick=FastClick):window.FastClick=FastClick;
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/data/static/foundation/js/vendor/fastclick.js
|
fastclick.js
|
/*
 * Modernizr 2.7.2 (minified vendor file; version string `n="2.7.2"` visible
 * in the code), with the html5shiv 3.7.0 embedded (inner IIFE, `l="3.7.0"`).
 * Runs a battery of feature-detection tests (the `C` object: flexbox, canvas,
 * webgl, touch, geolocation, localstorage, CSS transforms/transitions,
 * audio/video codecs, SVG, etc.), records the results on `window.Modernizr`,
 * and appends matching "feature" / "no-feature" class names to the <html>
 * element (it also replaces the "no-js" class with "js").  Provides the
 * public helpers `Modernizr.mq`, `hasEvent`, `testProp`, `testAllProps`,
 * `testStyles`, `prefixed`, and `addTest`.  The embedded html5shiv patches
 * `document.createElement`/`createDocumentFragment` so HTML5 sectioning
 * elements are styleable in old IE.
 * NOTE(review): generated/minified upstream build — do not hand-edit;
 * regenerate from modernizr.com or upstream source if changes are needed.
 */
window.Modernizr=function(a,b,c){function d(a){t.cssText=a}function e(a,b){return d(x.join(a+";")+(b||""))}function f(a,b){return typeof a===b}function g(a,b){return!!~(""+a).indexOf(b)}function h(a,b){for(var d in a){var e=a[d];if(!g(e,"-")&&t[e]!==c)return"pfx"==b?e:!0}return!1}function i(a,b,d){for(var e in a){var g=b[a[e]];if(g!==c)return d===!1?a[e]:f(g,"function")?g.bind(d||b):g}return!1}function j(a,b,c){var d=a.charAt(0).toUpperCase()+a.slice(1),e=(a+" "+z.join(d+" ")+d).split(" ");return f(b,"string")||f(b,"undefined")?h(e,b):(e=(a+" "+A.join(d+" ")+d).split(" "),i(e,b,c))}function k(){o.input=function(c){for(var d=0,e=c.length;e>d;d++)E[c[d]]=!!(c[d]in u);return E.list&&(E.list=!(!b.createElement("datalist")||!a.HTMLDataListElement)),E}("autocomplete autofocus list placeholder max min multiple pattern required step".split(" ")),o.inputtypes=function(a){for(var d,e,f,g=0,h=a.length;h>g;g++)u.setAttribute("type",e=a[g]),d="text"!==u.type,d&&(u.value=v,u.style.cssText="position:absolute;visibility:hidden;",/^range$/.test(e)&&u.style.WebkitAppearance!==c?(q.appendChild(u),f=b.defaultView,d=f.getComputedStyle&&"textfield"!==f.getComputedStyle(u,null).WebkitAppearance&&0!==u.offsetHeight,q.removeChild(u)):/^(search|tel)$/.test(e)||(d=/^(url|email)$/.test(e)?u.checkValidity&&u.checkValidity()===!1:u.value!=v)),D[a[g]]=!!d;return D}("search tel url email datetime date month week time datetime-local number range color".split(" "))}var l,m,n="2.7.2",o={},p=!0,q=b.documentElement,r="modernizr",s=b.createElement(r),t=s.style,u=b.createElement("input"),v=":)",w={}.toString,x=" -webkit- -moz- -o- -ms- ".split(" "),y="Webkit Moz O ms",z=y.split(" "),A=y.toLowerCase().split(" "),B={svg:"http://www.w3.org/2000/svg"},C={},D={},E={},F=[],G=F.slice,H=function(a,c,d,e){var f,g,h,i,j=b.createElement("div"),k=b.body,l=k||b.createElement("body");if(parseInt(d,10))for(;d--;)h=b.createElement("div"),h.id=e?e[d]:r+(d+1),j.appendChild(h);return f=["­",'<style 
id="s',r,'">',a,"</style>"].join(""),j.id=r,(k?j:l).innerHTML+=f,l.appendChild(j),k||(l.style.background="",l.style.overflow="hidden",i=q.style.overflow,q.style.overflow="hidden",q.appendChild(l)),g=c(j,a),k?j.parentNode.removeChild(j):(l.parentNode.removeChild(l),q.style.overflow=i),!!g},I=function(b){var c=a.matchMedia||a.msMatchMedia;if(c)return c(b).matches;var d;return H("@media "+b+" { #"+r+" { position: absolute; } }",function(b){d="absolute"==(a.getComputedStyle?getComputedStyle(b,null):b.currentStyle).position}),d},J=function(){function a(a,e){e=e||b.createElement(d[a]||"div"),a="on"+a;var g=a in e;return g||(e.setAttribute||(e=b.createElement("div")),e.setAttribute&&e.removeAttribute&&(e.setAttribute(a,""),g=f(e[a],"function"),f(e[a],"undefined")||(e[a]=c),e.removeAttribute(a))),e=null,g}var d={select:"input",change:"input",submit:"form",reset:"form",error:"img",load:"img",abort:"img"};return a}(),K={}.hasOwnProperty;m=f(K,"undefined")||f(K.call,"undefined")?function(a,b){return b in a&&f(a.constructor.prototype[b],"undefined")}:function(a,b){return K.call(a,b)},Function.prototype.bind||(Function.prototype.bind=function(a){var b=this;if("function"!=typeof b)throw new TypeError;var c=G.call(arguments,1),d=function(){if(this instanceof d){var e=function(){};e.prototype=b.prototype;var f=new e,g=b.apply(f,c.concat(G.call(arguments)));return Object(g)===g?g:f}return b.apply(a,c.concat(G.call(arguments)))};return d}),C.flexbox=function(){return j("flexWrap")},C.flexboxlegacy=function(){return j("boxDirection")},C.canvas=function(){var a=b.createElement("canvas");return!(!a.getContext||!a.getContext("2d"))},C.canvastext=function(){return!(!o.canvas||!f(b.createElement("canvas").getContext("2d").fillText,"function"))},C.webgl=function(){return!!a.WebGLRenderingContext},C.touch=function(){var c;return"ontouchstart"in a||a.DocumentTouch&&b instanceof DocumentTouch?c=!0:H(["@media 
(",x.join("touch-enabled),("),r,")","{#modernizr{top:9px;position:absolute}}"].join(""),function(a){c=9===a.offsetTop}),c},C.geolocation=function(){return"geolocation"in navigator},C.postmessage=function(){return!!a.postMessage},C.websqldatabase=function(){return!!a.openDatabase},C.indexedDB=function(){return!!j("indexedDB",a)},C.hashchange=function(){return J("hashchange",a)&&(b.documentMode===c||b.documentMode>7)},C.history=function(){return!(!a.history||!history.pushState)},C.draganddrop=function(){var a=b.createElement("div");return"draggable"in a||"ondragstart"in a&&"ondrop"in a},C.websockets=function(){return"WebSocket"in a||"MozWebSocket"in a},C.rgba=function(){return d("background-color:rgba(150,255,150,.5)"),g(t.backgroundColor,"rgba")},C.hsla=function(){return d("background-color:hsla(120,40%,100%,.5)"),g(t.backgroundColor,"rgba")||g(t.backgroundColor,"hsla")},C.multiplebgs=function(){return d("background:url(https://),url(https://),red url(https://)"),/(url\s*\(.*?){3}/.test(t.background)},C.backgroundsize=function(){return j("backgroundSize")},C.borderimage=function(){return j("borderImage")},C.borderradius=function(){return j("borderRadius")},C.boxshadow=function(){return j("boxShadow")},C.textshadow=function(){return""===b.createElement("div").style.textShadow},C.opacity=function(){return e("opacity:.55"),/^0.55$/.test(t.opacity)},C.cssanimations=function(){return j("animationName")},C.csscolumns=function(){return j("columnCount")},C.cssgradients=function(){var a="background-image:",b="gradient(linear,left top,right bottom,from(#9f9),to(white));",c="linear-gradient(left top,#9f9, white);";return d((a+"-webkit- ".split(" ").join(b+a)+x.join(c+a)).slice(0,-a.length)),g(t.backgroundImage,"gradient")},C.cssreflections=function(){return j("boxReflect")},C.csstransforms=function(){return!!j("transform")},C.csstransforms3d=function(){var a=!!j("perspective");return a&&"webkitPerspective"in q.style&&H("@media 
(transform-3d),(-webkit-transform-3d){#modernizr{left:9px;position:absolute;height:3px;}}",function(b){a=9===b.offsetLeft&&3===b.offsetHeight}),a},C.csstransitions=function(){return j("transition")},C.fontface=function(){var a;return H('@font-face {font-family:"font";src:url("https://")}',function(c,d){var e=b.getElementById("smodernizr"),f=e.sheet||e.styleSheet,g=f?f.cssRules&&f.cssRules[0]?f.cssRules[0].cssText:f.cssText||"":"";a=/src/i.test(g)&&0===g.indexOf(d.split(" ")[0])}),a},C.generatedcontent=function(){var a;return H(["#",r,"{font:0/0 a}#",r,':after{content:"',v,'";visibility:hidden;font:3px/1 a}'].join(""),function(b){a=b.offsetHeight>=3}),a},C.video=function(){var a=b.createElement("video"),c=!1;try{(c=!!a.canPlayType)&&(c=new Boolean(c),c.ogg=a.canPlayType('video/ogg; codecs="theora"').replace(/^no$/,""),c.h264=a.canPlayType('video/mp4; codecs="avc1.42E01E"').replace(/^no$/,""),c.webm=a.canPlayType('video/webm; codecs="vp8, vorbis"').replace(/^no$/,""))}catch(d){}return c},C.audio=function(){var a=b.createElement("audio"),c=!1;try{(c=!!a.canPlayType)&&(c=new Boolean(c),c.ogg=a.canPlayType('audio/ogg; codecs="vorbis"').replace(/^no$/,""),c.mp3=a.canPlayType("audio/mpeg;").replace(/^no$/,""),c.wav=a.canPlayType('audio/wav; codecs="1"').replace(/^no$/,""),c.m4a=(a.canPlayType("audio/x-m4a;")||a.canPlayType("audio/aac;")).replace(/^no$/,""))}catch(d){}return c},C.localstorage=function(){try{return localStorage.setItem(r,r),localStorage.removeItem(r),!0}catch(a){return!1}},C.sessionstorage=function(){try{return sessionStorage.setItem(r,r),sessionStorage.removeItem(r),!0}catch(a){return!1}},C.webworkers=function(){return!!a.Worker},C.applicationcache=function(){return!!a.applicationCache},C.svg=function(){return!!b.createElementNS&&!!b.createElementNS(B.svg,"svg").createSVGRect},C.inlinesvg=function(){var a=b.createElement("div");return 
a.innerHTML="<svg/>",(a.firstChild&&a.firstChild.namespaceURI)==B.svg},C.smil=function(){return!!b.createElementNS&&/SVGAnimate/.test(w.call(b.createElementNS(B.svg,"animate")))},C.svgclippaths=function(){return!!b.createElementNS&&/SVGClipPath/.test(w.call(b.createElementNS(B.svg,"clipPath")))};for(var L in C)m(C,L)&&(l=L.toLowerCase(),o[l]=C[L](),F.push((o[l]?"":"no-")+l));return o.input||k(),o.addTest=function(a,b){if("object"==typeof a)for(var d in a)m(a,d)&&o.addTest(d,a[d]);else{if(a=a.toLowerCase(),o[a]!==c)return o;b="function"==typeof b?b():b,"undefined"!=typeof p&&p&&(q.className+=" "+(b?"":"no-")+a),o[a]=b}return o},d(""),s=u=null,function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x<style>"+b+"</style>",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=s.elements;return"string"==typeof a?a.split(" "):a}function e(a){var b=r[a[p]];return b||(b={},q++,a[p]=q,r[q]=b),b}function f(a,c,d){if(c||(c=b),k)return c.createElement(a);d||(d=e(c));var f;return f=d.cache[a]?d.cache[a].cloneNode():o.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!f.canHaveChildren||n.test(a)||f.tagUrn?f:d.frag.appendChild(f)}function g(a,c){if(a||(a=b),k)return a.createDocumentFragment();c=c||e(a);for(var f=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)f.createElement(h[g]);return f}function h(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return s.shivMethods?f(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(s,b.frag)}function i(a){a||(a=b);var 
d=e(a);return!s.shivCSS||j||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),k||h(a,d),a}var j,k,l="3.7.0",m=a.html5||{},n=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,o=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,p="_html5shiv",q=0,r={};!function(){try{var a=b.createElement("a");a.innerHTML="<xyz></xyz>",j="hidden"in a,k=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){j=!0,k=!0}}();var s={elements:m.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output progress section summary template time video",version:l,shivCSS:m.shivCSS!==!1,supportsUnknownElements:k,shivMethods:m.shivMethods!==!1,type:"default",shivDocument:i,createElement:f,createDocumentFragment:g};a.html5=s,i(b)}(this,b),o._version=n,o._prefixes=x,o._domPrefixes=A,o._cssomPrefixes=z,o.mq=I,o.hasEvent=J,o.testProp=function(a){return h([a])},o.testAllProps=j,o.testStyles=H,o.prefixed=function(a,b,c){return b?j(a,b,c):j(a,"pfx")},q.className=q.className.replace(/(^|\s)no-js(\s|$)/,"$1$2")+(p?" js "+F.join(" "):""),o}(this,this.document);
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/data/static/foundation/js/vendor/modernizr.js
|
modernizr.js
|
/**
 * Annalist browser-side support functions.
 *
 * Depends on jQuery ($) and Zurb Foundation (Foundation.media_queries),
 * which are loaded separately by the enclosing page.
 */
var annalist = {                    // 'var' avoids an implicit global; still exposed as window.annalist
    resize_handler: function (event) {
        /*
         * Adjust sizing of selected controls when window is resized,
         * keyed off the Foundation "small"/"medium" media queries.
         */
        if (window.matchMedia(Foundation.media_queries['small']).matches) {
            $(".small-size-4").attr("size", 4);
            $(".small-rows-4").attr("rows", 4);
            /* $(".small-only-text-right").attr("text-align", "right"); doesn't work */
            $(".medium-add-margin").attr("width", "100%");   // added missing ';'
            $(".medium-up-text-right").removeClass("text-right");
        } else {
            $(".medium-up-text-right").addClass("text-right");
        }
        if (window.matchMedia(Foundation.media_queries['medium']).matches) {
            $(".medium-size-8").attr("size", 8);
            $(".medium-rows-8").attr("rows", 8);
            $(".medium-size-12").attr("size", 12);
            $(".medium-rows-12").attr("rows", 12);
            $(".medium-add-margin").attr("width", "95%");    // added missing ';'
        }
    },

    select_button_change: function (event) {
        /*
         * Select character to display in button based on whether or not
         * a value is selected: "+" if there is no selection, which causes
         * a view to be created to define a new value, or "writing hand"
         * (u+270D) for editing the selected value.
         */
        var div = event.data;       // enclosing <div>, supplied via .on("change", div, ...)
        var sel = div.find("select");
        var val = sel.val();
        var btn = div.find("div.view-value.new-button > button > span.select-edit-button-text");
        // BUG FIX: previous guard `typeof btn !== "undefined"` was always
        // true (a jQuery object is never undefined); test for a non-empty
        // selection instead.  Net effect is unchanged: .text() on an empty
        // jQuery set was already a no-op.
        if (btn.length) {
            btn.text(val ? "\u270D" : "+");
        }
    },

    select_button_init: function (index) {
        /*
         * Initialize logic for selection new/edit button:
         * attach the change handler (passing the enclosing div as event
         * data) and fire it once to set the initial button symbol.
         */
        var div = $(this);
        var sel = div.find("select");
        sel.on("change", div, annalist.select_button_change);
        sel.trigger("change");
    }
};
// Re-apply control sizing on window resize, throttled to one call per 10ms.
$(window).resize(Foundation.utils.throttle(annalist.resize_handler, 10));

$(document).ready(function () {
    /* For new or copy operations, select the entity_id field */
    var e = $("input[type='hidden'][name='action']");
    if (e.length) {
        var newactions = ["new", "copy"];                    // added missing ';'
        if (newactions.indexOf(e.attr("value")) >= 0) {
            $("input[name='entity_id']").focus().select();   // added missing ';'
        }
    }
    /* For add/edit buttons on select widget, choose symbol based on state */
    $("div.view-value > select").parent().parent()
        .has("div.view-value.new-button")
        .each(annalist.select_button_init);
});
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/data/static/js/annalist.js
|
annalist.js
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist collection views
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.urls import resolve, reverse
from annalist import message
from annalist import layout
from annalist.identifiers import ANNAL, RDFS
from annalist.exceptions import Annalist_Error
from annalist.models import entitytypeinfo
from annalist.models.site import Site
from annalist.models.collection import Collection
from annalist.models.recordtype import RecordType
from annalist.models.recordview import RecordView
from annalist.models.recordlist import RecordList
from annalist.models.collectiondata import migrate_coll_data, migrate_coll_config_dirs
from annalist.views.uri_builder import uri_with_params
from annalist.views.uri_builder import (
uri_param_dict,
scope_params, continuation_params,
uri_params, uri_with_params,
)
from annalist.views.displayinfo import DisplayInfo
from annalist.views.generic import AnnalistGenericView
from annalist.views.confirm import ConfirmView
class CollectionView(AnnalistGenericView):
    """
    View class to handle requests to display an Annalist collection.

    A GET never renders content directly: it redirects either to the
    collection's configured default view/type/entity, or, if none is
    configured, to the default list of all entities in the collection.
    """

    def __init__(self):
        super(CollectionView, self).__init__()
        # Continuation used when the request supplies none: site front page.
        self.default_continuation = self.view_uri("AnnalistSiteView")
        return

    def collection_view_setup(self, coll_id, action, request_dict):
        """
        Assemble display information for collection view request handler.

        coll_id       collection identifier from the request URI.
        action        action to be authorized (e.g. "view").
        request_dict  dictionary of request parameters.

        Returns a DisplayInfo object.  If an error was detected, its
        `http_response` attribute holds the error response to be returned.
        """
        viewinfo = DisplayInfo(self, action, request_dict, self.default_continuation)
        viewinfo.get_site_info(self.get_request_host())
        viewinfo.get_coll_info(coll_id)
        viewinfo.check_authorization(action)
        # Once the collection is known, subsequent continuations return here.
        self.default_continuation = self.view_uri("AnnalistCollectionView", coll_id=coll_id)
        if not viewinfo.http_response:
            # Migrate collection configuration directories if needed; any
            # errors are folded into viewinfo's response via report_error.
            errs = migrate_coll_config_dirs(viewinfo.collection)
            if errs:
                viewinfo.report_error("\n".join(errs))
        return viewinfo

    def get(self, request, coll_id):
        """
        Display the specified collection by redirecting to an appropriate
        entity view or entity list view.
        """
        viewinfo = self.collection_view_setup(coll_id, "view", request.GET.dict())
        if viewinfo.http_response:
            log.debug(
                "CollectionView.get: response %d: %s"%
                (viewinfo.http_response.status_code, viewinfo.http_response.reason_phrase)
                )
            return viewinfo.http_response
        # Select and display view of collection
        default_view, default_type, default_entity = viewinfo.get_default_view_type_entity()
        if default_view and default_type and default_entity:
            # Collection defines a default entity: redirect to its view.
            redirect_uri = self.view_uri(
                "AnnalistEntityDataView",
                coll_id=coll_id,
                view_id=default_view, type_id=default_type, entity_id=default_entity
                )
        else:
            # No default entity configured: fall back to listing everything.
            redirect_uri = self.view_uri(
                "AnnalistEntityDefaultListAll",
                coll_id=coll_id
                )
        return HttpResponseRedirect(redirect_uri)
class CollectionEditView(AnnalistGenericView):
"""
View class to handle requests to an Annalist collection edit URI
"""
    def __init__(self):
        super(CollectionEditView, self).__init__()
        # Continuation used when the request supplies none: site front page.
        self.default_continuation = self.view_uri("AnnalistSiteView")
        return
    def collection_edit_setup(self, coll_id, action, request_dict):
        """
        Assemble display information for collection edit request handler.

        coll_id       collection identifier from the request URI.
        action        action to be authorized (e.g. "view" for GET,
                      "config" for POST).
        request_dict  dictionary of request parameters.

        Returns a DisplayInfo object.  If an error was detected, its
        `http_response` attribute holds the error response to be returned.
        """
        viewinfo = DisplayInfo(self, action, request_dict, self.default_continuation)
        viewinfo.get_site_info(self.get_request_host())
        viewinfo.get_coll_info(coll_id)
        viewinfo.get_request_type_info(entitytypeinfo.COLL_ID)
        viewinfo.check_authorization(action)
        # Once the collection is known, subsequent continuations return here.
        self.default_continuation = self.view_uri("AnnalistCollectionView", coll_id=coll_id)
        return viewinfo
# GET
    def get(self, request, coll_id):
        """
        Form for editing (customizing) the current collection: lists the
        collection's types, lists and views with new/copy/edit/delete
        buttons for each.
        """
        def resultdata(viewinfo):
            # Build the template context for the customize page.
            def get_id(e):
                # Sort key: entity identifier.
                return e.get_id()
            coll = viewinfo.collection
            context = (
                { 'continuation_url':   viewinfo.get_continuation_url() or ""
                , 'continuation_param': viewinfo.get_continuation_param()
                # altscope=None: list only this collection's own definitions
                # (not inherited ones), sorted by id.
                , 'types':              sorted(coll.types(altscope=None), key=get_id)
                , 'lists':              sorted(coll.lists(altscope=None), key=get_id)
                , 'views':              sorted(coll.views(altscope=None), key=get_id)
                , 'select_rows':        "6"
                })
            context.update(viewinfo.context_data())
            context['heading'] = message.CUSTOMIZE_COLLECTION_HEADING%context
                # "Customize collection — %(coll_label)s"%context
            return context
        continuation_url = None
        # View permission only to display form,
        # as it presents useful information even when not editing.
        viewinfo = self.collection_edit_setup(coll_id, "view", request.GET.dict())
        if viewinfo.http_response:
            return viewinfo.http_response
        # Flush caches and regenerate JSON-LD context when
        # invoking collection customize view
        viewinfo.flush_collection_caches()
        viewinfo.collection.generate_coll_jsonld_context()
        # Generate customize page view; collection comment (if any) is
        # offered as help text rendered from Markdown.
        self.help_markdown = viewinfo.collection.get(RDFS.CURIE.comment, None)
        return (
            self.render_html(resultdata(viewinfo), 'annalist_collection_edit.html') or
            self.error(self.error406values())
            )
# POST
    def post(self, request, coll_id):
        """
        Update some aspect of the current collection.

        Dispatches on which submit button is present in the POST data:
        close, migrate, metadata, and new/copy/edit/delete for each of
        the collection's record types, list views and record views.
        Most branches compute a redirect to a follow-on form; delete
        branches build a confirmation response instead.
        """
        # Note: in many cases, this function redirects to a URI that displays a form
        # to gather further details of values to update. Values returned by
        # POST to this view are then passed as URI segments in the GET request
        # that renders the form. Maybe there's an easier way than all this
        # URI-wrangling?
        redirect_uri = None
        http_response = None
        viewinfo = self.collection_edit_setup(coll_id, "config", request.POST.dict())
        if viewinfo.http_response:
            return viewinfo.http_response
        if "close" in request.POST:
            # Return to the page that invoked the customize view.
            redirect_uri = viewinfo.get_continuation_next()
        if "migrate" in request.POST:
            # Migrate collection data; returns immediately with either an
            # error page (messages logged) or a redisplay with info message.
            msgs = migrate_coll_data(viewinfo.collection)
            msg_vals = {'id': coll_id}
            if msgs:
                for msg in msgs:
                    log.warning(msg)
                err = message.MIGRATE_COLLECTION_ERROR%msg_vals
                msg = "\n".join([err]+msgs)
                log.error(msg)
                http_response = self.error(dict(self.error500values(),message=msg))
            else:
                # Redisplay current page with completion message
                viewuri = self.get_request_path()
                http_response = self.redirect_info(
                    self.get_request_path(),
                    view_params=continuation_params(uri_param_dict(viewuri)),
                    info_message=message.MIGRATED_COLLECTION_DATA%msg_vals
                    )
            return http_response
        # Edit collection metadata
        if "metadata" in request.POST:
            redirect_uri = self.item_edit_uri(
                layout.SITEDATA_ID, "_coll", "Collection_view", coll_id,
                message.NO_COLLECTION_METADATA,
                viewinfo.get_continuation_here(),
                viewinfo.get_continuation_url()
                )
        # Record types
        type_id = request.POST.get('typelist', None)
        if "type_new" in request.POST:
            redirect_uri = self.item_new_uri(
                coll_id, "_type", "Type_view",
                viewinfo.get_continuation_here()
                )
        if "type_copy" in request.POST:
            redirect_uri = self.item_copy_uri(
                coll_id, "_type", "Type_view", type_id,
                message.NO_TYPE_FOR_COPY,
                viewinfo.get_continuation_here(),
                viewinfo.get_continuation_url()
                )
        if "type_edit" in request.POST:
            # NOTE(review): uses NO_TYPE_FOR_COPY as the "nothing selected"
            # message for an edit action — looks like a copy/paste of the
            # branch above; confirm whether a NO_TYPE_FOR_EDIT message exists.
            redirect_uri = self.item_edit_uri(
                coll_id, "_type", "Type_view", type_id,
                message.NO_TYPE_FOR_COPY,
                viewinfo.get_continuation_here(),
                viewinfo.get_continuation_url()
                )
        if "type_delete" in request.POST:
            http_response = viewinfo.confirm_delete_entity_response(
                layout.TYPE_TYPEID, type_id,
                self.view_uri("AnnalistRecordTypeDeleteView", coll_id=coll_id),
                form_action_field="type_delete",
                form_value_field="typelist",
                response_messages=
                    { "no_entity":          message.NO_TYPE_FOR_DELETE
                    , "confirm_completion": message.REMOVE_RECORD_TYPE
                    }
                )
        # List views
        list_id = request.POST.get('listlist', None)
        if "list_new" in request.POST:
            redirect_uri = self.item_new_uri(
                coll_id, "_list", "List_view",
                viewinfo.get_continuation_here()
                )
        if "list_copy" in request.POST:
            redirect_uri = self.item_copy_uri(
                coll_id, "_list", "List_view", list_id,
                message.NO_LIST_FOR_COPY,
                viewinfo.get_continuation_here(),
                viewinfo.get_continuation_url()
                )
        if "list_edit" in request.POST:
            # NOTE(review): NO_LIST_FOR_COPY reused for edit — see note above.
            redirect_uri = self.item_edit_uri(
                coll_id, "_list", "List_view", list_id,
                message.NO_LIST_FOR_COPY,
                viewinfo.get_continuation_here(),
                viewinfo.get_continuation_url()
                )
        if "list_delete" in request.POST:
            http_response = viewinfo.confirm_delete_entity_response(
                layout.LIST_TYPEID, list_id,
                self.view_uri("AnnalistRecordListDeleteView", coll_id=coll_id),
                form_action_field="list_delete",
                form_value_field="listlist",
                response_messages=
                    { "no_entity":          message.NO_LIST_FOR_DELETE
                    , "confirm_completion": message.REMOVE_RECORD_LIST
                    }
                )
        # Record views
        view_id = request.POST.get('viewlist', None)
        if "view_new" in request.POST:
            redirect_uri = self.item_new_uri(
                coll_id, "_view", "View_view",
                viewinfo.get_continuation_here()
                )
        if "view_copy" in request.POST:
            redirect_uri = self.item_copy_uri(
                coll_id, "_view", "View_view", view_id,
                message.NO_VIEW_FOR_COPY,
                viewinfo.get_continuation_here(),
                viewinfo.get_continuation_url()
                )
        if "view_edit" in request.POST:
            # NOTE(review): NO_VIEW_FOR_COPY reused for edit — see note above.
            redirect_uri = self.item_edit_uri(
                coll_id, "_view", "View_view", view_id,
                message.NO_VIEW_FOR_COPY,
                viewinfo.get_continuation_here(),
                viewinfo.get_continuation_url()
                )
        if "view_delete" in request.POST:
            http_response = viewinfo.confirm_delete_entity_response(
                layout.VIEW_TYPEID, view_id,
                self.view_uri("AnnalistRecordViewDeleteView", coll_id=coll_id),
                form_action_field="view_delete",
                form_value_field="viewlist",
                response_messages=
                    { "no_entity":          message.NO_VIEW_FOR_DELETE
                    , "confirm_completion": message.REMOVE_RECORD_VIEW
                    }
                )
        # Invoke selected view and/or render status response
        if redirect_uri:
            http_response = http_response or HttpResponseRedirect(redirect_uri)
        if http_response:
            return http_response
        # No recognized button in the POST data: report an error.
        e = Annalist_Error(request.POST, "Unexpected values in POST to "+self.get_request_path())
        log.exception(str(e))
        return self.error(
            dict(self.error500values(),
                message=str(e)+" - see server log for details"
                )
            )
        # raise Annalist_Error(request.POST, "Unexpected values in POST to "+self.get_request_path())
# POST helper methods
def item_new_uri(self, coll_id, type_id, view_id, continuation_here):
# @@TODO: pass in viewinfo rather than continuation URL
redirect_uri = uri_with_params(
self.view_uri("AnnalistEntityNewView",
coll_id=coll_id, view_id=view_id, type_id=type_id, action="new"
),
{'continuation_url': continuation_here}
)
return redirect_uri
def item_edit_copy_uri(self,
coll_id, type_id, view_id, entity_id, no_entity_msg,
continuation_here, continuation_url, action):
# NOTE: continuation_url is the continuation URL from the current page,
# and is used as part of the URL used to redisplay the current
# page with an error message.
# @@TODO: pass in viewinfo rather than continuation URLs
if not entity_id:
continuation_url_dict = {}
if continuation_url:
continuation_url_dict = {'continuation_url': continuation_url}
return uri_with_params(
self.get_request_path(),
self.error_params(no_entity_msg),
continuation_url_dict
)
redirect_uri = (
uri_with_params(
self.view_uri("AnnalistEntityEditView", action=action,
coll_id=coll_id, view_id=view_id, type_id=type_id, entity_id=entity_id
),
{'continuation_url': continuation_here}
)
)
return redirect_uri
def item_copy_uri(self,
coll_id, type_id, view_id, entity_id, no_entity_msg,
continuation_here, continuation_url):
# NOTE: continuation_url is the continuation URL from the current page,
# and is used as part of the URL used to redisplay the current
# page with an error message.
# @@TODO: pass in viewinfo rather than continuation URLs
redirect_uri = self.item_edit_copy_uri(
coll_id, type_id, view_id, entity_id, no_entity_msg,
continuation_here, continuation_url,
"copy")
return redirect_uri
def item_edit_uri(self,
coll_id, type_id, view_id, entity_id, no_entity_msg,
continuation_here, continuation_url):
# NOTE: continuation_url is the continuation URL from the current page,
# and is used as part of the URL used to redisplay the current
# page with an error message.
# @@TODO: pass in viewinfo rather than continuation URLs
redirect_uri = self.item_edit_copy_uri(
coll_id, type_id, view_id, entity_id, no_entity_msg,
continuation_here, continuation_url,
"edit")
return redirect_uri
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/collection.py
|
collection.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist base classes for record editing views and form response handlers
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.urls import resolve, reverse
from annalist.views.uri_builder import continuation_params
from annalist.views.displayinfo import DisplayInfo
from annalist.views.generic import AnnalistGenericView
# -------------------------------------------------------------------------------------------
#
# Generic delete entity confirmation response handling class
#
# -------------------------------------------------------------------------------------------
class EntityDeleteConfirmedBaseView(AnnalistGenericView):
    """
    View class to perform completion of confirmed entity deletion, requested
    from collection edit view.

    Subclasses provide a POST handler that extracts form values and calls
    `complete_remove_entity` (see e.g. EntityDataDeleteConfirmedView).
    """
    def __init__(self):
        super(EntityDeleteConfirmedBaseView, self).__init__()
        return
    def complete_remove_entity(self,
            coll_id, type_id, entity_id,
            default_continuation_url, request_params):
        """
        Complete action to remove an entity.

        coll_id                  id of collection containing the entity.
        type_id                  id of the type of the entity to be removed.
        entity_id                id of the entity to be removed.
        default_continuation_url URL to redirect to when the request
                                 parameters supply no 'completion_url' value.
        request_params           dict of request (form) parameters.

        Returns an HTTP response: an error/authorization response generated
        while assembling display information, or a redirect to the
        continuation URL carrying an error message if removal fails, or an
        info message if removal succeeds.
        """
        # Prefer a 'completion_url' supplied with the request over the default
        continuation_url = (
            request_params.get('completion_url', None) or
            default_continuation_url
            )
        continuation_url_params = continuation_params(request_params)
        viewinfo = DisplayInfo(self, "delete", request_params, continuation_url)
        viewinfo.get_site_info(self.get_request_host())
        viewinfo.get_coll_info(coll_id)
        viewinfo.get_request_type_info(type_id)
        viewinfo.check_authorization("delete")
        if viewinfo.http_response:
            # Error or authorization failure detected during setup
            return viewinfo.http_response
        typeinfo = viewinfo.curr_typeinfo
        message_vals = {'id': entity_id, 'type_id': type_id, 'coll_id': coll_id}
        messages = (
            { 'entity_removed': typeinfo.entitymessages['entity_removed']%message_vals
            })
        # Perform the removal; a truthy return value reports an error
        err = typeinfo.entityclass.remove(typeinfo.entityparent, entity_id)
        if err:
            return self.redirect_error(
                continuation_url, continuation_url_params, error_message=str(err)
                )
        return self.redirect_info(
            continuation_url, continuation_url_params,
            info_message=messages['entity_removed']
            )
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/entitydeletebase.py
|
entitydeletebase.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.urls import resolve, reverse
from annalist import layout
from annalist import message
from annalist.exceptions import Annalist_Error
from annalist.identifiers import RDFS, ANNAL
from annalist.util import (
make_type_entity_id, split_type_entity_id, extract_entity_id,
make_resource_url
)
import annalist.models.entitytypeinfo as entitytypeinfo
from annalist.models.collection import Collection
from annalist.models.recordtype import RecordType
from annalist.models.recordtypedata import RecordTypeData
from annalist.models.entitytypeinfo import EntityTypeInfo, CONFIG_PERMISSIONS
from annalist.models.entityfinder import EntityFinder
from annalist.views.uri_builder import uri_with_params
from annalist.views.displayinfo import DisplayInfo
from annalist.views.confirm import ConfirmView, dict_querydict
from annalist.views.generic import AnnalistGenericView
from annalist.views.entityvaluemap import EntityValueMap
from annalist.views.simplevaluemap import SimpleValueMap, StableValueMap
from annalist.views.fieldlistvaluemap import FieldListValueMap
from annalist.views.fieldvaluemap import FieldValueMap
from annalist.views.repeatvaluesmap import RepeatValuesMap
from annalist.views.fields.field_description import FieldDescription, field_description_from_view_field
from annalist.views.fields.bound_field import bound_field, get_entity_values
# -------------------------------------------------------------------------------------------
#
# Mapping table data (not view-specific)
#
# -------------------------------------------------------------------------------------------
# Table used as basis, or initial values, for a dynamically generated
# entity-value map for list displays
#
# NOTE(review): the keyword names suggest each SimpleValueMap relates a
# context variable (c), entity value (e) and form field (f); entries with
# e=None and f=None appear to be context-only values — confirm against
# the SimpleValueMap definition.
listentityvaluemap = (
    [ SimpleValueMap(c='help_filename', e=None, f=None )
    , SimpleValueMap(c='url_type_id', e=None, f=None )
    , SimpleValueMap(c='url_list_id', e=None, f=None )
    , SimpleValueMap(c='list_choices', e=None, f=None )
    , SimpleValueMap(c='collection_view', e=None, f=None )
    , SimpleValueMap(c='default_view_id', e=None, f=None )
    , SimpleValueMap(c='default_view_enable', e=None, f=None )
    , SimpleValueMap(c='customize_view_enable', e=None, f=None )
    , SimpleValueMap(c='search_for', e=None, f='search_for' )
    , SimpleValueMap(c='scope', e=None, f='scope' )
    , SimpleValueMap(c='continuation_url', e=None, f='continuation_url' )
    , SimpleValueMap(c='continuation_param', e=None, f=None )
    # Field data is handled separately during processing of the form description
    # Form and interaction control (hidden fields)
    ])
# -------------------------------------------------------------------------------------------
#
# List entities view - form rendering and POST response handling
#
# -------------------------------------------------------------------------------------------
class EntityGenericListView(AnnalistGenericView):
    """
    View class for generic entity list view

    GET renders a list display form (HTML, or redirects to JSON/Turtle
    resources for other content types); POST handles responses from that
    form (new/copy/edit/delete selection, list choice, default view,
    customize, close, etc.).
    """
    _entityformtemplate = 'annalist_entity_list.html'
    def __init__(self):
        super(EntityGenericListView, self).__init__()
        self.help = "entity-list-help"
        return
    # Helper functions
    def list_setup(self, coll_id, type_id, list_id, request_dict):
        """
        Assemble display information for list view request handlers

        coll_id         id of collection whose entities are to be listed.
        type_id         id of type of entities to be listed (may be None).
        list_id         id of list description to use (may be None; a
                        default is then derived via listinfo.get_list_id).
        request_dict    dict of request parameters.

        Returns a DisplayInfo object; errors encountered during setup are
        recorded in its `http_response` attribute.
        """
        # log.info("list_setup coll_id %s, type_id %s, list_id %s"%(coll_id, type_id, list_id))
        self.collection_view_url = self.get_collection_view_url(coll_id)
        listinfo = DisplayInfo(self, "list", request_dict, self.collection_view_url)
        listinfo.get_site_info(self.get_request_host())
        listinfo.get_coll_info(coll_id)
        listinfo.get_request_type_info(type_id)
        listinfo.get_list_info(listinfo.get_list_id(listinfo.type_id, list_id))
        listinfo.check_authorization("list")
        return listinfo
    def get_list_entityvaluemap(self, listinfo, context_extra_values):
        """
        Creates an entity/value map table in the current object incorporating
        information from the form field definitions for an indicated list display.

        listinfo                DisplayInfo object for the current list display.
        context_extra_values    dict of additional context values (not used
                                directly here).

        Returns an EntityValueMap built from `listentityvaluemap` plus the
        list's field descriptions.
        """
        # Locate and read view description
        entitymap = EntityValueMap(listentityvaluemap)
        # log.debug(
        #     "EntityGenericListView.get_list_entityvaluemap entitylist %r"%
        #     listinfo.recordlist.get_values()
        #     )
        #
        # Need to generate
        # 1. 'fields': (context receives list of field descriptions used to generate row headers)
        # 2. 'entities': (context receives a bound field that displays entry for each entity)
        #
        # NOTE - supplied entity has single field ANNAL.CURIE.entity_list (see 'get' below)
        # entitylist template uses 'fields' from context to display headings
        list_fields = listinfo.recordlist.get(ANNAL.CURIE.list_fields, [])
        fieldlistmap = FieldListValueMap('fields', listinfo.collection, list_fields, None)
        entitymap.add_map_entry(fieldlistmap) # For access to field headings
        # Repeated-row field description used to render one row per entity
        repeatrows_field_descr = (
            { ANNAL.CURIE.id: "List_rows"
            , RDFS.CURIE.label: "Fields"
            , RDFS.CURIE.comment:
                "This resource describes the repeated field description used when "+
                "displaying and/or editing a record view description"
            , ANNAL.CURIE.field_name: "List_rows"
            , ANNAL.CURIE.field_render_type: "RepeatListRow"
            , ANNAL.CURIE.property_uri: ANNAL.CURIE.entity_list
            })
        repeatrows_descr = FieldDescription(
            listinfo.collection,
            repeatrows_field_descr,
            field_list=list_fields
            )
        entitymap.add_map_entry(FieldValueMap(c="List_rows", f=repeatrows_descr))
        return entitymap
    # Helper functions assemble and return data for list of entities
    def strip_context_values(self, listinfo, entity, base_url):
        """
        Return selected values from entity data,
        with context reference removed and entity id updated.

        listinfo    DisplayInfo object for the current list display.
        entity      entity whose values are to be extracted.
        base_url    base URL used to construct the entity's '@id' value.
        """
        # entityvals = entity.get_values()
        entityvals = get_entity_values(listinfo.curr_typeinfo, entity)
        entityvals.pop('@context', None)
        entityref = make_type_entity_id(
            entityvals[ANNAL.CURIE.type_id], entityvals[ANNAL.CURIE.id]
            )
        entityvals['@id'] = base_url+entityref
        return entityvals
    def assemble_list_data(self, listinfo, scope, search_for):
        """
        Assemble and return a dict structure of JSON data used to generate
        entity list responses.

        listinfo    DisplayInfo object for the current list display.
        scope       scope of entities to be listed (passed through as
                    `altscope` to the entity finder; may be None).
        search_for  search term string used to filter listed entities
                    ("" for no filtering).
        """
        # Prepare list and entity IDs for rendering form
        selector = listinfo.recordlist.get_values().get(ANNAL.CURIE.list_entity_selector, "")
        user_perms = self.get_permissions(listinfo.collection)
        entity_list = (
            EntityFinder(listinfo.collection, selector=selector)
            .get_entities_sorted(
                user_perms, type_id=listinfo.type_id, altscope=scope,
                context={'list': listinfo.recordlist}, search=search_for
                )
            )
        base_url = self.get_collection_base_url(listinfo.coll_id)
        list_url = self.get_list_url(
            listinfo.coll_id, listinfo.list_id,
            type_id=listinfo.type_id,
            scope=scope,
            search=search_for
            )
        entityvallist = [ self.strip_context_values(listinfo, e, base_url) for e in entity_list ]
        # log.debug("@@ listinfo.list_id %s, coll base_url %s"%(listinfo.list_id, base_url))
        # log.info(
        # "EntityListDataView.assemble_list_data: list_url %s, base_url %s, context_url %s"%
        # (list_url, base_url, base_url+layout.COLL_CONTEXT_FILE)
        # )
        jsondata = (
            { '@id': list_url
            , '@context': [
                { "@base": base_url },
                base_url+layout.COLL_CONTEXT_FILE
                ]
            , ANNAL.CURIE.entity_list: entityvallist
            })
        # print "@@@@ assemble_list_data: jsondata %r"%(jsondata,)
        return jsondata
    # GET
    def get(self, request, coll_id=None, type_id=None, list_id=None):
        """
        Create a form for listing entities.

        Renders HTML if acceptable to the client, otherwise redirects to a
        JSON or Turtle list resource, or returns a 406 error response.
        """
        scope = request.GET.get('scope', None)
        search_for = request.GET.get('search', "")
        log.info(
            "views.entitylist.get: coll_id %s, type_id %s, list_id %s, scope %s, search '%s'"%
            (coll_id, type_id, list_id, scope, search_for)
            )
        log.log(settings.TRACE_FIELD_VALUE, " %s"%(self.get_request_path()))
        listinfo = self.list_setup(coll_id, type_id, list_id, request.GET.dict())
        if listinfo.http_response:
            return listinfo.http_response
        self.help_markdown = listinfo.recordlist.get(RDFS.CURIE.comment, None)
        log.debug("listinfo.list_id %s"%listinfo.list_id)
        # Prepare list and entity IDs for rendering form
        try:
            entityvallist = self.assemble_list_data(listinfo, scope, search_for)
            # Set up initial view context
            context_extra_values = (
                { 'continuation_url': listinfo.get_continuation_url() or ""
                , 'continuation_param': listinfo.get_continuation_param()
                , 'request_url': self.get_request_path()
                , 'scope': scope
                , 'url_type_id': type_id
                , 'url_list_id': list_id
                , 'search_for': search_for
                , 'list_choices': self.get_list_choices_field(listinfo)
                , 'collection_view': self.collection_view_url
                , 'default_view_id': listinfo.recordlist[ANNAL.CURIE.default_view]
                , 'default_view_enable': 'disabled="disabled"'
                , 'customize_view_enable': 'disabled="disabled"'
                , 'collection': listinfo.collection
                })
            # Enable customize/default-view controls only with config permission
            if listinfo.authorizations['auth_config']:
                context_extra_values['customize_view_enable'] = ""
                if list_id:
                    context_extra_values['default_view_enable'] = ""
            entityvaluemap = self.get_list_entityvaluemap(listinfo, context_extra_values)
            listcontext = entityvaluemap.map_value_to_context(
                entityvallist,
                **context_extra_values
                )
            listcontext.update(listinfo.context_data())
        except Exception as e:
            log.exception(str(e))
            return self.error(
                dict(self.error500values(),
                    message=str(e)+" - see server log for details"
                    )
                )
        # log.info("EntityGenericListView.get listcontext %r"%(listcontext))
        # Generate and return form data
        json_redirect_url = make_resource_url("", self.get_request_path(), layout.ENTITY_LIST_FILE)
        turtle_redirect_url = make_resource_url("", self.get_request_path(), layout.ENTITY_LIST_TURTLE)
        return (
            self.render_html(listcontext, self._entityformtemplate)
            or
            self.redirect_json(json_redirect_url)
            or
            self.redirect_turtle(turtle_redirect_url)
            or
            self.error(self.error406values())
            )
    # POST
    def post(self, request, coll_id=None, type_id=None, list_id=None):
        """
        Handle response from dynamically generated list display form.

        Recognized form buttons include: close, new, copy, edit, delete,
        default_view, list_type/list_all, customize.  Unrecognized form
        data results in a redirect carrying an error message.
        """
        log.info("views.entitylist.post: coll_id %s, type_id %s, list_id %s"%(coll_id, type_id, list_id))
        log.log(settings.TRACE_FIELD_VALUE, " %s"%(self.get_request_path()))
        # log.log(settings.TRACE_FIELD_VALUE, " form data %r"%(request.POST))
        listinfo = self.list_setup(coll_id, type_id, list_id, request.POST.dict())
        if listinfo.http_response:
            return listinfo.http_response
        if 'close' in request.POST:
            return HttpResponseRedirect(listinfo.get_continuation_url() or self.collection_view_url)
        # Process requested action
        action = None
        redirect_path = None
        redirect_cont = listinfo.get_continuation_here()
        redirect_params = {}
        entity_ids = request.POST.getlist('entity_select')
        log.debug("entity_ids %r"%(entity_ids))
        if len(entity_ids) > 1:
            # At most one entity may be selected for the actions below
            listinfo.display_error_response(message.TOO_MANY_ENTITIES_SEL)
        else:
            entity_type = type_id or listinfo.get_list_type_id()
            entity_id = None
            if len(entity_ids) == 1:
                (entity_type, entity_id) = split_type_entity_id(entity_ids[0], entity_type)
            log.info("EntityList.post entity_ids: entity_type %s, entity_id %s"%(entity_type, entity_id))
            if "new" in request.POST:
                action = "new"
                redirect_path = listinfo.get_new_view_uri(coll_id, entity_type)
            if "copy" in request.POST:
                action = "copy"
                if not entity_id:
                    listinfo.display_error_response(message.NO_ENTITY_FOR_COPY)
                else:
                    redirect_path = listinfo.get_edit_view_uri(
                        coll_id, entity_type, entity_id, action
                        )
            if "edit" in request.POST:
                action = "edit"
                if not entity_id:
                    listinfo.display_error_response(message.NO_ENTITY_FOR_EDIT)
                else:
                    redirect_path = listinfo.get_edit_view_uri(
                        coll_id, entity_type, entity_id, action
                        )
            if "delete" in request.POST:
                # Delete responds immediately with a confirmation form
                action = "delete"
                confirmed_deletion_uri = self.view_uri(
                    "AnnalistEntityDataDeleteView",
                    coll_id=coll_id, type_id=entity_type
                    )
                return listinfo.confirm_delete_entity_response(
                    entity_type, entity_id,
                    confirmed_deletion_uri
                    )
            if "default_view" in request.POST:
                # Setting collection default list requires config permission
                auth_check = listinfo.check_authorization("config")
                if auth_check:
                    return auth_check
                listinfo.collection.set_default_list(list_id)
                listinfo.add_info_message(
                    message.DEFAULT_LIST_UPDATED%{'coll_id': coll_id, 'list_id': list_id}
                    )
                redirect_path, redirect_params = listinfo.redisplay_path_params()
                redirect_cont = listinfo.get_continuation_next()
            if ( ("list_type" in request.POST) or ("list_all" in request.POST) ):
                # Redisplay with a different list selection and/or scope
                action = "list"
                redirect_path = self.get_list_url(
                    coll_id, extract_entity_id(request.POST['list_choice']),
                    type_id=None if "list_all" in request.POST else type_id
                    )
                redirect_params = dict(
                    scope="all" if "list_scope_all" in request.POST else None,
                    search=request.POST['search_for']
                    )
                redirect_cont = listinfo.get_continuation_next()
                # redirect_cont = None
            if "customize" in request.POST:
                action = "config"
                redirect_path = self.view_uri(
                    "AnnalistCollectionEditView",
                    coll_id=coll_id
                    )
        if redirect_path:
            if redirect_cont:
                redirect_params.update(
                    { "continuation_url": redirect_cont }
                    )
            listinfo.redirect_response(
                redirect_path, redirect_params=redirect_params, action=action
                )
        if listinfo.http_response:
            return listinfo.http_response
        # Report unexpected form data
        # This shouldn't happen, but just in case...
        # Redirect to continuation with error
        log.error("Unexpected form data posted to %s: %r"%(request.get_full_path(), request.POST))
        err_values = self.error_params(
            message.UNEXPECTED_FORM_DATA%(request.POST),
            message.SYSTEM_ERROR
            )
        redirect_uri = uri_with_params(listinfo.get_continuation_next(), err_values)
        return HttpResponseRedirect(redirect_uri)
    def get_list_choices_field(self, listinfo):
        """
        Returns a bound_field object that displays as a list-choice selection drop-down.
        """
        # @@TODO: Possibly create FieldValueMap and return map_entity_to_context value?
        # or extract this logic and share? See also entityedit view choices
        field_description = field_description_from_view_field(
            listinfo.collection,
            { ANNAL.CURIE.field_id: "List_choice"
            , ANNAL.CURIE.field_placement: "small:0,12;medium:0,9" },
            {}
            )
        entityvals = { field_description['field_property_uri']: listinfo.list_id }
        return bound_field(field_description, entityvals)
    def get_list_url(self, coll_id, list_id, type_id=None, scope=None, search=None, query_params={}):
        """
        Return a URL for accessing the current list display

        coll_id         id of collection whose list is displayed.
        list_id         id of list description used.
        type_id         optional type id constraining the listing.
        scope           optional scope value added as a query parameter.
        search          optional search term added as a query parameter.
        query_params    optional dict of additional query parameters.

        NOTE: the mutable default for `query_params` is benign here because
        the value is only rebound (via dict(...)), never mutated in place.
        """
        list_uri_params = (
            { 'coll_id': coll_id
            , 'list_id': list_id
            })
        if type_id:
            list_uri_params['type_id'] = type_id
        if scope:
            query_params = dict(query_params, scope=scope)
        if search:
            query_params = dict(query_params, search=search)
        list_url = (
            uri_with_params(
                self.view_uri("AnnalistEntityGenericList", **list_uri_params),
                query_params
                )
            )
        return list_url
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/entitylist.py
|
entitylist.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Collection resource access (JSON-LD context and maybe more)
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import os
import logging
log = logging.getLogger(__name__)
from django.http import HttpResponse
from annalist import message
from annalist import layout
from annalist.models.entityresourceaccess import (
find_entity_resource,
site_fixed_json_resources,
get_resource_file
)
from annalist.views.displayinfo import DisplayInfo
from annalist.views.generic import AnnalistGenericView
class SiteResourceAccess(AnnalistGenericView):
    """
    View class for site resource access

    This view class returns a site data resource.
    """
    # NOTE: the logic of this view is modelled on `entityresource`, but currently
    # the only resource recognized is the JSON-LD context.
    # @@TODO: define common superclass with `entityresource` to share common logic.
    # @@TESTME: recheck test coverage when refactoring done (currently 36%)
    def __init__(self):
        super(SiteResourceAccess, self).__init__()
        return
    # GET
    def get(self, request, coll_id=None, resource_ref=None, view_id=None):
        """
        Access specified site entity resource

        request         Django HTTP request object.
        coll_id         collection id from the request URL (not used directly here).
        resource_ref    reference to the site resource to be served.
        view_id         view id from the request URL (not used directly here).

        Returns an HTTP response containing the resource content, or a
        404 response if the resource is not defined or does not exist, or
        a 500 response if an error occurs while serving it.
        """
        log.info("SiteResourceAccess.get: resource_ref %s"%(resource_ref,))
        viewinfo = self.view_setup(request.GET.dict())
        if viewinfo.http_response:
            return viewinfo.http_response
        # Locate resource
        resource_info = find_entity_resource(
            viewinfo.site, resource_ref, fixed_resources=site_fixed_json_resources
            )
        if resource_info is None:
            # Resource reference not recognized for site data
            return self.error(
                dict(self.error404values(),
                    message=message.SITE_RESOURCE_NOT_DEFINED%
                        { 'ref': resource_ref
                        }
                    )
                )
        site_baseurl = viewinfo.reqhost + self.get_site_base_url()
        resource_file, resource_type = get_resource_file(
            viewinfo.site, resource_info, site_baseurl
            )
        if resource_file is None:
            # Resource recognized but no content available
            return self.error(
                dict(self.error404values(),
                    message=message.SITE_RESOURCE_NOT_EXIST%
                        { 'ref': resource_ref
                        }
                    )
                )
        # Return resource
        try:
            response = self.resource_response(resource_file, resource_type)
        except Exception as e:
            log.exception(str(e))
            response = self.error(
                dict(self.error500values(),
                    message=str(e)+" - see server log for details"
                    )
                )
        finally:
            # Always release the resource file handle
            resource_file.close()
        return response
    def view_setup(self, request_dict):
        """
        Assemble display information for view request handler

        request_dict    dict of request parameters.

        Returns a DisplayInfo object with site information loaded and
        authorization checked; errors are recorded in its `http_response`
        attribute.
        """
        action = "view"
        self.default_continuation_url = None
        viewinfo = DisplayInfo(self, action, request_dict, self.default_continuation_url)
        viewinfo.get_site_info(self.get_request_host())
        viewinfo.check_authorization(action)
        viewinfo.site._ensure_values_loaded()
        return viewinfo
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/siteresource.py
|
siteresource.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
This module defines a class that is used to gather information about a response
to a form submission.
The intent of this module is to collect and isolate various response-handling
housekeeping functions into a common module to reduce code clutter in
the Annalist form response processing handler.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from annalist.views.uri_builder import uri_param_dict
class ResponseInfo(object):
    """
    This class collects and organizes information generated while processing
    a form response.

    It accumulates:
    - an HTTP response value (the first one set is retained),
    - a confirmation message or an error message, with a details string
      (an error, once set, takes precedence and is not overwritten),
    - a dictionary of message templates used to format reports,
    - a flag indicating whether entity values need to be updated.
    """
    def __init__(self):
        """
        Initialize a ResponseInfo object
        """
        self._http_response = None  # Set when an HTTP response is determined (typically an error)
        self._response_conf = None  # Operation confirmation message
        self._response_err = None   # Error message/flag (string)
        self._response_info = None  # String reporting details of confirmation or error
        self._templates = None      # Message templates dictionary
        self._updated = False       # Set True if entity values need to be updated
        return
    def __str__(self):
        # Local name 's' avoids shadowing the builtin 'str'
        s = (
            "{\n"+
            "_http_response: %s\n"%(self._http_response)+
            "_response_conf: %s\n"%(self._response_conf)+
            "_response_err: %s\n"%(self._response_err)+
            "_response_info: %s\n"%(self._response_info)+
            "_updated: %s\n"%(self._updated)+
            "}\n")
        return s
    def __repr__(self):
        return str(self)
    def set_http_response(self, http_response):
        # Retain the first response set; later calls are ignored
        if self._http_response is None:
            self._http_response = http_response
        return self # Result can be 'responseinfo' return value
    def get_http_response(self):
        return self._http_response
    def has_http_response(self):
        return self._http_response is not None
    def set_response_confirmation(self, response_conf, response_info):
        # A previously recorded error takes precedence over confirmation
        if not self.is_response_error():
            self._response_conf = response_conf
            self._response_info = response_info
        return
    def set_response_error(self, response_err, response_info):
        # Retain the first error recorded; later errors are ignored
        if not self.is_response_error():
            self._response_err = response_err
            self._response_info = response_info
        return
    def is_response_confirmation(self):
        return self._response_conf is not None
    def is_response_error(self):
        return self._response_err is not None
    def get_response_conf(self):
        return self._response_conf
    def get_response_err(self):
        return self._response_err
    def get_response_info(self):
        return self._response_info
    def set_updated(self):
        # Note that entity values need to be updated
        self._updated = True
        return
    def is_updated(self):
        return self._updated
    def set_message_templates(self, templates):
        # Merge supplied templates into any previously set templates
        if self._templates is None:
            self._templates = templates
        else:
            self._templates.update(templates)
        return self._templates
    def get_message_templates(self):
        # Fix: original definition omitted 'self', so any instance call
        # (e.g. responseinfo.get_message_templates()) raised TypeError.
        return self._templates
    def get_message(self, key):
        return self._templates.get(key, "ResponseInfo.get_message: unknown key %r"%key)
    def get_formatted(self, key, values):
        # Format the selected template with the supplied values dict
        t = self._templates.get(
            key, "ResponseInfo.get_formatted: unknown key %r (values %%r)"%key
            )
        return t%values
    def http_redirect(self, base_view, next_uri):
        """
        Generate an HTTP redirect response that incorporates any confirmation messages
        set up for the current responseinfo. The responseinfo object is updated with
        the generated response, which is also returned as the result of this method.
        """
        # NOTE: supplied URI is presumed to include continuation and other parameters.
        # These need to be extracted and passed separately to the underlying
        # `redirect_error` or `redirect_info` method so that they can be reassembled
        # along with the status messages.
        param_dict = uri_param_dict(next_uri)
        if self._http_response is None:
            if self.is_response_error():
                resp = base_view.redirect_error(next_uri,
                    view_params=param_dict,
                    error_head=self._response_err,
                    error_message=self._response_info
                    )
            else:
                resp = base_view.redirect_info(next_uri,
                    view_params=param_dict,
                    info_head=self._response_conf,
                    info_message=self._response_info
                    )
            self.set_http_response(resp)
        return self._http_response
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/responseinfo.py
|
responseinfo.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Static hack view to allow Django serving of static files using Django's
internal static file server.
This approach is explicitly NOT recommended by Django for production
web servers, but has been created to allow a Django application to be
deployed locally without requiring a separate web server to be deployed.
It is claimed to be very inefficient, and may be insecure, and as such
should not be used for an open Internet deployment.
The logic has been copied and adapted from django.contrib.staticfiles.views
For deployment, add something like the following to the site-level urls.py file:
urlpatterns += patterns('',
url(r'^static/(?P<path>.*)$', serve_static),
)
For production deployment, configure the front-end proxy or WSGI server to
serve files from the static area directly, rather than passing requests to Annalist,
and use `annalist-manager collectstatic` to gather static files to a common
location for serving.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import posixpath
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.http import Http404
from urllib.parse import unquote
from django.views import static
from django.contrib.staticfiles import finders
def serve_static(request, path, insecure=False, **kwargs):
    """
    Serve static files below a given point in the directory structure or
    from locations inferred from the staticfiles finders.
    To use, put a URL pattern such as:
        (r'^static/(?P<path>.*)$', 'annalist.views.statichack.serve_static')
    in your `annalist_root/urls.py` URL configuration file.
    It uses the django.views.static.serve() view to serve the found files.

    request     Django HTTP request object.
    path        URL path of the static resource to be served.
    insecure    accepted for signature compatibility; not used in this body.
    kwargs      additional keyword arguments passed to django.views.static.serve.
    """
    # log.info("serve_static %s"%(path))
    try:
        # Normalize the supplied path and locate the file via static file finders
        normalized_path = posixpath.normpath(unquote(path)).lstrip('/')
        absolute_path = finders.find(normalized_path)
        if not absolute_path:
            if path.endswith('/') or path == '':
                raise Http404("Directory indexes are not allowed here.")
            raise Http404("Resource '%s' could not be found (%s)" % (path, normalized_path))
        # Split found file into directory root and file name for static.serve
        document_root, path = os.path.split(absolute_path)
        # log.info("document_root %s, path %s"%(document_root, path))
    except Exception as e:
        # Log (including Http404) and re-raise for Django to handle
        log.info(str(e))
        raise
    return static.serve(request, path, document_root=document_root, **kwargs)
def serve_favicon(request, path, insecure=False, **kwargs):
    """
    Serve favicon: prepends "images/" to image path.
    """
    favicon_path = "images/" + path
    return serve_static(request, favicon_path, insecure=insecure, **kwargs)
def serve_pages(request, coll_id, page_ref, insecure=False, **kwargs):
    """
    Serve static files below a given point in the directory structure or
    from locations inferred from the staticfiles finders.
    To use, put a URL pattern such as:
        url(r'^c/(?P<coll_id>\w{1,128})/p/(?P<page_ref>[\w/.-]{1,250})$',
            'annalist.views.statichack.serve_pages`),
    in your `annalist_root/annalist/urls.py` URL configuration file.
    It uses the django.views.static.serve() view to serve the found files.

    request     Django HTTP request object.
    coll_id     id of collection whose page is to be served.
    page_ref    reference (path) of page within the collection's "p" area.
    insecure    accepted for signature compatibility; not used in this body.
    kwargs      additional keyword arguments passed to django.views.static.serve.
    """
    # log.info("serve_pages %s"%(path))
    try:
        # Assemble and normalize the on-disk path of the requested page
        page_path = settings.BASE_SITE_DIR+"/c/"+coll_id+"/p/"+page_ref
        log.info("statichack.serve_pages %s"%(page_path,))
        normalized_path = posixpath.normpath(unquote(page_path))
        if not os.path.exists(normalized_path):
            if page_path.endswith('/') or page_path == '':
                raise Http404("Directory indexes are not allowed here.")
            raise Http404("Page '%s' could not be found" % page_path)
        # Split found file into directory root and file name for static.serve
        document_root, path = os.path.split(normalized_path)
        # log.info("document_root %s, path %s"%(document_root, path))
    except Exception as e:
        # Log (including Http404) and re-raise for Django to handle
        log.info(str(e))
        raise
    return static.serve(request, path, document_root=document_root, **kwargs)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/statichack.py
|
statichack.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Entity list view
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.urls import resolve, reverse
from annalist.views.uri_builder import continuation_params
from annalist.views.entitydeletebase import EntityDeleteConfirmedBaseView
# -------------------------------------------------------------------------------------------
#
# Entity delete confirmation response handling
#
# -------------------------------------------------------------------------------------------
class EntityDataDeleteConfirmedView(EntityDeleteConfirmedBaseView):
    """
    View class to perform completion of confirmed entity data deletion,
    anticipated to be requested from a data list or record view.
    """
    def __init__(self):
        super(EntityDataDeleteConfirmedView, self).__init__()
        return
    # POST
    def post(self, request, coll_id, type_id):
        """
        Process options to complete action to remove an entity data record.

        request     Django HTTP request object (form response).
        coll_id     id of collection containing the entity.
        type_id     id of type of entity to be removed.

        Returns the response from `complete_remove_entity`, or a 400 error
        response if the expected 'entity_delete' value is absent.
        """
        log.debug("EntityDataDeleteConfirmedView.post: %r"%(request.POST))
        if "entity_delete" in request.POST:
            return self.complete_remove_entity(
                # NOTE about QueryDict:
                # request.POST['entity_id'] returns a single value
                # - the last one provided if multiple values are present
                coll_id, type_id, request.POST['entity_id'],
                self.view_uri("AnnalistEntityDefaultListAll", coll_id=coll_id),
                request.POST.dict()
                )
        # Unrecognized form data: report bad request
        return self.error(self.error400values())
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/entitydelete.py
|
entitydelete.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist site views
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.urls import resolve, reverse
from annalist.identifiers import ANNAL, RDFS
from annalist.exceptions import Annalist_Error, EntityNotFound_Error
from annalist import message
from annalist import util
from annalist import layout
import annalist.models.entitytypeinfo as entitytypeinfo
from annalist.models.entitytypeinfo import CONFIG_PERMISSIONS, SITE_PERMISSIONS
from annalist.models.site import Site
from annalist.models.collection import Collection
from annalist.views.displayinfo import DisplayInfo
from annalist.views.generic import AnnalistGenericView
from annalist.views.confirm import ConfirmView
from annalist.views.uri_builder import uri_with_params
class SiteView(AnnalistGenericView):
    """
    View class to handle requests to the annalist site home URI
    """
    def __init__(self):
        super(SiteView, self).__init__()
        self.help_markdown = None
        return

    # GET

    def get(self, request):
        """
        Create a rendering of the current site home page, containing (among other things)
        a list of defined collections.
        """
        log.info("SiteView.get")
        viewinfo = DisplayInfo(self, "view", {}, None)  # No continuation
        viewinfo.get_site_info(self.get_request_host())
        viewinfo.get_coll_info(layout.SITEDATA_ID)
        viewinfo.get_request_type_info(entitytypeinfo.COLL_ID)
        viewinfo.check_authorization("view")
        if viewinfo.http_response:
            return viewinfo.http_response
        self.help_markdown = viewinfo.collection.get(RDFS.CURIE.comment, None)
        resultdata = viewinfo.sitedata
        resultdata.update(viewinfo.context_data())
        # log.info("SiteView.get: site_data %r"%(self.site_data()))
        return (
            self.check_site_data() or
            self.render_html(resultdata, 'annalist_site.html') or
            self.error(self.error406values())
            )

    # POST

    def post(self, request):
        """
        Process options to add or remove a collection in an Annalist site.

        The POST data is expected to contain one of the option buttons
        "view", "edit", "remove" or "new", plus (for the first three) a
        "select" list of collection ids, or (for "new") "new_id" and
        "new_label" values describing a collection to be created.
        """
        log.debug("site.post: %r"%(request.POST.lists()))
        collections   = request.POST.getlist("select", [])
        coll_id       = collections[0] if collections else "_"
        coll_ids      = {'ids': ", ".join(collections)}
        perm_req      = None    # Permission required for the requested action
        perm_scope    = None    # "all": collection or site permission; "site": site only
        none_msg      = None    # Message used if a selection is required but none made
        many_msg      = None    # Message used if a single selection is required but many made
        redirect_uri  = None    # Set for actions that continue in another view
        http_response = None
        # Process POST option
        if "view" in request.POST:
            # Collection data is considered part of configuration, hence CONFIG_PERMISSIONS:
            perm_req   = CONFIG_PERMISSIONS["view"]
            # Use Collection or Site permissions:
            perm_scope = "all"
            none_msg   = message.NO_COLLECTION_VIEW
            many_msg   = message.MANY_COLLECTIONS_VIEW
            target_uri = self.view_uri("AnnalistEntityEditView",
                coll_id=layout.SITEDATA_ID,
                view_id="Collection_view",
                type_id="_coll",
                entity_id=coll_id,
                action="view"
                )
            redirect_uri = uri_with_params(
                target_uri,
                {'continuation_url': self.continuation_here()}
                )
        elif "edit" in request.POST:
            perm_req   = CONFIG_PERMISSIONS["edit"]
            perm_scope = "all"
            none_msg   = message.NO_COLLECTION_EDIT
            many_msg   = message.MANY_COLLECTIONS_EDIT
            target_uri = self.view_uri("AnnalistEntityEditView",
                coll_id=layout.SITEDATA_ID,
                view_id="Collection_view",
                type_id="_coll",
                entity_id=coll_id,
                action="edit"
                )
            redirect_uri = uri_with_params(
                target_uri,
                {'continuation_url': self.continuation_here()}
                )
        elif "remove" in request.POST:
            perm_req   = "DELETE_COLLECTION"
            perm_scope = "all"      # Collection or site permissions
            none_msg   = message.NO_COLLECTIONS_REMOVE
        elif "new" in request.POST:
            perm_req   = "CREATE_COLLECTION"
            perm_scope = "site"     # Site permission required
            new_id     = request.POST["new_id"]
            new_label  = request.POST["new_label"]
        # Common checks
        if none_msg and not collections:
            http_response = self.redirect_info(
                self.view_uri("AnnalistSiteView"),
                info_message=none_msg, info_head=message.NO_ACTION_PERFORMED
                )
        elif many_msg and len(collections) > 1:
            http_response = self.redirect_info(
                self.view_uri("AnnalistSiteView"),
                info_message=many_msg%coll_ids,
                info_head=message.NO_ACTION_PERFORMED
                )
        elif perm_req:
            if perm_scope == "all":
                # Check each selected collection for permissions.
                # 'authorize' returns None when access is granted, so the
                # 'and' below accepts either ADMIN or the specific permission.
                for cid in collections:
                    if http_response is None:
                        site = self.site(host=self.get_request_host())
                        coll = Collection.load(site, cid, altscope="site")
                        http_response = (
                            self.authorize("ADMIN", coll) and   # Either of these...
                            self.authorize(perm_req, coll)
                            )
                        coll = None
            else:
                # Check site only for permissions
                http_response = (
                    self.authorize("ADMIN", None) and
                    self.authorize(perm_req, None)
                    )
        if http_response is not None:
            return http_response
        # Perform selected option
        if redirect_uri:
            log.info("Redirect to %s"%redirect_uri)
            return HttpResponseRedirect(redirect_uri)
        if "remove" in request.POST:
            if layout.SITEDATA_ID in collections:
                # The site data collection must never be removed
                log.warning("Attempt to delete site data collection %(ids)s"%(coll_ids))
                http_response = self.error(self.error403values(scope="DELETE_SITE"))
            else:
                # Get user to confirm action before actually doing it
                http_response = ConfirmView.render_form(request,
                    action_description=     message.REMOVE_COLLECTIONS%coll_ids,
                    action_params=          request.POST,
                    confirmed_action_uri=   self.view_uri('AnnalistSiteActionView'),
                    cancel_action_uri=      self.view_uri('AnnalistSiteView'),
                    title=                  self.site_data()["title"]
                    )
            return http_response
        if "new" in request.POST:
            log.info("New collection %s: %s"%(new_id, new_label))
            error_message = None
            if not new_id:
                error_message = message.MISSING_COLLECTION_ID
            elif not util.valid_id(new_id):
                error_message = message.INVALID_COLLECTION_ID%{'coll_id': new_id}
            if error_message:
                return self.redirect_error(
                    self.view_uri("AnnalistSiteView"),
                    error_message=error_message
                    )
            coll_meta = (
                { RDFS.CURIE.label:     new_label
                , RDFS.CURIE.comment:   ""
                })
            # Add collection
            coll = self.site().add_collection(new_id, coll_meta)
            coll.generate_coll_jsonld_context()
            # Create full permissions in the new collection for the creating user
            user      = self.request.user
            user_id   = user.username
            user_uri  = "mailto:"+user.email
            user_name = "%s %s"%(user.first_name, user.last_name)
            user_description = "User %s: permissions for %s in collection %s"%(user_id, user_name, new_id)
            coll.create_user_permissions(
                user_id, user_uri,
                user_name, user_description,
                user_permissions=["VIEW", "CREATE", "UPDATE", "DELETE", "CONFIG", "ADMIN"]
                )
            return self.redirect_info(
                self.view_uri("AnnalistSiteView"),
                info_message=message.CREATED_COLLECTION_ID%{'coll_id': new_id}
                )
        log.warning("Invalid POST request: %r"%(request.POST.lists()))
        return self.error(self.error400values())
class SiteActionView(AnnalistGenericView):
    """
    View class to perform completion of confirmed action requested from site view
    """
    def __init__(self):
        super(SiteActionView, self).__init__()
        return

    # POST

    def post(self, request):
        """
        Process options to complete action to remove one or more collections.

        Returns a redirect to the site view with an information message on
        success, a redirect with an error message if any removal fails, or
        a 400 error response if the request is not recognized.
        """
        log.debug("siteactionview.post: %r"%(request.POST))
        if "remove" not in request.POST:
            return self.error(self.error400values())
        log.debug("Complete remove %r"%(request.POST.getlist("select")))
        # Either ADMIN or DELETE_COLLECTION permission is sufficient:
        # 'authorize' returns None when access is granted, so the second
        # check is consulted only if the first refuses access.
        auth_required = (
            self.authorize("ADMIN", None) and
            self.authorize("DELETE_COLLECTION", None)
            )
        if auth_required:
            return auth_required
        coll_ids = request.POST.getlist("select")
        for coll_id in coll_ids:
            if coll_id == layout.SITEDATA_ID:
                # The site data collection must never be removed
                err = Annalist_Error("Attempt to delete site data collection (%s)"%coll_id)
                log.warning(str(err))
            else:
                err = self.site().remove_collection(coll_id)
            if err:
                return self.redirect_error(
                    self.view_uri("AnnalistSiteView"),
                    error_message=str(err))
        return self.redirect_info(
            self.view_uri("AnnalistSiteView"),
            info_message=message.COLLECTION_REMOVED%{'ids': ", ".join(coll_ids)}
            )
        # NOTE: a trailing 'return HttpResponseRedirect(...)' that followed
        # the original if/else was unreachable and has been removed.
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/site.py
|
site.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist action confirmation view definition
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os.path
import json
import random
import logging
import uuid
import copy
import logging
log = logging.getLogger(__name__)
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import QueryDict
from django.template import loader
from django.urls import resolve, reverse
from django.conf import settings
from annalist.views.generic import AnnalistGenericView
def querydict_dict(querydict):
    """
    Convert a Django QueryDict value to a regular dictionary, preserving
    multiple items: each key maps to the list of all values supplied for it.
    """
    return { key: values for key, values in querydict.lists() }
def dict_querydict(dict_):
    """
    Convert a value created by `querydict_dict` back into a Django
    QueryDict value, restoring multiple values per key.
    """
    result = QueryDict("", mutable=True)
    for key, values in dict_.items():
        result.setlist(key, values)
    result._mutable = False     # Freeze the QueryDict again
    return result
def querydict_dumps(querydict):
    """
    Convert a Django QueryDict value to a serialized form, preserving
    multiple items.  Serializes as JSON where each key has a list value.
    """
    key_lists = querydict_dict(querydict)
    return json.dumps(key_lists)
def querydict_loads(querydict_s):
    """
    Convert a string serialized by `querydict_dumps` back into a Django
    QueryDict value.
    """
    key_lists = json.loads(querydict_s)
    return dict_querydict(key_lists)
class ConfirmView(AnnalistGenericView):
    """
    View class to handle response to request to confirm action
    """
    def __init__(self):
        super(ConfirmView, self).__init__()
        return

    @staticmethod
    def render_form(request,
            action_description="Are you sure you want to do that?",  # message describing requested action
            action_params=None,         # QueryDict of parameters for the confirmed action
            confirmed_action_uri="/",   # URI to POST to complete action
            cancel_action_uri="/",      # URI to dispatch to cancel action
            title=None):
        """
        Render form that requests a user to confirm an action to be performed and,
        depending on the user's response, redirects to 'confirmed_action_uri' or
        'cancel_action_uri'
        """
        # Previously the default for 'action_params' was a shared mutable
        # dict ({}), which also lacks the '.lists()' method expected by
        # 'querydict_dumps'; use an empty QueryDict instead.
        if action_params is None:
            action_params = QueryDict("")
        form_data = (
            { "action_description": action_description
            , "action_params":      querydict_dumps(action_params)
            , "confirmed_action":   confirmed_action_uri
            , "cancel_action":      cancel_action_uri
            , "title":              title
            , "suppress_user":      True
            })
        template = loader.get_template('annalist_confirm.html')
        context  = form_data
        log.debug("confirmview form data: %r"%(form_data))
        return HttpResponse(template.render(context, request=request))

    # POST

    def post(self, request):
        """
        Handle POST of form data soliciting user confirmation of action.

        Creates a new request object with the original action POST data, and dispatches
        to the appropriate completion view function.  This function should, in turn,
        return a redirect to an appropriate continuation display.

        If the operation is canceled, then this function returns an HTTP redirect to
        the "cancel_action" continuation URI.
        """
        log.debug("confirmview.post: %r"%(request.POST))
        if "confirm" in request.POST:
            # Rebuild the original action request and dispatch it to the
            # view registered for the confirmed-action URI.
            action_request      = copy.copy(request)
            action_request.POST = querydict_loads(request.POST["action_params"])
            view, args, kwargs  = resolve(request.POST["confirmed_action"])
            return view(action_request, *args, **kwargs)
        return HttpResponseRedirect(request.POST["cancel_action"])
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/confirm.py
|
confirm.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist server log view definition
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import os
import logging
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.template import loader
from django.conf import settings
from utils.ContentNegotiationView import ContentNegotiationView
from annalist import layout
from annalist.models.annalistuser import AnnalistUser
from annalist.views.generic import AnnalistGenericView
# Helper function to get last N lines of log file, rather than loading
# entire file into memory (was getting MemoryError failures.)
#
# Adapted from: https://stackoverflow.com/a/13790289/324122
def tail(f, lines=1, _buffer=4098):
    """
    Return (up to) the last 'lines' lines of the already-open file 'f'
    without reading the whole file into memory.

    f           an open file object (text or binary mode).
    lines       number of lines to return from the end of the file.
    _buffer     block size used when stepping back from the end of file.

    Adapted from: https://stackoverflow.com/a/13790289/324122
    """
    found       = []
    blocks_back = 1     # Number of buffer-sized blocks to step back from EOF
    # Read progressively larger tails of the file until more than 'lines'
    # lines are seen ('<=' rather than '<' because the first line read may
    # be truncated).
    while len(found) <= lines:
        try:
            f.seek(-(blocks_back * _buffer), os.SEEK_END)
        except IOError:
            # Either file is too small, or too many lines requested
            # (or end-relative seeks unsupported): read from the start.
            f.seek(0)
            found = f.readlines()
            break
        found = f.readlines()
        blocks_back += 1
    return found[-lines:]
class ServerLogView(AnnalistGenericView):
    """
    View class to handle requests to the Annalist server log display URI.

    Access requires ADMIN permission; the view shows the tail of the
    server log file identified by settings.ANNALIST_LOG_PATH.
    """
    def __init__(self):
        super(ServerLogView, self).__init__()
        return

    # GET

    def get(self, request):
        def resultdata():
            # Assemble template context for the server log display.
            # Only the last 2000 lines of the log are read, to avoid
            # loading the whole file into memory.
            serverlogname = settings.ANNALIST_LOG_PATH
            log.info("ServerLogView: serverlogname %s"%(serverlogname,))
            with open(serverlogname, "r") as serverlogfile:
                # serverlog = list(serverlogfile)   # Generates MemoryError with large logs
                serverlog = tail(serverlogfile, 2000)
            return (
                { 'title':            self.site_data()["title"]
                , 'serverlogname':    serverlogname
                , 'serverlog':        "".join(serverlog)
                , 'info_coll_id':     layout.SITEDATA_ID
                , 'continuation_url': continuation_url
                })
        # NOTE: 'continuation_url' must be assigned before 'resultdata' is
        # called below - the closure reads it at call time.
        continuation_url = self.continuation_next(
            request.GET, self.view_uri("AnnalistHomeView")
            )
        return (
            # self.authenticate(continuation_url) or
            self.authorize("ADMIN", None) or
            self.render_html(resultdata(), 'annalist_serverlog.html') or
            self.error(self.error406values())
            )

    def post(self, request):
        # Dismiss the log display: redirect to the supplied continuation
        # URL, defaulting to the parent page.
        continuation_url = request.POST.get("continuation_url", "../")
        return HttpResponseRedirect(continuation_url)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/serverlog.py
|
serverlog.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import sys
import os
import json
# from rdflib import Graph, URIRef, Literal
from django.http import HttpResponse
from annalist import message
from annalist import layout
from annalist.models.entityresourceaccess import (
find_entity_resource,
# entity_resource_file,
# json_resource_file,
# turtle_resource_file,
# make_turtle_resource_info,
get_resource_file
)
from annalist.views.displayinfo import DisplayInfo
from annalist.views.generic import AnnalistGenericView
class EntityResourceAccess(AnnalistGenericView):
    """
    View class for entity resource access

    This view class returns a data resource, not a browser form, which may be based on
    the entity data itself (from the internally stored JSON), or the content of an
    attached data resource (e.g. image, audio, etc.)

    This view may be used as the target of content negotiation redirects, and no
    further content negotiation is attempted.  Rather, the URI is expected to reference
    the form of the resource to be returned (cf. 'find_entity_resource' function).
    This allows links to specific resource formats to be obtained for use by clients
    that don't have access to set HTTP content negotiation headers.
    """
    def __init__(self):
        super(EntityResourceAccess, self).__init__()
        return

    # GET

    def get(self, request,
            coll_id=None, type_id=None, entity_id=None, resource_ref=None, view_id=None):
        """
        Access specified entity resource.

        coll_id         id of the collection containing the entity
        type_id         type id of the entity
        entity_id       id of the entity whose resource is accessed
        resource_ref    reference identifying the resource (or format) requested
        view_id         not referenced here (present for URL pattern compatibility)

        Returns an HTTP response carrying the resource data, or a 404 error
        response if the entity or resource cannot be located, or a 500 error
        response if assembling the response fails.
        """
        log.info(
            "views.entityresource.get: coll_id %s, type_id %s, entity_id %s, resource_ref %s"%
            (coll_id, type_id, entity_id, resource_ref)
            )
        viewinfo = self.view_setup(
            coll_id, type_id, entity_id, request.GET.dict()
            )
        if viewinfo.http_response:
            return viewinfo.http_response
        # Load values from entity
        typeinfo     = viewinfo.curr_typeinfo
        entity       = self.get_entity(viewinfo.src_entity_id, typeinfo, "view")
        entity_label = (message.ENTITY_MESSAGE_LABEL%
            { 'coll_id':    viewinfo.coll_id
            , 'type_id':    viewinfo.type_id
            , 'entity_id':  viewinfo.src_entity_id
            })
        if entity is None:
            return self.error(
                dict(self.error404values(),
                    message=message.ENTITY_DOES_NOT_EXIST%
                        { 'type_id': viewinfo.type_id
                        , 'id':      viewinfo.src_entity_id
                        , 'label':   entity_label
                        }
                    )
                )
        # Locate and open resource file
        resource_info = find_entity_resource(entity, resource_ref)
        if resource_info is None:
            return self.error(
                dict(self.error404values(),
                    message=message.RESOURCE_NOT_DEFINED%
                        { 'id':  entity_label
                        , 'ref': resource_ref
                        }
                    )
                )
        entity_baseurl = viewinfo.reqhost + self.get_entity_base_url(coll_id, type_id, entity_id)
        resource_file, resource_type = get_resource_file(entity, resource_info, entity_baseurl)
        if resource_file is None:
            msg = (message.RESOURCE_DOES_NOT_EXIST%
                { 'id':  entity_label
                , 'ref': resource_info["resource_path"]
                })
            log.debug("EntityResourceAccess.get: "+msg)
            return self.error(dict(self.error404values(), message=msg))
        # Return resource
        try:
            return_type = resource_type
            # URL parameter ?type=mime/type overrides specified content type
            #
            # @@TODO: this is to allow links to return different content-types
            #         (e.g., JSON as text/plain so it is displayed in the browser):
            #         is there a cleaner way?
            if "type" in viewinfo.request_dict:
                return_type = viewinfo.request_dict["type"]
            links=[
                { "rel": "canonical"
                , "ref": entity_baseurl
                }]
            response = self.resource_response(resource_file, return_type, links=links)
        except Exception as e:
            log.exception(str(e))
            response = self.error(
                dict(self.error500values(),
                    message=str(e)+" - see server log for details"
                    )
                )
        finally:
            # Close the resource file whether or not the response was
            # assembled successfully.
            resource_file.close()
        return response

    def view_setup(self, coll_id, type_id, entity_id, request_dict):
        """
        Assemble display information for entity view request handler.

        Returns a DisplayInfo value; if an error was detected during setup,
        its 'http_response' attribute holds the error response to return.
        """
        action = "view"
        self.default_continuation_url = None
        viewinfo = DisplayInfo(self, action, request_dict, self.default_continuation_url)
        viewinfo.get_site_info(self.get_request_host())
        viewinfo.get_coll_info(coll_id)
        viewinfo.get_request_type_info(type_id)
        viewinfo.get_entity_info(action, entity_id)
        viewinfo.check_authorization(action)
        return viewinfo
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/entityresource.py
|
entityresource.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Entity list data views
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import json
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from annalist import message
from annalist import layout
from annalist.identifiers import RDFS, ANNAL
from annalist.util import make_type_entity_id
from annalist.models.entityfinder import EntityFinder
from annalist.models.entityresourceaccess import (
find_list_resource,
json_resource_file
)
from annalist.views.entitylist import EntityGenericListView
# -------------------------------------------------------------------------------------------
#
# List entities view - form rendering and POST response handling
#
# -------------------------------------------------------------------------------------------
class EntityListDataView(EntityGenericListView):
    """
    View class for generic entity list returned as JSON-LD
    """
    def __init__(self):
        super(EntityListDataView, self).__init__()
        return

    # GET

    def get(self, request, coll_id=None, type_id=None, list_id=None, list_ref=None):
        """
        Return a list of entities as a JSON-LD object.

        coll_id     id of the collection containing the listed entities
        type_id     type id used to select entities (may be None)
        list_id     id of the list definition used (may be None)
        list_ref    reference identifying the list resource (format) requested

        Query parameters 'scope' and 'search' further select the entities
        returned; '?type=mime/type' overrides the returned content type.

        NOTE: The current implementation returns a full copy of each of the
        selected entities.
        """
        scope      = request.GET.get('scope', None)
        search_for = request.GET.get('search', "")
        log.info(
            "views.entitylistdata.get: coll_id %s, type_id %s, list_id %s, list_ref %s, scope %s, search %s"%
            (coll_id, type_id, list_id, list_ref, scope, search_for)
            )
        listinfo = self.list_setup(coll_id, type_id, list_id, request.GET.dict())
        if listinfo.http_response:
            return listinfo.http_response
        # log.debug("@@ listinfo.list_id %s, coll base_url %s"%(listinfo.list_id, base_url))
        # Prepare list data for rendering
        try:
            jsondata = self.assemble_list_data(listinfo, scope, search_for)
        except Exception as e:
            log.exception(str(e))
            return self.error(
                dict(self.error500values(),
                    message=str(e)+" - see server log for details"
                    )
                )
        # Locate renderer information for the requested list reference
        entity_list_info = find_list_resource(type_id, list_id, list_ref)
        if entity_list_info is None:
            return self.error(
                dict(self.error404values(),
                    message=message.LIST_NOT_DEFINED%
                        { 'list_id':  list_id
                        , 'type_id':  type_id
                        , 'list_ref': list_ref
                        }
                    )
                )
        # NOTE(review): 'coll_baseurl' is assigned but not used below.
        coll_baseurl = listinfo.reqhost + self.get_collection_base_url(coll_id)
        list_baseurl = listinfo.reqhost + self.get_list_base_url(coll_id, type_id, list_id)
        if "resource_access" in entity_list_info:
            # Use indicated resource access renderer
            list_file_access = entity_list_info["resource_access"]
        else:
            # Default: render the assembled list data as JSON
            list_file_access = json_resource_file
        list_file = list_file_access(list_baseurl, jsondata, entity_list_info)
        if list_file is None:
            return self.error(
                dict(self.error404values(),
                    message=message.LIST_NOT_ACCESSED%
                        { 'list_id':  list_id
                        , 'type_id':  type_id
                        , 'list_ref': list_ref
                        }
                    )
                )
        # Construct and return list response
        try:
            return_type = entity_list_info["resource_type"]
            # URL parameter ?type=mime/type overrides specified content type
            #
            # @@TODO: this is to allow links to return different content-types:
            #         is there a cleaner way?
            if "type" in listinfo.request_dict:
                return_type = listinfo.request_dict["type"]
            links=[
                { "rel": "canonical"
                , "ref": list_baseurl
                }]
            response = self.resource_response(list_file, return_type, links=links)
        except Exception as e:
            log.exception(str(e))
            response = self.error(
                dict(self.error500values(),
                    message=str(e)+" - see server log for details"
                    )
                )
        finally:
            # Close the list data file whether or not the response was
            # assembled successfully.
            list_file.close()
        return response
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/entitylistdata.py
|
entitylistdata.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Collection resource access (JSON-LD context and maybe more)
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import os
import logging
log = logging.getLogger(__name__)
from django.http import HttpResponse
from annalist import message
from annalist import layout
import annalist.models.entitytypeinfo as entitytypeinfo
from annalist.models.entityresourceaccess import (
collection_fixed_json_resources,
find_entity_resource,
get_resource_file
)
from annalist.views.displayinfo import DisplayInfo
from annalist.views.generic import AnnalistGenericView
class CollectionResourceAccess(AnnalistGenericView):
    """
    View class for collection resource access

    This view class returns a collection data resource.
    """
    def __init__(self):
        super(CollectionResourceAccess, self).__init__()
        return

    # GET

    def get(self, request,
            coll_id=None, resource_ref=None, view_id=None):
        """
        Access specified collection resource.

        coll_id         id of the collection whose resource is accessed
        resource_ref    reference identifying the resource requested
                        (e.g. a JSON-LD context resource)
        view_id         not referenced here (present for URL pattern compatibility)

        Returns an HTTP response carrying the resource data, or a 404 error
        response if the collection or resource cannot be located, or a 500
        error response if assembling the response fails.
        """
        log.debug(
            "CollectionResourceAccess.get: coll_id %s, resource_ref %s"%
            (coll_id, resource_ref)
            )
        viewinfo = self.view_setup(
            coll_id, request.GET.dict()
            )
        if viewinfo.http_response:
            return viewinfo.http_response
        coll = viewinfo.collection
        if coll is None:
            return self.error(
                dict(self.error404values(),
                    message=message.COLLECTION_NOT_EXISTS%{'id': coll_id}
                    )
                )
        # Locate resource: the lookup is applied to 'viewinfo.site', with
        # the table of fixed JSON resources defined for collections.
        resource_info = find_entity_resource(
            viewinfo.site, resource_ref, fixed_resources=collection_fixed_json_resources
            )
        log.debug("CollectionResourceAccess.get: resource_info %r"%(resource_info,))
        if resource_info is None:
            return self.error(
                dict(self.error404values(),
                    message=message.COLL_RESOURCE_NOT_DEFINED%
                        { 'id':  coll_id
                        , 'ref': resource_ref
                        }
                    )
                )
        coll_baseurl = viewinfo.reqhost + self.get_collection_base_url(coll_id)
        # NOTE(review): 'resource_type' returned here is unused; the
        # response below uses resource_info["resource_type"] instead
        # (cf. EntityResourceAccess.get, which uses the returned value) -
        # confirm which is intended.
        resource_file, resource_type = get_resource_file(
            coll, resource_info, coll_baseurl
            )
        if resource_file is None:
            return self.error(
                dict(self.error404values(),
                    message=message.COLL_RESOURCE_NOT_EXIST%
                        { 'id':  coll_id
                        , 'ref': resource_ref
                        }
                    )
                )
        # Return resource
        try:
            response = self.resource_response(resource_file, resource_info["resource_type"])
        except Exception as e:
            log.exception(str(e))
            response = self.error(
                dict(self.error500values(),
                    message=str(e)+" - see server log for details"
                    )
                )
        finally:
            # Close the resource file whether or not the response was
            # assembled successfully.
            resource_file.close()
        return response

    def view_setup(self, coll_id, request_dict):
        """
        Assemble display information for view request handler.

        Returns a DisplayInfo value; if an error was detected during setup,
        its 'http_response' attribute holds the error response to return.
        """
        action = "view"
        self.default_continuation_url = None
        viewinfo = DisplayInfo(self, action, request_dict, self.default_continuation_url)
        viewinfo.get_site_info(self.get_request_host())
        viewinfo.get_coll_info(coll_id)
        viewinfo.get_request_type_info(entitytypeinfo.COLL_ID)
        viewinfo.check_authorization(action)
        return viewinfo
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/collectionresource.py
|
collectionresource.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import json
import re
import traceback
from packaging.version import Version
# from distutils.version import LooseVersion
from django.conf import settings
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.urls import resolve, reverse
from utils.py3porting import urljoin, urlsplit
import annalist
from annalist import message
from annalist import layout
from annalist.identifiers import RDF, RDFS, ANNAL
from annalist.util import valid_id, extract_entity_id
from annalist.models.entitytypeinfo import (
EntityTypeInfo,
SITE_PERMISSIONS, CONFIG_PERMISSIONS
)
from annalist.models.collection import Collection
from annalist.models.recordtype import RecordType
from annalist.models.recordlist import RecordList
from annalist.models.recordview import RecordView
from annalist.models.annalistuser import default_user_id, unknown_user_id
from annalist.views.confirm import ConfirmView, dict_querydict
from annalist.views.uri_builder import (
uri_param_dict,
scope_params,
uri_params, uri_with_params,
continuation_url_chain, continuation_chain_url,
url_update_type_entity_id
)
from annalist.views.fields.render_entityid import EntityIdValueMapper
# -------------------------------------------------------------------------------------------
#
# Table of authorization symbols added to the display context according to
# permissions help by the requesting user
#
# -------------------------------------------------------------------------------------------
# Each entry maps a display-context flag to a list of permission tokens;
# presumably the flag is enabled when the user holds any one of the listed
# permissions (NOTE(review): confirm against the code that consumes this map).
context_authorization_map = (
    { "auth_create":        ["CREATE"]
    , "auth_delete":        ["DELETE"]
    , "auth_update":        ["UPDATE"]
    , "auth_config":        ["CONFIG"]
    , "auth_create_coll":   ["CREATE_COLLECTION", "ADMIN"]
    , "auth_delete_coll":   ["DELETE_COLLECTION", "ADMIN"]
    })
# -------------------------------------------------------------------------------------------
#
# Helper functions
#
# -------------------------------------------------------------------------------------------
def make_data_ref(request_url, data_ref, resource_type=None):
    """
    Return a URI reference that can be used as a reference to a data
    resource, based on the supplied request URL, data resource reference
    and optional type.

    Scope-related query parameters from the original request_url are
    preserved, and others are discarded.
    """
    query_params = scope_params(uri_param_dict(request_url))
    if resource_type:
        query_params['type'] = resource_type
    return uri_with_params(data_ref, query_params)
def apply_substitutions(context, text_in):
    """
    Apply substitutions from the supplied `context` to the text supplied as `text_in`,
    returning the resulting string.  This has been introduced to make it easier to
    create meaningful links in help pages, with values HOST, SITE, COLL and BASE
    added to the context to make references to the current host, site, collection
    and entity base URI respectively.

    Substitutions are made for the following patterns found in `text_in`

    `$name` followed by a non-alphanumeric, non-':' character
    `$name:`
    `$[curie]`
    `$$`

    In the first two, `name` consists of a sequence of alphabetic, numeric or '_'
    characters, and the pattern is replaced by the corresponding value from the context.

    In the third pattern, `curie` may contain several additional characters that may occur
    in a compact URI (CURIE); the pattern is replaced by the corresponding context value.

    The final pattern is an escape sequence for inserting a single '$' into the output
    which might otherwise be treated as a context value substitution.

    If no corresponding context value is found for a substitution pattern, the pattern is
    copied as-is to the output.

    Any other occurrence of '$' (i.e. not part of any pattern above) is untouched.

    >>> context = { 'aa': '-aa-', 'bb': '-bb-', 'c:c': '-cc-'}
    >>> apply_substitutions(context, "foo bar") == 'foo bar'
    True
    >>> apply_substitutions(context, "foo $aa bar") == 'foo -aa- bar'
    True
    >>> apply_substitutions(context, "foo $bb:bar") == 'foo -bb-bar'
    True
    >>> apply_substitutions(context, "foo $[c:c] bar") == 'foo -cc- bar'
    True
    >>> apply_substitutions(context, "foo $$ bar") == 'foo $ bar'
    True
    >>> apply_substitutions(context, "foo $dd bar") == 'foo $dd bar'
    True
    >>> apply_substitutions(context, "foo $ee bar") == 'foo $ee bar'
    True
    >>> apply_substitutions(context, "foo $[f:f] bar") == 'foo $[f:f] bar'
    True
    >>> apply_substitutions(context, "foo $aa $bb: $[c:c] $[f:f] bar") == 'foo -aa- -bb- -cc- $[f:f] bar'
    True
    """
    def sub_fn(matchobj):
        # Group 1 matches '$name'/'$name:', group 2 matches '$[curie]',
        # group 3 matches the '$$' escape.
        matched = matchobj.group(1) or matchobj.group(2)
        if matchobj.group(3):
            return "$"
        elif matched in context:
            return context[matched]
        return matchobj.group(0)    # No substitution found: copy pattern as-is
    namechars  = "_A-Za-z0-9"
    curiechars = "-@.~+*=:;,/?#!"+namechars
    # Use a raw string for the pattern: '\$' etc. are invalid string escape
    # sequences, and emit SyntaxWarning on recent Python versions.
    #                        1----)       2----)     3--)
    sub_re   = re.compile(r"\$([%s]+):?|\$\[([%s]+)\]|\$(\$)"%(namechars, curiechars))
    text_out = sub_re.sub(sub_fn, text_in)
    return text_out
# -------------------------------------------------------------------------------------------
#
# Display information class
#
# -------------------------------------------------------------------------------------------
class DisplayInfo(object):
"""
This class collects and organizes common information needed to process various
kinds of view requests.
A number of methods are provided that collect different kinds of information,
allowing the calling method flexibility over what information is actually
gathered. All methods follow a common pattern loosely modeled on an error
monad, which uses a Django HttpResponse object to record the first problem
found in the information gathering chain. Once an error has been detected,
subsequent methods do not update the display information, but simply return
the error response object.
The information gathering methods do have some dependencies and must be
invoked in a sequence that ensures the dependencies are satisfied.
view is the view object that is being rendered. This is an instance
of a class derived from `AnnalistGenericView`, which in turn is
derived from `django.views.generic.View`.
    action          is the user action for which the form has been invoked
(e.g. "new", "copy", "edit", etc.)
request_dict is a dictionary of request parameters
For GET requests, this derives from the URI query parameters;
for POST requests it is derived from the submitted form data.
default_continue is a default continuation URI to be used when returning from the
                        current view without an explicitly specified continuation in
the request.
"""
    def __init__(self, view, action, request_dict, default_continue):
        """
        Initialize display information for the current request.

        view                view object that is rendering the current request.
        action              user action for which the form was invoked
                            (e.g. "new", "copy", "edit", etc.)
        request_dict        dictionary of request parameters.
        default_continue    default continuation URI used when returning from
                            the current view if none is supplied in the request.
        """
        self.view = view
        self.action = action
        self.is_saved = False
        self.request_dict = request_dict
        self.continuation_url = request_dict.get('continuation_url', None)
        self.default_continue = default_continue
        # Collection/Type/Entity ids - to be supplied based on form data in POST
        self.orig_coll_id = None
        self.orig_type_id = None
        self.orig_entity_id = None
        self.orig_typeinfo = None
        self.curr_coll_id = None
        self.curr_type_id = None
        self.curr_entity_id = None
        self.curr_typeinfo = None
        self.src_entity_id = None
        # Type-specific messages
        self.type_messages = None
        # Default no permissions:
        self.authorizations = dict([(k, False) for k in context_authorization_map])
        self.reqhost = None
        self.site = None
        self.sitedata = None
        self.coll_id = None
        self.collection = None
        self.orig_coll = None # Original collection for copy
        self.perm_coll = None # Collection used for permissions checking
        self.type_id = None # Type Id from request URI, not dependent on form data
        self.list_id = None
        self.recordlist = None
        self.view_id = None
        self.recordview = None
        self.entitydata = None
        # Response data
        self.http_response = None  # First error response detected, or None
        self.info_messages = []
        self.error_messages = []
        return
    def set_orig_coll_id(self, orig_coll_id=None):
        """
        For GET and POST operations, set up details of the collection from which
        an existing identified entity is accessible.  This is used later to check
        collection access permissions.

        Returns the current error response (None if no error so far).
        """
        self.orig_coll_id = EntityIdValueMapper.decode(orig_coll_id)
        # If inherited from another collection, update origin collection object
        if self.orig_coll_id and (self.orig_coll_id != self.coll_id):
            c = Collection.load(self.site, self.orig_coll_id, altscope="all")
            if c:
                # Only replace the origin collection if it actually loads
                self.orig_coll = c
        return self.http_response
    def set_coll_type_entity_id(self,
        orig_coll_id=None,
        orig_type_id=None, orig_entity_id=None,
        curr_type_id=None, curr_entity_id=None
        ):
        """
        For a form POST operation, sets updated collection, type and entity
        identifiers from the form data.

        The original collection id may be different by virtue of inheritance
        from another collection (via 'orig_coll_id' parameter).

        The current type identifier may be different by virtue of the type being
        renamed in the form data (via 'curr_type_id' parameter).

        Returns the current error response (None if no error so far).
        """
        # log.debug(
        #     "@@ DisplaytInfo.set_coll_type_entity_id: %s/%s/%s -> %s/%s"%
        #     ( orig_coll_id, orig_type_id, orig_entity_id,
        #       curr_type_id, curr_entity_id
        #     )
        #   )
        self.set_orig_coll_id(orig_coll_id)
        if self.http_response:
            return self.http_response
        self.orig_type_id = EntityIdValueMapper.decode(orig_type_id)
        self.orig_entity_id = EntityIdValueMapper.decode(orig_entity_id)
        self.curr_type_id = EntityIdValueMapper.decode(curr_type_id)
        self.curr_entity_id = EntityIdValueMapper.decode(curr_entity_id)
        # If the entity is inherited, type info comes from the origin collection
        if self.orig_coll_id and (self.orig_coll_id != self.coll_id):
            self.orig_typeinfo = EntityTypeInfo(self.orig_coll, orig_type_id)
        # If the type has been renamed, current type info differs from URI type
        if self.curr_type_id and (self.curr_type_id != self.type_id):
            self.curr_typeinfo = EntityTypeInfo(self.collection, curr_type_id)
        return self.http_response
def set_messages(self, messages):
"""
Save type-specific messages for later reporting
"""
self.type_messages = messages
return self.http_response
def get_site_info(self, reqhost):
"""
Get site information: site entity object and a copy of the site description data.
Also saves a copy of the host name to which the current request was directed.
"""
if not self.http_response:
self.reqhost = reqhost
self.site = self.view.site(host=reqhost)
self.sitedata = self.view.site_data()
return self.http_response
    def get_coll_info(self, coll_id):
        """
        Check collection identifier, and get reference to collection object.

        Sets 'collection', 'orig_coll' and 'perm_coll' to the loaded collection,
        and records an error response if the collection does not exist, or if it
        was created by a later version of Annalist than the one running.

        Returns the current error response (None if no error so far).
        """
        assert (self.site is not None)
        if not self.http_response:
            if not Collection.exists(self.site, coll_id):
                self.http_response = self.view.error(
                    dict(self.view.error404values(),
                        message=message.COLLECTION_NOT_EXISTS%{'id': coll_id}
                        )
                    )
            else:
                self.coll_id = coll_id
                #@@TODO: try with altscope="site"?
                self.collection = Collection.load(self.site, coll_id, altscope="all")
                self.orig_coll = self.collection
                self.perm_coll = self.collection
                # Refuse to process data created by a newer software version
                ver = self.collection.get(ANNAL.CURIE.software_version, None) or "0.0.0"
                if Version(ver) > Version(annalist.__version__):
                    self.http_response = self.view.error(
                        dict(self.view.error500values(),
                            message=message.COLLECTION_NEWER_VERSION%{'id': coll_id, 'ver': ver}
                            )
                        )
                # Carry over any errors recorded while loading the collection
                self.add_error_messages(self.collection.get_errors())
        return self.http_response
    def flush_collection_caches(self):
        """
        Called to flush collection caches so that changes made independently of
        the caches can be used.

        NOTE: this is currently called by the top-level collection customize view.
        This is a bit of a hack to ensure that it is always possible for the user
        to force caches to be flushed, e.g. when type information is updated in a
        different tab or by another user.
        """
        assert (self.collection is not None)
        self.collection.flush_collection_caches()
        return
    def update_coll_version(self):
        """
        Called when an entity has been updated to also update the data version
        associated with the collection if it was previously created by an older
        version of Annalist.
        """
        assert (self.collection is not None)
        if not self.http_response:
            # Skip the version update if an error has already been detected
            self.collection.update_software_compatibility_version()
        return
def saved(self, is_saved=None):
"""
Make note that current entity has been saved, and/or return saved status
"""
if is_saved is not None:
self.is_saved = is_saved
return self.is_saved
    def get_type_info(self, type_id):
        """
        Check type identifier, and get a reference to the corresponding type
        information object.

        This method may be called to override the type id from the original request
        URI, and the DisplayInfo 'type_id' value is not updated so that the value
        from the original request URI is not lost.

        See also method 'get_request_type_info'.

        Returns the current error response (None if no error so far).
        """
        # print "@@ get_type_info: type_id %s"%(type_id,)
        if not self.http_response:
            assert ((self.site and self.collection) is not None)
            if type_id:
                self.curr_typeinfo = EntityTypeInfo(self.collection, type_id)
                # Original type info is the same until overridden by form data
                self.orig_typeinfo = self.curr_typeinfo
                if not self.curr_typeinfo.recordtype:
                    # log.warning("DisplayInfo.get_type_data: RecordType %s not found"%type_id)
                    self.http_response = self.view.error(
                        dict(self.view.error404values(),
                            message=message.RECORD_TYPE_NOT_EXISTS%(
                                {'id': type_id, 'coll_id': self.coll_id})
                            )
                        )
        return self.http_response
def get_request_type_info(self, type_id):
"""
Save and check type identifier from request URI, and get a reference to
the corresponding type information object.
"""
self.type_id = type_id
return self.get_type_info(type_id)
    def get_list_info(self, list_id):
        """
        Retrieve list definition to use for display.

        If the requested list does not exist, falls back to a default list for
        the current type or collection, recording warning messages for display.

        Returns the current error response (None if no error so far).
        """
        if not self.http_response:
            assert ((self.site and self.collection) is not None)
            assert list_id
            # log.debug(
            #     "DisplayInfo.get_list_info: collection.get_alt_entities %r"%
            #     [ c.get_id() for c in self.collection.get_alt_entities(altscope="all") ]
            #     )
            if not self.check_list_id(list_id):
                log.warning("DisplayInfo.get_list_info: RecordList %s not found"%list_id)
                msg1 = message.RECORD_LIST_NOT_EXISTS%{'id': list_id, 'coll_id': self.coll_id}
                self.add_error_message(msg1)
                # Fall back to a default list for the current type/collection
                list_id = self.get_list_id(self.type_id, None)
                msg2 = message.DISPLAY_ALTERNATIVE_LIST%{'id': list_id, 'coll_id': self.coll_id}
                self.add_error_message(msg2)
            self.list_id = list_id
            self.recordlist = RecordList.load(self.collection, list_id, altscope="all")
            if "@error" in self.recordlist:
                self.http_response = self.view.error(
                    dict(self.view.error500values(),
                        message=message.RECORD_LIST_LOAD_ERROR%(
                            { 'id': list_id
                            , 'file': self.recordlist["@error"]
                            , 'message': self.recordlist["@message"]
                            })
                        )
                    )
            elif self.type_id is None and self.curr_typeinfo is None:
                # No type known yet: use the list's default entity type
                self.get_type_info(
                    extract_entity_id(self.recordlist[ANNAL.CURIE.default_type])
                    )
            # log.debug("DisplayInfo.get_list_info: %r"%(self.recordlist.get_values()))
        return self.http_response
def get_view_info(self, view_id):
"""
Retrieve view definition to use for display
"""
if not self.http_response:
assert ((self.site and self.collection) is not None)
if not self.check_view_id(view_id):
log.warning("DisplayInfo.get_view_info: RecordView %s not found"%view_id)
log.warning("Collection: %r"%(self.collection))
log.warning("Collection._altparent: %r"%(self.collection._altparent))
# log.warning("\n".join(traceback.format_stack()))
msg1 = message.RECORD_VIEW_NOT_EXISTS%{'id': view_id, 'coll_id': self.coll_id}
self.add_error_message(msg1)
view_id = self.get_view_id(self.type_id, None)
msg2 = message.DISPLAY_ALTERNATIVE_VIEW%{'id': view_id, 'coll_id': self.coll_id}
self.add_error_message(msg2)
self.view_id = view_id
self.recordview = RecordView.load(self.collection, view_id, altscope="all")
if "@error" in self.recordview:
self.http_response = self.view.error(
dict(self.view.error500values(),
message=message.RECORD_VIEW_LOAD_ERROR%(
{ 'id': list_id
, 'file': self.recordview["@error"]
, 'message': self.recordview["@message"]
})
)
)
# log.debug("DisplayInfo.get_view_info: %r"%(self.recordview.get_values()))
return self.http_response
    def get_entity_info(self, action, entity_id):
        """
        Set up entity id and info to use for display.

        Also handles some special case permissions settings if the entity is a Collection.

        action      form action: for "new" and "copy" a fresh entity id is
                    allocated; otherwise the supplied id is used directly.
        entity_id   entity id from the current request.

        Returns the current error response (None if no error so far).
        """
        if not self.http_response:
            assert self.curr_typeinfo is not None
            self.src_entity_id = entity_id
            self.curr_entity_id = entity_id
            if action in ["new", "copy"]:
                # Allocate a new (unused) entity id based on the supplied value
                self.use_entity_id = self.curr_typeinfo.entityclass.allocate_new_id(
                    self.curr_typeinfo.entityparent, base_id=entity_id
                    )
            else:
                self.use_entity_id = entity_id
            # Special case permissions when accessing collection metadata:
            # use the collection itself rather than the site data collection to which it belongs.
            if self.type_id == "_coll":
                # log.info("DisplayInfo.get_entity_info: access collection data for %s"%entity_id)
                c = Collection.load(self.site, entity_id, altscope="all")
                if c:
                    self.perm_coll = c
        return self.http_response
# Support methods for response generation
    def check_authorization(self, action):
        """
        Check authorization.  Return None if all is OK, or HttpResonse object.

        Also, save copy of key authorizations for later rendering.

        action      requested action to authorize; defaults to "view" if None
                    or empty.

        Returns the current error response (None if the action is authorized).
        """
        if not self.http_response:
            # Save key authorizations for later rendering
            for k in context_authorization_map:
                for p in context_authorization_map[k]:
                    # Authorized if any permission listed for the key is granted
                    self.authorizations[k] = (
                        self.authorizations[k] or
                        self.view.authorize(p, self.perm_coll) is None
                        )
            # Check requested action
            action = action or "view"
            if self.curr_typeinfo:
                # log.info(
                #     "@@ check_authorization (curr) action %s, type_id %s, entity_id %s"%
                #     (action, self.curr_typeinfo.type_id, self.curr_entity_id)
                #     )
                permissions_map = (
                    self.curr_typeinfo.get_entity_permissions_map(self.curr_entity_id)
                    )
            else:
                # Use site permissions map (some site operations don't have an entity type?)
                permissions_map = SITE_PERMISSIONS
            # Previously, default permission map was applied in view.form_action_auth if no
            # type-based map was provided.
            self.http_response = (
                self.http_response or
                self.view.form_action_auth(
                    action, self.perm_coll, permissions_map,
                    continuation_url=self.get_continuation_here()
                    )
                )
        if ( (not self.http_response) and
             (self.orig_coll_id) and
             (self.orig_coll_id != self.perm_coll.get_id()) ):
            # Copying content from different collection: check access
            if self.orig_typeinfo:
                # log.info(
                #     "@@ check_authorization (orig) action %s, type_id %s, entity_id %s"%
                #     (action, self.orig_typeinfo.type_id, self.orig_entity_id)
                #     )
                orig_permissions_map = (
                    self.orig_typeinfo.get_entity_permissions_map(self.orig_entity_id)
                    )
            else:
                orig_permissions_map = SITE_PERMISSIONS
            # Require "view" access to the collection the data is copied from
            self.http_response = self.view.form_action_auth("view",
                self.orig_coll, orig_permissions_map,
                continuation_url=self.get_continuation_here()
                )
        return self.http_response
#@@TODO: not sure if this will be useful...
# def reset_info_messages(self):
# """
# Reset informational messages (for form redisplay)
# cf. entityedit.form_re_render
# """
# self.info_messages = []
# return
#@@
def add_info_message(self, message):
"""
Save message to be displayed on successful completion
"""
self.info_messages.append(message)
return self.http_response
def add_error_message(self, message):
"""
Save error message to be displayed on completion of the current operation
"""
self.error_messages.append(message)
return self.http_response
def add_error_messages(self, messages):
"""
Save list of error message to be displayed on completion of the current operation
"""
self.error_messages.extend(messages)
return self.http_response
def redisplay_path_params(self):
"""
Gathers URL details based on the current request URL that can be used
to construct a URL to redisplay the current page.
Returns a pair of values:
redisplay_path, redisplay_params
Where 'redisplay_path' is the URL path for the current request,
and 'redisplay_params' is a selection of request URL parameters that
are used to select data for the current display (see 'scope_params').
"""
redisplay_path = self.view.get_request_path()
redisplay_params = scope_params(uri_param_dict(redisplay_path))
redisplay_params.update(self.get_continuation_url_dict())
return (redisplay_path, redisplay_params)
def redirect_response(self, redirect_path, redirect_params={}, action=None):
"""
Return an HTTP redirect response, with information or warning messages as
requested included as parameters.
redirect_path the URI base path to redirect to.
redirect_params an optional dictionary of parameters to be applied to the
redirection URI.
action an action that must be authorized if the redirection is to occur,
otherwise an error is reported and the current page redisplayed.
If None or not supplied, no authorization check is performed.
Returns an HTTP response value.
"""
# @TODO: refactor existing redirect response code (here and in list/edit modules)
# to use this method. (Look for other instances of HttpResponseRedirect)
# print "@@ redirect_response: http_response %r"%(self.http_response,)
if not self.http_response:
redirect_msg_params = dict(redirect_params)
if self.info_messages:
redirect_msg_params.update(self.view.info_params("\n\n".join(self.info_messages)))
if self.error_messages:
redirect_msg_params.update(self.view.error_params("\n\n".join(self.error_messages)))
redirect_uri = (
uri_with_params(
redirect_path,
redirect_msg_params
)
)
self.http_response = (
(action and self.check_authorization(action))
or
HttpResponseRedirect(redirect_uri)
)
return self.http_response
def display_error_response(self, err_msg):
"""
Return an HTTP response that redisplays the current view with an error
message displayed.
err_msg is the error message to be displayed.
"""
redirect_path, redirect_params = self.redisplay_path_params()
self.add_error_message(err_msg)
return self.redirect_response(redirect_path, redirect_params=redirect_params)
    def report_error(self, message):
        """
        Log the supplied message as an error, and set up (if none is already
        recorded) a "400 bad request" error response reporting it.

        NOTE: the 'message' parameter shadows the imported 'message' module
        within this method.

        Returns the error response.
        """
        log.error(message)
        if not self.http_response:
            self.http_response = self.view.error(
                dict(self.view.error400values(),
                    message=message
                    )
                )
        return self.http_response
def confirm_delete_entity_response(self,
entity_type_id, entity_id,
complete_action_uri,
form_action_field="entity_delete",
form_value_field="entity_id",
response_messages = {}
):
"""
This method generates a response when the user has requested deletion
of an entity from the current collection. It includes logic to request
confirmation of the requested action before proceeding to actually remove
the entity.
The request for confirmation is handled via class "ConfirmView" (confirm.py),
and actual deletion and continuation is performed via the view specified by
"complete_action_view", which is typically realized by a subclass of
"EntityDeleteConfirmedBaseView" (entitydeletebase.py)
entity_type_id is the type id of the entity to be deleted.
entity_id is the entity id of the entity to be deleted.
complete_action_uri identifies a view to be invoked by an HTTP POST operation
to complete the entity deletion.
form_action_field names the form field that is used to trigger entity deletion
in the completion view. Defaults to "entity_delete"
form_value_field names the form field that is used to provide the identifier of
the entity or entities to be deleted.
response_messages is a dictionary of messages to be used to indicate
various outcomes:
"no_entity" is the entity for deletion is not specified.
"cannot_delete" if entity does not exist or cannot be deleted.
"confirm_completion" to report co,mpletion of entity deletion.
If no message dictionary is provided, or if no value is provided
for a particular outcome, a default message value will be used.
Messages may use value interpolation for %(type_id)s and %(id)s.
"""
def _get_message(msgid):
return response_messages.get(msgid, default_messages.get(msgid)%message_params)
default_messages = (
{ "no_entity": message.NO_ENTITY_FOR_DELETE
, "cannot_delete": message.CANNOT_DELETE_ENTITY
, "type_has_values": message.TYPE_VALUES_FOR_DELETE
, "confirm_completion": message.REMOVE_ENTITY_DATA
})
entity_coll_id = self.collection.get_id()
message_params = (
{ "id": entity_id
, "type_id": entity_type_id
, "coll_id": entity_coll_id
})
if not entity_id:
self.display_error_response(_get_message("no_entity"))
elif not self.entity_exists(entity_id, entity_type_id):
self.display_error_response(_get_message("cannot_delete"))
elif self.entity_is_type_with_values(entity_id, entity_type_id):
self.display_error_response(_get_message("type_has_values"))
if not self.http_response:
# Get user to confirm action before actually doing it
# log.info(
# "entity_coll_id %s, type_id %s, entity_id %s, confirmed_action_uri %s"%
# (entity_coll_id, entity_type_id, entity_id, confirmed_action_uri)
# )
delete_params = (
{ form_action_field: ["Delete"]
, form_value_field: [entity_id]
, "completion_url": [self.get_continuation_here()]
, "search_for": [self.request_dict.get('search_for',"")]
})
curi = self.get_continuation_url()
if curi:
delete_params["continuation_url"] = [curi]
return (
self.check_authorization("delete")
or
ConfirmView.render_form(self.view.request,
action_description= _get_message("confirm_completion"),
action_params= dict_querydict(delete_params),
confirmed_action_uri= complete_action_uri,
cancel_action_uri= self.get_continuation_here(),
title= self.view.site_data()["title"]
)
)
return self.http_response
# Additonal support functions for list views
    def get_type_list_id(self, type_id):
        """
        Return default list_id for listing defined type, or None.

        The default list id is taken from the 'annal:type_list' property of
        the current type's record, if defined.
        """
        list_id = None
        if type_id:
            if self.curr_typeinfo.recordtype:
                list_id = extract_entity_id(
                    self.curr_typeinfo.recordtype.get(ANNAL.CURIE.type_list, None)
                    )
            else:
                log.warning("DisplayInfo.get_type_list_id no type data for %s"%(type_id))
        return list_id
def check_list_id(self, list_id):
"""
Check for existence of list definition:
if it exists, return the supplied list_id, else None.
"""
if list_id and RecordList.exists(self.collection, list_id, altscope="all"):
return list_id
return None
    def get_list_id(self, type_id, list_id):
        """
        Return supplied list_id if defined, otherwise find default list_id for
        entity type or collection (unless an error has been detected).

        Fallback order: type default list, collection default list, then
        built-in "Default_list" (typed) or "Default_list_all" (untyped).
        """
        if not self.http_response:
            list_id = (
                list_id or
                self.check_list_id(self.get_type_list_id(type_id)) or
                self.check_list_id(self.collection.get_default_list()) or
                ("Default_list" if type_id else "Default_list_all")
                )
            if not list_id:
                log.warning("get_list_id failure: %s, type_id %s"%(list_id, type_id))
        return list_id
def get_list_view_id(self):
return extract_entity_id(
self.recordlist.get(ANNAL.CURIE.default_view, None) or "Default_view"
)
def get_list_type_id(self):
return extract_entity_id(
self.recordlist.get(ANNAL.CURIE.default_type, None) or "Default_type"
)
# Additional support functions for field definition view
    def get_uri_type_id(self, type_uri):
        """
        Return type id for given type URI, or "Default_type".

        This accesses the per-collection cache of type URI:Id mappings
        """
        assert (self.collection is not None)
        type_id = "Default_type"
        if type_uri:
            type_ref = self.collection.get_uri_type(type_uri)
            if type_ref:
                type_id = type_ref.get_id()
        return type_id
# Additional support functions for collection view
def get_default_view_type_entity(self):
"""
Return default view_id, type_id and entity_id to display for collection,
or None for any values not defined.
"""
view_id, type_id, entity_id = self.collection.get_default_view()
return (self.check_view_id(view_id), type_id, entity_id)
# Additonal support functions for entity views
def check_view_id(self, view_id):
"""
Check for existence of view definition:
if it exists, return the supplied view_id, else None.
"""
if view_id and RecordView.exists(self.collection, view_id, altscope="all"):
return view_id
return None
    def get_view_id(self, type_id, view_id):
        """
        Get view id or suitable default using type if defined.

        Returns the supplied view_id if defined, otherwise the default view
        id for the current entity type (unless an error has been detected).
        """
        if not self.http_response:
            view_id = (
                view_id or
                self.check_view_id(self.curr_typeinfo.get_default_view_id())
                )
            if not view_id:
                log.warning("get_view_id: %s, type_id %s"%(view_id, self.type_id))
        return view_id
    def entity_exists(self, entity_id, entity_type):
        """
        Test if a supplied entity is defined in the current collection,
        returning True or False.

        entity_id       entity id that is to be tested.
        entity_type     type of entity to test.
        """
        typeinfo = self.curr_typeinfo
        # Re-derive type info if the cached value is for a different type
        if not typeinfo or typeinfo.get_type_id() != entity_type:
            typeinfo = EntityTypeInfo(self.collection, entity_type)
        return typeinfo.entityclass.exists(typeinfo.entityparent, entity_id)
def entity_is_type_with_values(self, entity_id, entity_type):
"""
Test if indicated entity is a type with values defined.
"""
if entity_type == layout.TYPE_TYPEID:
typeinfo = EntityTypeInfo(
self.collection, entity_id
)
return next(typeinfo.enum_entity_ids(), None) is not None
return False
    def get_new_view_uri(self, coll_id, type_id):
        """
        Get URI for entity new view from list display.

        Uses the default view defined for the current list display.
        """
        return self.view.view_uri(
            "AnnalistEntityNewView",
            coll_id=coll_id,
            view_id=self.get_list_view_id(),
            type_id=type_id,
            action="new"
            )
    def get_edit_view_uri(self, coll_id, type_id, entity_id, action):
        """
        Get URI for entity edit or copy view from list display.
        """
        # Use default view for entity type
        # (Use list view id only for new entities)
        return self.view.view_uri(
                "AnnalistEntityDefaultDataView",
                coll_id=coll_id,
                type_id=type_id,
                entity_id=entity_id,
                action=action
                )
        # return self.view.view_uri(
        #         "AnnalistEntityEditView",
        #         coll_id=coll_id,
        #         view_id=self.get_list_view_id(),
        #         type_id=type_id,
        #         entity_id=entity_id,
        #         action=action
        #         )
    def get_src_entity_resource_url(self, resource_ref):
        """
        Return URL for accessing source entity resource data
        (not including any view information contained in the current request URL).

        Contains special logic for accessing collection and site metadata

        resource_ref    relative reference to the resource, resolved against
                        the entity's base URL.
        """
        assert self.coll_id is not None
        assert self.curr_typeinfo is not None
        type_id = self.curr_typeinfo.get_type_id()
        if type_id == layout.COLL_TYPEID:
            # Collection metadata: resolve against the collection's own base
            # (site data collection if no source entity id is known)
            entity_id = self.src_entity_id or layout.SITEDATA_ID
            base_url = self.view.get_collection_base_url(entity_id)
        else:
            entity_id = self.src_entity_id or "__unknown_src_entity__"
            base_url = self.view.get_entity_base_url(
                self.coll_id, type_id,
                entity_id
                )
        return urljoin(base_url, resource_ref)
# Additonal support functions
    def get_continuation_param(self):
        """
        Return a URI parameter string (e.g. "?continuation_url=...") that
        nominates the current view as a continuation destination.

        (NOTE: despite the similar name, this differs from
        'get_continuation_url', which returns the continuation URL supplied
        with the current request.)
        """
        cont_here = self.view.continuation_here(self.request_dict, self.default_continue)
        return uri_params({"continuation_url": cont_here})
def get_continuation_url(self):
"""
Return continuation URL specified for the current request, or None.
"""
return self.continuation_url
def get_continuation_url_dict(self):
"""
Return dictionary with continuation URL specified for the current request.
"""
c = self.get_continuation_url()
return {'continuation_url': c} if c else {}
    def get_continuation_next(self):
        """
        Return continuation URL to be used when returning from the current view.

        Uses the default continuation if no value supplied in request dictionary.
        """
        log.debug(
            "get_continuation_next '%s', default '%s'"%
            (self.continuation_url, self.default_continue)
            )
        return self.continuation_url or self.default_continue
    def get_continuation_here(self, base_here=None):
        """
        Return continuation URL back to the current view.

        base_here   optional base URL used instead of the current request path.
        """
        # @@TODO: consider merging logic from generic.py, and eliminating method there
        continuation_here = self.view.continuation_here(
            request_dict=self.request_dict,
            default_cont=self.get_continuation_url(),
            base_here=base_here
            )
        # log.info("DisplayInfo.get_continuation_here: %s"%(continuation_here))
        return continuation_here
def update_continuation_url(self,
old_type_id=None, new_type_id=None,
old_entity_id=None, new_entity_id=None
):
"""
Update continuation URI to reflect renamed type or entity.
"""
curi = self.continuation_url
if curi:
hops = continuation_url_chain(curi)
for i in range(len(hops)):
uribase, params = hops[i]
uribase = url_update_type_entity_id(uribase,
old_type_id=old_type_id, new_type_id=new_type_id,
old_entity_id=old_entity_id, new_entity_id=new_entity_id
)
hops[i] = (uribase, params)
curi = continuation_chain_url(hops)
self.continuation_url = curi
return curi
    def get_entity_data_ref(self, name_ext=".jsonld", return_type=None):
        """
        Returns a relative reference (from entity base) for the metadata for the
        current entity using the supplied name extension.

        name_ext        filename extension for the metadata resource.
        return_type     optional content type used to select the returned
                        data format (see 'make_data_ref').
        """
        assert self.curr_typeinfo is not None
        data_ref = self.curr_typeinfo.entityclass.meta_resource_name(name_ext=name_ext)
        # log.info("@@ get_entity_data_ref: data_ref "+data_ref)
        data_ref = make_data_ref(
            self.view.get_request_path(), # For parameter values
            data_ref,
            return_type
            )
        # log.info("@@ get_entity_data_ref: data_ref "+data_ref)
        return data_ref
def get_entity_jsonld_ref(self, return_type=None):
"""
Returns a relative reference (from entity base) for the metadata for the
current entity, to be returned as JSON-LD data.
"""
jsonld_ref = self.get_entity_data_ref(name_ext=".jsonld", return_type=return_type)
return jsonld_ref
    def get_entity_jsonld_url(self, return_type=None):
        """
        Returns a string that can be used as a reference to the entity metadata resource,
        optionally with a specified type parameter added.

        Extracts appropriate local reference, and combines with entity URL path.
        """
        data_ref = self.get_entity_jsonld_ref(return_type=return_type)
        data_url = self.get_src_entity_resource_url(data_ref)
        log.debug(
            "get_entity_jsonld_url: _entityfile %s, data_ref %s, data_url %s"%
            (self.curr_typeinfo.entityclass._entityfile, data_ref, data_url)
            )
        return data_url
def get_entity_turtle_ref(self, return_type=None):
"""
Returns a relative reference (from entity base) for the metadata for the
current entity, to be returned as Turtle data.
"""
turtle_ref = self.get_entity_data_ref(name_ext=".ttl", return_type=return_type)
return turtle_ref
    def get_entity_turtle_url(self, return_type=None):
        """
        Returns a string that can be used as a reference to the entity metadata resource,
        optionally with a specified type parameter added.

        Extracts appropriate local reference, and combines with entity URL path.
        """
        turtle_ref = self.get_entity_turtle_ref(return_type=return_type)
        turtle_url = self.get_src_entity_resource_url(turtle_ref)
        log.debug(
            "get_entity_turtle_url: _entityfile %s, turtle_ref %s, turtle_url %s"%
            (self.curr_typeinfo.entityclass._entityfile, turtle_ref, turtle_url)
            )
        return turtle_url
    def get_entity_list_ref(self, list_name=layout.ENTITY_LIST_FILE, return_type=None):
        """
        Returns a string that can be used as a reference to the entity list data
        relative to the current list URL.

        list_name       name of the list data resource (defaults to the
                        JSON-LD list file).
        return_type     optional content type used to select the returned
                        data format.
        """
        return make_data_ref(
            self.view.get_request_path(),
            list_name,
            return_type
            )
    def context_data(self, entity_label=None):
        """
        Return dictionary of rendering context data available from the
        elements assembled.

        Values that are added here to the view context are used for view rendering,
        and are not passed to the entity value mapping process.

        NOTE: values that are needed to be accessible as part of bound_field values
        must be provided earlier in the form generation process, as elements of the
        "context_extra_values" dictionary.

        Context values set here do not need to be named in the value map used to
        create the view context.
        """
        site_url_parts = urlsplit(self.site._entityurl)
        context = (
            { 'site_label': self.sitedata["title"]
            , 'title': self.sitedata["title"]
            , 'heading': self.sitedata["title"]
            , 'action': self.action
            , 'coll_id': self.coll_id
            , 'type_id': self.type_id
            , 'view_id': self.view_id
            , 'list_id': self.list_id
            , 'collection': self.collection
            , 'info_coll_id': self.coll_id or layout.SITEDATA_ID
            , "SITE": site_url_parts.path
            , "HOST": self.reqhost
            })
        context.update(self.authorizations)
        # Each of the following sections refines 'heading'/'title' as more
        # specific display elements (collection, view, list, entity) are known:
        if self.collection:
            coll_url_parts = urlsplit(self.collection._entityurl)
            context.update(
                { 'heading': self.collection[RDFS.CURIE.label]
                , 'coll_label': self.collection[RDFS.CURIE.label]
                , "COLL": coll_url_parts.path
                , "BASE": coll_url_parts.path + layout.COLL_BASE_REF
                , "PAGE": coll_url_parts.path + layout.COLL_PAGE_REF
                })
            context['title'] = "%(coll_label)s"%context
        if self.recordview:
            context.update(
                { 'heading': self.recordview[RDFS.CURIE.label]
                , 'view_label': self.recordview[RDFS.CURIE.label]
                , 'edit_view_button': self.recordview.get(ANNAL.CURIE.open_view, "yes")
                })
            context['title'] = "%(view_label)s - %(coll_label)s"%context
            # Task buttons default to the view's generic task_buttons value
            task_buttons = self.recordview.get(ANNAL.CURIE.task_buttons, None)
            edit_task_buttons = self.recordview.get(ANNAL.CURIE.edit_task_buttons, task_buttons)
            view_task_buttons = self.recordview.get(ANNAL.CURIE.view_task_buttons, task_buttons)
            self.add_task_button_context('edit_task_buttons', edit_task_buttons, context)
            self.add_task_button_context('view_task_buttons', view_task_buttons, context)
        if self.recordlist:
            context.update(
                { 'heading': self.recordlist[RDFS.CURIE.label]
                , 'list_label': self.recordlist[RDFS.CURIE.label]
                , 'entity_list_ref': self.get_entity_list_ref()
                , 'entity_list_ref_json': self.get_entity_list_ref(return_type="application/json")
                , 'entity_list_ref_turtle': self.get_entity_list_ref(list_name=layout.ENTITY_LIST_TURTLE)
                })
            context['title'] = "%(list_label)s - %(coll_label)s"%context
        if self.curr_typeinfo:
            context.update(
                { 'entity_data_ref': self.get_entity_jsonld_url()
                , 'entity_data_ref_json': self.get_entity_jsonld_url(return_type="application/json")
                , 'entity_turtle_ref': self.get_entity_turtle_url()
                })
        if entity_label:
            context.update(
                { 'entity_label': entity_label
                })
            # context['heading'] = "%(entity_label)s - %(view_label)s"%context
            context['title'] = "%(entity_label)s - %(view_label)s - %(coll_label)s"%context
        if hasattr(self.view, 'help') and self.view.help:
            context.update(
                { 'help_filename': self.view.help
                })
        if hasattr(self.view, 'help_markdown') and self.view.help_markdown:
            # Substitute HOST/SITE/COLL/BASE etc. values into help text
            substituted_text = apply_substitutions(context, self.view.help_markdown)
            context.update(
                { 'help_markdown': substituted_text
                })
        if self.info_messages:
            context.update(
                { "info_head": message.ACTION_COMPLETED
                , "info_message": "\n".join(self.info_messages)
                })
        if self.error_messages:
            context.update(
                { "error_head": message.DATA_ERROR
                , "error_message": "\n".join(self.error_messages)
                })
        return context
def add_task_button_context(self, task_buttons_name, task_buttons, context):
"""
Adds context values to a supplied context dictionary corresponding
to the supplied task_buttons value(s) from a view description.
@NOTE: subsequent versions of this function may extract values from
an identified task description record.
"""
if isinstance(task_buttons, list):
context.update(
{ task_buttons_name:
[ { 'button_id': b[ANNAL.CURIE.button_id]
, 'button_name': extract_entity_id(b[ANNAL.CURIE.button_id])
, 'button_label': b.get(ANNAL.CURIE.button_label, "@@annal:button_label@@")
, 'button_help': b.get(ANNAL.CURIE.button_help, "@@annal:button_help@@")
} for b in task_buttons
]
})
elif task_buttons is not None:
log.error(
"DisplayInfo.context_data: Unexpected value for task_buttons: %r"%
(task_buttons)
)
return
def __str__(self):
attrs = (
[ "view"
, "action"
, "authorizations"
, "reqhost"
, "site"
, "sitedata"
, "coll_id"
, "collection"
, "type_id"
, "entitytypeinfo"
, "list_id"
, "recordlist"
, "view_id"
, "recordview"
, "entity_id"
])
fields = ["DisplayInfo("]
for attr in attrs:
val = getattr(self, attr, None)
if val is not None:
fields.append("%s: %r"%(attr, val))
fields.append(")")
return ("\n ".join(fields))
def __repr__(self):
return str(self)
# Run any embedded doctests when this module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()

# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/displayinfo.py
|
displayinfo.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import sys
import os
import traceback
from django.conf import settings
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.urls import resolve, reverse
from utils.py3porting import is_string, to_unicode, urljoin
from annalist.identifiers import RDFS, ANNAL
from annalist.exceptions import Annalist_Error
from annalist import message
from annalist import layout
from annalist.util import (
valid_id, split_type_entity_id, extract_entity_id,
label_from_id,
open_url, copy_resource_to_fileobj
)
import annalist.models.entitytypeinfo as entitytypeinfo
from annalist.models.entitytypeinfo import EntityTypeInfo, get_built_in_type_ids, CONFIG_PERMISSIONS
from annalist.models.recordview import RecordView
from annalist.models.recordtypedata import RecordTypeData
from annalist.models.entitydata import EntityData
from annalist.views.uri_builder import uri_base, uri_with_params
from annalist.views.displayinfo import DisplayInfo
from annalist.views.responseinfo import ResponseInfo
from annalist.views.generic import AnnalistGenericView
from annalist.views.entityvaluemap import EntityValueMap
from annalist.views.simplevaluemap import SimpleValueMap, StableValueMap
from annalist.views.fieldlistvaluemap import FieldListValueMap
from annalist.views.fields.field_description import FieldDescription, field_description_from_view_field
from annalist.views.fields.bound_field import bound_field, get_entity_values
# -------------------------------------------------------------------------------------------
#
# Mapping table data (not view-specific)
#
# -------------------------------------------------------------------------------------------
# Table used as basis, or initial values, for a dynamically generated entity-value map
# Entity/value mapping table used as the basis (initial entries) for a
# dynamically generated entity-value map.  Each entry relates a view context
# variable (c), an entity value property URI (e) and a form data field (f);
# `None` indicates there is no corresponding value for that facet.
baseentityvaluemap = (
    [ SimpleValueMap(c='url_type_id', e=None, f=None )
    , SimpleValueMap(c='view_choices', e=None, f=None )
    , SimpleValueMap(c='edit_view_button', e=None, f=None )
    , SimpleValueMap(c='edit_view_enable', e=None, f=None )
    , SimpleValueMap(c='default_view_enable', e=None, f=None )
    , SimpleValueMap(c='customize_view_enable', e=None, f=None )
    , StableValueMap(c='entity_id', e=ANNAL.CURIE.id, f='entity_id' )
    , SimpleValueMap(c='entity_uri', e=ANNAL.CURIE.uri, f='entity_uri' )
    # The "record_type" value (in context and form data) is intended to reflect the actual
    # type of the displayed entity. Currently, it is not used:
    , SimpleValueMap(c='record_type', e=ANNAL.CURIE.type, f='record_type' )
    , SimpleValueMap(c='view_id', e=None, f='view_id' )
    , SimpleValueMap(c='orig_id', e=None, f='orig_id' )
    , SimpleValueMap(c='orig_type', e=None, f='orig_type' )
    , SimpleValueMap(c='orig_coll', e=None, f='orig_coll' )
    , SimpleValueMap(c='action', e=None, f='action' )
    , SimpleValueMap(c='continuation_url', e=None, f='continuation_url' )
    , SimpleValueMap(c='continuation_param', e=None, f=None )
    # + Field data: added separately during processing of the form description
    # + Form and interaction control (hidden fields)
    ])
# -------------------------------------------------------------------------------------------
#
# Entity edit view - form rendering and POST response handling
#
# -------------------------------------------------------------------------------------------
class GenericEntityEditView(AnnalistGenericView):
"""
View class for generic entity edit view
"""
_entityedittemplate = 'annalist_entity_edit.html'
_entityviewtemplate = 'annalist_entity_view.html'
    def __init__(self):
        # No state beyond that of the AnnalistGenericView base class is
        # initialized here.  (Old-style super call retained for Python 2
        # compatibility — see `from __future__` imports above.)
        super(GenericEntityEditView, self).__init__()
        return
# GET
    def get(self, request,
            coll_id=None, type_id=None, entity_id=None,
            view_id=None, action=None):
        """
        Create a form for viewing or editing an entity.

        request     Django HTTP request object.
        coll_id     collection identifier from the request URI.
        type_id     type identifier from the request URI.
        entity_id   entity identifier from the request URI.
        view_id     view identifier from the request URI, or None to select a
                    default view for the entity type.
        action      action from the request URI ("new", "copy", "edit" or
                    "view"); defaults to "view" when not supplied.

        Returns an HTTP response rendering the entity with the selected view
        template, or an error/redirect response.
        """
        log.info(
            "views.entityedit.get: coll_id %s, type_id %s, entity_id %s, view_id %s, action %s"%
            (coll_id, type_id, entity_id, view_id, action)
            )
        log.log(settings.TRACE_FIELD_VALUE, " %s"%(self.get_request_path()))
        # Select edit/view template and record URI-derived values on `self`
        self.get_view_template(action, type_id, entity_id)
        action = action or "view"
        viewinfo = self.view_setup(
            action, coll_id, type_id, view_id, entity_id, request.GET.dict()
            )
        # viewinfo.check_authorization(action)
        if viewinfo.http_response:
            return viewinfo.http_response
        # Create local entity object or load values from existing
        typeinfo = viewinfo.curr_typeinfo
        entity = self.get_entity(
            viewinfo.src_entity_id or viewinfo.use_entity_id, typeinfo, action
            )
        # log.debug("@@ GenericEntityEditView.get %r"%(entity,))
        if entity is None:
            # Entity not found: record error and redirect to continuation page
            entity_label = (message.ENTITY_MESSAGE_LABEL%
                { 'coll_id': viewinfo.coll_id
                , 'type_id': viewinfo.type_id
                , 'entity_id': viewinfo.src_entity_id
                })
            msg = (message.ENTITY_DOES_NOT_EXIST%
                { 'type_id': viewinfo.type_id
                , 'id': viewinfo.src_entity_id
                , 'label': entity_label
                })
            log.info(msg)
            responseinfo = ResponseInfo()
            responseinfo.set_response_error(message.DATA_ERROR, msg)
            return responseinfo.http_redirect(self, viewinfo.get_continuation_next())
        # log.info("@@ EntityEdit.get: ancestry %s/%s/%s"%(entity._parent._ancestorid, type_id, entity_id))
        # Record the collection from which the entity value was actually
        # obtained (may be an inherited collection), then check authorization.
        orig_entity_coll_id = viewinfo.orig_typeinfo.get_ancestor_id(entity)
        viewinfo.set_orig_coll_id(orig_coll_id=orig_entity_coll_id)
        if viewinfo.check_authorization(action):
            return viewinfo.http_response
        # Set up values for rendered form response
        self.help_markdown = viewinfo.recordview.get(RDFS.CURIE.comment, None)
        entityvals = get_entity_values(
            viewinfo.curr_typeinfo, entity,
            entity_id=viewinfo.use_entity_id,
            action=viewinfo.action
            )
        context_extra_values = (
            { 'request_url': self.get_request_path()
            , 'url_type_id': type_id
            , 'orig_id': viewinfo.src_entity_id
            , 'orig_type': type_id
            , 'orig_coll': orig_entity_coll_id
            , 'edit_view_enable': 'disabled="disabled"'
            , 'default_view_enable': 'disabled="disabled"'
            , 'customize_view_enable': 'disabled="disabled"'
            , 'continuation_param': viewinfo.get_continuation_param()
            })
        # Enable configuration-related buttons when user has 'config' permission
        if viewinfo.authorizations['auth_config']:
            context_extra_values['edit_view_enable'] = ""
            context_extra_values['default_view_enable'] = ""
            context_extra_values['customize_view_enable'] = ""
        add_field = request.GET.get('add_field', None) #@@ redundant?
        try:
            response = self.form_render(
                viewinfo, entity, entityvals, context_extra_values,
                add_field #@@TODO: remove param?
                )
        except Exception as e:
            # -- This should be redundant, but...
            log.error("Exception in GenericEntityEditView.get (%r)"%(e))
            log.error("".join(traceback.format_stack()))
            # --
            log.exception(str(e))
            response = self.error(
                dict(self.error500values(),
                    message=str(e)+" - see server log for details"
                    )
                )
        return response
# POST
    def post(self, request,
            coll_id=None, type_id=None, entity_id=None,
            view_id=None, action=None):
        """
        Handle response from generic entity editing form.

        request     Django HTTP request object carrying the submitted form
                    data (and any uploaded files).
        coll_id, type_id, entity_id, view_id, action
                    values from the request URI; form-supplied values take
                    precedence for `action` and `view_id`.

        Returns an HTTP response: typically a redirect following a save or
        other button action, or a re-rendered form on error.
        """
        log.info(
            "views.entityedit.post: coll_id %s, type_id %s, entity_id %s, view_id %s, action %s"%
            (coll_id, type_id, entity_id, view_id, action)
            )
        log.log(settings.TRACE_FIELD_VALUE, " %s"%(self.get_request_path()))
        # log.log(settings.TRACE_FIELD_VALUE,
        #     " form data %r"%(request.POST)
        #     )
        if request.FILES:
            for f in request.FILES:
                log.info(
                    " file upload %s: %s (%d bytes) %s"%
                    (f, request.FILES[f].name, request.FILES[f].size,
                        request.FILES[f].content_type
                    )
                    )
        # Template selection must use the URI-supplied action (not form data)
        self.get_view_template(action, type_id, entity_id)
        action = request.POST.get('action', action)
        view_id = request.POST.get('view_id', view_id)
        viewinfo = self.view_setup(
            action, coll_id, type_id, view_id, entity_id, request.POST.dict()
            )
        # Get key form data values
        # Except for entity_id, use values from URI when form does not supply a value
        # (entity_id may be autogenerated later)
        orig_entity_id = request.POST.get('orig_id', entity_id)
        orig_entity_type_id = request.POST.get('orig_type', type_id)
        orig_entity_coll_id = request.POST.get('orig_coll', coll_id)
        curr_entity_type_id = extract_entity_id(request.POST.get('entity_type', type_id))
        curr_entity_id = request.POST.get('entity_id', None)
        viewinfo.set_coll_type_entity_id(
            orig_coll_id=orig_entity_coll_id,
            orig_type_id=orig_entity_type_id, orig_entity_id=orig_entity_id,
            curr_type_id=curr_entity_type_id, curr_entity_id=curr_entity_id
            )
        viewinfo.check_authorization(action)
        if viewinfo.http_response:
            return viewinfo.http_response
        # log.info(
        #     " coll_id %s, type_id %s, entity_id %s, view_id %s, action %s"%
        #     (coll_id, type_id, entity_id, view_id, action)
        #     )
        typeinfo = viewinfo.curr_typeinfo
        context_extra_values = (
            { 'request_url': self.get_request_path()
            , 'url_type_id': type_id
            , 'orig_id': orig_entity_id
            , 'orig_type': orig_entity_type_id
            , 'orig_coll': orig_entity_coll_id
            , 'save_id': viewinfo.curr_entity_id
            , 'save_type': viewinfo.curr_type_id
            , 'save_coll': viewinfo.coll_id
            , 'continuation_param': viewinfo.get_continuation_param()
            })
        # Instantiate per-type message templates with current identifiers
        message_vals = dict(context_extra_values, id=entity_id, type_id=type_id, coll_id=coll_id)
        messages = (
            { 'parent_heading': typeinfo.entitymessages['parent_heading']%message_vals
            , 'parent_missing': typeinfo.entitymessages['parent_missing']%message_vals
            , 'entity_heading': typeinfo.entitymessages['entity_heading']%message_vals
            , 'entity_invalid_id': typeinfo.entitymessages['entity_invalid_id']%message_vals
            , 'entity_exists': typeinfo.entitymessages['entity_exists']%message_vals
            , 'entity_not_exists': typeinfo.entitymessages['entity_not_exists']%message_vals
            , 'entity_type_heading': typeinfo.entitymessages['entity_type_heading']%message_vals
            , 'entity_type_invalid': typeinfo.entitymessages['entity_type_invalid']%message_vals
            , 'remove_field_error': message.REMOVE_FIELD_ERROR
            , 'move_field_error': message.MOVE_FIELD_ERROR
            , 'no_field_selected': message.NO_FIELD_SELECTED
            })
        viewinfo.set_messages(messages)
        # Process form response and respond accordingly
        # #@@TODO: this should be redundant - create as-needed, not before
        # # as of 2014-11-07, removing this causes test failures
        # # as of 2019-06-07, tests run OK without this
        # if not typeinfo.entityparent._exists():
        #     # Create RecordTypeData when not already exists
        #     RecordTypeData.create(viewinfo.collection, typeinfo.entityparent.get_id(), {})
        # #@@
        try:
            response = self.form_response(viewinfo, context_extra_values)
        except Exception as e:
            # -- This should be redundant, but...
            log.error("Exception in GenericEntityEditView.post (%r)"%(e))
            log.error("".join(traceback.format_stack()))
            # --
            log.exception(str(e))
            response = self.error(
                dict(self.error500values(),
                    message=str(e)+" - see server log for details"
                    )
                )
        return response
# Helper functions
def view_setup(self, action, coll_id, type_id, view_id, entity_id, request_dict):
"""
Assemble display information for entity view request handler
"""
self.default_continuation_url = self.view_uri(
"AnnalistEntityDefaultListType", coll_id=coll_id, type_id=type_id
)
viewinfo = DisplayInfo(self, action, request_dict, self.default_continuation_url)
viewinfo.get_site_info(self.get_request_host())
viewinfo.get_coll_info(coll_id)
viewinfo.get_request_type_info(type_id)
viewinfo.get_view_info(viewinfo.get_view_id(type_id, view_id))
viewinfo.get_entity_info(action, entity_id)
# viewinfo.check_authorization(action)
return viewinfo
def get_view_template(self, action, type_id, entity_id):
"""
Returns name of template to use for the current view.
The `action` parameter must be that provided via the URI used to invoke the view,
and not taken from a submitted form. This ensures that the template used is
consistently based on the URI used, and not subject to any vagaries of submitted
form data.
"""
# @@TODO: clean up this code to save and use values in viewinfo rather than `self`
# i.e. 'formtemplate' and 'uri_action'; rename for greater clarity?
if action in ["new", "copy", "edit"]:
self.formtemplate = self._entityedittemplate
self.uri_action = "edit"
else:
self.formtemplate = self._entityviewtemplate
self.uri_action = "view"
self.uri_type_id = type_id
self.uri_entity_id = entity_id
return self.formtemplate
def get_form_refresh_uri(self, viewinfo, view_id=None, action=None, params=None):
"""
Return a URI to refresh the current form display, with options to override the
view identifier and/or action to use. The defaults just refresh the current
display, except that a "new" action becomes "edit" on the assumption that
the new entity is saved before the refresh occurs.
'params', if supplied, is a dictionary of additional query parameters to be added
to the resulting URI.
If the entity has been renamed on the submitted form, this is taken into account
when re-displaying.
"""
view_uri_params = (
{ 'coll_id': viewinfo.coll_id
, 'type_id': viewinfo.curr_type_id
, 'entity_id': viewinfo.curr_entity_id or viewinfo.orig_entity_id
, 'view_id': view_id or viewinfo.view_id # form_data['view_choice']
, 'action': action or self.uri_action
})
more_uri_params = viewinfo.get_continuation_url_dict()
if params:
more_uri_params.update(params)
refresh_uri = (
uri_with_params(
self.view_uri("AnnalistEntityEditView", **view_uri_params),
more_uri_params
)
)
return refresh_uri
def form_refresh_on_success(self, viewinfo, responseinfo, params=None):
"""
Helper function returns HttpResponse value that refreshes the current
page (via redirect) if the supplied `responseinfo` indicates success,
otherwise returns the indicated error response.
"""
if not responseinfo.has_http_response():
responseinfo.set_http_response(
HttpResponseRedirect(self.get_form_refresh_uri(viewinfo, params=params))
)
return responseinfo.get_http_response()
def get_view_entityvaluemap(self, viewinfo, entity_values):
"""
Creates an entity/value map table in the current object incorporating
information from the form field definitions for an indicated view.
"""
entitymap = EntityValueMap(baseentityvaluemap)
# log.debug(
# "GenericEntityEditView.get_view_entityvaluemap entityview: %r"%
# viewinfo.recordview.get_values()
# )
view_fields = viewinfo.recordview.get_values()[ANNAL.CURIE.view_fields]
fieldlistmap = FieldListValueMap('fields',
viewinfo.collection, view_fields,
{'view': viewinfo.recordview, 'entity': entity_values}
)
entitymap.add_map_entry(fieldlistmap)
return entitymap
def get_view_choices_field(self, viewinfo):
"""
Returns a bound_field object that displays as a view-choice selection drop-down.
"""
# @@TODO: Possibly create FieldValueMap and return map_entity_to_context value?
# or extract this logic and share?
field_description = field_description_from_view_field(
viewinfo.collection,
{ ANNAL.CURIE.field_id: "View_choice" },
None
)
entityvals = { field_description.get_field_property_uri(): viewinfo.view_id }
return bound_field(field_description, entityvals)
def get_form_display_context(self, viewinfo, entityvaluemap, entityvals, **context_extra_values):
"""
Return a form display context dictionary with data from the supplied
entity values, augmented with inferred values and other context data.
"""
# log.info("get_form_display_context, entityvals: %r"%(entityvals,))
context_extra_values.update(
{ 'continuation_url': viewinfo.get_continuation_url() or ""
, 'view_choices': self.get_view_choices_field(viewinfo)
})
entityvals = viewinfo.curr_typeinfo.get_entity_implied_values(entityvals)
form_context = entityvaluemap.map_value_to_context(entityvals, **context_extra_values)
form_context.update(viewinfo.context_data(entity_label=entityvals.get(RDFS.CURIE.label, None)))
return form_context
def merge_entity_form_values(self, orig_entityvals, entityformvals):
"""
Logic that merges updated values from a form response into a set of
stored entity values.
Values that correspond to an uploaded or imported file are not updated.
(This is a bit ad hoc, needed to overcome the fact that previously uploaded
file information is not part of the form data being merged.)
"""
# @@TODO: consider more positive method for detecting previous upload; e.g. @type value
def is_previous_upload(ov, k):
return (
(k in ov) and
isinstance(ov[k], dict) and
("resource_name" in ov[k])
)
# log.info("merge_entity_form_values orig_entityvals: %r"%(orig_entityvals,))
# log.info("merge_entity_form_values entityformvals: %r"%(entityformvals,))
upd_entityvals = orig_entityvals.copy()
for k in entityformvals:
if entityformvals[k] is not None:
if not is_previous_upload(orig_entityvals, k):
upd_entityvals[k] = entityformvals[k]
# log.info("orig entity_values %r"%(entity_values,))
return upd_entityvals
    def form_render(self, viewinfo, entity, entityvals, context_extra_values, add_field):
        #@@ remove add_field?
        """
        Returns an HTTP response that renders a view of an entity,
        using supplied entity data.

        viewinfo    DisplayInfo object describing the current view context.
        entity      entity object to be displayed; must be non-empty.
        entityvals  dictionary of entity values used to populate the form.
        context_extra_values
                    dictionary of additional values for the view context.
        add_field   repeat-group id for which an empty field is added before
                    rendering, or None (legacy option — see note below).
        """
        assert entity, "No entity value provided"
        coll_id = viewinfo.coll_id
        type_id = viewinfo.type_id
        entity_id = entity.get_id()
        coll = viewinfo.collection
        # Set up initial view context
        try:
            entityvaluemap = self.get_view_entityvaluemap(viewinfo, entity)
        except Annalist_Error as e:
            return viewinfo.report_error(str(e))
        #@@ TODO: remove this?
        # This logic is currently unused - it was provided for add field button
        # on entity edit, now using view/edit view description buttons instead.
        # There is a test case that depends on this logic:
        # annalist.tests.test_recordview.RecordViewEditViewTest.test_get_recordview_edit_add_field
        if add_field:
            add_field_desc = self.find_repeat_id(entityvaluemap, add_field)
            if add_field_desc:
                # Add empty fields per named repeat group
                self.add_entity_field(add_field_desc, entity)
        #@@
        viewcontext = self.get_form_display_context(
            viewinfo, entityvaluemap, entityvals, **context_extra_values
            )
        # log.info("form_render.viewcontext['fields'] %r"%(viewcontext['fields'],))
        # Generate and return form data.  Alternate-format URLs are resolved
        # relative to the canonical base URL of the source entity.
        entity_baseurl = (
            viewinfo.reqhost +
            viewinfo.get_src_entity_resource_url("")
            )
        entity_json_url = urljoin(entity_baseurl, viewinfo.get_entity_data_ref())
        entity_turtle_url = urljoin(entity_baseurl, viewinfo.get_entity_turtle_ref())
        entity_links = [
            { "rel": "canonical"
            , "ref": entity_baseurl
            }]
        # Render HTML if acceptable to the client; otherwise redirect to a
        # JSON or Turtle rendering; otherwise respond "406 not acceptable".
        return (
            self.render_html(viewcontext, self.formtemplate, links=entity_links)
            or
            self.redirect_json(entity_json_url, links=entity_links)
            or
            self.redirect_turtle(entity_turtle_url, links=entity_links)
            or
            self.error(self.error406values())
            )
def form_re_render(self, responseinfo,
viewinfo, entityvaluemap, entityvals, context_extra_values={},
error_head=None, error_message=None):
"""
Returns response info object with HTTP response that is a re-rendering of the current form
with current values and error message displayed.
"""
#@@ viewinfo.reset_info_messages()
form_context = self.get_form_display_context(
viewinfo, entityvaluemap, entityvals, **context_extra_values
)
# log.info("********\nform_context %r"%form_context)
form_context['info_head'] = None
form_context['info_message'] = None
form_context['error_head'] = error_head
form_context['error_message'] = error_message
if error_message:
responseinfo.set_response_error(error_head, error_message)
responseinfo.set_http_response(
self.render_html(form_context, self.formtemplate)
)
if not responseinfo.has_http_response():
errorvalues = self.error406values()
http_response = self.error(errorvalues)
responseinfo.set_response_error(
"%(status)03d: %(reason)s"%errorvalues,
errorvalues.message
)
return responseinfo
# @@TODO: refactor form_response to separate methods for each action
# form_response should handle initial checking and dispatching.
# The refactoring should attempt to separate methods that use the
    # form data to analyse the response received from methods that process
    # or update the entity data, or display a new form based on entity data.
# The `entityformvals` local variable which contains entity data updated
# with values extracted from the form response should be used as the
# link between these facets of response processing.
def form_response(self, viewinfo, context_extra_values):
"""
Handle POST response from entity edit form.
"""
log.log(settings.TRACE_FIELD_VALUE,
"form_response entity_id %s, orig_entity_id %s, type_id %s, orig_type_id %s"%
(viewinfo.curr_entity_id, viewinfo.orig_entity_id, viewinfo.curr_type_id, viewinfo.orig_type_id)
)
form_data = self.request.POST
if ('cancel' in form_data) or ('close' in form_data):
return HttpResponseRedirect(viewinfo.get_continuation_next())
responseinfo = ResponseInfo()
typeinfo = viewinfo.curr_typeinfo
messages = viewinfo.type_messages
orig_entity = self.get_entity(viewinfo.orig_entity_id, typeinfo, viewinfo.action)
# log.info("orig_entity %r"%(orig_entity.get_values(),))
try:
#@@TODO: when current display is "view" (not edit), there are no form values
# to be saved. Is this redundant? Currently, it works because logic
# uses existing values where new ones are not provided. But there
# are some unexpected warnings generated by 'decode' for fields not
# present (e.g., user permisisons/TokenSet).
entityvaluemap = self.get_view_entityvaluemap(viewinfo, orig_entity)
entityformvals = entityvaluemap.map_form_data_to_values(form_data, orig_entity)
except Annalist_Error as e:
return viewinfo.report_error(str(e))
# Save updated details
if 'save' in form_data:
responseinfo = self.save_entity(
viewinfo, entityvaluemap, entityformvals, context_extra_values,
responseinfo=responseinfo
)
# log.info("save: continuation_url '%s'"%(viewinfo.get_continuation_next()))
return responseinfo.http_redirect(self, viewinfo.get_continuation_next())
# Import data described by a field with an activated "Import" button
import_field = self.find_import(entityvaluemap, form_data)
if import_field:
responseinfo = self.save_entity(
viewinfo, entityvaluemap, entityformvals, context_extra_values,
import_field=import_field,
responseinfo=responseinfo
)
return self.form_refresh_on_success(viewinfo, responseinfo)
# Update or define new view or type (invoked from generic entity editing view)
# Save current entity and redirect to view edit with new field added, and
# current page as continuation.
if 'use_view' in form_data:
# Save entity, then redirect to selected view
responseinfo = self.save_entity(
viewinfo, entityvaluemap, entityformvals, context_extra_values,
responseinfo=responseinfo
)
if not responseinfo.has_http_response():
view_uri_params = (
{ 'coll_id': viewinfo.coll_id
, 'type_id': viewinfo.curr_type_id
, 'view_id': extract_entity_id(form_data['view_choice'])
, 'entity_id': viewinfo.curr_entity_id or viewinfo.orig_entity_id
, 'action': self.uri_action
})
redirect_uri = (
uri_with_params(
self.view_uri("AnnalistEntityEditView", **view_uri_params),
viewinfo.get_continuation_url_dict()
)
)
responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
return responseinfo.get_http_response()
# Make the current view default for the current collection.
if "default_view" in form_data:
responseinfo = self.save_entity(
viewinfo, entityvaluemap, entityformvals, context_extra_values,
responseinfo=responseinfo
)
if not responseinfo.has_http_response():
auth_check = viewinfo.check_authorization("config")
if auth_check:
return auth_check
viewinfo.collection.set_default_view(
view_id=viewinfo.view_id, type_id=viewinfo.orig_type_id, entity_id=viewinfo.orig_entity_id
)
action = "list"
msg = (message.DEFAULT_VIEW_UPDATED%
{ 'coll_id': viewinfo.coll_id
, 'view_id': viewinfo.view_id
, 'type_id': viewinfo.orig_type_id
, 'entity_id': viewinfo.orig_entity_id
})
redirect_uri = (
uri_with_params(
self.get_request_path(),
self.info_params(msg),
viewinfo.get_continuation_url_dict()
)
)
responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
return responseinfo.get_http_response()
# Display "customize" page (collection edit view)
if "customize" in form_data:
responseinfo = self.save_entity(
viewinfo, entityvaluemap, entityformvals, context_extra_values,
responseinfo=responseinfo
)
if not responseinfo.has_http_response():
responseinfo.set_http_response(
viewinfo.check_authorization("config")
)
if not responseinfo.has_http_response():
cont_here = viewinfo.get_continuation_here(
base_here=self.view_uri(
"AnnalistEntityEditView",
coll_id=viewinfo.coll_id,
view_id=viewinfo.view_id,
type_id=viewinfo.curr_type_id,
entity_id=viewinfo.curr_entity_id or viewinfo.orig_entity_id,
action=self.uri_action
)
)
redirect_uri = (
uri_with_params(
self.view_uri(
"AnnalistCollectionEditView",
coll_id=viewinfo.coll_id
),
{'continuation_url': cont_here}
)
)
responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
return responseinfo.get_http_response()
# If "Edit" or "Copy" button invoked, initiate new view of current entity
edit_action = (
"edit" if 'edit' in form_data else
"copy" if 'copy' in form_data else
"view" if 'view' in form_data else None
)
if edit_action is not None:
view_edit_uri_base = self.view_uri("AnnalistEntityEditView",
coll_id=viewinfo.coll_id,
type_id=self.uri_type_id, # entity_type_id,
view_id=viewinfo.view_id,
entity_id=self.uri_entity_id, # entity_id,
action=edit_action
)
responseinfo = self.save_invoke_edit_entity(
viewinfo, entityvaluemap, entityformvals, context_extra_values,
view_edit_uri_base, edit_action,
{},
responseinfo=responseinfo
)
return responseinfo.get_http_response()
# New entity buttons
#
# These may use explicit button ids per the table below, or may be part of
# an enumerated-value field used to create a new referenced entity instance.
#
# In all cases, the current entity is saved and the browser is redirected
# to a new page to enter details of a new/updated entity of the appropriate
# type.
#
new_button_map = (
{ 'new_type':
{ 'type_id': entitytypeinfo.TYPE_ID
, 'view_id': "Type_view"
}
, 'new_view':
{ 'type_id': entitytypeinfo.VIEW_ID
, 'view_id': "View_view"
}
, 'new_field':
{ 'type_id': entitytypeinfo.FIELD_ID
, 'view_id': "Field_view"
}
, 'new_group':
{ 'type_id': entitytypeinfo.GROUP_ID
, 'view_id': "Field_group_view"
}
})
new_type_id = None
for button_id in new_button_map.keys():
if button_id in form_data:
new_type_id = new_button_map[button_id]['type_id']
new_view_id = new_button_map[button_id]['view_id']
break
new_enum = self.find_new_enum(entityvaluemap, form_data)
if new_enum:
new_type_id = extract_entity_id(new_enum['field_ref_type'])
if not valid_id(new_type_id):
# Report problem with field definition...
err_msg = message.NO_REFER_TO_TYPE%new_enum
log.info(err_msg)
self.form_re_render(responseinfo,
viewinfo, entityvaluemap, entityformvals, context_extra_values,
error_head=message.CREATE_FIELD_ENTITY_ERROR,
error_message=err_msg
)
return responseinfo.get_http_response()
new_typeinfo = EntityTypeInfo(
viewinfo.collection, new_type_id
)
new_view_id = new_typeinfo.get_default_view_id()
if new_type_id:
edit_entity_id = None
edit_type_id, edit_entity_id = split_type_entity_id(
new_enum and new_enum.get('enum_value', None),
default_type_id=new_type_id)
edit_action = "new"
edit_url_id = "AnnalistEntityNewView"
if edit_entity_id:
# Entity selected: edit (use type from selected entity)
edit_typeinfo = EntityTypeInfo(
viewinfo.collection, edit_type_id
)
edit_view_id = edit_typeinfo.get_default_view_id()
edit_action = "edit"
new_edit_url_base = self.view_uri("AnnalistEntityEditView",
coll_id=viewinfo.coll_id,
view_id=edit_view_id,
type_id=edit_type_id,
entity_id=edit_entity_id,
action=edit_action
)
else:
# No entity selected: create new
new_edit_url_base = self.view_uri("AnnalistEntityNewView",
coll_id=viewinfo.coll_id,
view_id=new_view_id,
type_id=new_type_id,
action=edit_action
)
responseinfo = self.save_invoke_edit_entity(
viewinfo, entityvaluemap, entityformvals, context_extra_values,
new_edit_url_base, edit_action,
{},
responseinfo=responseinfo
)
return responseinfo.get_http_response()
# Add field from entity view (as opposed to view description view)
# See below call of 'find_add_field' for adding field in view description
# @@TODO: remove references to add_view_field option (use just 'open_view')
if ('add_view_field' in form_data) or ('open_view' in form_data):
view_edit_uri_base = self.view_uri("AnnalistEntityEditView",
coll_id=viewinfo.coll_id,
view_id="View_view",
type_id=entitytypeinfo.VIEW_ID,
entity_id=viewinfo.view_id,
action=self.uri_action
)
add_field_param = (
{"add_field": "View_fields"} if ('add_view_field' in form_data) else {}
)
# log.info("Open view: entity_id: %s"%viewinfo.curr_entity_id)
auth_req = "view" if viewinfo.action == "view" else "config"
responseinfo = self.save_invoke_edit_entity(
viewinfo, entityvaluemap, entityformvals, context_extra_values,
view_edit_uri_base, auth_req,
add_field_param,
responseinfo=responseinfo
)
return responseinfo.get_http_response()
# Add new instance of repeating field, and redisplay
add_field = self.find_add_field(entityvaluemap, form_data)
# log.info("*** Add field: "+repr(add_field))
if add_field:
responseinfo.set_http_response(
self.update_repeat_field_group(
viewinfo, add_field, entityvaluemap, entityformvals,
**context_extra_values
)
)
return self.form_refresh_on_success(viewinfo, responseinfo)
# Remove Field(s), and redisplay
remove_field = self.find_remove_field(entityvaluemap, form_data)
if remove_field:
if remove_field['remove_fields']:
responseinfo.set_http_response(
self.update_repeat_field_group(
viewinfo, remove_field, entityvaluemap, entityformvals,
**context_extra_values
)
)
else:
log.debug("form_response: No field(s) selected for remove_field")
self.form_re_render(responseinfo,
viewinfo, entityvaluemap, entityformvals, context_extra_values,
error_head=messages['remove_field_error'],
error_message=messages['no_field_selected']
)
return self.form_refresh_on_success(viewinfo, responseinfo)
# Move field and redisplay
move_field = self.find_move_field(entityvaluemap, form_data)
if move_field:
if move_field['move_fields']:
http_response = self.update_repeat_field_group(
viewinfo, move_field, entityvaluemap, entityformvals, **context_extra_values
)
else:
log.debug("form_response: No field selected for move up/down")
self.form_re_render(responseinfo,
viewinfo, entityvaluemap, entityformvals, context_extra_values,
error_head=messages['move_field_error'],
error_message=messages['no_field_selected']
)
return self.form_refresh_on_success(viewinfo, responseinfo)
# Task buttons
#
# These are buttons on selected displays that are used to invoke a complex
# task using information from the current view.
task_id = self.find_task_button(entityvaluemap, form_data)
if task_id:
responseinfo = self.save_invoke_task(
viewinfo, entityvaluemap, entityformvals,
context_extra_values,
task_id=task_id,
responseinfo=responseinfo
)
return self.form_refresh_on_success(viewinfo, responseinfo)
# Report unexpected form data
# This shouldn't happen, but just in case...
# Redirect to continuation with error
err_values = self.error_params(
message.UNEXPECTED_FORM_DATA%(form_data),
message.SYSTEM_ERROR
)
log.warning("Unexpected form data %s"%(err_values))
log.warning("Continue to %s"%(viewinfo.get_continuation_next()))
for k, v in form_data.items():
log.info(" form[%s] = %r"%(k,v))
redirect_uri = uri_with_params(viewinfo.get_continuation_next(), err_values)
responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
return responseinfo.get_http_response()
    def save_entity(
            self, viewinfo, entityvaluemap, entityformvals, context_extra_values,
            import_field=None, responseinfo=None
        ):
        """
        This method contains logic to save entity data modified through a form
        interface.  If an entity is being edited (as opposed to created or copied)
        and the entity id or type have been changed, then new entity data is written
        and the original entity data is removed.  If an entity was being viewed,
        no data is saved.

        Decision table:

        Action | coll_id | type_id | entity_id | Target  | Result
               | same?   | same?   | same?     | exists? |
        ------------------------------------------------------------
        View   |   --    |   --    |   --      |   --    | Do nothing
               |         |         |           |         |
        New    |   --    |   --    |   --      |   Y     | Error
               |   --    |   --    |   --      |   N     | Save_new
               |         |         |           |         |
        Copy   |   Y     |   Y     |   Y       |   --    | Error
               |   --    |   --    |   --      |   Y     | Error
               |   --    |   --    |   --      |   N     | Save_copy
               |         |         |           |         |
        Edit   |   Y     |   Y     |   Y       |   N     | Error
               |   Y     |   Y     |   Y       |   Y     | Save_update
               |         |         |           |         |
               |   Y     |   Y     |   N       |   Y     | Error
               |   Y     |   Y     |   N       |   N     | Save_rename *1
               |         |         |           |         |
               |   Y     |   N     |   --      |   Y     | Error
               |   Y     |   N     |   --      |   N     | Save_rename_type
               |         |         |           |         |
               |   N     |   --    |   --      |   Y     | Error (shouldn't happen)
               |   N     |   --    |   --      |   N     | Save_copy

        *1 special case when type is '_type' or '_coll'

        viewinfo        contains display context for the form which is being processed
        entityvaluemap  a list of field descriptions that are used to map values between
                        the edited entity and the form display, including references to
                        field descriptions that control how values are rendered.
        entityformvals  a dictionary of entity values extracted from the submitted form;
                        these are used either for redisplaying the form if there is an
                        error, or to update the saved entity data.
                        NOTE: The dictionary is keyed by property URIs, not field
                        identifiers - the mapping is invoked before this method is called.
        context_extra_values
                        a dictionary of additional values that may be used if the
                        form needs to be redisplayed.
        import_field    if specified, is a field description for which a resource
                        import is requested.  cf. method `save_linked_resource`.
        responseinfo    a `ResponseInfo` object that is used to collect diagnostic
                        information about form processing.  It may contain an HTTP
                        response object if the form or an error page needs to be
                        displayed, a flag indicating whether the entity data was
                        updated, and any additional messages to be included with
                        any other response.

        Returns the supplied ResponseInfo object, with `None` for the HTTPResponse
        value if the save completes successfully, otherwise an HTTP response object
        that reports the nature of the problem.
        """
        # A ResponseInfo must be supplied by the caller; a response already present
        # indicates an earlier error, in which case nothing is saved here.
        if responseinfo is None:
            raise ValueError("entityedit.save_entity expects ResponseInfo object")
        if responseinfo.has_http_response():
            return responseinfo
        # Gather current ("save") and original identifiers from the view context;
        # differences between them drive the rename/copy logic below.
        save_entity_id  = viewinfo.curr_entity_id
        save_type_id    = viewinfo.curr_type_id
        save_coll_id    = viewinfo.coll_id
        orig_entity_id  = viewinfo.orig_entity_id
        orig_type_id    = viewinfo.orig_type_id
        orig_coll_id    = viewinfo.orig_coll_id
        action          = viewinfo.action
        # log.info("save_entity: formvals: %r, import_field %r"%(entityformvals, import_field))
        # log.info(
        #     "save_entity_id %s, save_type_id %s, orig_entity_id %s, orig_type_id %s, action %s"%
        #     (save_entity_id, save_type_id, orig_entity_id, orig_type_id, action)
        #     )
        orig_typeinfo   = viewinfo.orig_typeinfo
        save_typeinfo   = viewinfo.curr_typeinfo
        messages        = viewinfo.type_messages
        if self.uri_action == "view":
            # This is a view operation: nothing to save
            return responseinfo
        if not action in ["new", "copy", "edit"]:
            log.warning("'Save' operation for action '%s'"%(action))
        # Check "edit" authorization to continue
        # (check_authorization returns a truthy error response when access is denied)
        if viewinfo.check_authorization("edit"):
            return responseinfo.set_http_response(viewinfo.http_response)
        # If no id field in form, use original or allocated id
        # Assumes Id value of None corresponds to no form field
        if save_entity_id is None:
            save_entity_id = orig_entity_id
        # An "edit" where any of collection, type or entity id changed is a rename
        entity_renamed = (
            ( action == "edit" ) and
            ( (save_entity_id != orig_entity_id) or
              (save_type_id   != orig_type_id  ) or
              (save_coll_id   != orig_coll_id  ) )
            )
        # log.info("@@ Renamed: %s"%entity_renamed)
        # Check for valid id and type to be saved
        # (This duplicates entity-level validation, but provides more precise error messages)
        if not valid_id(save_entity_id):
            log.debug("form_response: save_entity_id not valid ('%s')"%save_entity_id)
            return self.form_re_render(responseinfo,
                viewinfo, entityvaluemap, entityformvals, context_extra_values,
                error_head=messages['entity_heading'],
                error_message=messages['entity_invalid_id']
                )
        if not valid_id(save_type_id):
            log.debug("form_response: save_type_id not valid_id('%s')"%save_type_id)
            return self.form_re_render(responseinfo,
                viewinfo, entityvaluemap, entityformvals, context_extra_values,
                error_head=messages['entity_type_heading'],
                error_message=messages['entity_type_invalid']
                )
        # Check original parent exists (still)
        if (action != "new") and (not orig_typeinfo.parent_exists()):
            log.warning("save_entity: original entity parent does not exist")
            return self.form_re_render(responseinfo,
                viewinfo, entityvaluemap, entityformvals, context_extra_values,
                error_head=messages['parent_heading'],
                error_message=messages['parent_missing']
                )
        # Create parent RecordTypeData entity for entity to be saved, if needed
        save_typeinfo.parent_typedata(create_typedata=True)
        # Dispatch to the appropriate save operation per the decision table above.
        # Each save_* method returns (err_vals, entity_values).
        # @@TODO: factor out repeated re-rendering logic
        if action == "new":
            err_vals, entity_values = self.save_new(viewinfo,
                save_typeinfo, save_entity_id, entityformvals
                )
        elif action == "copy":
            #@@TODO merge values and force new URI - see above
            err_vals, entity_values = self.save_copy(viewinfo,
                save_typeinfo, save_entity_id,
                orig_typeinfo, orig_entity_id, entityformvals
                )
        else: # action == "edit":
            if save_coll_id != orig_coll_id:
                # Collection changed: treated as a copy into the new collection
                err_vals, entity_values = self.save_copy(viewinfo,
                    save_typeinfo, save_entity_id,
                    orig_typeinfo, orig_entity_id, entityformvals
                    )
            elif save_type_id != orig_type_id:
                if entitytypeinfo.TYPE_ID in [save_type_id, orig_type_id]:
                    # Changing the type of a type record is not allowed
                    log.warning(
                        "EntityEdit.rename_entity_type: attempt to change type of type record"
                        )
                    err_vals = (message.INVALID_OPERATION_ATTEMPTED, message.INVALID_TYPE_CHANGE)
                    entity_values = None
                else:
                    # Entity renamed to new type
                    err_vals, entity_values = self.save_rename(viewinfo,
                        save_typeinfo, save_entity_id,
                        orig_typeinfo, orig_entity_id, entityformvals
                        )
            elif save_entity_id != orig_entity_id:
                # Non -collection or -type record rename
                err_vals, entity_values = self.save_rename(viewinfo,
                    save_typeinfo, save_entity_id,
                    orig_typeinfo, orig_entity_id,
                    entityformvals
                    )
            else:
                err_vals, entity_values = self.save_update(viewinfo,
                    save_typeinfo, save_entity_id, entityformvals
                    )
        # Save any imported resource or uploaded files
        if not err_vals:
            responseinfo = self.save_uploaded_files(
                save_entity_id, save_typeinfo,
                entityvaluemap, entity_values,
                self.request.FILES,
                responseinfo
                )
        if not err_vals and import_field is not None:
            responseinfo = self.save_linked_resource(
                save_entity_id, save_typeinfo,
                entity_values,
                import_field,
                responseinfo
                )
        # log.info("save_linked_resource: responseinfo %r"%responseinfo)
        # log.info("save_linked_resource: entity_values %r"%entity_values)
        # If an import/upload updated entity values, re-save the entity data
        if responseinfo.is_updated():
            err_vals = self.create_update_entity(save_typeinfo, save_entity_id, entity_values)
        # Finish up
        if err_vals:
            log.warning("err_vals %r"%(err_vals,))
            return self.form_re_render(responseinfo,
                viewinfo, entityvaluemap, entityformvals, context_extra_values,
                error_head=err_vals[0],
                error_message=err_vals[1]
                )
        log.info("Saved %s/%s"%(save_type_id, save_entity_id))
        viewinfo.saved(is_saved=True)
        viewinfo.update_coll_version()
        return responseinfo
def save_validate_assemble_values(self, action, viewinfo,
save_typeinfo, save_entity_id,
orig_typeinfo, orig_entity_id, entityformvals
):
"""
Validate input and assemble updated values for storage
Note: form data is applied as update to original entity data so that
values not in view are preserved. Use original entity values without
field aliases as basis for new value.
Returns a pair (err_vals, entity_values), where:
err_vals is None if the operation succeeds, or error details
consisting of a pair of strings for the error message
heading and body.
entity_values is the merged entity values.
"""
errs = save_typeinfo.validate(save_entity_id, entityformvals)
if errs:
err_vals = (message.INPUT_VALIDATION_ERROR, "\n".join(errs))
log.info("save_validate_assemble_values: err_vals %r"%(err_vals,))
return (err_vals, entityformvals)
# Validation OK
if orig_typeinfo and orig_entity_id:
orig_entity = orig_typeinfo.get_entity(orig_entity_id, action)
orig_values = orig_entity.get_values() if orig_entity else {}
else:
orig_values = {}
entity_values = self.merge_entity_form_values(orig_values, entityformvals)
if action == "copy":
# Force new URI on copy
orig_uri = orig_values.get(ANNAL.CURIE.uri, None)
new_uri = entity_values.get(ANNAL.CURIE.uri, None)
if new_uri == orig_uri:
entity_values.pop(ANNAL.CURIE.uri, None)
entity_values[ANNAL.CURIE.type_id] = save_typeinfo.get_type_id()
entity_values[ANNAL.CURIE.type] = save_typeinfo.get_type_uri()
# Supply values for label and comment if not already provided or aliased
entity_implied_vals = save_typeinfo.get_entity_implied_values(entity_values)
entity_label = entity_implied_vals.get(RDFS.CURIE.label, None)
if not entity_label:
entity_label = label_from_id(save_entity_id)
entity_values[RDFS.CURIE.label] = entity_label
if not entity_implied_vals.get(RDFS.CURIE.comment, None):
entity_values[RDFS.CURIE.comment] = entity_label
return (None, entity_values)
def save_new(self, viewinfo, save_typeinfo, save_entity_id, entityformvals):
"""
Save new entity
Returns a pair (err_vals, entity_values), where:
err_vals is None if the operation succeeds, or error details
consisting of a pair of strings for the error message
heading and body.
entity_values is a copy of the data values that were saved.
"""
# messages = viewinfo.type_messages
err_vals = None
if not viewinfo.saved():
# First save - check for existence
if save_typeinfo.entity_exists(save_entity_id):
log.warning(
"Entity exists (new): %s/%s"%
(save_typeinfo.type_id, save_entity_id)
)
err_vals = (
viewinfo.type_messages['entity_heading'],
viewinfo.type_messages['entity_exists']
)
if not err_vals:
err_vals, entity_values = self.save_validate_assemble_values("new", viewinfo,
save_typeinfo, save_entity_id,
None, None, entityformvals
)
if not err_vals:
err_vals = self.create_update_entity(save_typeinfo, save_entity_id, entity_values)
return (err_vals, entityformvals)
def save_copy(self, viewinfo,
save_typeinfo, save_entity_id,
orig_typeinfo, orig_entity_id, entityformvals
):
"""
Save copy of entity.
As well as saving the entity data, attachments are copied from the original
to the new entity directory or container.
Returns a pair (err_vals, entity_values), where:
err_vals is None if the operation succeeds, or error details
a pair of strings for the error message heading and body.
entity_values is a copy of the data values that were saved.
"""
err_vals = None
entity_values = None
if not viewinfo.saved():
# First save - check for existence
if save_typeinfo.entity_exists(save_entity_id):
log.warning(
"Entity exists (copy): %s/%s, orig %s/%s"%
(save_typeinfo.type_id, save_entity_id, orig_typeinfo.type_id, orig_entity_id)
)
err_vals = (
viewinfo.type_messages['entity_heading'],
viewinfo.type_messages['entity_exists']
)
if not err_vals:
err_vals, entity_values = self.save_validate_assemble_values("copy", viewinfo,
save_typeinfo, save_entity_id,
orig_typeinfo, orig_entity_id, entityformvals
)
if not err_vals:
err_vals = self.copy_entity(
orig_typeinfo, orig_entity_id,
save_typeinfo, save_entity_id,
entity_values
)
return (err_vals, entity_values)
    def save_rename(self, viewinfo, save_typeinfo, save_entity_id, orig_typeinfo, orig_entity_id, entityformvals):
        """
        Save renamed entity.

        Dispatches to a type-, collection- or plain-entity rename depending on
        whether the current/original type id is '_type' or '_coll'.

        Returns a pair (err_vals, entity_values), where:
          err_vals          is None if the operation succeeds, or error details
                            a pair of strings for the error message heading and body.
          entity_values     is a copy of the data values that were saved.
        """
        err_vals      = None
        entity_values = None
        if not viewinfo.saved():
            # First save - check for existence
            if save_typeinfo.entity_exists(save_entity_id):
                log.warning(
                    "Entity exists (rename): %s/%s, orig %s/%s"%
                    (save_typeinfo.type_id, save_entity_id, orig_typeinfo.type_id, orig_entity_id)
                    )
                err_vals = (
                    viewinfo.type_messages['entity_heading'],
                    viewinfo.type_messages['entity_exists']
                    )
        if not err_vals:
            save_type_id = viewinfo.curr_type_id
            orig_type_id = viewinfo.orig_type_id
            # Validate form data and merge with original entity values
            err_vals, entity_values = self.save_validate_assemble_values("edit", viewinfo,
                save_typeinfo, save_entity_id,
                orig_typeinfo, orig_entity_id, entityformvals
                )
            if not err_vals:
                if entitytypeinfo.TYPE_ID in [save_type_id, orig_type_id]:
                    # Type renamed: instances of the type must also be moved
                    # log.info(
                    #     "@@ rename_entity_type %s/%s to %s/%s"%
                    #     ( save_typeinfo.get_type_id(), orig_entity_id,
                    #       save_typeinfo.get_type_id(), save_entity_id)
                    #     )
                    err_vals = self.rename_entity_type(
                        viewinfo,
                        orig_typeinfo, orig_entity_id,
                        save_typeinfo, save_entity_id, entity_values
                        )
                elif entitytypeinfo.COLL_ID in [save_type_id, orig_type_id]:
                    # Collection renamed
                    err_vals = self.rename_collection(
                        orig_typeinfo, orig_entity_id,
                        save_typeinfo, save_entity_id, entity_values
                        )
                else:
                    # Ordinary entity rename (may also change type/location)
                    err_vals = self.rename_entity(
                        orig_typeinfo, orig_entity_id,
                        save_typeinfo, save_entity_id,
                        entity_values
                        )
            if not err_vals:
                # Update references to renamed entity in continuation URL
                viewinfo.update_continuation_url(
                    old_type_id=orig_type_id, old_entity_id=orig_entity_id,
                    new_type_id=save_type_id, new_entity_id=save_entity_id
                    )
        return (err_vals, entity_values)
def save_update(self, viewinfo, save_typeinfo, save_entity_id, entityformvals):
"""
Save updated entity.
Returns a pair (err_vals, entity_values), where:
err_vals is None if the operation succeeds, or error details
a pair of strings for the error message heading and body.
entity_values is a copy of the data values that were saved.
"""
err_vals = None
entity_values = None
if not save_typeinfo.entity_exists(save_entity_id, altscope=None):
# This shouldn't happen, but just in case (e.g. bad URL hacking) ...
log.warning("save_update: expected entity %s/%s not found"%
(save_typeinfo.type_id, save_entity_id)
)
err_vals = (
viewinfo.type_messages['entity_heading'],
viewinfo.type_messages['entity_not_exists']
)
if not err_vals:
err_vals, entity_values = self.save_validate_assemble_values("edit", viewinfo,
save_typeinfo, save_entity_id,
save_typeinfo, save_entity_id, entityformvals
)
if not err_vals:
err_vals = self.create_update_entity(save_typeinfo, save_entity_id, entity_values)
return (err_vals, entity_values)
def import_resource(self,
field_desc, field_name,
type_id, entity_id, entityvals,
init_field_vals, read_resource,
responseinfo
):
"""
Common logic for saving uploaded files or linked resources.
field_desc Field descriptor for import/upload field
field_name is the name of the field instance for which a
resource is imported.
type_id Id of type of entity to which uploaded resource is attached
entity_id Id of entity
entityvals Entity values dictionary
init_field_vals is a function that is called to set up a field values
dictionary. Called as:
init_field_vals(field_vals, field_name, field_string)
read_resource opens and saves a resource. Also updates the supplied
field_vals with details of the accessed resource. Called as:
read_resource(field_desc, field_name, field_vals)
responseinfo receives information about any read_resource error.
The structure is provided with message templates for reporting.
returns `import_vals`, which is a copy of the field values augmented with
some additional information to assist diagnostic generation.
"""
log.info("Importing resource for %s"%field_name)
property_uri = field_desc['field_property_uri']
fv = entityvals[property_uri]
if isinstance(fv, dict):
field_vals = fv.copy()
field_string = None
elif is_string(fv):
field_vals = {}
field_string = fv
else:
field_vals = {}
init_field_vals(field_vals, field_name, field_string)
import_vals = field_vals.copy() # Used for reporting..
import_vals.update(
{ 'id': entity_id
, 'type_id': type_id
})
log.debug("import_vals: %r"%(import_vals))
try:
read_resource(field_desc, field_name, field_vals)
# Import completed: update field in entity value dictionary
entityvals[property_uri] = field_vals
import_vals.update(field_vals)
import_done = responseinfo.get_message('import_done')
import_msg = responseinfo.get_formatted('import_done_info', import_vals)
responseinfo.set_updated()
responseinfo.set_response_confirmation(import_done, import_msg)
except Exception as e:
import_err = responseinfo.get_message('import_err')
import_msg = responseinfo.get_formatted(
'import_err_info', dict(import_vals, import_exc=str(e))
)
log.warning("%s: %s"%(import_err, import_msg))
log.debug(str(e), exc_info=True)
responseinfo.set_response_error(import_err, import_msg)
return import_vals
def save_uploaded_files(self,
entity_id, typeinfo,
entityvaluemap, entityvals,
uploaded_files,
responseinfo
):
"""
Process uploaded files: files are saved to the entity directory, and
the supplied entity values are updated accordingly. This function is
called after the main entity data has been saved.
This functon operates by scanning through fields that may generate a file
upload and looking for a corresponding uploaded files. Uploaded files not
corresponding to view fields are ignored.
entity_id entity identifier
typeinfo type informaton about entity
entityvaluemap used to find fields that correspond to uploaded files.
entityvals a copy of the entity values that have been saved.
uploaded_files is the Django uploaded files information from the request
being processed.
responseinfo receives information about any error.
Updates and returns the supplied responseinfo object.
"""
def is_upload_f(fd):
return fd.is_upload_field()
def init_field_vals(field_vals, field_name, field_string):
field_vals['upload_name'] = field_name
field_vals['uploaded_file'] = field_vals.get('uploaded_file', field_string)
return
def read_resource(field_desc, field_name, field_vals):
value_type = field_desc.get('field_value_type', ANNAL.CURIE.Unknown_type)
uploaded_file = uploaded_files[field_name]
resource_type = uploaded_file.content_type
with typeinfo.get_fileobj(
entity_id, field_name, value_type, resource_type, "wb"
) as local_fileobj:
resource_name = os.path.basename(local_fileobj.name)
field_vals.update(
{ 'resource_name': resource_name
, 'resource_type': resource_type
, 'uploaded_file': uploaded_file.name
, 'uploaded_size': uploaded_file.size
})
for chunk in uploaded_files[upload_name].chunks():
local_fileobj.write(chunk)
return
# log.info("@@ save_uploaded_files, entityvals: %r"%(entityvals,))
if responseinfo.is_response_error():
return responseinfo # Error already seen - return now
responseinfo.set_message_templates(
{ 'import_err': message.UPLOAD_ERROR
, 'import_err_info': message.UPLOAD_ERROR_REASON
, 'import_done': message.UPLOAD_DONE
, 'import_done_info': message.UPLOAD_DONE_DETAIL
})
for fd in self.find_fields(entityvaluemap, is_upload_f):
upload_name = fd.get_field_instance_name()
if upload_name in uploaded_files:
self.import_resource(
fd, upload_name,
typeinfo.type_id, entity_id, entityvals,
init_field_vals, read_resource,
responseinfo
)
return responseinfo
    def save_linked_resource(self,
            entity_id, typeinfo,
            entityvals,
            import_field,
            responseinfo
        ):
        """
        Imports a resource described by a supplied field description, updates the
        saved entity with information about the imported resource, and redisplays
        the current form.

        entity_id       entity identifier
        typeinfo        type information about entity
        entityvals      a copy of the entity values that have been saved.
        import_field    is field description of import field that has been triggered.
        responseinfo    receives information about any error.

        Updates and returns the supplied responseinfo object.
        """
        def init_field_vals(field_vals, field_name, field_string):
            # Record the import field name, preserving any previous URL value
            field_vals['import_name'] = field_name
            field_vals['import_url']  = field_vals.get('import_url', field_string)
            return
        def read_resource(field_desc, field_name, field_vals):
            # Fetch the resource from its URL and save it as an entity attachment,
            # recording details of the saved resource in `field_vals`.
            import_url = field_vals['import_url']
            resource_fileobj, resource_url, resource_type = open_url(import_url)
            log.debug(
                "import_field: import_url %s, resource_url %s, resource_type %s"%
                (import_url, resource_url, resource_type)
                )
            try:
                value_type = import_field.get('field_value_type', ANNAL.CURIE.Unknown_type)
                with typeinfo.get_fileobj(
                        entity_id, field_name, value_type, resource_type, "wb"
                        ) as local_fileobj:
                    resource_name = os.path.basename(local_fileobj.name)
                    field_vals.update(
                        { 'resource_url' : resource_url
                        , 'resource_name': resource_name
                        , 'resource_type': resource_type
                        })
                    #@@TODO: timeout / size limit? (Potential DoS?)
                    copy_resource_to_fileobj(resource_fileobj, local_fileobj)
            finally:
                # Ensure the remote resource stream is closed even on error
                resource_fileobj.close()
            return
        # log.info("@@ save_linked_resource, entityvals: %r"%(entityvals,))
        if responseinfo.is_response_error():
            return responseinfo # Error already seen - return now
        responseinfo.set_message_templates(
            { 'import_err':         message.IMPORT_ERROR
            , 'import_err_info':    message.IMPORT_ERROR_REASON
            , 'import_done':        message.IMPORT_DONE
            , 'import_done_info':   message.IMPORT_DONE_DETAIL
            })
        import_name = import_field.get_field_instance_name()
        self.import_resource(
            import_field, import_name,
            typeinfo.type_id, entity_id, entityvals,
            init_field_vals, read_resource,
            responseinfo
            )
        return responseinfo
def create_update_entity(self, typeinfo, entity_id, entity_values):
"""
Create or update an entity.
Returns None if the operation succeeds, or error details in the form of
a pair of values for the error message heading and the error message body.
"""
typeinfo.create_entity(entity_id, entity_values)
if not typeinfo.entity_exists(entity_id):
log.warning(
"EntityEdit.create_update_entity: Failed to create/update entity %s/%s"%
(typeinfo.type_id, entity_id)
)
return (
message.SYSTEM_ERROR,
message.CREATE_ENTITY_FAILED%
(typeinfo.type_id, entity_id)
)
return None
    def rename_entity_type(self,
            viewinfo,
            old_typeinfo, old_type_id,
            new_typeinfo, new_type_id, type_data
        ):
        """
        Save a renamed type entity.

        This involves renaming all of the instances of the type to
        the new type (with new type id and in new location).

        Returns None if the operation succeeds, or error details in the form of
        a pair of values for the error message heading and the error message body.
        """
        # NOTE: old RecordData instance is not removed.
        # Don't allow type-rename to or from a type value
        if old_typeinfo.type_id != new_typeinfo.type_id:
            log.warning(
                "EntityEdit.rename_entity_type: attempt to change type of type record"
                )
            return (message.INVALID_OPERATION_ATTEMPTED, message.INVALID_TYPE_CHANGE)
        # Don't allow renaming built-in type
        builtin_types = get_built_in_type_ids()
        if (new_type_id in builtin_types) or (old_type_id in builtin_types):
            log.warning(
                "EntityEdit.rename_entity_type: attempt to rename or define a built-in type"
                )
            return (message.INVALID_OPERATION_ATTEMPTED, message.INVALID_TYPE_RENAME)
        # Create new type record
        new_typeinfo.create_entity(new_type_id, type_data)
        # Update instances of type:
        # src/dst describe the instance data locations for the old/new type ids
        src_typeinfo = EntityTypeInfo(
            viewinfo.collection, old_type_id
            )
        dst_typeinfo = EntityTypeInfo(
            viewinfo.collection, new_type_id,
            create_typedata=True
            )
        if new_typeinfo.entity_exists(new_type_id):
            # Enumerate type instance records and move to new type
            remove_OK = True
            for d in src_typeinfo.enum_entities():
                data_id   = d.get_id()
                data_vals = d.get_values()
                data_vals[ANNAL.CURIE.type_id] = new_type_id
                data_vals[ANNAL.CURIE.type]    = dst_typeinfo.get_type_uri()
                # rename_entity returns error details (truthy) on failure;
                # any failure suppresses removal of the old type record below
                if self.rename_entity(
                    src_typeinfo, data_id,
                    dst_typeinfo, data_id, data_vals
                    ):
                    remove_OK = False
            # Finally, remove old type record:
            if remove_OK: # Precautionary
                # NOTE(review): old type record is removed via new_typeinfo -
                # presumably both typeinfos address the same '_type' storage;
                # confirm against EntityTypeInfo semantics.
                new_typeinfo.remove_entity(old_type_id)
                RecordTypeData.remove(new_typeinfo.entitycoll, old_type_id)
        else:
            log.warning(
                "Failed to rename type %s to type %s"%
                (old_type_id, new_type_id)
                )
            return (
                message.SYSTEM_ERROR,
                message.RENAME_TYPE_FAILED%(old_type_id, new_type_id)
                )
        return None
def rename_collection(self,
old_typeinfo, old_entity_id,
new_typeinfo, new_entity_id, entity_values
):
"""
Save a renamed collection.
This involves renaming the collection directory and updating
the collection metadata.
Returns None if the operation succeeds, or error message
details to be displayed as a pair of values for the message
heading and the message body.
"""
log.info("rename_collection old: %s, new: %s, vals: %r"%(old_entity_id, new_entity_id, entity_values))
new_typeinfo.rename_entity(new_entity_id, old_typeinfo, old_entity_id)
new_typeinfo.create_entity(new_entity_id, entity_values)
if not new_typeinfo.entity_exists(new_entity_id): # Precautionary
log.warning(
"EntityEdit.rename_collection: Failed to rename collection %s/%s to %s/%s"%
(old_typeinfo.type_id, old_entity_id,
new_typeinfo.type_id, new_entity_id)
)
return (
message.SYSTEM_ERROR,
message.RENAME_ENTITY_FAILED%
(old_typeinfo.type_id, old_entity_id,
new_typeinfo.type_id, new_entity_id)
)
return None
def rename_entity(self,
old_typeinfo, old_entity_id,
new_typeinfo, new_entity_id, entity_values
):
"""
Save a renamed entity.
Renaming may involve changing the type (hence location) of the entity,
and/or the entity_id
The new entity is saved and checked before the original entity is deleted.
Returns None if the operation succeeds, or error message
details to be displayed as a pair of values for the message
heading and the message body.
"""
# log.info(
# "rename_entity old: %s/%s, new: %s/%s, vals: %r"%
# ( old_typeinfo.type_id, old_entity_id,
# new_typeinfo.type_id, new_entity_id,
# entity_values
# )
# )
# _new_entity just constructs a new object of the appropriate class
old_entity = old_typeinfo._new_entity(old_entity_id)
new_entity = new_typeinfo.create_entity(new_entity_id, entity_values)
msg = new_entity._copy_entity_files(old_entity)
if msg:
return (message.SYSTEM_ERROR, msg)
if new_typeinfo.entity_exists(new_entity_id): # Precautionary
old_typeinfo.remove_entity(old_entity_id)
else:
log.warning(
"EntityEdit.rename_entity: Failed to rename entity %s/%s to %s/%s"%
(old_typeinfo.type_id, old_entity_id,
new_typeinfo.type_id, new_entity_id)
)
return (
message.SYSTEM_ERROR,
message.RENAME_ENTITY_FAILED%
( old_typeinfo.type_id, old_entity_id,
new_typeinfo.type_id, new_entity_id
)
)
return None
def copy_entity(self,
old_typeinfo, old_entity_id,
new_typeinfo, new_entity_id, entity_values
):
"""
Copy an entity, including any attachments.
(Unlike rename, entities are just copied without affecting any existing
entities of that type.
Returns None if the operation succeeds, or error details in the form of
a pair of values for the error message heading and the error message body.
"""
log.info(
"copy_entity old: %s/%s, new: %s/%s"%
( old_typeinfo.type_id, old_entity_id,
new_typeinfo.type_id, new_entity_id
)
)
# log.debug("copy_entity vals: %r"%(entity_values,))
# _new_entity just constructs a new object of the appropriate class
old_entity = old_typeinfo._new_entity(old_entity_id)
new_entity = new_typeinfo.create_entity(new_entity_id, entity_values)
msg = new_entity._copy_entity_files(old_entity)
if msg:
return (message.SYSTEM_ERROR, msg)
if not new_typeinfo.entity_exists(new_entity_id):
log.warning(
"EntityEdit.copy_entity: failed to copy entity %s/%s to %s/%s"%
( old_typeinfo.type_id, old_entity_id,
new_typeinfo.type_id, new_entity_id
)
)
return (
message.SYSTEM_ERROR,
message.COPY_ENTITY_FAILED%
( old_typeinfo.type_id, old_entity_id,
new_typeinfo.type_id, new_entity_id
)
)
return None
def save_invoke_edit_entity(self,
viewinfo, entityvaluemap, entityvals, context_extra_values,
config_edit_url, edit_perm,
url_params,
responseinfo=None):
"""
Common logic for invoking a resource edit while editing
some other resource:
- the entity currently being edited is saved
- the invoke_edit_entity method (below) is called
If there is a problem, an error response is returned for display
in the current view.
"""
responseinfo = self.save_entity(
viewinfo, entityvaluemap, entityvals, context_extra_values,
responseinfo=responseinfo
)
if not responseinfo.has_http_response():
responseinfo.set_http_response(
self.invoke_edit_entity(
viewinfo, edit_perm,
config_edit_url, url_params,
viewinfo.curr_type_id,
viewinfo.curr_entity_id or viewinfo.orig_entity_id
)
)
return responseinfo
def invoke_edit_entity(self,
viewinfo, edit_perm,
edit_url, url_params,
entity_type_id, entity_id
):
"""
Common logic for invoking a resource edit while editing
or viewing some other resource:
- authorization to perform the requested edit is checked
- a continuaton URL is calculated which is the URL for the current view
- a URL for the config edit view is assembled from the supplied base URL
and parameters, and the calculated continuaton URL
- an HTTP redirect response to the config edit view is returned.
If there is a problem with any ofthese steps, an error response is returned
and displayed in the current view.
viewinfo current view information.
edit_perm action for which permission is required to invoke the indicated
edit (e.g. "new", "edit" or "config").
edit_url base URL for edit view to be invoked.
url_params additional parameters to be added to the edit view base url.
entity_type_id type_id of entity currently being presented.
entity_id entity_id of entity currently presented.
"""
if viewinfo.check_authorization(edit_perm):
return viewinfo.http_response
log.info("invoke_edit_entity: entity_id %s"%entity_id)
cont_here = viewinfo.get_continuation_here(
base_here=self.view_uri(
"AnnalistEntityEditView",
coll_id=viewinfo.coll_id,
view_id=viewinfo.view_id,
type_id=entity_type_id,
entity_id=entity_id,
action=self.uri_action
)
)
return HttpResponseRedirect(
uri_with_params(edit_url, url_params, {'continuation_url': cont_here})
)
    def save_invoke_task(self,
            viewinfo, entityvaluemap, entityformvals,
            context_extra_values,
            task_id=None,
            responseinfo=None
            ):
        """
        Save current entity and invoke identified task using current entity values

        viewinfo        contains display context for the form which is being processed
        entityvaluemap  a list of field descriptions that are used to map values between
                        the edited entity and the form display, including references to
                        field descriptions that control how values are rendered.  This
                        is used to locate form fields.
        entityformvals  a dictionary of entity values extracted from the submitted form;
                        these are used either for redisplaying the form if there is an
                        error, or to update the saved entity data.
        context_extra_values
                        a dictionary of additional values that may be used if the
                        form needs to be redisplayed.
        task_id         id of the task to be performed.
        responseinfo    a `ResponseInfo` object that is used to collect diagnostic
                        information about form processing.  It may contain an HTTP
                        response object if the form or an error page needs to be
                        displayed, a flag indicating whether the entity data was
                        updated, and any additional messages to be included with
                        any other response.

        Returns the updated responseinfo value.
        """
        # NOTE: see also find_task_button and annal:edit_task_buttons in corresponding view data
        # Tasks invoked without saving current entity
        # If no response generated yet, save entity
        responseinfo = self.save_entity(
            viewinfo, entityvaluemap, entityformvals, context_extra_values,
            responseinfo=responseinfo
            )
        if responseinfo.is_response_error():
            return responseinfo
        # Tasks invoked after current entity has been saved (if required)
        #@@------------------------------------------------------
        #@@TODO: drive this logic from a stored _task description
        #@@------------------------------------------------------
        if task_id == entitytypeinfo.TASK_ID+"/Define_view_list":
            # Define a view and/or list for the type described by the current
            # form, link them back into the type record, and redisplay with a
            # confirmation message.
            if viewinfo.check_authorization("edit"):
                return responseinfo.set_http_response(viewinfo.http_response)
            # Extract info from entityformvals
            base_id        = entityformvals[ANNAL.CURIE.id]
            type_entity_id = entitytypeinfo.TYPE_ID+"/"+base_id
            type_label     = entityformvals[RDFS.CURIE.label]
            type_uri       = entityformvals.get(ANNAL.CURIE.uri, None)
            prev_view_id   = entityformvals.get(ANNAL.CURIE.type_view, None)
            prev_list_id   = entityformvals.get(ANNAL.CURIE.type_list, None)
            view_entity_id = entitytypeinfo.VIEW_ID+"/"+base_id+layout.SUFFIX_VIEW
            list_entity_id = entitytypeinfo.LIST_ID+"/"+base_id+layout.SUFFIX_LIST
            # Select by type URI when one is declared, otherwise list everything
            list_selector  = "'%s' in [@type]"%(type_uri) if type_uri else "ALL"
            if not (prev_view_id or prev_list_id):
                error_params = self.error_params(
                    message.NO_VIEW_OR_LIST_SELECTED
                    )
                return responseinfo.set_http_response(
                    HttpResponseRedirect(self.get_form_refresh_uri(viewinfo, params=error_params))
                    )
            # log.debug("task/Define_view_list prev_view_id %s"%(prev_view_id,))
            # log.debug("task/Define_view_list prev_list_id %s"%(prev_list_id,))
            # Set up view details (other defaults from sitedata '_initial_values')
            type_values = { "type_id": base_id, "type_label": type_label }
            if prev_view_id:
                view_typeinfo = EntityTypeInfo(
                    viewinfo.collection, entitytypeinfo.VIEW_ID
                    )
                # Copy the previously-selected view as the starting point
                view_entity = view_typeinfo.get_copy_entity(view_entity_id, prev_view_id)
                view_entity.setdefault(RDFS.CURIE.label,
                    message.TYPE_VIEW_LABEL%type_values
                    )
                view_entity.setdefault(RDFS.CURIE.comment,
                    message.TYPE_VIEW_COMMENT%type_values
                    )
                view_entity[ANNAL.CURIE.view_entity_type] = type_uri
                view_entity._save()
            # Set up list details (other defaults from sitedata '_initial_values')
            if prev_list_id:
                list_typeinfo = EntityTypeInfo(
                    viewinfo.collection, entitytypeinfo.LIST_ID
                    )
                # Copy the previously-selected list as the starting point
                list_entity = list_typeinfo.get_copy_entity(list_entity_id, prev_list_id)
                list_entity.setdefault(RDFS.CURIE.label,
                    message.TYPE_LIST_LABEL%type_values
                    )
                list_entity.setdefault(RDFS.CURIE.comment,
                    message.TYPE_LIST_COMMENT%type_values
                    )
                list_entity[ANNAL.CURIE.display_type]         = "List"
                list_entity[ANNAL.CURIE.default_view]         = view_entity_id
                list_entity[ANNAL.CURIE.default_type]         = type_entity_id
                list_entity[ANNAL.CURIE.list_entity_type]     = type_uri
                list_entity[ANNAL.CURIE.list_entity_selector] = list_selector
                list_entity._save()
            # Update view, list values in type record, and save again
            if prev_view_id:
                entityformvals[ANNAL.CURIE.type_view] = view_entity_id
            if prev_list_id:
                entityformvals[ANNAL.CURIE.type_list] = list_entity_id
            responseinfo = self.save_entity(
                viewinfo, entityvaluemap, entityformvals, context_extra_values,
                responseinfo=responseinfo
                )
            info_values = self.info_params(
                message.TASK_CREATE_VIEW_LIST%{'id': base_id, 'label': type_label}
                )
            redirect_uri = self.get_form_refresh_uri(viewinfo, params=info_values)
            responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
        elif task_id == entitytypeinfo.TASK_ID+"/Define_subtype":
            # Create a new subtype of the type described by the current form,
            # then redirect to edit the new type record.
            if viewinfo.check_authorization("edit"):
                return responseinfo.set_http_response(viewinfo.http_response)
            # Extract info from entityformvals
            type_typeinfo = EntityTypeInfo(
                viewinfo.collection, entitytypeinfo.TYPE_ID
                )
            base_type_id        = entityformvals[ANNAL.CURIE.id] or viewinfo.use_entity_id
            base_type_entity    = type_typeinfo.get_entity(base_type_id)
            base_type_label     = base_type_entity.get(RDFS.CURIE.label, base_type_id)
            base_type_uri       = base_type_entity.get(ANNAL.CURIE.uri, "_coll:"+base_type_id)
            base_view_entity_id = base_type_entity.get(ANNAL.CURIE.type_view, "Default_view")
            base_list_entity_id = base_type_entity.get(ANNAL.CURIE.type_list, "Default_list")
            # Set up subtype details
            sub_type_id        = base_type_id+layout.SUFFIX_SUBTYPE
            sub_type_entity_id = entitytypeinfo.TYPE_ID+"/"+sub_type_id
            # Subtype URI derived from base type URI (None if no base URI)
            sub_type_uri       = base_type_uri and base_type_uri + layout.SUFFIX_SUBTYPE
            sub_type_label     = "@@subtype of " + base_type_label
            sub_type_values    = (
                { "type_id":         sub_type_id
                , "type_uri":        sub_type_uri
                , "type_label":      sub_type_label
                , "type_ref":        sub_type_entity_id
                , "type_view_id":    base_view_entity_id
                , "type_list_id":    base_list_entity_id
                , "base_type_id":    base_type_id
                , "base_type_label": base_type_label
                })
            sub_type_comment    = message.SUBTYPE_COMMENT%sub_type_values
            sub_type_supertypes = [{ "@id": base_type_uri }]
            # Create subtype record, and save
            type_typeinfo = EntityTypeInfo(
                viewinfo.collection, entitytypeinfo.TYPE_ID
                )
            sub_type_entity = type_typeinfo.get_create_entity(sub_type_entity_id)
            sub_type_entity[RDFS.CURIE.label]          = sub_type_label
            sub_type_entity[RDFS.CURIE.comment]        = sub_type_comment
            sub_type_entity[ANNAL.CURIE.uri]           = sub_type_uri
            sub_type_entity[ANNAL.CURIE.supertype_uri] = sub_type_supertypes
            sub_type_entity[ANNAL.CURIE.type_view]     = base_view_entity_id
            sub_type_entity[ANNAL.CURIE.type_list]     = base_list_entity_id
            sub_type_entity._save()
            # Construct response that redirects to view new type entity with message
            view_uri_params = (
                { 'coll_id':   viewinfo.coll_id
                , 'type_id':   entitytypeinfo.TYPE_ID
                , 'view_id':   "Type_view"
                , 'entity_id': sub_type_id
                , 'action':    "edit"
                })
            info_values = self.info_params(
                message.TASK_CREATE_SUBTYPE%{'id': sub_type_id, 'label': sub_type_label}
                )
            more_uri_params = viewinfo.get_continuation_url_dict()
            more_uri_params.update(info_values)
            redirect_uri = (
                uri_with_params(
                    self.view_uri("AnnalistEntityEditView", **view_uri_params),
                    more_uri_params
                    )
                )
            responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
        elif task_id == entitytypeinfo.TASK_ID+"/Define_subproperty_field":
            # Create a new field whose property URI is a subproperty of the
            # field described by the current form, then redirect to edit it.
            if viewinfo.check_authorization("edit"):
                return responseinfo.set_http_response(viewinfo.http_response)
            # Extract info from entityformvals
            field_typeinfo = EntityTypeInfo(
                viewinfo.collection, entitytypeinfo.FIELD_ID
                )
            base_field_id       = entityformvals[ANNAL.CURIE.id] or viewinfo.use_entity_id
            base_field_entity   = field_typeinfo.get_entity(base_field_id)
            base_field_label    = base_field_entity.get(RDFS.CURIE.label, base_field_id)
            base_field_prop_uri = base_field_entity.get(ANNAL.CURIE.property_uri, "_coll:"+base_field_id)
            # Set up subtype details
            sub_field_id        = base_field_id+layout.SUFFIX_SUBPROPERTY
            sub_field_entity_id = entitytypeinfo.FIELD_ID+"/"+sub_field_id
            sub_field_prop_uri  = base_field_prop_uri + layout.SUFFIX_SUBPROPERTY
            sub_field_values    = (
                { "field_id":         sub_field_id
                , "field_prop_uri":   sub_field_prop_uri
                , "field_ref":        sub_field_entity_id
                , "base_field_id":    base_field_id
                , "base_field_label": base_field_label
                })
            # Label is computed first so it can appear in the comment template
            sub_field_label     = message.SUBFIELD_LABEL%sub_field_values
            sub_field_values["field_label"] = sub_field_label
            sub_field_comment   = message.SUBFIELD_COMMENT%sub_field_values
            sub_field_superprop_uris = [{ "@id": base_field_prop_uri }]
            # Create subfield record, and save
            field_typeinfo = EntityTypeInfo(
                viewinfo.collection, entitytypeinfo.FIELD_ID
                )
            # Presentation/behaviour fields carried over from the base field
            subfield_copy_fields = (
                [ ANNAL.CURIE.type_id
                , ANNAL.CURIE.field_render_type
                , ANNAL.CURIE.field_value_mode
                , ANNAL.CURIE.field_ref_type
                , ANNAL.CURIE.field_value_type
                , ANNAL.CURIE.field_entity_type
                , ANNAL.CURIE.field_placement
                , ANNAL.CURIE.default_value
                , ANNAL.CURIE.placeholder
                , ANNAL.CURIE.tooltip
                ])
            sub_field_entity = field_typeinfo.get_create_entity(sub_field_entity_id)
            for f in subfield_copy_fields:
                if f in base_field_entity:
                    sub_field_entity[f] = base_field_entity[f]
            sub_field_entity[RDFS.CURIE.label]              = sub_field_label
            sub_field_entity[RDFS.CURIE.comment]            = sub_field_comment
            sub_field_entity[ANNAL.CURIE.property_uri]      = sub_field_prop_uri
            sub_field_entity[ANNAL.CURIE.superproperty_uri] = sub_field_superprop_uris
            sub_field_entity._save()
            # Construct response that redirects to view new field entity with message
            view_uri_params = (
                { 'coll_id':   viewinfo.coll_id
                , 'type_id':   entitytypeinfo.FIELD_ID
                , 'view_id':   "Field_view"
                , 'entity_id': sub_field_id
                , 'action':    "edit"
                })
            info_values = self.info_params(
                message.TASK_CREATE_SUBFIELD%
                { 'id': sub_field_id
                , 'label': sub_field_label
                , 'base_uri': base_field_prop_uri
                }
                )
            more_uri_params = viewinfo.get_continuation_url_dict()
            more_uri_params.update(info_values)
            redirect_uri = (
                uri_with_params(
                    self.view_uri("AnnalistEntityEditView", **view_uri_params),
                    more_uri_params
                    )
                )
            responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
        elif task_id == entitytypeinfo.TASK_ID+"/Define_many_field":
            # Create an unordered-set repeat field ("Group_Set_Row") wrapping
            # the field described by the current form, then redisplay.
            if viewinfo.check_authorization("edit"):
                return responseinfo.set_http_response(viewinfo.http_response)
            # Extract info from entityformvals (form is a field description)
            field_entity_id     = entityformvals[ANNAL.CURIE.id]
            field_label         = entityformvals[RDFS.CURIE.label]
            field_entity_type   = entityformvals[ANNAL.CURIE.field_entity_type]    # domain
            field_property_uri  = entityformvals[ANNAL.CURIE.property_uri]
            field_value_type    = entityformvals[ANNAL.CURIE.field_value_type]     # range
            repeat_field_id     = field_entity_id  + layout.SUFFIX_REPEAT
            # Set members use the same property URI as the wrapped field
            repeat_property_uri = field_property_uri
            repeat_entity_type  = (
                field_entity_type if field_entity_type != ANNAL.CURIE.Field_list
                else ""
                )
            repeat_value_type   = repeat_entity_type
            field_params = { "field_id": field_entity_id, "field_label": field_label }
            repeat_field_label       = message.MANY_FIELD_LABEL%field_params
            repeat_field_comment     = message.MANY_FIELD_COMMENT%field_params
            repeat_field_placeholder = message.MANY_FIELD_PLACEHOLDER%field_params
            repeat_field_add         = message.MANY_FIELD_ADD%field_params
            repeat_field_delete      = message.MANY_FIELD_DELETE%field_params
            # Create repeat-field referencing original field
            field_typeinfo = EntityTypeInfo(
                viewinfo.collection, entitytypeinfo.FIELD_ID
                )
            repeat_field_entity = field_typeinfo.get_create_entity(repeat_field_id)
            repeat_field_entity[ANNAL.CURIE.field_render_type] = "Group_Set_Row"
            repeat_field_entity[ANNAL.CURIE.field_value_mode]  = "Value_direct"
            # Only set member-field list if not already present (idempotent re-run)
            if not repeat_field_entity.get(ANNAL.CURIE.field_fields, None):
                repeat_field_entity[ANNAL.CURIE.field_fields] = (
                    [ { ANNAL.CURIE.field_id:        entitytypeinfo.FIELD_ID+"/"+field_entity_id
                      , ANNAL.CURIE.property_uri:    "@id"
                      , ANNAL.CURIE.field_placement: "small:0,12"
                      }
                    ])
            repeat_field_entity.setdefault(RDFS.CURIE.label,                repeat_field_label)
            repeat_field_entity.setdefault(RDFS.CURIE.comment,              repeat_field_comment)
            repeat_field_entity.setdefault(ANNAL.CURIE.placeholder,         repeat_field_placeholder)
            repeat_field_entity.setdefault(ANNAL.CURIE.property_uri,        repeat_property_uri)
            repeat_field_entity.setdefault(ANNAL.CURIE.field_entity_type,   repeat_entity_type)
            repeat_field_entity.setdefault(ANNAL.CURIE.field_value_type,    repeat_value_type)
            repeat_field_entity.setdefault(ANNAL.CURIE.field_placement,     "small:0,12")
            repeat_field_entity.setdefault(ANNAL.CURIE.repeat_label_add,    repeat_field_add)
            repeat_field_entity.setdefault(ANNAL.CURIE.repeat_label_delete, repeat_field_delete)
            repeat_field_entity._save()
            # Redisplay field view with message
            info_values = self.info_params(
                message.TASK_CREATE_MANY_VALUE_FIELD%
                {'field_id': repeat_field_id, 'label': field_label}
                )
            view_uri_params = (
                { 'coll_id':   viewinfo.coll_id
                , 'type_id':   entitytypeinfo.FIELD_ID
                , 'entity_id': repeat_field_id
                , 'view_id':   "Field_view"
                , 'action':    "edit"
                })
            more_uri_params = viewinfo.get_continuation_url_dict()
            more_uri_params.update(info_values)
            redirect_uri = (
                uri_with_params(
                    self.view_uri("AnnalistEntityEditView", **view_uri_params),
                    more_uri_params
                    )
                )
            responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
        elif task_id in [entitytypeinfo.TASK_ID+"/Define_repeat_field", entitytypeinfo.TASK_ID+"/Define_list_field"]:
        # elif task_id == entitytypeinfo.TASK_ID+"/Define_list_field":
            # Create an ordered-sequence repeat field ("Group_Seq_Row") wrapping
            # the field described by the current form, then redisplay.
            if viewinfo.check_authorization("edit"):
                return responseinfo.set_http_response(viewinfo.http_response)
            # Extract info from entityformvals (form is a field description)
            field_entity_id     = entityformvals[ANNAL.CURIE.id]
            field_label         = entityformvals[RDFS.CURIE.label]
            field_entity_type   = entityformvals[ANNAL.CURIE.field_entity_type]    # domain
            field_property_uri  = entityformvals[ANNAL.CURIE.property_uri]
            field_value_type    = entityformvals[ANNAL.CURIE.field_value_type]     # range
            repeat_field_id     = field_entity_id  + layout.SUFFIX_SEQUENCE
            # Sequence members use a derived property URI, unlike the "many" case
            repeat_property_uri = field_property_uri + layout.SUFFIX_SEQUENCE_P
            repeat_entity_type  = (
                field_entity_type if field_entity_type != ANNAL.CURIE.Field_list
                else ""
                )
            field_params = { "field_id": field_entity_id, "field_label": field_label }
            repeat_field_label       = message.LIST_FIELD_LABEL%field_params
            repeat_field_comment     = message.LIST_FIELD_COMMENT%field_params
            repeat_field_placeholder = message.LIST_FIELD_PLACEHOLDER%field_params
            repeat_field_add         = message.LIST_FIELD_ADD%field_params
            repeat_field_delete      = message.LIST_FIELD_DELETE%field_params
            # Create repeat-field referencing original
            field_typeinfo = EntityTypeInfo(
                viewinfo.collection, entitytypeinfo.FIELD_ID
                )
            repeat_field_entity = field_typeinfo.get_create_entity(repeat_field_id)
            repeat_field_entity[ANNAL.CURIE.field_render_type] = "Group_Seq_Row"
            repeat_field_entity[ANNAL.CURIE.field_value_mode]  = "Value_direct"
            # Only set member-field list if not already present (idempotent re-run)
            if not repeat_field_entity.get(ANNAL.CURIE.field_fields, None):
                repeat_field_entity[ANNAL.CURIE.field_fields] = (
                    [ { ANNAL.CURIE.field_id:        entitytypeinfo.FIELD_ID+"/"+field_entity_id
                      , ANNAL.CURIE.property_uri:    field_property_uri
                      , ANNAL.CURIE.field_placement: "small:0,12"
                      }
                    ])
            repeat_field_entity.setdefault(RDFS.CURIE.label,                repeat_field_label)
            repeat_field_entity.setdefault(RDFS.CURIE.comment,              repeat_field_comment)
            repeat_field_entity.setdefault(ANNAL.CURIE.placeholder,         repeat_field_placeholder)
            repeat_field_entity.setdefault(ANNAL.CURIE.property_uri,        repeat_property_uri)
            repeat_field_entity.setdefault(ANNAL.CURIE.field_entity_type,   repeat_entity_type)
            repeat_field_entity.setdefault(ANNAL.CURIE.field_value_type,    ANNAL.CURIE.Field_list)
            repeat_field_entity.setdefault(ANNAL.CURIE.field_placement,     "small:0,12")
            repeat_field_entity.setdefault(ANNAL.CURIE.repeat_label_add,    repeat_field_add)
            repeat_field_entity.setdefault(ANNAL.CURIE.repeat_label_delete, repeat_field_delete)
            repeat_field_entity._save()
            # Redisplay field view with message
            info_values = self.info_params(
                message.TASK_CREATE_LIST_VALUE_FIELD%
                {'field_id': repeat_field_id, 'label': field_label}
                )
            view_uri_params = (
                { 'coll_id':   viewinfo.coll_id
                , 'type_id':   entitytypeinfo.FIELD_ID
                , 'entity_id': repeat_field_id
                , 'view_id':   "Field_view"
                , 'action':    "edit"
                })
            more_uri_params = viewinfo.get_continuation_url_dict()
            more_uri_params.update(info_values)
            redirect_uri = (
                uri_with_params(
                    self.view_uri("AnnalistEntityEditView", **view_uri_params),
                    more_uri_params
                    )
                )
            responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
        elif task_id == entitytypeinfo.TASK_ID+"/Define_field_ref":
            # Create a multifield-reference field ("RefMultifield") that refers
            # to the field described by the current form, then redisplay.
            if viewinfo.check_authorization("edit"):
                return responseinfo.set_http_response(viewinfo.http_response)
            # Extract info from entityformvals (form is a field description)
            field_entity_id     = entityformvals[ANNAL.CURIE.id]
            field_label         = entityformvals[RDFS.CURIE.label]
            field_entity_type   = entityformvals[ANNAL.CURIE.field_entity_type]
            field_property_uri  = entityformvals[ANNAL.CURIE.property_uri]
            field_value_type    = entityformvals[ANNAL.CURIE.field_value_type]
            field_params        = (
                { "field_id":    field_entity_id
                , "field_label": field_label
                })
            ref_field_id        = field_entity_id    + layout.SUFFIX_REF_FIELD
            ref_property_uri    = field_property_uri + layout.SUFFIX_REF_FIELD_P
            ref_entity_type     = ""    # Ref applicable with any entity type
            ref_value_type      = field_entity_type
            ref_field_ref_type  = viewinfo.get_uri_type_id(field_entity_type)
            ref_field_label       = message.FIELD_REF_LABEL%field_params
            ref_field_comment     = message.FIELD_REF_COMMENT%field_params
            ref_field_placeholder = message.FIELD_REF_PLACEHOLDER%field_params
            # Create field referencing field
            field_typeinfo = EntityTypeInfo(
                viewinfo.collection, entitytypeinfo.FIELD_ID
                )
            ref_field_entity = field_typeinfo.get_create_entity(ref_field_id)
            ref_field_entity[ANNAL.CURIE.field_render_type] = "RefMultifield"
            ref_field_entity[ANNAL.CURIE.field_value_mode]  = "Value_entity"
            # If ref field already exists, use existing values, otherwise new ones...
            if not ref_field_entity.get(ANNAL.CURIE.field_fields, None):
                ref_field_entity[ANNAL.CURIE.field_fields] = (
                    [ { ANNAL.CURIE.field_id:        entitytypeinfo.FIELD_ID+"/"+field_entity_id
                      , ANNAL.CURIE.field_placement: "small:0,12"
                      }
                    ])
            ref_field_entity.setdefault(RDFS.CURIE.label,              ref_field_label)
            ref_field_entity.setdefault(RDFS.CURIE.comment,            ref_field_comment)
            ref_field_entity.setdefault(ANNAL.CURIE.placeholder,       ref_field_placeholder)
            ref_field_entity.setdefault(ANNAL.CURIE.field_entity_type, ref_entity_type)
            ref_field_entity.setdefault(ANNAL.CURIE.field_value_type,  ref_value_type)
            ref_field_entity.setdefault(ANNAL.CURIE.property_uri,      ref_property_uri)
            ref_field_entity.setdefault(ANNAL.CURIE.field_placement,   "small:0,12")
            ref_field_entity.setdefault(ANNAL.CURIE.field_ref_type,    ref_field_ref_type)
            ref_field_entity._save()
            # Display new reference field view with message; continuation same as current view
            info_values = self.info_params(
                message.TASK_CREATE_REFERENCE_FIELD%
                {'field_id': field_entity_id, 'label': field_label}
                )
            view_uri_params = (
                { 'coll_id':   viewinfo.coll_id
                , 'type_id':   entitytypeinfo.FIELD_ID
                , 'entity_id': ref_field_id
                , 'view_id':   "Field_view"
                , 'action':    "edit"
                })
            more_uri_params = viewinfo.get_continuation_url_dict()
            more_uri_params.update(info_values)
            redirect_uri = (
                uri_with_params(
                    self.view_uri("AnnalistEntityEditView", **view_uri_params),
                    more_uri_params
                    )
                )
            responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
        elif task_id == entitytypeinfo.TASK_ID+"/Show_list":
            # Redirect to a list display, with continuation back to the current
            # edit view.
            list_entity_id = viewinfo.use_entity_id or viewinfo.orig_entity_id
            list_uri = self.view_uri(
                "AnnalistEntityGenericList",
                coll_id=viewinfo.coll_id,
                list_id=list_entity_id
                )
            cont_here = viewinfo.get_continuation_here(
                base_here=self.view_uri(
                    "AnnalistEntityEditView",
                    coll_id=viewinfo.coll_id,
                    view_id=viewinfo.view_id,
                    type_id=viewinfo.curr_type_id,
                    entity_id=viewinfo.curr_entity_id or viewinfo.orig_entity_id,
                    action=self.uri_action
                    )
                )
            redirect_uri = uri_with_params(
                list_uri, {'continuation_url': cont_here}
                )
            responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
        else:
            # Unrecognized task id: log it and redisplay the form with an error
            log.error("EntityEdit.save_invoketask: Unknown task_id %s"%(task_id,))
            err_values = self.error_params(
                message.UNKNOWN_TASK_ID%{'task_id': task_id},
                message.SYSTEM_ERROR
                )
            redirect_uri = self.get_form_refresh_uri(viewinfo, params=err_values)
            responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
        return responseinfo
    def _do_something_placeholder(self,
            viewinfo, entityvaluemap, entityformvals,
            context_extra_values,
            some_param=None,
            responseinfo=None
            ):
        """
        @@ <placeholder function skeleton>

        Save current entity and <do something> using current entity values

        viewinfo        contains display context for the form which is being processed
        entityvaluemap  a list of field descriptions that are used to map values between
                        the edited entity and the form display, including references to
                        field descriptions that control how values are rendered.  This
                        is used to locate form fields.
        entityformvals  a dictionary of entity values extracted from the submitted form;
                        these are used either for redisplaying the form if there is an
                        error, or to update the saved entity data.
        context_extra_values
                        a dictionary of additional values that may be used if the
                        form needs to be redisplayed.
        some_param      ...
        responseinfo    a `ResponseInfo` object that is used to collect diagnostic
                        information about form processing.  It may contain an HTTP
                        response object if the form or an error page needs to be
                        displayed, a flag indicating whether the entity data was
                        updated, and any additional messages to be included with
                        any other response.

        Returns the updated responseinfo value.
        """
        responseinfo = self.save_entity(
            viewinfo, entityvaluemap, entityformvals, context_extra_values,
            responseinfo=responseinfo
            )
        if responseinfo.is_response_error():
            return responseinfo
        # Placeholder body: always reports a "not implemented" error
        info_values = self.error_params(
            "@@TODO: implement 'do_something'",
            message.SYSTEM_ERROR
            )
        redirect_uri = self.get_form_refresh_uri(viewinfo, params=info_values)
        responseinfo.set_http_response(HttpResponseRedirect(redirect_uri))
        return responseinfo
def find_add_field(self, entityvaluemap, form_data):
"""
Locate any add field option in form data and, if present, return a description of
the field to be added.
"""
for repeat_desc in self.find_repeat_fields(entityvaluemap):
# log.info("find_add_field: %r"%repeat_desc)
# log.info("find_add_field - trying %s"%(repeat_desc['group_id']+"__add"))
if repeat_desc['group_id']+"__add" in form_data:
return repeat_desc
return None
def find_remove_field(self, entityvaluemap, form_data):
"""
Locate remove field option in form data and, if present, return a description of
the field to be removed, with the list of member indexes to be removed added as
element 'remove_fields'.
"""
for repeat_desc in self.find_repeat_fields(entityvaluemap):
# log.info("find_remove_field: %r"%repeat_desc)
if repeat_desc['group_id']+"__remove" in form_data:
remove_fields_key = repeat_desc['group_id']+"__select_fields"
if remove_fields_key in form_data:
repeat_desc['remove_fields'] = form_data.getlist(remove_fields_key)
else:
repeat_desc['remove_fields'] = []
return repeat_desc
return None
def find_move_field(self, entityvaluemap, form_data):
"""
Locate move field option in form data and, if present, return a description
of the field to be moved, with the member index to be moved added as element
'remove_fields', and the direction of movement as element 'move_direction'.
"""
for repeat_desc in self.find_repeat_fields(entityvaluemap):
# log.info("find_remove_field: %r"%repeat_desc)
move_direction = None
if repeat_desc['group_id']+"__up" in form_data:
move_direction = "up"
elif repeat_desc['group_id']+"__down" in form_data:
move_direction = "down"
if move_direction is not None:
repeat_desc['move_direction'] = move_direction
move_fields_key = repeat_desc['group_id']+"__select_fields"
if move_fields_key in form_data:
repeat_desc['move_fields'] = form_data.getlist(move_fields_key)
else:
repeat_desc['move_fields'] = []
return repeat_desc
return None
def find_new_enum(self, entityvaluemap, form_data):
"""
Locate add enumerated value option in form data and, if present, return a
description of the enumerated field for which a new value is to be created.
Field 'field_ref_type' of the returned value is the type_id of the
enumerated value type.
"""
def is_new_f(fd):
# Using FieldDescription method directly doesn't work
# log.info("@@ find_new_enum is_new_f fd %r"%(fd,))
return fd.has_new_button()
for enum_desc in self.find_fields(entityvaluemap, is_new_f):
# log.info("@@ find_new_enum enum_desc %r"%(enum_desc,))
enum_new_edit = self.form_data_contains(form_data, enum_desc, "new_edit")
if enum_new_edit:
enum_desc['enum_value'] = form_data[enum_new_edit]
return enum_desc
return None
def find_repeat_id(self, entityvaluemap, repeat_id):
"""
Locate a repeated field description by repeat_id.
Returns the field description (see `find_repeat_fields` above), or None
"""
for repeat_desc in self.find_repeat_fields(entityvaluemap):
# log.info("find_add_field: %r"%repeat_desc)
if repeat_desc['group_id'] == repeat_id:
return repeat_desc
return None
def update_repeat_field_group(self,
viewinfo, field_desc, entityvaluemap, entityformvals, **context_extra_values
):
"""
Saves an entity instance data with a repeated field or field group added,
moved or removed, then redisplays the current form.
viewinfo DisplayInfo object describing the current view.
field_desc is a field description for a field or field group to be added
or removed. Fields are removed if the description contains a
'remove_fields' field, which contains a list of the repeat index
values to be removed, otherwise a field is added.
entityvaluemap
an EntityValueMap object for the entity being presented.
entityformvals
is a dictionary of entity values to which the field is added.
context_extra_values
is a dictionary of default and additional values not provided by the
entity itself, that may be needed to render the updated form.
returns None if the entity is updated and saved, or an HttpResponse object to
display an error message.
"""
# log.info("field_desc: %r: %r"%(field_desc,))
if 'remove_fields' in field_desc:
self.remove_entity_field(field_desc, entityformvals)
elif 'move_fields' in field_desc:
self.move_entity_field(field_desc, entityformvals)
else:
self.add_entity_field(field_desc, entityformvals)
# log.info("entityvals: %r"%(entityvals,))
responseinfo = ResponseInfo()
http_response = self.save_entity(
viewinfo, entityvaluemap, entityformvals, context_extra_values,
responseinfo=responseinfo
)
return responseinfo.get_http_response()
def find_repeat_fields(self, entityvaluemap):
"""
Iterate over repeat field groups in the current view.
Each value found is returned as a field description dictionary
(cf. FieldDescription).
"""
def is_repeat_f(fd):
return fd.is_repeat_group()
return self.find_fields(entityvaluemap, is_repeat_f)
def add_entity_field(self, add_field_desc, entity):
"""
Add a described repeat field group to the supplied entity values.
See 'find_repeat_fields' for information about the field description.
"""
# log.info("*** add_field_desc %r"%(add_field_desc,))
# log.info("*** entity %r"%(entity,))
field_val = dict(
[ (f['field_property_uri'], None)
for f in add_field_desc['group_field_descs']
])
entity[add_field_desc['field_property_uri']].append(field_val)
return
def remove_entity_field(self, remove_field_desc, entity):
repeatvals_key = remove_field_desc['field_property_uri']
old_repeatvals = entity[repeatvals_key]
new_repeatvals = []
for i in range(len(old_repeatvals)):
if str(i) not in remove_field_desc['remove_fields']:
new_repeatvals.append(old_repeatvals[i])
entity[repeatvals_key] = new_repeatvals
return
def move_entity_field(self, move_field_desc, entity):
def reverselist(l):
return list(reversed(l))
def move_up(vals):
"""
Local function to move selected elements towards the head of a list.
Operates on a list of (valuye,flag) pairs
Based on spike/rearrange-list/move_up.lhs:
> move_up p [] = []
> move_up p [v] = [v]
> move_up p (v:vtail)
> | p v = v:(move_up p vtail)
> move_up p (v1:v2:vtail)
> | not (p v1) && (p v2)
> = v2:(move_up p (v1:vtail))
> | not (p v1) && not (p v2)
> = v1:(move_up p (v2:vtail))
"""
if len(vals) <= 1:
return vals
v1 = vals[0]
v2 = vals[1]
if v1[1]:
return [v1] + move_up(vals[1:])
if v2[1]:
return [v2] + move_up([v1]+vals[2:])
else:
return [v1] + move_up([v2]+vals[2:])
raise RuntimeError("move_entity_field/move_up cases exhausted without match")
# Shuffle selected items up or down
repeatvals_key = move_field_desc['field_property_uri']
old_repeatvals = entity[repeatvals_key]
old_index_list = (
[ (i, str(i) in move_field_desc['move_fields'])
for i in range(len(old_repeatvals))
])
if move_field_desc['move_direction'] == 'up':
new_index_list = move_up(old_index_list)
# log.info("***** Move up: %r"%(new_index_list,))
elif move_field_desc['move_direction'] == 'down':
new_index_list = reverselist(move_up(reverselist(old_index_list)))
# log.info("***** Move down: %r"%(new_index_list,))
else:
raise RuntimeError("move_entity_field - 'move_direction' must be 'up' or 'down'")
new_repeatvals = (
[ old_repeatvals[new_index_list[i][0]]
for i in range(len(new_index_list))
])
entity[repeatvals_key] = new_repeatvals
return
def find_import(self, entityvaluemap, form_data):
"""
Locate any import option in form data and, if present, return a
description of the field describing the value to be imported.
"""
def is_import_f(fd):
return fd.is_import_field()
for enum_desc in self.find_fields(entityvaluemap, is_import_f):
enum_import = self.form_data_contains(form_data, enum_desc, "import")
if enum_import:
enum_desc.set_field_instance_name(enum_import)
return enum_desc
return None
def find_task_button(self, entityvaluemap, form_data):
"""
If form data indicates a task button has been triggered,
return its Id, otherwise return None.
"""
# Implementation deferred until save logioc can be refactored
# [ entitytypeinfo.TASK_ID+"/Copy_type_view_list"
task_ids = (
[ entitytypeinfo.TASK_ID+"/Define_view_list"
, entitytypeinfo.TASK_ID+"/Define_subtype"
, entitytypeinfo.TASK_ID+"/Define_subproperty_field"
# , entitytypeinfo.TASK_ID+"/Define_subtype_view_list"
, entitytypeinfo.TASK_ID+"/Define_repeat_field"
, entitytypeinfo.TASK_ID+"/Define_list_field"
, entitytypeinfo.TASK_ID+"/Define_many_field"
, entitytypeinfo.TASK_ID+"/Define_field_ref"
, entitytypeinfo.TASK_ID+"/Show_list"
])
for t in task_ids:
if extract_entity_id(t) in form_data:
return t
return None
# The next two methods are used to locate form fields, which may be in repeat
# groups, that contain activated additional controls (buttons).
#
# `find_fields` is a generator that locates candidate fields that *might* have
# a designated control, and
# `form_data_contains` tests a field returned by `find_fields` to see if a
# designated control (identified by a name suffix) has been activated.
def find_fields(self, entityvaluemap, filter_f):
"""
Iterate over fields that satisfy the supplied predicate
entityvaluemap is the list of entity-value map entries for the current view
filter_f is a predicate that is applied to field description values,
and returns True for those that are to be returned.
Returns a generator of FieldDescription values from the supplied entity
value map that satisfy the supplied predicate.
"""
# Recursive helper function walks through list of field descriptions,
# including those that are nested in field group descriptions.
def _find_fields(fieldmap, group_list):
if fieldmap is None:
log.warning("entityedit.find_fields: fieldmap is None")
return
# Always called with list of field descriptions
for field_desc in fieldmap:
# log.debug("find_fields: field_desc %r"%(field_desc))
if filter_f(field_desc):
field_desc['group_list'] = group_list
# log.debug(
# "entityedit.find_fields: field name %s, prefixes %r"%
# (field_desc.get_field_name(), group_list)
# )
yield field_desc
if field_desc.has_field_list():
if not field_desc.group_field_descs():
# this is for resilience in the face of bad data
groupref = field_desc.group_ref()
if groupref and not valid_id(groupref):
log.warning(
"entityedit.find_fields: invalid group_ref %s in field description for %s"%
(groupref, field_desc['field_id'])
)
else:
log.warning(
"entityedit.find_fields: no field list or group ref in field description for %s"%
(field_desc['field_id'])
)
elif 'group_id' not in field_desc:
log.error(
"entityedit.find_fields: groupref %s, missing 'group_id' in field description for %s"%
(groupref, field_desc['field_id'])
)
else:
group_fields = field_desc['group_field_descs']
new_group_list = group_list + [field_desc['group_id']]
for fd in _find_fields(group_fields, new_group_list):
yield fd
return
# Entry point: locate list of fields and return generator
for evmapitem in entityvaluemap:
# Data entry fields are always presented within a top-level FieldListValueMap
# cf. self.get_view_entityvaluemap.
itemdesc = evmapitem.get_structure_description()
if itemdesc['field_type'] == "FieldListValueMap":
return _find_fields(itemdesc['field_list'], [])
return
def form_data_contains(self, form_data, field_desc, field_name_postfix):
    """
    Tests to see if the form data contains a result field corresponding to
    the supplied field descriptor (as returned by 'find_fields') with a
    postfix value as supplied.

    form_data           dictionary of submitted form field values.
    field_desc          field description, with 'group_list' previously
                        set by 'find_fields'.
    field_name_postfix  postfix appended (after "__") to a candidate field
                        name when testing form_data for a match.

    Returns the full name of the field found (without the trailing postfix),
    or None.
    """
    field_name = field_desc.get_field_name()
    def _scan_groups(prefix, group_list):
        """
        Recursively scan possible group-index prefixes for the field name.

        return (stop, result)
        where:
          'stop' is True if there are no more possible results to try.
          'result' is the final result to return if `stop` is True.
        """
        stop_all = True
        if group_list == []:
            # Innermost level: test the fully-prefixed field name directly.
            try_field = prefix + field_name
            if try_field in form_data:
                # stop only if the postfixed form of the name is also present
                try_postfix = try_field + "__" + field_name_postfix
                return (try_postfix in form_data, try_field)
        else:
            # Try successive indexes of the enclosing group; each repeated
            # group value gets a "<group>__<index>__" prefix in form data.
            group_head = group_list[0]
            group_tail = group_list[1:]
            index = 0
            while True:
                next_prefix = "%s%s__%d__"%(prefix, group_head, index)
                (stop, result) = _scan_groups(next_prefix, group_tail)
                if stop:
                    if result:
                        return (True, result)
                    else:
                        break
                stop_all = False
                index += 1
        # Falls through when nothing matched at this level
        return (stop_all, None)
    matched, result = _scan_groups("", field_desc["group_list"])
    return result if matched else None
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/entityedit.py
|
entityedit.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import os
import os.path
import json
import markdown
import traceback
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.template import loader
from django.views import generic
from django.views.decorators.csrf import csrf_exempt
from django.urls import resolve, reverse
from django.conf import settings
import login.login_views
from utils.py3porting import urljoin
from utils.ContentNegotiationView import ContentNegotiationView
import annalist
from annalist import message
from annalist import layout
from annalist import util
from annalist.identifiers import RDF, RDFS, ANNAL
from annalist.models.site import Site
from annalist.models.annalistuser import (
AnnalistUser,
site_default_user_id, site_default_user_uri,
default_user_id, default_user_uri,
unknown_user_id, unknown_user_uri
)
from annalist.views.uri_builder import uri_with_params, continuation_params, uri_params
# -------------------------------------------------------------------------------------------
#
# Utility methods and data
#
# -------------------------------------------------------------------------------------------
LOGIN_URIS = None # Populated by first call of `authenticate`
# -------------------------------------------------------------------------------------------
#
# Generic Annalist view (contains logic applicable to all pages)
#
# -------------------------------------------------------------------------------------------
class AnnalistGenericView(ContentNegotiationView):
"""
Common base class for Annalist views
"""
def __init__(self):
    """
    Initialize view state: the site base URI/directory are resolved once;
    site object, site data and user permissions are cached lazily.
    """
    super(AnnalistGenericView, self).__init__()
    self._sitebaseuri = reverse("AnnalistHomeView")
    # self._sitebasedir = os.path.join(settings.BASE_DATA_DIR, layout.SITE_DIR)
    self._sitebasedir = settings.BASE_SITE_DIR
    self._site = None           # Lazily-created Site object (see `site`)
    self._site_data = None      # Lazily-loaded site data (see `site_data`)
    self._user_perms = {}       # Permissions cache, keyed by collection id
    return
# @@TODO: make host parameter required in the following?
def site(self, host=""):
    # Return the Site object for this view, creating and caching it on first use.
    if not self._site:
        self._site = Site(self._sitebaseuri, self._sitebasedir, host=host)
    return self._site

def site_data(self, host=""):
    # Return site data for this view, loading and caching it on first use.
    # Logs an error (and returns a false-y value) if the data cannot be loaded.
    if not self._site_data:
        self._site_data = self.site(host=host).site_data()
    if not self._site_data:
        log.error("views.generic.site_data: failed to load site data (%s)"%
                  self.site(host=host)._dir_path_uri()[1])
    return self._site_data
def error(self, values, continuation_url=None):
    """
    Construct HTTP error response.

    This is an application-specific override of a method defined
    in ContentNegotiationView.

    values is a dictionary of values to be passed as a context to the
           error display page.  Typically, this contains more details
           about the error (cf. ContentNegotiationView.errorvalues)
    continuation_url
           is a URL to (re)display when any error is dismissed or has
           otherwise been handled.
    """
    template = loader.get_template('annalist_error.html')
    context = dict(values)
    if continuation_url:
        # Pass continuation both as a bare URL and as a ready-made
        # query-parameter string for use in template links.
        context['continuation_url'] = continuation_url
        context['continuation_param'] = uri_params({ 'continuation_url': continuation_url })
    return HttpResponse(template.render(context), status=values['status'], reason=values['reason'])
def view_uri(self, viewname, **kwargs):
    """
    Return view URI given view name and any additional arguments
    (resolved via Django URL reversal).
    """
    return reverse(viewname, kwargs=kwargs)

def get_collection_view_url(self, coll_id):
    """
    Return view (root) URL for specified collection
    """
    return self.view_uri(
        "AnnalistCollectionView",
        coll_id=coll_id
        )

def get_site_base_url(self):
    """
    Return base URL for current site
    """
    return self.view_uri("AnnalistHomeView")

def get_collection_base_url(self, coll_id):
    """
    Return base URL for specified collection
    """
    return urljoin(self.get_collection_view_url(coll_id), layout.COLL_BASE_REF)

def get_entity_base_url(self, coll_id, type_id, entity_id):
    """
    Return base URL for specified entity
    """
    return self.view_uri(
        "AnnalistEntityAccessView",
        coll_id=coll_id, type_id=type_id, entity_id=entity_id
        )
def get_list_base_url(self, coll_id, type_id, list_id):
    """
    Return base URL for specified list, which is one of:

        .../coll_id/d/
        .../coll_id/d/type_id/
        .../coll_id/l/list_id/
        .../coll_id/l/list_id/type_id/

    coll_id     collection id (always required).
    type_id     entity type id, or None for a list over all types.
    list_id     list definition id, or None for the default list view.
    """
    # Guard-clause form: handle the most specific case first.
    if list_id is not None:
        return self.view_uri(
            "AnnalistEntityGenericList",
            coll_id=coll_id, list_id=list_id, type_id=type_id
            )
    if type_id is not None:
        return self.view_uri(
            "AnnalistEntityDefaultListType",
            coll_id=coll_id, type_id=type_id
            )
    return self.view_uri(
        "AnnalistEntityDefaultListAll",
        coll_id=coll_id
        )
def resource_response(self, resource_file, resource_type, links={}):
    """
    Construct response containing body of referenced resource (or list),
    with supplied resource_type as its content_type.

    resource_file  open file-like object from which the body is read.
    resource_type  MIME content type for the response.
    links          optional links to be added as HTTP Link: headers
                   (see add_link_header for the expected structure).
    """
    # @@TODO: assumes response can reasonably be held in memory;
    #         consider 'StreamingHttpResponse'?
    response = HttpResponse(content_type=resource_type)
    response = self.add_link_header(response, links)
    response.write(resource_file.read())
    return response
def continuation_next(self, request_dict={}, default_cont=None):
    """
    Returns a continuation URL to be used when returning from the current view,
    or the supplied default if no continuation is specified for the current view.
    """
    # Note: falls back to the default when the parameter is present but blank
    return request_dict.get("continuation_url") or default_cont or None

def continuation_here(self, request_dict={}, default_cont="", base_here=None):
    """
    Returns a URL that returns control to the current page, to be passed as a
    continuation_uri parameter to any subsidiary pages invoked.  Such continuation
    URIs are cascaded, so that the return URI includes the `continuation_url`
    for the current page.

    request_dict    is a request dictionary that is expected to contain the
                    continuation_url value to use, and other parameters to
                    be included in any continuation back to the current page.
    default_cont    is a default continuation URI to be used for returning from
                    the current page if the current POST request does not specify
                    a continuation_url query parameter.
    base_here       if specified, overrides the current request path as the base URI
                    to be used to return to the currently displayed page (e.g. when
                    current request URI is non-idempotent, such as creating a new
                    entity).
    """
    # Note: use default if request/form parameter is present but blank:
    if not base_here:
        base_here = self.get_request_path()
    continuation_next = self.continuation_next(request_dict, default_cont)
    return uri_with_params(base_here,
        continuation_params({"continuation_url": continuation_next}, request_dict)
        )
def info_params(self, info_message, info_head=message.ACTION_COMPLETED):
    """
    Returns a URI query parameter dictionary with details that are used to generate an
    information message.
    """
    return {"info_head": info_head, "info_message": info_message}

def redirect_info(self, viewuri, view_params={}, info_message=None, info_head=message.ACTION_COMPLETED):
    """
    Redirect to a specified view with an information/confirmation message for display
    (see templates/base_generic.html for display details)
    """
    redirect_uri = uri_with_params(viewuri, view_params, self.info_params(info_message, info_head))
    return HttpResponseRedirect(redirect_uri)

def error_params(self, error_message, error_head=message.INPUT_ERROR):
    """
    Returns a URI query parameter dictionary with details that are used to generate an
    error message.
    """
    return {"error_head": error_head, "error_message": error_message}

def redirect_error(self,
        viewuri, view_params={}, error_message=None, error_head=message.INPUT_ERROR
        ):
    """
    Redirect to a specified view with an error message for display
    (see templates/base_generic.html for display details)
    """
    redirect_uri = uri_with_params(
        viewuri, view_params, self.error_params(error_message, error_head=error_head)
        )
    return HttpResponseRedirect(redirect_uri)
def check_site_data(self):
    """
    Check that site data is present and accessible.  If not, return an HTTP error
    response, otherwise None.
    """
    site_data = self.site_data()
    if not site_data:
        # Site data missing or unreadable: report as an internal server error
        return self.error(
            self.errorvalues(500, "Internal server error",
                "Resource %(request_uri)s: Unable to load Annalist site data"
                )
            )
    return None
# Authentication and authorization

def authenticate(self, continuation_url):
    """
    Return None if required authentication is present, otherwise
    an appropriate login redirection response.

    continuation_url    is a URL to be retrieved and processed
                        when the authentication process completes
                        (or is aborted).

    self.credential is set to credential that can be used to access resource
    """
    # Cache copy of URIs to use with login; resolved once per process
    # and stored in the module-level LOGIN_URIS.
    global LOGIN_URIS
    if LOGIN_URIS is None:
        LOGIN_URIS = (
            { "login_form_url": self.view_uri('LoginUserView')
            , "login_post_url": self.view_uri('LoginPostView')
            , "login_done_url": self.view_uri('OIDC_AuthDoneView')
            , "user_profile_url": self.view_uri('AnnalistProfileView')
            })
    return login.login_views.confirm_authentication(self,
        continuation_url=continuation_url,
        **LOGIN_URIS
        )
def get_user_identity(self):
    """
    returns the username and authenticated URI for the user making the
    current request, as a pair (username, URI)
    """
    user = self.request.user
    if user.is_authenticated:
        return (user.username, "mailto:"+user.email)
    # NOTE(review): these literals presumably mirror the imported
    # unknown_user_id / unknown_user_uri constants - confirm, and
    # consider using those constants directly.
    return ("_unknown_user_perms", "annal:User/_unknown_user_perms")
def get_user_permissions(self, collection, user_id, user_uri):
    """
    Get a user permissions record (AnnalistUser).

    To return a value, both the user_id and the user_uri (typically a mailto: URI, but
    may be any *authenticated* identifier) must match.  This is to prevent access to
    records of a deleted account being granted to a new account created with the
    same user_id (username).

    This function includes any collection- or site- default permissions in the
    set of permissions returned.

    Permissions are cached in the view object so that the permissions record is read at
    most once for any HTTP request.

    collection  the collection for which permissions are required.
    user_id     local identifier for the user permissions to retrieve.
    user_uri    authenticated identifier associated with the user_id.  That is,
                the authentication service used is presumed to confirm that
                the identifier belongs to the user currently logged in with
                the supplied username.

    returns an AnnalistUser object containing permissions for the identified user.
    """
    # Cache key: collection id, or the site-data id for site-level permissions
    coll_id = collection.get_id() if collection else layout.SITEDATA_ID
    if coll_id not in self._user_perms:
        parentcoll = collection or self.site()
        if user_id == unknown_user_id:
            # Don't apply collection default-user permissions if no authenticated user
            user_perms = parentcoll.get_user_permissions(unknown_user_id, unknown_user_uri)
        else:
            # Combine user permissions with default-user permissions for collection
            default_perms = parentcoll.get_user_permissions(site_default_user_id, default_user_uri)
            if not default_perms:
                default_perms = parentcoll.get_user_permissions(default_user_id, default_user_uri)
            user_perms = parentcoll.get_user_permissions(user_id, user_uri) or default_perms
            # NOTE(review): if neither a user record nor any default record
            # exists, user_perms/default_perms could be None here and the
            # subscripting below would raise - presumably a default
            # permissions record always exists; confirm.
            user_perms[ANNAL.CURIE.user_permission] = list(
                set(user_perms[ANNAL.CURIE.user_permission]) |
                set(default_perms[ANNAL.CURIE.user_permission])
                )
        self._user_perms[coll_id] = user_perms
    return self._user_perms[coll_id]
def get_permissions(self, collection):
    """
    Get permissions for the current user
    (see get_user_permissions for details of the value returned).
    """
    user_id, user_uri = self.get_user_identity()
    return self.get_user_permissions(collection, user_id, user_uri)
def get_message_data(self):
    """
    Returns a dictionary of message data that can be passed in the request parameters
    to be displayed on a different page.
    """
    messagedata = {}
    def uri_param_val(msg_name, hdr_name, hdr_default):
        """
        Incorporate values from the incoming URI into the message data.
        """
        # NOTE: local name `message` shadows the imported `message` module
        # within this nested function only.
        message = self.request.GET.get(msg_name, None)
        if message:
            messagedata[msg_name] = message
            messagedata[hdr_name] = self.request.GET.get(hdr_name, hdr_default)
        return
    uri_param_val("info_message", "info_head", message.ACTION_COMPLETED)
    uri_param_val("error_message", "error_head", message.INPUT_ERROR)
    return messagedata
def authorize(self, scope, collection, continuation_url=None):
    """
    Return None if user is authorized to perform the requested operation,
    otherwise appropriate 401 Authorization Required or 403 Forbidden response.
    May be called with or without an authenticated user.

    scope       indication of the operation requested to be performed.
                e.g. "VIEW", "CREATE", "UPDATE", "DELETE", "CONFIG", ...
    collection  is the collection to which the requested action is directed,
                or None if the test is against site-level permissions.
    continuation_url
                is a URL to (re)display when any error is dismissed or has
                otherwise been handled.
    """
    user_id, user_uri = self.get_user_identity()
    if not util.valid_id(user_id):
        # Malformed user id: treat as a bad request.
        log.warning("Invalid user_id %s, URI %s"%(user_id, user_uri))
        # Local renamed from `message` to avoid shadowing the imported
        # `message` module.
        err_msg = "Bad request to %(request_uri)s: invalid user_id: '"+user_id+"'"
        return self.error(self.error400values(message=err_msg), continuation_url=continuation_url)
    user_perms = self.get_user_permissions(collection, user_id, user_uri)
    if not user_perms:
        log.warning("No user permissions found for user_id %s, URI %s"%(user_id, user_uri))
        log.warning("".join(traceback.format_stack()))
        return self.error(self.error403values(scope=scope), continuation_url=continuation_url)
    # user_perms is an AnnalistUser object
    # (Two unused `coll_id` computations removed: they fed only
    # commented-out logging.)
    if scope not in user_perms[ANNAL.CURIE.user_permission]:
        # 401 for the anonymous user (who may yet log in),
        # 403 for an authenticated user lacking the permission.
        if user_id == unknown_user_id:
            err = self.error401values(scope=scope)
        else:
            err = self.error403values(scope=scope)
        return self.error(err, continuation_url=continuation_url)
    return None
def form_action_auth(self, action, auth_collection, perm_required, continuation_url=None):
    """
    Check that the requested form action is authorized for the current user.

    action          is the requested action: new, edit, copy, etc.
    auth_collection is the collection to which the requested action is directed,
                    or None if the test is against site-level permissions
                    (which should be stricter than all collections).
    perm_required   is a data dependent dictionary that maps from action to
                    permissions required to perform the action.  The structure
                    is similar to that of 'action_scope' (below) that provides
                    a fallback mapping.
    continuation_url
                    is a URL to (re)display when any error is dismissed or has
                    otherwise been handled.

    Returns None if the desired action is authorized for the current user, otherwise
    an HTTP response value to return an error condition.
    """
    if action in perm_required:
        auth_scope = perm_required[action]
    else:
        # Unknown action: log it, and request a scope value that presumably
        # matches no granted permission, so authorization fails.
        log.warning("form_action_auth: unknown action: %s"%(action))
        log.warning("perm_required: %r"%(perm_required,))
        auth_scope = "UNKNOWN"
    return self.authorize(auth_scope, auth_collection, continuation_url=continuation_url)
# Entity access

def get_entity(self, entity_id, typeinfo, action):
    """
    Create local entity object or load values from existing.

    entity_id   entity id to create or load
    typeinfo    EntityTypeInfo object for the entity
    action      is the requested action: new, edit, copy, view

    Returns an object of the appropriate type.

    If an existing entity is accessed, values are read from storage.
    If the identified entity does not exist and `action` is "new" then
    a new entity is initialized (but not saved), otherwise the entity
    value returned is None.
    """
    entity = typeinfo.get_entity(entity_id, action)
    if entity is None:
        # Log diagnostics for missing entity
        parent_id = typeinfo.entityparent.get_id()
        altparent_id = (
            typeinfo.entityaltparent.get_id() if typeinfo.entityaltparent
            else "(none)"
            )
        log.info(
            "AnnalistGenericView.get_entity, not found: parent_id %s, altparent_id %s, entity_id %s"%
            (parent_id, altparent_id, entity_id)
            )
    return entity
# HTML rendering

@ContentNegotiationView.accept_types(["text/html", "application/html", "*/*"])
def render_html(self, resultdata, template_name, links=[]):
    """
    Construct an HTML response based on supplied data and template name.

    Also contains logic to interpolate message values from the incoming URI,
    for error and confirmation message displays.  These additional message
    displays are commonly handled by the "base_generic.html" underlay template.

    resultdata     dictionary of values used as the template context
                   (updated in place with message/help/version values).
    template_name  name of the Django template used to render the page.
    links          optional links added as HTTP Link: headers
                   (see add_link_header).
    """
    def uri_param_val(name, default):
        """
        Incorporate values from the incoming URI into the result data, if not already defined.
        """
        if name not in resultdata:
            resultdata[name] = self.request.GET.get(name, default)
        return
    uri_param_val("info_head", message.ACTION_COMPLETED)
    uri_param_val("info_message", None)
    uri_param_val("error_head", message.INPUT_ERROR)
    uri_param_val("error_message", None)
    resultdata["annalist_version"] = annalist.__version__
    if 'help_filename' in resultdata:
        # Load help text from an HTML fragment file, if one is present
        help_filepath = os.path.join(
            settings.SITE_SRC_ROOT,
            "annalist/views/help/%s.html"%resultdata['help_filename']
            )
        if os.path.isfile(help_filepath):
            with open(help_filepath, "r") as helpfile:
                resultdata['help_text'] = helpfile.read()
    if 'help_markdown' in resultdata:
        # Markdown help text (assigned later) takes precedence over file-based text
        resultdata['help_text'] = markdown.markdown(resultdata['help_markdown'])
    template = loader.get_template(template_name)
    context = resultdata
    response = HttpResponse(template.render(context, request=self.request))
    # Advertise the raw entity data as an alternate representation, if known
    if "entity_data_ref" in resultdata:
        alt_link = [ { "ref": resultdata["entity_data_ref"], "rel": "alternate" } ]
    else:
        alt_link = []
    response = self.add_link_header(response, links=alt_link+links)
    return response
# JSON and Turtle content negotiation and redirection

@ContentNegotiationView.accept_types(["application/json", "application/ld+json"])
def redirect_json(self, jsonref, links=[]):
    """
    Construct a redirect response to access JSON data at the designated URL.

    jsonref     is the URL from which JSON data may be retrieved.
    links       is an optional array of link values to be added to the HTTP response
                (see method add_link_header for description).

    Returns an HTTP redirect response object if the current request is for JSON data,
    otherwise None.
    """
    response = HttpResponseRedirect(jsonref)
    response = self.add_link_header(response, links=links)
    return response

@ContentNegotiationView.accept_types(["text/turtle", "application/x-turtle", "text/n3"])
def redirect_turtle(self, turtleref, links=[]):
    """
    Construct a redirect response to access Turtle data at the designated URL.

    turtleref   is the URL from which Turtle data may be retrieved.
    links       is an optional array of link values to be added to the HTTP response
                (see method add_link_header for description).

    Returns an HTTP redirect response object if the current request is for Turtle data,
    otherwise None.
    """
    response = HttpResponseRedirect(turtleref)
    response = self.add_link_header(response, links=links)
    return response
def add_link_header(self, response, links=[]):
    """
    Add HTTP Link header to response, and return the updated response.

    response    response to be returned.
    links       is an optional array of link values to be added to the HTTP
                response, where each link is specified as:
                    { "rel": <relation-type>
                    , "ref": <target-url>
                    }
    """
    header_values = [ '<%(ref)s>; rel="%(rel)s"'%link for link in links ]
    if header_values:
        response["Link"] = ",".join(header_values)
    return response
# Default view methods return 405 Method Not Allowed
def get(self, request, *args, **kwargs):
    # Subclasses override to support GET
    return self.error(self.error405values())

def head(self, request, *args, **kwargs):
    # HEAD is handled via GET
    return self.get(request, *args, **kwargs)

def put(self, request, *args, **kwargs):
    # Subclasses override to support PUT
    return self.error(self.error405values())

def post(self, request, *args, **kwargs):
    # Subclasses override to support POST
    return self.error(self.error405values())

def delete(self, request, *args, **kwargs):
    # Subclasses override to support DELETE
    return self.error(self.error405values())
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/generic.py
|
generic.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist action confirmation view definition
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
# import os.path
# import json
# import random
import logging
# import uuid
# import copy
import logging
log = logging.getLogger(__name__)
# import rdflib
# import httplib2
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.template import loader
from django.conf import settings
from utils.ContentNegotiationView import ContentNegotiationView
from annalist.models.annalistuser import AnnalistUser
from annalist.models.annalistuser import (
site_default_user_id, site_default_user_uri,
default_user_id, default_user_uri,
unknown_user_id, unknown_user_uri
)
from annalist.views.generic import AnnalistGenericView
class ProfileView(AnnalistGenericView):
    """
    View class to handle requests to the Annalist user profile URI
    """
    def __init__(self):
        super(ProfileView, self).__init__()
        return

    # GET

    def get(self, request):
        # Display the user profile page; requires an authenticated user.
        def resultdata():
            # Build the template context for the profile page.
            username, useruri = self.get_user_identity()
            recent_userid = request.session.get('recent_userid', username)
            return (
                { 'title': self.site_data()["title"]
                , 'user': request.user
                , 'username': username
                , 'useruri': useruri
                , 'userid': recent_userid
                , 'continuation_url': continuation_url
                })
        continuation_url = self.continuation_next(
            request.GET, self.view_uri("AnnalistHomeView")
            )
        return (
            self.authenticate(continuation_url) or
            self.render_html(resultdata(), 'annalist_profile.html') or
            self.error(self.error406values())
            )

    def post(self, request):
        # Handle POST from the profile page: if "continue" was selected,
        # ensure the logged-in user has a permissions record (creating one
        # from defaults if needed), then redirect to the continuation URL.
        if request.POST.get('continue', None):
            # Check if user permissions are defined
            user_id, user_uri = self.get_user_identity()
            site_coll = self.site().site_data_collection()
            user_perms = site_coll.get_user_permissions(user_id, user_uri)
            if not user_perms:
                # Initialize new user permissions from system defaults
                # (NOTE: site-specific default permissions are incorporated dynamically)
                default_perms = site_coll.get_user_permissions(
                    default_user_id, default_user_uri
                    )
                new_perms_values = default_perms.get_values()
                new_perms_values.update(
                    { "annal:id": None
                    , "rdfs:label": "Permissions for %s"%user_id
                    , "rdfs:comment": "# Permissions for %s\r\n\r\n"%user_id+
                                      "Permissions for user %s (copied from default)"%user_id
                    , "annal:user_uri": user_uri
                    })
                # NOTE(review): `new_perms` is unused; AnnalistUser.create is
                # presumably called for its side effect of saving the record.
                new_perms = AnnalistUser.create(site_coll, user_id, new_perms_values)
        continuation_url = request.POST.get("continuation_url", "../")
        return HttpResponseRedirect(continuation_url)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/profile.py
|
profile.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re
from utils.py3porting import urljoin, urlsplit, urlunsplit, quote, unquote
# Character classes from RFC 3986 (section 2.2):
gen_delims = ":/?#[]@"
sub_delims = "!$&'()*+,;="
unreserved = "-._~"
# Characters safe to leave unescaped inside a query string: everything
# above except "?", "&" and "#", which would alter the URI structure.
query_safe = "".join(c for c in gen_delims + sub_delims + unreserved if c not in "?&#")

def uri_quote_param(pval):
    """
    Apply %-escaping to a supplied query parameter value for inclusion in a URI.
    """
    return quote(pval, query_safe)
def uri_base(uri):
    """
    Get the base URI from the supplied URI by removing any parameters and/or fragments.
    """
    # Strip the fragment first, then any query component.
    return uri.split("#", 1)[0].split("?", 1)[0]
def uri_query_key_val(p):
    """
    Returns a (key, value) pair for a supplied query parameter string.

    The value part returned has %-escaping unapplied.
    If no '=' is present, the value part returned is an empty string.
    """
    key, _, val = p.partition("=")
    return (key, unquote(val))

def uri_param_dict(uri):
    """
    Extract parameter dictionary from the supplied URI

    >>> uri_param_dict("base:path?q1=p1&q2=p2#frag") == { 'q1': "p1", 'q2': "p2"}
    True
    >>> uri_param_dict("base:path?q1=p1=p1&q2=p2%26p2&q3") == { 'q1': "p1=p1", 'q2': "p2&p2", 'q3': "" }
    True
    """
    # Drop any fragment, then take everything after the first "?"
    # (empty string when there is no query component).
    query = (uri.split("#", 1)[0].split("?", 1) + [""])[1]
    return dict(uri_query_key_val(qp) for qp in query.split("&"))
def build_dict(*param_dicts, **param_dict):
    """
    Merge the supplied keyword arguments with any non-None dictionaries
    supplied positionally; later positional dictionaries override earlier
    values, and all positional values override keyword values.
    """
    merged = dict(param_dict)
    for extra in param_dicts:
        if extra is not None:
            merged.update(extra)
    return merged
def uri_params(*param_dicts, **param_dict):
    """
    Construct a URI parameter string ("?k=v&...") from the supplied dictionary
    values (or values which are convertible to a dictionary using `dict()`).
    Parameters with false-y values are omitted; returns "" if nothing remains.
    """
    merged = build_dict(*param_dicts, **param_dict)
    parts = [
        pnam + "=" + uri_quote_param(pval)
        for pnam, pval in merged.items() if pval
        ]
    if parts:
        return "?" + "&".join(parts)
    return ""
def uri_with_params(base_uri, *param_dicts, **param_dict):
    """
    Construct a URI from the supplied base URI (with any parameters and/or
    fragment removed) and URI parameters created using the supplied
    dictionary values.
    """
    stripped_uri = uri_base(base_uri)
    query_string = uri_params(*param_dicts, **param_dict)
    return stripped_uri + query_string
def scope_params(*param_dicts, **param_dict):
    """
    Return URI parameters from the supplied dictionary specifically used for
    entity selection, ignoring all others.  These are the parameters which,
    in conjunction with a base URI, represent a resource or set of resources
    to be returned.

    Preserves the following query params from the original request:
        scope
        search
    Query parameters not preserved (among others):
        continuation_url
        info_head
        info_message
        error_head
        error_message
        add_field
        type
    """
    merged = build_dict(*param_dicts, **param_dict)
    # 'search_for' (form field name) takes precedence over 'search'
    search_val = merged.get('search_for') or merged.get('search') or None
    return (
        { 'search': search_val
        , 'scope': merged.get('scope') or None
        })
def _unused_scope_params_url(base_url, type=None):
    """
    Takes a supplied URL and returns a corresponding continuation URL with all
    but scope parameters removed (c.f. scope_params above).

    (Currently unused; the `type` parameter is accepted but ignored.)
    """
    return uri_with_params(base_url, scope_params(uri_param_dict(base_url)))
def continuation_params(*param_dicts, **param_dict):
    """
    Return URI parameters from the supplied dictionary specifically needed
    for a continuation URI, ignoring all others.  These are the parameters
    which, in conjunction with a base URI, represent application state.
    Parameters not included here are transient in their effect.
    """
    merged = build_dict(*param_dicts, **param_dict)
    # 'search_for' (form field name) takes precedence over 'search'
    search_val = merged.get('search_for') or merged.get('search') or None
    return (
        { 'continuation_url': merged.get('continuation_url') or None
        , 'search': search_val
        , 'scope': merged.get('scope') or None
        })
def continuation_params_url(base_url):
    """
    Takes a supplied URL and returns a corresponding continuation URL with
    transient query parameters removed (cf. continuation_params above).
    """
    return uri_with_params(base_url, continuation_params(uri_param_dict(base_url)))
def continuation_url_chain(continuation_url):
    """
    Dissect a supplied continuation URL into its components going back up
    the return chain.

    Returns a list of (base_uri, param_dict) pairs, one per hop, ordered
    from the supplied URL back to the final continuation target.  Each
    param_dict has its own nested 'continuation_url' entry removed, e.g.:

    >>> hop1 = uri_with_params("base:hop1", search="s1")
    >>> hop2c = uri_with_params("base:hop2", search="s2", continuation_url=hop1)
    >>> continuation_url_chain(hop2c) == [
    ...     ('base:hop2', {'search': 's2'}),
    ...     ('base:hop1', {'search': 's1'}),
    ...     ]
    True
    """
    # Iterative equivalent of the recursive unwinding: follow the nested
    # 'continuation_url' parameter until there is none.
    chain = []
    next_url = continuation_url
    while next_url is not None:
        params = uri_param_dict(next_url)
        chain.append((uri_base(next_url), params))
        next_url = params.pop("continuation_url", None)
    return chain
def continuation_chain_url(continuation_chain):
    """
    Assembles a list of continuation components into a single continuation URL.

    continuation_chain  non-empty list of (base_uri, param_dict) pairs, most
                        recent hop first (cf. `continuation_url_chain`).

    >>> hop1 = uri_with_params("base:hop1", search="s1")
    >>> hop2 = uri_with_params("base:hop2", search="s2")
    >>> hop3 = uri_with_params("base:hop3", search="s3")
    >>> hop4 = uri_with_params("base:hop4", search="s4")
    >>> hop1p = (uri_base(hop1), uri_param_dict(hop1))
    >>> hop2p = (uri_base(hop2), uri_param_dict(hop2))
    >>> hop3p = (uri_base(hop3), uri_param_dict(hop3))
    >>> hop4p = (uri_base(hop4), uri_param_dict(hop4))
    >>> hop1c = hop1
    >>> hop2c = uri_with_params("base:hop2", search="s2", continuation_url=hop1)
    >>> hop3c = uri_with_params("base:hop3", search="s3", continuation_url=hop2c)
    >>> hop4c = uri_with_params("base:hop4", search="s4", continuation_url=hop3c)
    >>> continuation_chain_url([hop1p]) == hop1c
    True
    >>> continuation_chain_url([hop2p, hop1p]) == hop2c
    True
    >>> continuation_chain_url([hop3p, hop2p, hop1p]) == hop3c
    True
    >>> continuation_chain_url([hop4p, hop3p, hop2p, hop1p]) == hop4c
    True
    """
    u_base, u_params = continuation_chain[0]
    c_tail           = continuation_chain[1:]
    if c_tail:
        # Build a fresh parameter dictionary rather than updating the one
        # supplied, so the caller's chain data is not mutated as a side effect.
        u_params = dict(u_params, continuation_url=continuation_chain_url(c_tail))
    return uri_with_params(u_base, u_params)
def url_update_type_entity_id(url_base,
        old_type_id=None, new_type_id=None,
        old_entity_id=None, new_entity_id=None
        ):
    """
    Isolates type and entity identifiers in the supplied URL, and replaces
    them with values supplied.

    Entity ids are updated only if the type id is also supplied and matches.

    URL path forms recognized (see also urls.py):

        .../c/<coll-id>/d/<type-id>/
        .../c/<coll-id>/d/<type-id>/!<scope>
        .../c/<coll-id>/d/<type-id>/<entity-id>/
        .../c/<coll-id>/l/<list-id>/<type-id>/
        .../c/<coll-id>/l/<list-id>/<type-id>/!<scope>
        .../c/<coll-id>/v/<view-id>/<type-id>/
        .../c/<coll-id>/v/<view-id>/<type-id>/!action
        .../c/<coll-id>/v/<view-id>/<type-id>/<entity-id>/
        .../c/<coll-id>/v/<view-id>/<type-id>/<entity-id>/!action

    Thus, the key patterns used for rewriting are:

        ^.*/d/<type-id>/(!.*)?$
        ^.*/d/<type-id>/<entity-id>/$
        ^.*/l/<list-id>/<type-id>/(!.*)?$
        ^.*/v/<view-id>/<type-id>/(!.*)?$
        ^.*/v/<view-id>/<type-id>/<entity-id>/(!.*)?$

    >>> ( url_update_type_entity_id("http://example.com/base/c/coll/d/t1/",
    ...       old_type_id="t1", new_type_id="t2")
    ...   == 'http://example.com/base/c/coll/d/t2/' )
    True
    >>> ( url_update_type_entity_id("http://example.com/base/c/coll/d/t1/!all",
    ...       old_type_id="t1", new_type_id="t2")
    ...   == 'http://example.com/base/c/coll/d/t2/!all' )
    True
    >>> ( url_update_type_entity_id("http://example.com/base/c/coll/l/list/t1/",
    ...       old_type_id="t1", new_type_id="t2")
    ...   == 'http://example.com/base/c/coll/l/list/t2/' )
    True
    >>> ( url_update_type_entity_id("http://example.com/base/c/coll/l/list/t1/!all",
    ...       old_type_id="t1", new_type_id="t2")
    ...   == 'http://example.com/base/c/coll/l/list/t2/!all' )
    True
    >>> ( url_update_type_entity_id("http://example.com/base/c/coll/v/view/t1/",
    ...       old_type_id="t1", new_type_id="t2")
    ...   == 'http://example.com/base/c/coll/v/view/t2/' )
    True
    >>> ( url_update_type_entity_id("http://example.com/base/c/coll/v/view/t1/!new",
    ...       old_type_id="t1", new_type_id="t2")
    ...   == 'http://example.com/base/c/coll/v/view/t2/!new' )
    True
    >>> ( url_update_type_entity_id("http://example.com/base/c/coll/d/t1/e1/",
    ...       old_type_id="t1", new_type_id="t2",
    ...       old_entity_id="e1", new_entity_id="e2")
    ...   == 'http://example.com/base/c/coll/d/t2/e2/' )
    True
    >>> ( url_update_type_entity_id("http://example.com/base/c/coll/v/view/t1/e1/",
    ...       old_type_id="t1", new_type_id="t2",
    ...       old_entity_id="e1", new_entity_id="e2")
    ...   == 'http://example.com/base/c/coll/v/view/t2/e2/' )
    True
    """
    # Patterns for type-only URL forms: (<prefix>)/(<type_id>)/(<suffix>)
    rewrite_type_id_patterns = (
        [ re.compile(r"(^.*/d)/(?P<type_id>\w{0,32})/(!.*)?$")
        , re.compile(r"(^.*/l/\w{0,32})/(?P<type_id>\w{0,32})/(!.*)?$")
        , re.compile(r"(^.*/v/\w{0,32})/(?P<type_id>\w{0,32})/(!.*)?$")
        ])
    # Patterns for type+entity URL forms: (<prefix>)/(<type_id>)/(<entity_id>)/(<suffix>)
    rewrite_entity_id_patterns = (
        [ re.compile(r"(^.*/d)/(?P<type_id>\w{0,32})/(?P<entity_id>\w{0,32})/(!.*)?$")
        , re.compile(r"(^.*/v/\w{0,32})/(?P<type_id>\w{0,32})/(?P<entity_id>\w{0,32})/(!.*)?$")
        ])
    us, ua, up, uq, uf = urlsplit(url_base)
    if new_type_id:
        # Rewrite the type segment in type-only URL forms
        for rexp in rewrite_type_id_patterns:
            match = rexp.match(up)
            if match:
                prefix, type_id, suffix = match.group(1, 2, 3)
                if not new_entity_id:
                    # Rename all instances of type
                    if type_id == old_type_id:
                        up = "%s/%s/%s"%(prefix, new_type_id, suffix or "")
                break
    # Rewrite type and/or entity segments in type+entity URL forms
    for rexp in rewrite_entity_id_patterns:
        match = rexp.match(up)
        if match:
            prefix, type_id, entity_id, suffix = match.group(1, 2, 3, 4)
            # If no new type id is supplied, retain the existing type id rather
            # than substituting the literal string "None" into the URL.
            upd_type_id = new_type_id or type_id
            if new_entity_id:
                # Rename matching type+entity combinations only
                if ( (type_id == old_type_id) and (entity_id == old_entity_id) ):
                    up = "%s/%s/%s/%s"%(prefix, upd_type_id, new_entity_id, suffix or "")
                break
            else:
                # Rename all instances of type
                if type_id == old_type_id:
                    up = "%s/%s/%s/%s"%(prefix, upd_type_id, entity_id, suffix or "")
                break
    return urlunsplit((us, ua, up, uq, uf))
# Run the module's embedded doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()

# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/uri_builder.py
|
uri_builder.py
|
# Login page
The intended means of user authentication is via a third party login service (using HTTPS - see below):
* Enter a local username (which is how Annalist will identify you, independently of any third party authentication account you may use), select an identity provider (IDP) (e.g. `Google`) and click **Login**.
* If you are not already logged in to the IDP you will be asked to login via their site. Then the IDP will ask your permission to disclose basic identifying information (email, first name and last name) to Annalist. This step is skipped if you have completed these actions previously.
* If this is an existing Annalist account, and the email from the IDP matches the Annalist account email, you will be logged in. If the username given does not match an existing Annalist account, a new account is created with the appropriate details and you are logged in to it.
Authentication using a local user account (e.g. created by an admin user using the 'admin' link on the page footer) can be performed by selecting `Local` as the Login service, and entering a password when requested.
Being logged in does not necessarily mean you have permissions to access Annalist data; it simply means that Annalist has an indication of who you are. Permissions to access Annalist collection data are set up separately by the site administrator or data collection owner.
Initial administrator access and permissions can be established using the `annalist-manager` command line utility.
Third party login services require HTTPS to be used for security. But for a server running locally, this requirement can be relaxed by setting environment variable `OAUTHLIB_INSECURE_TRANSPORT=1` when running the server.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/help/login-help.md
|
login-help.md
|
# List of entities
This page lists entities records in a collection, and provides options for them to be created, copied, edited or deleted. Initially, the default behaviour is to display all entities defined in the current collection, but different list options may be selected from the 'List view' dropdown, and displayed contents may be filtered by entering a value in the 'Search' box. To **refresh the display** after making any such selections, click the 'View' button. To **set the default list** for the current collection to be the currently displayed list, click on the 'Set default' button at the bottom of the page.
To **create a new entity record**, click on the 'New' button. A new form will be presented allowing data values to be entered.
To **copy** or **edit** an entity record, click on the checkbox beside the corresponding entry, and then click 'Copy' or 'Edit'.  A form will be presented allowing data values to be modified or entered.  To **delete** an entity record, click on the checkbox beside the entry to be deleted, and then click 'delete'.  Options to create, copy, edit and/or delete entity records are displayed only for logged-in users.
The 'Customize' button displays a new form that allows entity types, lists and/or views to be created, modified or deleted.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/help/entity-list-help.md
|
entity-list-help.md
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist class for processing a list of field mappings for conversion between
entity values, context values and form data.
A FieldListValueMap is an object that can be inserted into an entity view value
mapping table to process the corresponding list of fields.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
# import collections
from django.conf import settings
from annalist.identifiers import RDFS, ANNAL
from annalist.views.fields.field_description import FieldDescription, field_description_from_view_field
from annalist.views.form_utils.fieldrowvaluemap import FieldRowValueMap
from annalist.views.form_utils.fieldvaluemap import FieldValueMap
from annalist.views.form_utils.repeatvaluesmap import RepeatValuesMap
from annalist.views.fields.render_placement import (
Placement, LayoutOptions,
make_field_width, make_field_offset, make_field_display,
)
# ----------------------------------------------------------------------------
#
# Support functions for generating padding fields
#
# ----------------------------------------------------------------------------
# Skeleton field-description data used to construct invisible padding
# "fields" (rendered via the "Padding" render type) that occupy grid
# columns between displayed fields.
padding_field_data = (
    { "annal:id": "Field_padding"
    , "rdfs:label": ""
    , "annal:field_render_type": "Padding"
    , "annal:field_value_mode": "Value_direct"
    })
def get_padding_field_desc(pad_s, pad_m, pad_l):
    """
    Build a `FieldDescription` for a padding cell occupying the indicated
    number of grid columns at small, medium and large display sizes.

    pad_s       number of padding columns on small displays (0 for none).
    pad_m       number of padding columns on medium displays (0 for none).
    pad_l       number of padding columns on large displays (0 for none).

    Returns the padding field description, or None if no padding is needed
    at any display size.
    """
    # Select the CSS class string according to which display sizes need padding.
    # (Dispatch on the (small, medium, large) visibility combination.)
    visible = (pad_s != 0, pad_m != 0, pad_l != 0)
    if visible == (False, False, False):
        # No padding at any size
        return None
    elif visible == (False, False, True):
        # Pad for large only
        disp = "show-for-large-up large-%d columns"%(pad_l,)
    elif visible == (False, True, False):
        # Pad for medium only
        disp = "show-for-medium-only medium-%d columns"%(pad_m,)
    elif visible == (False, True, True):
        # Pad for medium and large
        disp = "hide-for-small-only medium-%d large-%d columns"%(pad_m, pad_l)
    elif visible == (True, False, False):
        # Pad for small only
        disp = "show-for-small-only small-%d"%(pad_s,)
    elif visible == (True, False, True):
        # Pad for small and large
        disp = "hide-for-medium-only small-%d large-%d"%(pad_s, pad_l)
    elif visible == (True, True, False):
        # Pad for small and medium
        disp = "hide-for-large-up small-%d medium-%d"%(pad_s, pad_m)
    else:
        # Pad all sizes
        disp = "small-%d medium-%d large-%d"%(pad_s, pad_m, pad_l)
    placement = Placement(
        width=make_field_width(sw=pad_s, mw=pad_m, lw=pad_l),
        offset=make_field_offset(so=0, mo=0, lo=0),
        display=make_field_display(sd=bool(pad_s), md=bool(pad_m), ld=bool(pad_l)),
        field=disp,
        label="small-4 columns",
        value="small-8 columns"
        )
    return FieldDescription(
        None, padding_field_data, field_placement_classes=placement
        )
def next_field_pad(pos_prev, offset, width, display):
    """
    Local helper calculates the padding required to place a field at a given
    offset and width, following a previous field, on a 12-column grid row.

    pos_prev    position following the previous field.
    offset      offset of the next field.
    width       width of the next field.
    display     True if the next field is to be displayed, otherwise False.

    Returns a tuple containing:

        [0] True if the next field is to be placed on a new row
        [1] amount of padding (0..11) added from the end of the last field,
            or from the start of the row when wrapping
        [2] position of the next free space after the new element
    """
    if not display:
        # Hidden field: nothing is placed, position is unchanged
        return (False, 0, pos_prev)
    # Wrap to a new row if the field would start before the previous field
    # ends, or would run off the 12-column grid.
    wraps = (offset < pos_prev) or (offset + width > 12)
    pad   = offset if wraps else offset - pos_prev
    return (wraps, pad, offset + width)
def get_padding_desc(position, field_desc):
    """
    Calculate the padding required to position the supplied field where
    requested, and return:

        [0] next row indicator: True if the field and padding are to be
            placed on a new row.
        [1] padding field descriptor, or None.
        [2] updated position vector.

    NOTE: the field is forced onto a new row if it does not fit on the same
    row for a large display.  For smaller displays, fields that run off the
    end of a row are positioned by the browser.  Position information is
    maintained separately for all display sizes so that size-dependent
    padding can be calculated.

    position    is the position immediately following the preceding field.
    field_desc  is the next field descriptor to be added to the display.
    """
    placement = field_desc['field_placement']
    # Compute wrap/padding/next-position independently per display size
    next_row = {}
    pad      = {}
    pos      = {}
    for size in ("s", "m", "l"):
        next_row[size], pad[size], pos[size] = next_field_pad(
            getattr(position,          size),
            getattr(placement.offset,  size),
            getattr(placement.width,   size),
            getattr(placement.display, size)
            )
    pos_next = LayoutOptions(s=pos["s"], m=pos["m"], l=pos["l"])
    pad_desc = get_padding_field_desc(pad["s"], pad["m"], pad["l"])
    # The row-break decision is taken from the large-display layout only
    return (next_row["l"], pad_desc, pos_next)
# ----------------------------------------------------------------------------
#
# Support class for generating row data
#
# ----------------------------------------------------------------------------
class RowData(object):
    """
    Accumulates field descriptions for a single displayed row, emitting a
    `FieldRowValueMap` to the supplied map list whenever a row is completed.
    """

    def __init__(self, coll, view_context):
        """
        coll            collection from which data is being rendered.
        view_context    dictionary of additional values used when rendering.
        """
        self._coll   = coll
        self._view   = view_context
        self._pos    = LayoutOptions(s=0, m=0, l=0)
        self._fields = []
        return

    def next_field(self, field_desc, map_field_row):
        """
        Add `field_desc` to the current row: flush the row first if the field
        must start on a new one, and insert a padding field where needed to
        position it at its requested offset.
        """
        start_new_row, pad_desc, pos_after = get_padding_desc(self._pos, field_desc)
        if start_new_row:
            self.flush(map_field_row)
        if pad_desc is not None:
            self._fields.append(pad_desc)
        self._fields.append(field_desc)
        self._pos = pos_after
        return

    def flush(self, map_field_row):
        """
        Emit any accumulated fields as a `FieldRowValueMap` appended to
        `map_field_row`, and reset the row accumulator state.
        """
        if self._fields:
            # Context name: cf. FieldListValueMap.map_entity_to_context
            map_field_row.append(
                FieldRowValueMap("_fieldlistvaluemap_",
                    self._coll, self._fields, self._view
                    )
                )
            self._pos    = LayoutOptions(s=0, m=0, l=0)
            self._fields = []
        return
# ----------------------------------------------------------------------------
#
# FieldListValueMap class
#
# ----------------------------------------------------------------------------
class FieldListValueMap(object):
    """
    Define an entry to be added to an entity view value mapping table,
    corresponding to a list of field descriptions.

    Used by 'entityedit' and 'entitylist' to create a list field mappers used to
    generate an entity view or list respectively.  Also called recursively when
    generating a mapping for a repeated field.
    """

    def __init__(self, c, coll, fields, view_context):
        """
        Define an entry to be added to an entity view value mapping table,
        corresponding to a list of field descriptions.

        c               name of field used for this value in display context
        coll            is a collection from which data is being rendered.
        fields          list of field descriptions from a view definition, each
                        of which is a dictionary with the field description from
                        a view or list description.
        view_context    is a dictionary of additional values that may be used in
                        assembling values to be used when rendering the fields.
                        Specifically, this is currently used in calls of
                        `EntityFinder` for building filtered lists of entities
                        used to populate enumerated field values.  Fields in
                        the supplied context currently used are `entity` for the
                        entity value being rendered, and `view` for the view record
                        used to render that value
                        (cf. GenericEntityEditView.get_view_entityvaluemap)

        The form rendering template iterates over the field descriptions to be
        added to the form display.  The constructor for this object appends the
        current field to a list of field value mappings, with a `map_entity_to_context`
        method that assigns a list of values from the supplied entity to a context
        field named by parameter `c`.
        """
        self.c     = c          # Context field name for values mapped from entity
        self.fd    = []         # List of field descriptions
        self.fm    = []         # List of field value maps
        properties = None       # Used to detect and disambiguate duplicate properties
        rowdata    = RowData(coll, view_context)   # Accumulates fields for current row
        for f in fields:
            # Add field descriptor for field presentation
            # log.debug("@@ FieldListValueMap: field %r"%(f,))
            field_desc = field_description_from_view_field(coll, f, view_context)
            properties = field_desc.resolve_duplicates(properties)
            self.fd.append(field_desc)
            # Add field value mapper to field value map list
            if field_desc.is_repeat_group():
                # Repeat group occupies new row: flush any partly-filled row first
                rowdata.flush(self.fm)
                view_field_context = dict(view_context, group=field_desc._field_desc)
                # Recursively build a field list map for the repeated group's fields
                repeatfieldsmap = FieldListValueMap('_repeatfieldsmap_',
                    coll, field_desc.group_view_fields(), view_field_context
                    )
                # Context name: cf. FieldListValueMap.map_entity_to_context
                repeatvaluesmap = RepeatValuesMap(c='_fieldlistvaluemap_',
                    f=field_desc, fieldlist=repeatfieldsmap
                    )
                self.fm.append(repeatvaluesmap)
            else:
                # Single field: try to fit on current row
                rowdata.next_field(field_desc, self.fm)
        # Flush out any remaining fields
        rowdata.flush(self.fm)
        return

    def __repr__(self):
        return (
            "FieldListValueMap.fm: %r\n"%(self.fm)
            )

    def map_entity_to_context(self, entityvals, context_extra_values=None):
        """
        Map entity values to per-row/per-field context values, returned as a
        dictionary with a single key `self.c` whose value is the list of
        per-field contexts.
        """
        listcontext = []
        for f in self.fm:
            fv = f.map_entity_to_context(entityvals, context_extra_values=context_extra_values)
            # Each sub-map returns its values under the shared context name
            listcontext.append(fv['_fieldlistvaluemap_'])
        return { self.c: listcontext }

    def map_form_to_entity(self, formvals, entityvals):
        """
        Use form data to update supplied entity values
        """
        for f in self.fm:
            f.map_form_to_entity(formvals, entityvals)
        return entityvals

    def map_form_to_entity_repeated_item(self, formvals, entityvals, prefix):
        """
        Extra helper method used when mapping a repeated list of fields items to
        repeated entity values.  Returns values corresponding to a single repeated
        set of fields.  The field names extracted are constructed using the supplied
        prefix string.

        Returns the supplied entityvals dictionary extended with repeated field values
        found using the supplied prefix.  (If an empty dictionary is supplied, this
        evaluates as False if no fields using the supplied prefix are found.)
        """
        for f in self.fm:
            f.map_form_to_entity_repeated_item(formvals, entityvals, prefix)
        return entityvals

    def get_structure_description(self):
        """
        Helper function returns list of field description information
        """
        return (
            { 'field_type': 'FieldListValueMap'
            , 'field_list': self.fd
            })

    def get_field_description(self):
        # No single field description applies to a list of fields.
        return None
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/form_utils/fieldlistvaluemap.py
|
fieldlistvaluemap.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
This module defines a class that performs mapping of entity data between
whole entities, (Django) view contexts and form data.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import copy
class EntityValueMap(object):
    """
    Represents a mapping between specific entity data, view context data and
    HTML form data, expressed as an ordered list of field-map entries.

    Each entry in the map provides at least these three methods:

    map_entity_to_context:
        accepts a dictionary-like value containing entity values, plus
        extra/default value keyword parameters, and returns a partial
        context dictionary for the item(s) covered by the entry.

    map_form_to_entity:
        accepts a dictionary-like value containing form data values, and
        returns a partial dictionary of corresponding entity value fields.

    get_structure_description:
        returns a simple dictionary describing the field structure.
        Common to all is member 'field_type', which indicates the type
        of field map entry; other members depend on that type.

    See classes SimpleValueMap, FieldValueMap, FieldListValueMap,
    RepeatValuesMap, etc., for possible entries in an entity/value map.

    NOTE: mapping from context to form data is handled by templates
    and/or field renderers.
    """

    def __init__(self, basemap):
        """
        Initialize an entity-map value with a (shallow) copy of the supplied
        base list of field map entries, so later additions do not affect the
        caller's list.
        """
        super(EntityValueMap, self).__init__()
        self._map = copy.copy(basemap)
        return

    def __iter__(self):
        """
        Return an iterator over the entity value map entries.
        """
        return iter(self._map)

    def add_map_entry(self, map_entry):
        """
        Append a single mapping entry to this entity/value map.
        """
        self._map.append(map_entry)
        return

    def map_value_to_context(self, entity_values, **kwargs):
        """
        Map data from entity values to view context for rendering.

        Values defined in the supplied entity take priority; the keyword
        arguments provide values when the entity does not.
        """
        context = {}
        for entry in self._map:
            partial = entry.map_entity_to_context(
                entity_values, context_extra_values=kwargs
                )
            context.update(partial)
        return context

    def map_form_data_to_values(self, form_data, entityvals):
        """
        Map data from form response to entity data.

        Returns a deep copy of the supplied `entityvals` updated with values
        from the form.  Values not mentioned in the form data are not updated.
        """
        merged = copy.deepcopy(entityvals) or {}
        for entry in self._map:
            entry.map_form_to_entity(form_data, merged)
        return merged
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/form_utils/entityvaluemap.py
|
entityvaluemap.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import re
import logging
log = logging.getLogger(__name__)
from collections import namedtuple
from utils.py3porting import to_unicode
from django.utils.html import format_html, mark_safe, escape
# Underlying named tuple providing the data fields of a FieldChoice value.
_FieldChoice_tuple = namedtuple("FieldChoice", ("id", "value", "label", "link", "choice_value"))
class FieldChoice(_FieldChoice_tuple):
    """
    Class representing a choice for an enumerated field.

    Fields default in a cascade: a missing `value` defaults to `id`, and a
    missing `label` defaults to `value`.  `choice_value` selects whether the
    displayed choice text includes the value alongside the label.

    >>> c1 = FieldChoice('id1', 'value1', 'label1', 'link1', choice_value=True)
    >>> c1 == FieldChoice(id='id1', value='value1', label='label1', link='link1', choice_value=True)
    True
    >>> c1.id == 'id1'
    True
    >>> c1.value == 'value1'
    True
    >>> c1.label == 'label1'
    True
    >>> c1.link == 'link1'
    True
    >>> c1.choice_html() == u'label1 (value1)'
    True
    >>> c2 = FieldChoice('id2', 'value2', 'label2', 'link2', choice_value=False)
    >>> c2 == FieldChoice(id='id2', value='value2', label='label2', link='link2', choice_value=False)
    True
    >>> c2.id == 'id2'
    True
    >>> c2.value == 'value2'
    True
    >>> c2.label == 'label2'
    True
    >>> c2.link == 'link2'
    True
    >>> c2.choice() == u'label2'
    True
    >>> c3 = FieldChoice(id='id3', value='value3', link='link3')
    >>> c3 == FieldChoice(id='id3', value='value3', label='value3', link='link3', choice_value=False)
    True
    >>> c3.id == 'id3'
    True
    >>> c3.value == 'value3'
    True
    >>> c3.label == 'value3'
    True
    >>> c3.link == 'link3'
    True
    >>> c4 = FieldChoice('id4', link='link4')
    >>> c4 == FieldChoice(id='id4', value='id4', label='id4', link='link4', choice_value=False)
    True
    >>> c4.id == 'id4'
    True
    >>> c4.value == 'id4'
    True
    >>> c4.label == 'id4'
    True
    >>> c4.link == 'link4'
    True
    >>> c5 = FieldChoice('')
    >>> c5 == FieldChoice(id='', value='', label='', link=None, choice_value=False)
    True
    """

    def __new__(_cls, id=None, value=None, label=None, link=None, choice_value=False):
        # Cascade defaults: value falls back to id, label falls back to value
        if value is None: value = id
        if label is None: label = value
        result = super(FieldChoice, _cls).__new__(_cls, id, value, label, link, choice_value)
        return result

    def __eq__(self, other):
        """
        Returns True if self == other for sorting and equivalence purposes.
        Note: comparison is based on `id` only, ignoring other fields.
        """
        return self.id.__eq__(other.id)

    def __ne__(self, other):
        """
        Returns True if self != other for sorting and equivalence purposes.
        Note: required for Python2.
        """
        return self.id.__ne__(other.id)

    def __lt__(self, other):
        """
        Returns True if self < other for sorting purposes (by `id`).
        """
        return self.id.__lt__(other.id)

    def __hash__(self):
        """
        pylint says this should be defined if __eq__ is defined.
        Something to do with sets?
        """
        # Hash on `id` only, consistent with __eq__.
        return hash(self.id)

    def choice(self, sep=u"\xa0\xa0\xa0"):
        """
        Return choice string: label alone, or label with value appended when
        `choice_value` is set.
        """
        if self.choice_value:
            choice_text = self.option_label(sep=sep)
        else:
            choice_text = to_unicode(self.label)
        return choice_text

    def choice_html(self, sep=u"&nbsp;&nbsp;&nbsp;"):
        """
        Return choice string HTML for option in drop-down list.
        """
        return self.choice(sep=sep)

    def add_link(self, link=None):
        # Return a copy of this choice with the supplied link value.
        # NOTE(review): the copy is constructed without passing choice_value,
        # so that flag resets to False -- confirm this is intended.
        return FieldChoice(self.id, self.value, self.label, link)

    def option_label(self, sep=u"\xa0\xa0\xa0"):
        """
        Returns string used for displayed option label.

        This function is used mainly for testing, to isolate details of
        option presentation from the majority of test cases.
        """
        # format_html/escape ensure label and value text is HTML-safe;
        # only the separator is trusted markup.
        if self.label:
            return format_html(u"{}{}({})", self.label, mark_safe(sep), self.value)
        else:
            return escape(self.value)

    def option_label_html(self, sep=u"&nbsp;&nbsp;&nbsp;"):
        """
        Variation of option_label returns HTML-encoded form of label text
        """
        return self.option_label(sep=sep)
def update_choice_labels(fieldchoices):
    """
    Update choice labels in the supplied list of FieldChoice values so that
    duplicated labels can be distinguished (by setting `choice_value` so the
    underlying value is displayed alongside the label).

    Returns an updated list of options; the supplied list is not modified.
    """
    # Count occurrences of each label to find duplicates
    label_counts = {}
    for choice in fieldchoices:
        label_counts[choice.label] = label_counts.get(choice.label, 0) + 1
    # Rebuild the list, flagging choices whose label is not unique
    return [
        FieldChoice(
            id=choice.id, value=choice.value, label=choice.label,
            link=choice.link, choice_value=True
            )
        if label_counts[choice.label] > 1 else choice
        for choice in fieldchoices
        ]
def get_choice_labels(fieldchoices):
    """
    Return a list of choice label strings based on the supplied list of
    FieldChoice values, with duplicated labels disambiguated by their values.

    >>> c1 = FieldChoice('id1', 'value1', 'label1', 'link1')
    >>> c2a = FieldChoice('id2a', 'value2a', 'label2', 'link2')
    >>> c2b = FieldChoice('id2b', 'value2b', 'label2', 'link2')
    >>> labels = get_choice_labels([c1,c2a,c2b])
    >>> labels == ['label1', u'label2\\xa0\\xa0\\xa0(value2a)', u'label2\\xa0\\xa0\\xa0(value2b)']
    True
    """
    labels = []
    for choice in update_choice_labels(fieldchoices):
        labels.append(choice.choice())
    return labels
# Run the module's embedded doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()

# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/form_utils/fieldchoice.py
|
fieldchoice.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist class for processing a RepeatValuesMap in an annalist view value mapping table.
A RepeatValuesMap is used to render repeated groups of fields for each of
a list of values.
When the mapping function `map_entity_to_context` is called, the supplied `entityvals`
is expected to be an iterator (list, tuple, etc.) of entities or dictionary values
to be processed for display.
When decoding values from a form, different logic is required to extract a
repeating structure from the flat namespace used for form data. See method
`map_form_to_entity`, along with `FieldListValueMap.map_form_to_entity_repeated_item`
for more details.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
# import json
from annalist.identifiers import RDFS, ANNAL
from annalist.views.form_utils.fieldvaluemap import FieldValueMap
from annalist.views.fields.bound_field import bound_field
class RepeatValuesMap(FieldValueMap):
    """
    Define an entry in an entity value mapping table corresponding to a
    group of fields that is repeated for multiple values in an entity.

    c           request context field name for the field value mapping entry
    f           is a `FieldDescription` value describing the repeated data.
    fieldlist   a `FieldListValueMap` object describing a set of fields to be
                displayed for each repeated value.
    """

    def __init__(self, c=None, f=None, fieldlist=None):
        # Delegate c/f handling to FieldValueMap; keep the per-item field list
        super(RepeatValuesMap, self).__init__(c=c, f=f)
        self.fieldlist = fieldlist
        return

    def __repr__(self):
        return (
            super(RepeatValuesMap, self).__repr__()+
            "RepeatValuesMap.fieldlist: %r\n"%(self.fieldlist)
            )

    def map_form_to_entity(self, formvals, entityvals):
        """
        Extract a repeating group of values from flat form data, and store the
        result as a list under the appropriate entity value key.

        Form fields for repetition i are named with prefix "<name>__<i>__";
        items are collected for i = 0, 1, ... until no fields with the next
        prefix are found.

        Returns the updated `entityvals` dictionary.
        """
        # log.info(repr(formvals))
        # NOTE(review): self.i (form field name) and self.f (field description)
        # are assumed to be set by the FieldValueMap base class -- confirm there.
        prefix_template = self.i+"__%d__"
        prefix_n = 0
        repeat_vals = []
        field_key = self.f.get_field_value_key(entityvals)
        # previous_vals is currently unused except by the commented-out logic
        # below; retained so that logic can be reinstated.
        previous_vals = entityvals.get(field_key,[])
        while True:
            #@@
            # The following logic works, but sometimes yields unexpected results:
            # Specifically, when the original data has more fields than the form,
            # the additional old fields were copied over into the result.
            #
            # For now, we live with the restriction that fields within repeated
            # fields cannot propagate subproperty values used; i.e. when editing,
            # subproperties used in the data are replaced by the superproperty from
            # the repeated field definition.  In practice, this may not be a problem,
            # as the cases of repeated fields with subproperties are generally associated
            # with special JSON-LD keys like '@id' or '@value'
            #
            # Extract previous values in same position to be updated
            # This ad-hocery is used to try and preserve property URIs used
            # within the list, so that subproperties (where used) are preserved.
            # if len(previous_vals) > prefix_n:
            #     vals = previous_vals[prefix_n]
            # else:
            #     vals = {}
            #@@
            vals = {}
            prefix = prefix_template%prefix_n
            updated_vals = self.fieldlist.map_form_to_entity_repeated_item(
                formvals, vals, prefix
                )
            if not updated_vals:
                # No form fields found with this prefix: end of repeated items
                break
            repeat_vals.append(updated_vals)
            prefix_n += 1
        entityvals[field_key] = repeat_vals
        return entityvals

    def get_structure_description(self):
        """
        Helper function returns structure description information
        """
        return (
            { 'field_type': "RepeatValuesMap"
            , 'field_descr': self.f
            , 'entity_field': self.e
            , 'form_field': self.i
            })
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/form_utils/repeatvaluesmap.py
|
repeatvaluesmap.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist class for processing a SimpleValueMap in an annalist view value mapping table.
A SimpleValueMap maps values directly from a supplied entity value to the indicated field
in the context, and also from the form values returned to an entity value, or another context
for re-rendering. The entity and form fields used are build directly into the mapping table.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import collections
from django.conf import settings
from annalist.views.fields.bound_field import bound_field
# Named tuple is base class for SimpleValueMap:
_SimpleValueMap_tuple = collections.namedtuple("SimpleValueMap", ("c", "e", "f"))

class SimpleValueMap(_SimpleValueMap_tuple):
    """
    Entity value mapping table entry for a simple value that is copied
    directly between an incoming entity, a request context, and form
    response data.

    c       request context field name (also keys default values)
    e       entity value field name (property URI)
    f       HTML input form field name (used as key in POST results)
    """

    def map_entity_to_context(self, entityvals, context_extra_values=None):
        """
        Return a partial context dictionary {c: value}, where the value comes
        from entity field `e`, falling back to any default supplied in
        `context_extra_values` under key `c`.
        """
        if not self.c:
            return {}
        default = context_extra_values and context_extra_values.get(self.c, None)
        # log.log(settings.TRACE_FIELD_VALUE,
        #     "SimpleValueMap.map_entity_to_context: entitykey %s, contextkey %s"%
        #     (self.e, self.c)
        #     )
        return { self.c: entityvals.get(self.e, default) }

    def map_form_to_entity(self, formvals, entityvals):
        """
        Copy form field `f` (if present and non-empty) to entity field `e`,
        returning the updated entity values.

        NOTE: falsy form values (e.g. "") are not copied, so an emptied form
        field does not clear a previously stored entity value.
        """
        if self.e and self.f:
            # log.debug("SimpleValueMap.map_form_to_entity %s, %s"%(self.e, self.f))
            form_value = formvals.get(self.f, None)
            if form_value:
                entityvals[self.e] = form_value
        return entityvals

    def get_structure_description(self):
        """
        Return a simple dictionary describing this map entry.
        """
        return (
            { 'field_type': 'SimpleValueMap'
            , 'entity_field': self.e
            , 'context_field': self.c
            , 'form_field': self.f
            })

    def get_field_description(self):
        # No field description is associated with a simple value map.
        return None
class StableValueMap(SimpleValueMap):
    """
    A SimpleValueMap variant whose value is never written back from the
    form to the entity.  (Some such fields are handled specially.)
    """

    def map_form_to_entity(self, formvals, entityvals):
        # Deliberately ignore any form value: the entity value is stable.
        return entityvals

    def get_structure_description(self):
        """
        Return the base description, re-tagged with this class's type name.
        """
        description = super(StableValueMap, self).get_structure_description()
        description['field_type'] = 'StableValueMap'
        return description
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/form_utils/simplevaluemap.py
|
simplevaluemap.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist class for processing a row of field mappings for conversion between
entity values, context values, and form data.
A FieldRowValueMap is an object that can be inserted into an entity view value
mapping table to process the corresponding list of fields. It functions like a
simplified form of FieldListValueMap, except that each row is wrapped in a
"<div class='row'>...</div>", etc., to force a new row of displayed output.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2016, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
# import collections
from django.conf import settings
from annalist.identifiers import RDFS, ANNAL
from annalist.views.form_utils.fieldvaluemap import FieldValueMap
from annalist.views.fields.field_description import FieldDescription, field_description_from_view_field
from annalist.views.fields.bound_field import bound_field
# ----------------------------------------------------------------------------
#
# FieldRowValueMap class
#
# ----------------------------------------------------------------------------
class FieldRowValueMap(object):
    """
    An entity view value mapping table entry that renders a list of field
    descriptions as a single displayed row.

    Functions like a simplified FieldListValueMap, except that each row is
    wrapped in "<div class='row'>...</div>", etc., to force a new row of
    displayed output.
    """

    def __init__(self, c, coll, field_descs, view_context):
        """
        Initialize a row-of-fields value mapping entry.

        c            name of the context field used for this row's value.
        coll         collection from which data is being rendered.
        field_descs  list of field descriptions derived from a view
                     definition, each of which is a FieldDescription object.
        view_context dictionary of additional values that may be used when
                     assembling values for rendering the fields.
        """
        self.c  = c             # Context field name for row values
        self.fd = field_descs   # Field descriptors for fields in row
        self.fm = []            # Field maps for fields in row
        # Build a field value mapper for each field in the row:
        for field_desc in field_descs:
            self.fm.append(FieldValueMap(c='_fieldrowvaluemap_', f=field_desc))
        # Synthesize a field description for the row as a whole, using the
        # "FieldRow" render type to expand the contained fields:
        fieldrow_data = (
            { ANNAL.CURIE.id:                "Row_fields"
            , RDFS.CURIE.label:              "Fields in row"
            , RDFS.CURIE.comment:            "@@@ Field description for row of fields @@@"
            , ANNAL.CURIE.field_name:        "Row_fields"
            , ANNAL.CURIE.field_render_type: "FieldRow"
            , ANNAL.CURIE.field_value_mode:  "Value_direct"
            })
        self.rd = FieldDescription(
            coll, fieldrow_data,
            view_context=view_context
            )
        # @@TODO: Review this: consider passing in bare field descriptions from view, and
        # adapting or using a variant of field_description_from_view_field to populate
        # 'row_field_descs' in field description.  This would be more in line with
        # treatment of ref_multifield fields.
        self.rd['row_field_descs'] = self.fd
        return

    def __repr__(self):
        return (
            "FieldRowValueMap.fd: %r\n"%(self.fd)
            )

    def map_entity_to_context(self, entityvals, context_extra_values=None):
        """
        Add row of fields to display context.

        The context returned uses a single field with a special renderer
        that handles expansion of the contained fields, wrapped in markup
        that presents the data as a new row.
        """
        rowcontext = bound_field(
            self.rd, entityvals,
            context_extra_values=context_extra_values
            )
        return { self.c: rowcontext }

    def map_form_to_entity(self, formvals, entityvals):
        """
        Use form data to update the supplied entity values, and return them.
        """
        for fieldmap in self.fm:
            fieldmap.map_form_to_entity(formvals, entityvals)
        return entityvals

    def map_form_to_entity_repeated_item(self, formvals, entityvals, prefix):
        """
        Extra helper method used when mapping a repeated list of field items
        to repeated entity values.  Returns values corresponding to a single
        repeated set of fields; the field names extracted are constructed
        using the supplied prefix string.

        Returns the supplied entityvals dictionary extended with repeated
        field values found using the supplied prefix.  (If an empty
        dictionary is supplied, it evaluates as False when no fields using
        the supplied prefix are found.)
        """
        for fieldmap in self.fm:
            fieldmap.map_form_to_entity_repeated_item(formvals, entityvals, prefix)
        return entityvals

    def get_structure_description(self):
        """
        Return a dictionary summarizing this value mapper's structure.
        """
        return (
            { 'field_type': 'FieldRowValueMap'
            , 'field_list': self.fd
            })

    def get_field_description(self):
        # The row as a whole has no single field description to report.
        return None
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/form_utils/fieldrowvaluemap.py
|
fieldrowvaluemap.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Annalist class for processing a FieldValueMap in an annalist view value mapping table.
A FieldValueMap accepts a context field identifier and a field description structure,
and generates context values to drive rendering of that field in a form.
The template used is expected to iterate over the fields and render each one, e.g.:
{% for field in fields %}
{% include field.render.label_edit %}
{% endfor %}
The iterated values of `field` provide additional values for the field rendering template,
including the value of the entity field to be presented. Field descriptions are bound
to entity values as the context elements are generated by this class.
Entity and form field names used for this value are obtained from the field definition;
i.e. they are defined indirectly to support data-driven form generation.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from django.conf import settings
from annalist.views.fields.bound_field import bound_field
class FieldValueMap(object):
    """
    An entity value mapping table entry corresponding to a field value and
    description, which is added to a list of such fields in the indicated
    context variable.

    c   request context field name for the field value mapping entry
    f   field description structure (cf. `FieldDescription`)

    NOTE: the form rendering template iterates over the context field values
    to be added to the form display.  The constructor for this object
    appends the current field to a list of field value mappings at the
    indicated context field.
    """

    def __init__(self, c=None, f=None):
        self.c = c                              # Context field name
        self.f = f                              # Field description
        self.e = f.get_field_property_uri()     # Entity data key
        self.i = f.get_field_name()             # Field data key
        return

    def __repr__(self):
        return (
            "FieldValueMap.c: %r\n"%(self.c)+
            "FieldValueMap.f: %r\n"%(self.f)+
            "FieldValueMap.e: %s\n"%(self.e)+
            "FieldValueMap.i: %s\n"%(self.i)
            )

    def map_entity_to_context(self, entityvals, context_extra_values=None):
        """
        Return a bound_field, which is a dictionary-like set of values to be
        added to the display context under construction.
        """
        boundfield = bound_field(
            field_description=self.f,
            entityvals=entityvals,
            context_extra_values=context_extra_values
            )
        return { self.c: boundfield }

    def map_form_to_entity(self, formvals, entityvals):
        """
        Update and return `entityvals` with this field's value decoded from
        the submitted form data.

        self.i is the form value key for the value to save.
        self.e is the declared entity property URI for the field value, or
        None if no value is saved for this field.
        """
        if self.e:
            entitykey = self.f.get_field_value_key(entityvals)
            formval   = formvals.get(self.i, None)
            self.f['field_value_mapper'].decode_store(formval, entityvals, entitykey)
        return entityvals

    def map_form_to_entity_repeated_item(self, formvals, entityvals, prefix):
        """
        Extra helper method used when mapping repeated field items to
        repeated entity values.  The field name extracted is constructed
        using the supplied prefix string.

        Returns the supplied entityvals dictionary extended with the new
        field value found using the supplied prefix.  (If an empty
        dictionary is supplied, it evaluates as False when no such field is
        found.)
        """
        if self.e:
            entitykey = self.f.get_field_value_key(entityvals)
            formval   = formvals.get(prefix+self.i, None)
            if formval is not None:
                self.f['field_value_mapper'].decode_store(formval, entityvals, entitykey)
        return entityvals

    def get_structure_description(self):
        """
        Return a dictionary summarizing this value mapper's structure.
        """
        return (
            { 'field_type':   'FieldValueMap'
            , 'field_descr':  self.f
            , 'entity_field': self.e
            , 'form_field':   self.i
            })

    def get_field_description(self):
        return self.f
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/form_utils/fieldvaluemap.py
|
fieldvaluemap.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for URI value displayed as an image.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_context_field_value,
get_field_edit_value,
get_field_view_value
)
from django.template import Template, Context
# ----------------------------------------------------------------------------
#
# Image reference value mapping
#
# ----------------------------------------------------------------------------
class RefImageValueMapper(RenderBase):
    """
    Value mapper class for an image resource reference.
    """

    @classmethod
    def encode(cls, data_value):
        """
        Encode an image reference as a string; None becomes "".
        """
        return data_value or ""

    @classmethod
    def decode(cls, field_value):
        """
        Decode a URI form value as an image reference; None becomes "".
        """
        return field_value or ""
# ----------------------------------------------------------------------------
#
# Image reference field renderers
#
# ----------------------------------------------------------------------------
class ref_image_view_renderer(object):

    def render(self, context):
        """
        Render reference in entity view as a link to the referenced image,
        with the image itself as the link body.
        """
        linkval = RefImageValueMapper.encode(get_context_field_value(context, "target_value_link", ""))
        anchor_html = '''<a href="%s" target="_blank">'''
        image_html  = '''<img src="%s" alt="Image at '%s'" />'''
        return (anchor_html + image_html + '''</a>''')%(linkval, linkval, linkval)
class ref_image_edit_renderer(object):
    def __init__(self):
        # Template presents the image URI as a plain text input field;
        # `repeat_prefix` disambiguates fields in repeated field groups.
        self._template = Template(
            '''<input type="text" size="64" name="{{repeat_prefix}}{{field.description.field_name}}" '''+
            '''placeholder="{{field.description.field_placeholder}}" '''+
            '''value="{{field.field_edit_value}}" />'''
            )
        return
    def render(self, context):
        """
        Render image URI for editing
        """
        return self._template.render(context)
def get_ref_image_renderer():
    """
    Return a field renderer object for image reference values.

    (The previous docstring referred to "token list values", which appears
    to be a copy/paste remnant: this renderer handles image references.)
    """
    return RenderFieldValue(
        "ref_image",
        view_renderer=ref_image_view_renderer(),
        edit_renderer=ref_image_edit_renderer(),
        )
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_ref_image.py
|
render_ref_image.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re
from collections import OrderedDict, namedtuple
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_context_value,
get_context_field_value,
get_field_edit_value,
get_field_view_value,
get_context_field_description_value
)
from annalist.views.form_utils.fieldchoice import FieldChoice
# ----------------------------------------------------------------------------
#
# Field placement render support functions
#
# ----------------------------------------------------------------------------
# These symbols don't display well with some languages/fonts (specifically Chinese)
view_is_occupied    = "█"
view_not_occupied   = "░"
option_is_occupied  = "#"
option_not_occupied = "_"

# Enumerated placement options.
#
# '.' and '#' here are placeholders for symbols that will be used to show
# that a grid column is unoccupied or occupied respectively by the field.
placement_occupancy = OrderedDict(
    [ ("small:0,12"             , "############ (0/12)")
    #@@ Label width calculation doesn't work for placements not sub-multiple of 12
    #@@ (but still OK for columns)
    # , ("small:0,12;medium:0,9", "#########... (0/9)")
    # , ("small:0,12;medium:3,9", "...######### (3/9)")
    # , ("small:0,12;medium:0,8", "########.... (0/8)")
    # , ("small:0,12;medium:4,8", "....######## (4/8)")
    , ("small:0,12;medium:0,6"  , "######...... (0/6)")
    , ("small:0,12;medium:3,6"  , "...######... (3/6)")
    , ("small:0,12;medium:6,6"  , "......###### (6/6)")
    , ("small:0,12;medium:0,4"  , "####........ (0/4)")
    , ("small:0,12;medium:4,4"  , "....####.... (4/4)")
    , ("small:0,12;medium:8,4"  , "........#### (8/4)")
    , ("small:0,12;medium:0,3"  , "###......... (0/3)")
    , ("small:0,12;medium:3,3"  , "...###...... (3/3)")
    , ("small:0,12;medium:6,3"  , "......###... (6/3)")
    , ("small:0,12;medium:9,3"  , ".........### (9/3)")
    , ("small:0,12;medium:6,6right", "......###### (6/6R)")
    , ("small:0,12;medium:8,4right", "........#### (8/4R)")
    , ("small:0,12;medium:9,3right", ".........### (9/3R)")
    , ("small:0,9"              , "#########... (0/9col)")
    , ("small:3,9"              , "...######### (3/9col)")
    , ("small:0,8"              , "########.... (0/8col)")
    , ("small:4,8"              , "....######## (4/8col)")
    , ("small:0,6"              , "######...... (0/6col)")
    , ("small:3,6"              , "...######... (3/6col)")
    , ("small:6,6"              , "......###### (6/6col)")
    , ("small:0,4"              , "####........ (0/4col)")
    , ("small:4,4"              , "....####.... (4/4col)")
    , ("small:8,4"              , "........#### (8/4col)")
    , ("small:0,3"              , "###......... (0/3col)")
    , ("small:3,3"              , "...###...... (3/3col)")
    , ("small:6,3"              , "......###... (6/3col)")
    , ("small:9,3"              , ".........### (9/3col)")
    ])

def option_symbol(occupied):
    """
    Map an occupancy character to the symbol used in a dropdown option body.
    Characters other than '#' and '.' are passed through unchanged.
    """
    if occupied == "#":
        return option_is_occupied
    if occupied == ".":
        return option_not_occupied
    return occupied

def option_body(occupancy):
    """
    Returns an option body string corresponding to a supplied occupancy string
    """
    return "".join(option_symbol(c) for c in occupancy)

def view_symbol(occupied):
    """
    Map an occupancy character to the symbol used in a view display.
    Characters other than '#' and '.' are passed through unchanged.
    """
    if occupied == "#":
        return view_is_occupied
    if occupied == ".":
        return view_not_occupied
    return occupied

def view_body(occupancy):
    """
    Returns a view body string corresponding to a supplied occupancy string
    """
    return "".join(view_symbol(c) for c in occupancy)
def get_placement_options():
    """
    Return a list of FieldChoice values for the enumerated placement options.
    """
    options = []
    for o in placement_occupancy:
        options.append(FieldChoice(o, label=option_body(placement_occupancy[o])))
    return options
def get_placement_value_option_dict():
    """
    Return a dict mapping each placement string to its option body text.
    """
    return dict(
        (o, option_body(placement_occupancy[o])) for o in placement_occupancy
        )
def get_placement_option_value_dict():
    """
    Return a dict mapping each option body text back to its placement string.
    """
    return dict(
        (option_body(placement_occupancy[o]), o) for o in placement_occupancy
        )
def placement_opton_text(placement, placeholder="(select...)"):
    """
    Return display text for a placement option value.

    Known placements display as their occupancy body text; an empty
    placement displays as the supplied placeholder; any other value is
    displayed verbatim.

    (NOTE: the misspelled function name is retained for compatibility
    with existing callers.)
    """
    if placement in placement_occupancy:
        return option_body(placement_occupancy[placement])
    if placement == "":
        return placeholder
    return placement
def placement_option(placement, placeholder, placement_selected=False):
    """
    Return an HTML <option> element for a placement selection dropdown.

    placement           placement string value for the option.
    placeholder         text displayed for a blank placement value.
    placement_selected  True if this option is the currently selected one.

    NOTE: the previous default for `placement_selected` was the *string*
    "False", which is truthy and would mark the option as selected by
    default; the default is now the boolean False.  (All call sites in this
    module pass the argument explicitly, so their behavior is unchanged.)
    """
    body_text = placement_opton_text(placement, placeholder=placeholder)
    if placement_selected:
        selected = ''' selected="selected"'''
    else:
        selected = ""
    return (
        '''<option value="%s"%s>%s</option>'''%
        (placement, selected, body_text)
        )
def placement_display_span(placement, placeholder="(not specified)"):
    """
    Return an HTML <span> element displaying a placement value.

    placement    placement string to display.
    placeholder  text displayed when the placement value is empty.

    NOTE: previously an empty placement raised NameError because
    `placeholder` was referenced without being defined; it is now an
    optional parameter with a default, keeping the call signature
    backward-compatible.
    """
    if placement in placement_occupancy:
        display_text = view_body(placement_occupancy[placement])
    elif placement == "":
        display_text = placeholder
    else:
        display_text = placement
    return '''<span class="placement-text">%s</span>'''%display_text
# ----------------------------------------------------------------------------
#
# Field placement field renderers
#
# ----------------------------------------------------------------------------
class placement_view_renderer(object):

    def render(self, context):
        """
        Render field placement for viewing.
        """
        placement = get_field_view_value(context, " ")
        if placement not in placement_occupancy:
            # Not a predefined value - return string in an unadorned span.
            # (Without a <span ... /> with some content, the grid layout
            # gets messed up.)
            return '''<span>%s</span>'''%(placement or "(not specified)")
        return placement_display_span(placement)
class placement_edit_renderer(object):
    def render(self, context):
        """
        Render field placement for editing.

        Generates an HTML <select> element listing the enumerated placement
        options, with the current placement value (or a placeholder for a
        blank value) marked as selected.
        """
        # Prefix used to disambiguate fields in repeated field groups:
        repeat_prefix = get_context_value(context, 'repeat_prefix', "")
        placement = get_field_edit_value(context, "")
        field_name = get_context_field_description_value(
            context, 'field_name', "_unknown_"
            )
        field_placeholder = get_context_field_description_value(
            context, 'field_placeholder', "small:0,12"
            )
        # Blank option, selected when no placement value is present:
        option_elem = placement_option(
            "", field_placeholder, placement_selected=(placement=="")
            )
        pref = (
            [ '''<select class="placement-text" name="%s%s">'''%
            (repeat_prefix, field_name)
            , " "+option_elem
            ])
        opts = []
        # If the current value is not a predefined placement, include it as
        # an extra (selected) option so that it is not silently discarded:
        if placement != "" and placement not in placement_occupancy:
            option_elem = placement_option(
                placement, field_placeholder, placement_selected=True
                )
            opts.append(" "+option_elem)
        # One option per predefined placement, current value selected:
        for opt in placement_occupancy:
            option_elem = placement_option(
                opt, field_placeholder, placement_selected=(placement==opt)
                )
            opts.append(" "+option_elem)
        suff = ['''</select>''']
        return '\n'.join(pref+opts+suff)
def get_field_placement_renderer():
    """
    Return a field renderer object for field placement values.
    """
    return RenderFieldValue(
        "placement",
        view_renderer=placement_view_renderer(),
        edit_renderer=placement_edit_renderer()
        )
# ----------------------------------------------------------------------------
#
# Internal representation of field placement and placement string parser
#
# ----------------------------------------------------------------------------
# Per-display-size values (s/m/l = small/medium/large) for widths, offsets
# or visibility flags:
LayoutOptions = namedtuple("LayoutOptions", ["s", "m", "l"])
# Parsed placement: LayoutOptions for width/offset/display, plus the CSS
# class strings for the whole field, its label and its value:
Placement = namedtuple("Placement", ['width', 'offset', 'display', 'field', 'label', 'value'])
def get_placement_classes(placement):
    """
    Returns Placement classes corresponding to placement string provided.

    >>> get_placement_classes("small:0,12").field == 'small-12 columns'
    True
    >>> get_placement_classes("small:0,12").label == 'small-12 medium-2 columns'
    True
    >>> get_placement_classes("small:0,12").value == 'small-12 medium-10 columns'
    True
    """
    def set_field_width(pmmode, pmwidth):
        # Set field/label/value widths (in grid columns) for display mode
        # `pmmode`, given an overall field width of `pmwidth` columns.
        # The label width scales up as the field gets narrower; the value
        # takes the remaining columns, or a full row when the label would
        # fill the width.  (Mutates the enclosing *_width dictionaries.)
        if pmwidth == 0:
            field_width[pmmode] = 0
            label_width[pmmode] = 0
            value_width[pmmode] = 0
        else:
            field_width[pmmode] = pmwidth
            label_width[pmmode] = labelw[pmmode]*(12 // pmwidth)
            value_width[pmmode] = 12 - label_width[pmmode]
            if label_width[pmmode] >= 12:
                label_width[pmmode] = 12
                value_width[pmmode] = 12
        return
    def format_class(cd, right="", show=""):
        # Format a {mode: width} dict as a CSS class string, dropping
        # entries that just repeat the previous (smaller) mode's width.
        # NOTE: mutates `cd` (deletes redundant entries).
        prev = cd.get("small", None)
        for test in ("medium", "large"):
            if (test in cd):
                if cd[test] == prev:
                    del cd[test]
                else:
                    prev = cd[test]
        if right: right = " "+right
        if show: show = " "+show
        return " ".join([k+"-"+str(v) for k,v in cd.items()]) + " columns" + right + show
    # Parses sub-expressions like "medium:3,6,right,hide":
    ppr = re.compile(r"^(small|medium|large):(\d+),(\d+),?(right)?,?(hide)?$")
    ps = [ s.strip() for s in placement.split(';') ]
    labelw = {'small': 12, 'medium': 2, 'large': 2}
    field_width = OrderedDict()
    label_width = OrderedDict()
    value_width = OrderedDict()
    pmright = ""
    pmshow = ""
    set_field_width("small", 12)    # Default small-12 columns (may be overridden)
    set_field_width("medium", 12)   # Default medium-12 columns (may be overridden)
    set_field_width("large", 12)    # Default large-12 columns (may be overridden)
    field_offset = {'small': 0, 'medium': 0, 'large': 0}
    field_display = {'small': True, 'medium': True, 'large': True}
    # Process each placement sub-expression
    for p in ps:
        pm = ppr.match(p)
        if not pm:
            break       # Stop at first unrecognized sub-expression
        pmmode = pm.group(1)        # "small", "medium" or "large"
        pmoffset = int(pm.group(2))
        pmwidth = int(pm.group(3))
        pmright = pm.group(4) or ""
        pmhide = pm.group(5)
        if pmhide:
            # Hidden at this size: show only from the next size up:
            pmshow = {'small': "show-for-medium-up", 'medium': "show-for-large-up", 'large': ""}[pmmode]
        # print "pmhide %s, pmmode %s, pmshow %s"%(pmhide, pmmode, pmshow)
        set_field_width(pmmode, pmwidth)
        field_offset[pmmode] = pmoffset
        if pmhide:
            field_display[pmmode] = False
        # Settings for a smaller size propagate to the larger sizes, unless
        # overridden by a later sub-expression:
        if pmmode == "small":
            set_field_width("medium", pmwidth)
            field_offset["medium"] = pmoffset
        if pmmode in ["small", "medium"]:
            set_field_width("large", pmwidth)
            field_offset["large"] = pmoffset
    c = Placement(
        width=make_field_width(
            sw=field_width["small"], mw=field_width["medium"], lw=field_width["large"]
            ),
        offset=make_field_offset(
            so=field_offset['small'], mo=field_offset['medium'],lo=field_offset['large']
            ),
        display=make_field_display(
            sd=field_display['small'], md=field_display['medium'],ld=field_display['large']
            ),
        field=format_class(field_width, pmright, pmshow),
        label=format_class(label_width),
        value=format_class(value_width)
        )
    # log.debug("get_placement_class %s, returns %s"%(placement,c))
    return c
def make_field_width(sw=12, mw=12, lw=12):
    # LayoutOptions of field widths for small/medium/large displays.
    return LayoutOptions(s=sw, m=mw, l=lw)
def make_field_offset(so=0, mo=0, lo=0):
    # LayoutOptions of field offsets for small/medium/large displays.
    return LayoutOptions(s=so, m=mo, l=lo)
def make_field_display(sd=True, md=True, ld=True):
    # LayoutOptions of field visibility flags for small/medium/large displays.
    return LayoutOptions(s=sd, m=md, l=ld)
if __name__ == "__main__":
    # Run embedded doctests (see get_placement_classes) when this module is
    # executed directly.
    import doctest
    doctest.testmod()
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_placement.py
|
render_placement.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for text values selected from a list of options.
In some cases, the rendered edit control also includes a button for
creating a new value.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import traceback
import logging
log = logging.getLogger(__name__)
from utils.py3porting import is_string
from annalist import message
from annalist.exceptions import TargetIdNotFound_Error, TargetEntityNotFound_Error
from annalist.util import fill_type_entity_id
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_field_edit_value,
get_field_view_value
)
from annalist.views.form_utils.fieldchoice import FieldChoice, update_choice_labels
from django.template import Template, Context
# ----------------------------------------------------------------------------
#
# Select value templates
#
# ----------------------------------------------------------------------------
# Option-list template fragment shared by the selector templates below.
# Renders each available choice, marking the current value as selected, and
# substituting the field placeholder text for a blank choice:
edit_options = (
    '''{% for opt in field_options %} '''+
    '''{% if opt.value == encoded_field_value %} '''+
    '''{% if opt.value == "" %} '''+
    '''<option value="" selected="selected">{{field.description.field_placeholder}}</option>\n'''+
    '''{% else %} '''+
    '''<option value="{{opt.value}}" selected="selected">{{opt.choice_html}}</option>\n'''+
    '''{% endif %} '''+
    '''{% else %} '''+
    '''{% if opt.value == "" %} '''+
    '''<option value="">{{field.description.field_placeholder}}</option>\n'''+
    '''{% else %} '''+
    '''<option value="{{opt.value}}">{{opt.choice_html}}</option>\n'''+
    '''{% endif %} '''+
    '''{% endif %} '''+
    '''{% endfor %} '''
    )
# View template for a selector: link to the selected entity, or a
# "value missing" / "no selection" span:
view_select = (
    """<!-- fields.render_select.view_select -->
{% if field_linkval %}
<a href="{{field_linkval}}{{field_continuation_param}}">{{field_labelval}}</a>
{% elif field_textval and field_textval != "" %}
<span class="value-missing">{{field_labelval}}</span>
{% else %}
<span class="value-blank">"""+
    message.NO_SELECTION%{'id': "{{field.description.field_label}}"}+
    """</span>
{% endif %}
""")
# Edit template for a selector with an accompanying "new or edit" button:
edit_select = (
    """<!-- fields.render_select.edit_select -->
<div class="row">
<div class="small-10 columns view-value less-new-button">
<select name="{{repeat_prefix}}{{field.description.field_name}}">
"""+
    edit_options+
    """
</select>
</div>
<div class="small-2 columns view-value new-button left small-text-right">
<button type="submit"
name="{{repeat_prefix}}{{field.description.field_name}}__new_edit"
value="New"
title="Define new or edit {{field.description.field_label}}"
>
<span class="select-edit-button-text">+✍</span>
</button>
</div>
</div>
""")
# View template for a plain choice (no button):
view_choice = (
    """<!-- fields.render_select.view_choice -->
{% if field_linkval %}
<a href="{{field_linkval}}{{field_continuation_param}}">{{field_labelval}}</a>
{% elif field_textval and field_textval != "" %}
<span class="value-missing">{{field_labelval}}</span>
{% else %}
<span class="value-blank">"""+
    message.NO_SELECTION%{'id': "{{field.description.field_label}}"}+
    """</span>
{% endif %}
""")
# Edit template for a plain choice (no button):
edit_choice = (
    """<!-- fields.render_select.edit_choice -->
<select name="{{repeat_prefix}}{{field.description.field_name}}">
"""+
    edit_options+
    """
</select>
""")
# View template for an entity type selector:
view_entitytype = (
    """<!-- fields.render_select.view_entitytype -->
{% if field_linkval %}
<a href="{{field_linkval}}{{field_continuation_param}}">{{field_labelval}}</a>
{% elif field_textval and field_textval != "" %}
<span class="value-missing">{{field_labelval}}</span>
{% else %}
<span class="value-blank">"""+
    message.NO_SELECTION%{'id': "{{field.description.field_label}}"}+
    """</span>
{% endif %}
""")
# Edit template for an entity type selector:
edit_entitytype = (
    # Note use of fixed field name
    """<!-- fields.render_select.edit_entitytype -->
<select name="entity_type">
"""+
    edit_options+
    """
</select>
""")
# View template for the view-choice control:
view_view_choice = (
    """<!-- field/annalist_view_view_choice.html -->
<span>{{encoded_field_value}}</span>
""")
# Edit template for the view-choice control: selector plus a "Show view"
# submit button:
edit_view_choice = (
    """<!-- field/annalist_edit_view_choice.html -->
<div class="row">
<div class="small-9 columns">
<select name="{{repeat_prefix}}{{field.description.field_name}}">
"""+
    edit_options+
    """
</select>
</div>
<div class="small-3 columns">
<input type="submit" name="use_view" value="Show view" />
</div>
</div>
""")
# ----------------------------------------------------------------------------
#
# Select text value mapping
#
# ----------------------------------------------------------------------------
class SelectValueMapper(RenderBase):
    """
    Value mapper class for text selected from a list of choices.
    """

    @classmethod
    def encode(cls, data_value):
        """
        Encode the supplied data value as an option value to be selected
        in a <select> form input; None becomes "".
        """
        return data_value or ""
# ----------------------------------------------------------------------------
#
# Select text field renderers
#
# ----------------------------------------------------------------------------
class Select_view_renderer(object):
    """
    Render select value for viewing using supplied template
    """

    def __init__(self, template):
        self._template = Template(template)
        return

    def render(self, context):
        """
        Render the current selection as a link (when the target resolves),
        a "value missing" span, or a "no selection" span.
        """
        # Initialize defaults up front so the rendering step below always
        # has bound values.  Previously, if `val` was None or an exception
        # was raised before these assignments (e.g. TargetIdNotFound_Error
        # from fill_type_entity_id), the context.push(...) call raised
        # NameError on labelval/linkval/linkcont.
        textval  = ""
        labelval = ""
        linkval  = None
        linkcont = ""
        try:
            # val = get_field_view_value(context, None)
            val = get_field_edit_value(context, None)
            if val is not None:
                if not is_string(val):
                    log.error(ValueError("Entity selector value is not string", val))
                    val = "@@ unexpected selector %r"%(val,)
                typval = fill_type_entity_id(
                    val, context['field'].description['field_ref_type']
                    )
                textval  = SelectValueMapper.encode(typval)
                labelval = textval
                linkcont = context['field']['continuation_param']
                options  = context['field']['options']
                # Look up label and link for the selected option:
                for o in options:
                    if textval == o.value:
                        labelval = o.label
                        linkval  = o.link
                        break
        except TargetIdNotFound_Error as e:
            log.debug(repr(e))
            textval = ""
        except TargetEntityNotFound_Error as e:
            log.debug(repr(e))
            textval = repr(e)
        except Exception as e:
            log.error(repr(e))
            textval = repr(e)
        with context.push(
                field_textval=textval,
                field_labelval=labelval,
                field_linkval=linkval,
                field_continuation_param=linkcont):
            try:
                result = self._template.render(context)
            except Exception as e:
                log.error(repr(e))
                result = repr(e)
        return result
class Select_edit_renderer(object):
    """
    Render select value for editing using supplied template
    """

    def __init__(self, template):
        self._template = Template(template)
        return

    def render(self, context):
        """
        Render a <select> control for the current value and its options.

        If the current value is not among the declared options, it is added
        to the option list so that it is not silently dropped.  Any error is
        logged and its repr returned in place of the rendered control.
        """
        try:
            val = get_field_edit_value(context, None) or ""
            if not is_string(val):
                log.error(ValueError("Entity selector value is not string", val))
                val = "@@ unexpected selector %r"%(val,)
            # Use refer-to type if value does not include type:
            typval  = fill_type_entity_id(
                val, context['field'].description['field_ref_type']
                )
            textval = SelectValueMapper.encode(typval)
            options = update_choice_labels(context['field']['options'])
            if textval not in [ o.value for o in options ]:
                # Add missing current value to a clone of the options list:
                options = list(options)
                options.insert(0, FieldChoice(textval))
            with context.push(encoded_field_value=textval, field_options=options):
                result = self._template.render(context)
        except Exception as e:
            log.exception("Exception in Select_edit_renderer.render")
            log.error("Select_edit_renderer.render: "+repr(e))
            result = repr(e)
        return result
# ----------------------------------------------------------------------------
#
# Return render objects for select or choice controls (with or without '+' button)
#
# ----------------------------------------------------------------------------
def get_select_renderer():
    """
    Return a field renderer object for a value selector with a '+' button.
    """
    return RenderFieldValue(
        "select",
        view_renderer=Select_view_renderer(view_select),
        edit_renderer=Select_edit_renderer(edit_select),
        )
def get_choice_renderer():
    """
    Return a field renderer object for a value selector without a '+' button.
    """
    return RenderFieldValue(
        "choice",
        view_renderer=Select_view_renderer(view_choice),
        edit_renderer=Select_edit_renderer(edit_choice),
        )
def get_entitytype_renderer():
    """
    Return a field renderer object for an entity type selector.
    """
    return RenderFieldValue(
        "entitytype",
        view_renderer=Select_view_renderer(view_entitytype),
        edit_renderer=Select_edit_renderer(edit_entitytype),
        )
def get_view_choice_renderer():
    """
    Return field renderer object for a "view choice" control, which combines
    a regular selection box with a "Show view" button.  This option is used
    by the view template/renderer, and is not available as a field choice
    within the view.
    """
    view = Select_view_renderer(view_view_choice)
    edit = Select_edit_renderer(edit_view_choice)
    return RenderFieldValue("view_choice", view_renderer=view, edit_renderer=edit)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_select.py
|
render_select.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for Boolean value rendered as a checkbox.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from django.template import Template, Context
from utils.py3porting import is_string, to_unicode
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_field_edit_value,
get_field_view_value
)
# ----------------------------------------------------------------------------
#
# Boolean checkbox value mapping
#
# ----------------------------------------------------------------------------
class BoolCheckboxValueMapper(RenderBase):
    """
    Value mapper class for a Boolean value presented as a checkbox.

    Also accepts text values such as 'True', 'False', 'Yes', 'No', which
    are passed through unchanged by `encode`.
    """

    @classmethod
    def encode(cls, data_value):
        """
        Encode the supplied data value as a display string.

        None -> "No"; strings pass through; booleans map to "Yes"/"No";
        anything else is reported as an unknown Boolean.
        """
        if data_value is None:
            return "No"
        if is_string(data_value):
            return data_value
        if isinstance(data_value, bool):
            return "Yes" if data_value else "No"
        return "Unknown Boolean "+repr(data_value)

    @classmethod
    def decode(cls, field_value):
        """
        Decode a checkbox value attribute string as a Boolean value:
        True for any non-None, non-empty value.
        """
        if field_value is None or field_value == "":
            return False
        return True
# ----------------------------------------------------------------------------
#
# Boolean checkbox field renderers
#
# ----------------------------------------------------------------------------
class bool_checkbox_view_renderer(object):

    def render(self, context):
        """
        Render a Boolean field value for viewing, as text in a <span>.
        """
        value = get_field_view_value(context, None)
        return "<span>%s</span>"%BoolCheckboxValueMapper.encode(value)
class bool_checkbox_edit_renderer(object):
    """
    Render a Boolean field value as an HTML checkbox for editing.
    """
    def __init__(self):
        # Compiled once per renderer; expects `encoded_field_value` and
        # `checked` to be pushed onto the context before rendering.
        self._template = Template(
            '''<input type="checkbox" '''+
            '''name="{{repeat_prefix}}{{field.description.field_name}}" '''+
            '''value="{{encoded_field_value}}"{{checked|safe}} />'''+
            ''' <span class="value-placeholder">{{field.description.field_placeholder}}</span>'''
            )
        return
    def render(self, context):
        """
        Render Boolean value for editing.

        The field value is encoded to its text form; the checkbox is checked
        when that text reads as an affirmative value.
        """
        val     = get_field_edit_value(context, None)
        textval = BoolCheckboxValueMapper.encode(val)
        # NOTE: a redundant call to BoolCheckboxValueMapper.decode(val) was
        # removed here; its result was immediately overwritten by the
        # text-based test below, and decode has no side effects.
        boolval = textval.lower() in ["y", "yes", "t", "true"]
        checked = ''' checked="checked"''' if boolval else ''''''
        with context.push(encoded_field_value=textval, checked=checked):
            result = self._template.render(context)
        return result
def get_bool_checkbox_renderer():
    """
    Return field renderer object for Boolean as checkbox
    """
    return RenderFieldValue("checkbox",
        view_renderer=bool_checkbox_view_renderer(),
        edit_renderer=bool_checkbox_edit_renderer()
        )
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_bool_checkbox.py
|
render_bool_checkbox.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
RenderFieldValue class for returning field renderers. This class works for
fields that can be rendered using supplied renderer objects (which may be
compiled templates).
The class provides for wrapping the value rendering templates in various ways
so that they can be applied in a range of different contexts.
This class is based on RenderFieldValue, but accepts renderers rather than
template file names. In due course, RenderFieldValue should be renamed and
re-written to be based on the class defined here.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import traceback
import logging
log = logging.getLogger(__name__)
from django.http import HttpResponse
from django.template import Template, Context
import django
# django.setup() # Needed for template loader (??)
# Used to bypass configurable template loader so we can render using Context values
#
# Mostly, Analist calls template renderers with dict values for context data, which are
# fine with the template rendering framework introduced with Django 1.8. But where
# renderers are references using {% include .. %} directives from Django templates,
# Context values are provided, and assumed by the compiled templates used with field
# value renderers. So we need to explicitly access the DjangoTemplates engine to render
# field values.
#
from django.template import engine
template_backend = engine.Engine.get_default()
from annalist.exceptions import Annalist_Error
# ------------------------------------------------------------
# Local data values
# ------------------------------------------------------------
# These templates all expect the value renderer to be provided in the
# view context as `value_renderer`
# Render-type-independent templates
# Template for rendering just a field's label text.
# Falls back to a non-breaking space so the label cell keeps its height.
label_template = (
    """<span>{{field.description.field_label|default:" "}}</span>"""
    )

# Tooltip fragments interpolated into the wrapper templates below:
# edit wrappers carry a tooltip attribute, view wrappers do not.
no_tooltip   = ""
with_tooltip = "{{field.field_tooltip_attr|safe}}"

# Renderer wrapper templates

# Wrap field label
label_wrapper_template = (
    # """<!-- label_wrapper_template ({{field.description.field_render_type}}, {{render_mode}}) -->"""+
    """<div class="view-label {{field.description.field_placement.field}}">\n"""+
    """ {% include value_renderer %}\n"""+
    "</div>"""
    )
# Wrap bare value (e.g. column value)
def value_wrapper_template(tooltip):
    # Build a wrapper template for a bare field value; `tooltip` is the
    # tooltip attribute fragment substituted into the opening <div>.
    # Note: '%%' escapes produce literal '{%' / '%}' after %-substitution.
    return (
        # """<!-- value_wrapper_template ({{field.description.field_render_type}}, {{render_mode}}) -->"""+
        """<div class="view-value {{field.description.field_placement.field}}"%s>\n"""+
        """ {%% include value_renderer %%}\n"""+
        """</div>"""
        )%(tooltip,)

# Pre-built variants: tooltips are shown only when editing.
view_value_wrapper_template = value_wrapper_template(no_tooltip)
edit_value_wrapper_template = value_wrapper_template(with_tooltip)
# Wrap value and include label
def label_value_wrapper_template(tooltip):
    # Build a wrapper template that renders the field label alongside the
    # field value; `tooltip` is the tooltip attribute fragment for the
    # outer <div> (empty for view mode).
    return (
        # """<!-- label_value_wrapper_template ({{field.description.field_render_type}}, {{render_mode}}) -->"""+
        """<div class="{{field.description.field_placement.field}}"%s>\n"""+
        """ <div class="row view-value-row">\n"""+
        """ <div class="view-label {{field.description.field_placement.label}}">\n"""+
        """ <span>{{field.description.field_label}}</span>\n"""+
        """ </div>\n"""+
        """ <div class="view-value {{field.description.field_placement.value}}">\n"""+
        """ {%% include value_renderer %%}\n"""+
        """ </div>\n"""+
        """ </div>\n"""+
        """</div>"""
        )%(tooltip,)

# Pre-built variants: tooltips are shown only when editing.
label_view_value_wrapper_template = label_value_wrapper_template(no_tooltip)
label_edit_value_wrapper_template = label_value_wrapper_template(with_tooltip)
# Wrap field label with column heading styling
col_head_wrapper_template = (
    # """<!-- col_head_wrapper_template ({{field.description.field_render_type}}, {{render_mode}}) -->"""+
    """<div class="view-label col-head {{field.description.field_placement.field}}">\n"""+
    """ {% include value_renderer %}\n"""+
    """</div>"""
    )
# Wrap value with column value styling; include label on small displays only
def col_label_value_wrapper_template(tooltip):
    # Build a wrapper template for a field rendered as a table/list column:
    # the label row is only shown on small displays ("show-for-small-only").
    return (
        # """<!-- col_label_value_wrapper_template ({{field.description.field_render_type}}, {{render_mode}}) -->"""+
        """<div class="{{field.description.field_placement.field}}"%s>\n"""+
        """ <div class="row show-for-small-only">\n"""+
        """ <div class="view-label small-12 columns">\n"""+
        """ <span>{{field.description.field_label}}</span>\n"""+
        """ </div>\n"""+
        """ </div>\n"""+
        """ <div class="row view-value-col">\n"""+
        """ <div class="view-value small-12 columns">\n"""+
        """ {%% include value_renderer %%}\n"""+
        """ </div>\n"""+
        """ </div>\n"""+
        """</div>"""
        )%(tooltip,)

# Pre-built variants: tooltips are shown only when editing.
col_label_view_value_wrapper_template = col_label_value_wrapper_template(no_tooltip)
col_label_edit_value_wrapper_template = col_label_value_wrapper_template(with_tooltip)
# ------------------------------------------------------------
# Helper classes
# ------------------------------------------------------------
class WrapValueRenderer(object):
    """
    Render class wraps a supplied value renderer for diagnostic purposes
    """
    def __init__(self, value_renderer):
        # Wrapped renderer; may be None (e.g. for label-only templates that
        # never reference `value_renderer`).
        self.value_renderer = value_renderer
        return
    def render(self, context):
        # Delegate to the wrapped renderer, converting any exception into a
        # logged diagnostic.
        try:
            return self.value_renderer.render(context)
        except Exception as e:
            msg = "Exception in WrapValueRenderer.value_renderer"
            log.exception(msg)
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            response_parts = (
                ["Exception in WrapValueRenderer.value_renderer"]+
                [repr(e)]+
                traceback.format_exception(ex_type, ex, tb)+
                ["***WrapValueRenderer.value_renderer***"]
                )
            # Break reference cycle through the traceback object
            del tb
            # NOTE(review): this unconditional raise makes the following
            # `return` (and the `response_parts` report built above)
            # unreachable.  The sibling wrapper classes return the formatted
            # report instead; the raise is apparently deliberate ("used in
            # testing to help pinpoint errors") — confirm before changing.
            raise ValueError(msg) # (used in testing to help pinpoint errors)
            return "\n".join(response_parts)
class TemplateWrapValueRenderer(object):
    """
    Render class combines a value renderer with a wrapper template.
    The wrapper template invokes the value renderer by a reference
    to `{% include value_renderer %}`
    """
    def __init__(self, wrapper_template, value_renderer):
        # Compile the wrapper once; the value renderer is wrapped for
        # diagnostics and exposed to the template as `value_renderer`.
        self.compiled_wrapper = Template(wrapper_template)
        self.value_renderer   = WrapValueRenderer(value_renderer)
        return
    def render(self, context):
        # A real Context object is required (context.push is used below)
        if isinstance(context, dict):
            raise ValueError("@@ TemplateWrapValueRenderer.render called with dict")
        with context.push(value_renderer=self.value_renderer):
            try:
                return self.compiled_wrapper.render(context)
            except Exception as e:
                # Log and return a diagnostic report in place of the rendering
                log.exception("Exception in TemplateWrapValueRenderer.render")
                ex_type, ex, tb = sys.exc_info()
                traceback.print_tb(tb)
                report = ["Exception in TemplateWrapValueRenderer.render", repr(e)]
                report.extend(traceback.format_exception(ex_type, ex, tb))
                report.append("***TemplateWrapValueRenderer.render***")
                del tb    # break reference cycle through traceback
                return "\n".join(report)
class ModeWrapValueRenderer(object):
    """
    Render class invokes a value renderer with a specified render mode.
    """
    def __init__(self, render_mode, value_renderer):
        self.render_mode    = render_mode
        self.value_renderer = value_renderer
        return
    def render(self, context):
        # A real Context object is required (context.push is used below)
        if isinstance(context, dict):
            raise ValueError("@@ ModeWrapValueRenderer.render called with dict")
        with context.push(render_mode=self.render_mode):
            try:
                return self.value_renderer.render(context)
            except Exception as e:
                # Log and return a diagnostic report in place of the rendering
                log.exception("Exception in ModeWrapValueRenderer.render")
                ex_type, ex, tb = sys.exc_info()
                traceback.print_tb(tb)
                report = ["Exception in ModeWrapValueRenderer.render", repr(e)]
                report.extend(traceback.format_exception(ex_type, ex, tb))
                report.append("***ModeWrapValueRenderer.render***")
                del tb    # break reference cycle through traceback
                return "\n".join(report)
# ------------------------------------------------------------
# Renderer factory class
# ------------------------------------------------------------
class RenderFieldValue(object):
    """
    Renderer constructor for an entity value field.

    Given simple rendering templates for a display and editing an entity value
    fields, this class will construct new renderers for using those values in
    different contexts:

    * label_view: labeled value display, not editable
    * label_edit: labeled value edit control
    * col_head:   value label or nothing, depending on media context
    * col_view:   labeled or unlabeled value display, depending on media context
    * col_edit:   labeled or unlabeled value edit control, depending on media context
    * label:      value label
    * view:       unlabeled value display control
    * edit:       unlabeled value edit control

    The various renderers returned require `context['field']` to contain a
    `bound_field` value corresponding to the value and field to be displayed.

    Composed renderers are constructed lazily and cached on first access.
    """

    def __init__(self, render_type,
        view_renderer=None, edit_renderer=None,
        col_head_view_renderer=None, col_head_edit_renderer=None,
        view_template=None, edit_template=None,
        view_file=None, edit_file=None
        ):
        """
        Creates a renderer factory for a value field.

        render_type     render type string (for diagnostics)
        view_renderer   is a render object that formats a field value
        edit_renderer   is a render object that formats a field value in a
                        form control that allows the value to be edited
        col_head_view_renderer
                        if supplied, overrides the renderer normally used for
                        displaying column headings when viewing an entity.
        col_head_edit_renderer
                        if supplied, overrides the renderer normally used for
                        displaying column headings when editing an entity.
        view_template   is a template string that formats a field value
        edit_template   is a template string that formats a field value in a
                        form control that allows the value to be edited
        view_file       is the name of a template file that formats a field value
        edit_file       is the name of a template file that formats a field value
                        in an editable form control

        Exactly one of view_renderer/view_template/view_file, and one of
        edit_renderer/edit_template/edit_file, must be supplied (renderer
        takes precedence over template, template over file); otherwise
        Annalist_Error is raised.

        Methods provided return composed renderers for a variety of contexts.
        """
        # log.info("RenderFieldValue: viewrender %s, editrender %s"%(viewrender, edit_file))
        super(RenderFieldValue, self).__init__()
        self._render_type = render_type
        # Save label renderer
        self._label_renderer = Template(label_template)
        # Save view renderer (renderer > template string > template file)
        if view_renderer is not None:
            self._view_renderer = view_renderer
        elif view_template is not None:
            self._view_renderer = Template(view_template)
        elif view_file is not None:
            self._view_renderer = get_field_template(view_file)
        else:
            raise Annalist_Error("RenderFieldValue: no view renderer or template provided")
        # Save edit renderer (renderer > template string > template file)
        if edit_renderer is not None:
            self._edit_renderer = edit_renderer
        elif edit_template is not None:
            self._edit_renderer = Template(edit_template)
        elif edit_file is not None:
            self._edit_renderer = get_field_template(edit_file)
        else:
            raise Annalist_Error("RenderFieldValue: no edit renderer or template provided")
        # Initialize various renderer caches (populated lazily by the
        # accessor methods below)
        self._col_head_view_renderer = col_head_view_renderer
        self._col_head_edit_renderer = col_head_edit_renderer
        self._render_label           = None
        self._render_view            = None
        self._render_edit            = None
        self._render_label_view      = None
        self._render_label_edit      = None
        self._render_col_head        = None
        self._render_col_head_view   = None
        self._render_col_head_edit   = None
        self._render_col_view        = None
        self._render_col_edit        = None
        # NOTE(review): `_renderers` appears unused in this class — confirm
        # before removing.
        self._renderers              = None
        return

    def __str__(self):
        return (
            "RenderFieldValue: view_renderer %s, edit_renderer %s"%
            (self._view_renderer, self._edit_renderer)
            )

    def __repr__(self):
        return (
            "RenderFieldValue(render_type=%s, view_renderer=%r, edit_renderer=%r)"%
            (self._render_type, self._view_renderer, self._edit_renderer)
            )

    # Template access functions

    def label(self):
        """
        Returns a renderer object to display a field label from the
        supplied `context['field']` value.
        """
        if not self._render_label:
            # label_template does not reference `value_renderer`, so no
            # value renderer is needed here.
            self._render_label = ModeWrapValueRenderer(
                "label",
                TemplateWrapValueRenderer(
                    label_template, None
                    )
                )
        # log.info("self._render_label %r"%self._render_label)
        return self._render_label

    def view(self):
        """
        Returns a renderer object to display just a non-editable field value.
        """
        # log.info("self._view_renderer %r"%self._view_renderer)
        if not self._render_view:
            self._render_view = ModeWrapValueRenderer(
                "view",
                TemplateWrapValueRenderer(
                    view_value_wrapper_template, self._view_renderer
                    )
                )
        return self._render_view

    def edit(self):
        """
        Returns a renderer object to display just an editable field value.
        """
        if not self._render_edit:
            self._render_edit = ModeWrapValueRenderer(
                "edit",
                TemplateWrapValueRenderer(
                    edit_value_wrapper_template, self._edit_renderer
                    )
                )
        return self._render_edit

    def label_view(self):
        """
        Returns a renderer object to display a labeled non-editable field value.
        """
        if not self._render_label_view:
            self._render_label_view = ModeWrapValueRenderer(
                "label_view",
                TemplateWrapValueRenderer(
                    label_view_value_wrapper_template, self._view_renderer
                    )
                )
        return self._render_label_view

    def label_edit(self):
        """
        Returns a renderer object to display a labeled editable field value.
        """
        if not self._render_label_edit:
            self._render_label_edit = ModeWrapValueRenderer(
                "label_edit",
                TemplateWrapValueRenderer(
                    label_edit_value_wrapper_template, self._edit_renderer
                    )
                )
        return self._render_label_edit

    def col_head(self):
        """
        Returns a renderer object to display nothing on small media, or
        a field label used as a column header on larger media.
        """
        if not self._render_col_head:
            self._render_col_head = ModeWrapValueRenderer(
                "col_head",
                TemplateWrapValueRenderer(
                    col_head_wrapper_template, self._label_renderer
                    )
                )
        return self._render_col_head

    def col_head_view(self):
        """
        Returns a renderer object to display nothing on small media, or
        a field label used as a column header on larger media when
        viewing an entity.

        Falls back to the plain `col_head` renderer when no view-specific
        column heading renderer was supplied.
        """
        if not self._render_col_head_view and self._col_head_view_renderer:
            self._render_col_head_view = ModeWrapValueRenderer(
                "col_head_view",
                TemplateWrapValueRenderer(
                    col_head_wrapper_template, self._col_head_view_renderer
                    )
                )
        return self._render_col_head_view or self.col_head()

    def col_head_edit(self):
        """
        Returns a renderer object to display nothing on small media, or
        a field label used as a column header on larger media when
        editing an entity.

        Falls back to the plain `col_head` renderer when no edit-specific
        column heading renderer was supplied.
        """
        if not self._render_col_head_edit and self._col_head_edit_renderer:
            self._render_col_head_edit = ModeWrapValueRenderer(
                "col_head_edit",
                TemplateWrapValueRenderer(
                    col_head_wrapper_template, self._col_head_edit_renderer
                    )
                )
        return self._render_col_head_edit or self.col_head()

    def col_view(self):
        """
        Returns a renderer object to display a non-editable field,
        labeled on a small display, and unlabelled for a larger display
        """
        if not self._render_col_view:
            self._render_col_view = ModeWrapValueRenderer(
                "col_view",
                TemplateWrapValueRenderer(
                    col_label_view_value_wrapper_template, self._view_renderer
                    )
                )
        return self._render_col_view

    def col_edit(self):
        """
        Returns a renderer object to display an editable field,
        labeled on a small display, and unlabelled for a larger display
        """
        if not self._render_col_edit:
            self._render_col_edit = ModeWrapValueRenderer(
                "col_edit",
                TemplateWrapValueRenderer(
                    col_label_edit_value_wrapper_template, self._edit_renderer
                    )
                )
        return self._render_col_edit
# Helper function for caller to get template content.
# This uses the configured Django template loader.
def get_field_template(templatefile, failmsg="no template filename supplied"):
    """
    Retrieve field template from the supplied filename.

    templatefile    name of the template file to load
    failmsg         message detail used if `templatefile` is not supplied

    Raises AssertionError if no template filename is supplied.
    """
    # Fix: assertion message previously said "get_template", which made
    # failures harder to trace back to this function.
    assert templatefile, "get_field_template: %s"%failmsg
    # Use DjangoTemplates backend to get template that works with Context values
    template = template_backend.get_template(templatefile)
    return template
# Helper functions for accessing values from context
def get_context_value(context, key, default):
    """
    Return `context[key]` if the key is present, otherwise `default`.
    Works for dicts and Django Context objects alike.
    """
    return context[key] if key in context else default
def get_context_field_value(context, key, default):
    """
    Return `key` from the current field in the supplied context,
    or `default` if there is no such field or key.
    """
    field_data = get_context_value(context, 'field', {})
    return get_context_value(field_data, key, default)
def get_field_edit_value(context, default):
    # Return the current field's editable value from the context, or `default`.
    return get_context_field_value(context, 'field_edit_value', default)

def get_field_view_value(context, default):
    # Return the current field's display value from the context, or `default`.
    return get_context_field_value(context, 'field_view_value', default)
def get_context_field_description_value(context, key, default):
    """
    Return `key` from the current field's description, or `default` if the
    context has no field or the description lacks the key.
    """
    field = get_context_value(context, 'field', None)
    if field is None:
        # Fix: previously dereferenced `field.description` on the None
        # default, raising AttributeError instead of returning `default`.
        return default
    return get_context_value(field.description, key, default)
# End.
#........1.........2.........3.........4.........5.........6.........7.........8
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_fieldvalue.py
|
render_fieldvalue.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for URI value displayed as an audio player widget.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_context_field_value,
get_field_edit_value,
get_field_view_value
)
from django.template import Template, Context
# ----------------------------------------------------------------------------
#
# Audio resource reference value mapping
#
# ----------------------------------------------------------------------------
class RefAudioValueMapper(RenderBase):
    """
    Value mapper class for audio resource reference
    """

    @classmethod
    def encode(cls, data_value):
        """
        Encodes audio reference as a string; falsy values become "".
        """
        if data_value:
            return data_value
        return ""

    @classmethod
    def decode(cls, field_value):
        """
        Decodes a URI value as an audio reference; falsy values become "".
        """
        if field_value:
            return field_value
        return ""
# ----------------------------------------------------------------------------
#
# Audio resource reference field renderers
#
# ----------------------------------------------------------------------------
class ref_audio_view_renderer(object):

    def render(self, context):
        """
        Render audio reference in entity view as player widget for referenced resource.
        """
        linkval = RefAudioValueMapper.encode(
            get_context_field_value(context, "target_value_link", "")
            )
        widget = (
            """<div>Audio at '<a href="%s" target="_blank">%s</a>'</div>"""
            """<audio controls="controls" src="%s" ></audio>"""
            )
        return widget%(linkval, linkval, linkval)
class ref_audio_edit_renderer(object):

    def __init__(self):
        # Simple text input for the audio resource URI, compiled once.
        template_text = (
            '''<input type="text" size="64" name="{{repeat_prefix}}{{field.description.field_name}}" '''
            '''placeholder="{{field.description.field_placeholder}}" '''
            '''value="{{field.field_edit_value}}" />'''
            )
        self._template = Template(template_text)
        return

    def render(self, context):
        """
        Render audio URI for editing
        """
        return self._template.render(context)
def get_ref_audio_renderer():
    """
    Return field renderer object for audio resource reference values.
    """
    return RenderFieldValue("ref_audio",
        view_renderer=ref_audio_view_renderer(),
        edit_renderer=ref_audio_edit_renderer()
        )
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_ref_audio.py
|
render_ref_audio.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re
from django.conf import settings
from .render_fieldvalue import RenderFieldValue
from .render_text import TextValueMapper
from .render_entityid import EntityIdValueMapper
from .render_identifier import IdentifierValueMapper
from .render_text_language import get_text_language_renderer, TextLanguageValueMapper
from .render_placement import get_field_placement_renderer
from .render_tokenset import get_field_tokenset_renderer, TokenSetValueMapper
from .render_bool_checkbox import get_bool_checkbox_renderer, BoolCheckboxValueMapper
from .render_ref_audio import get_ref_audio_renderer, RefAudioValueMapper
from .render_ref_image import get_ref_image_renderer, RefImageValueMapper
from .render_text_markdown import (
get_text_markdown_renderer, get_show_markdown_renderer, TextMarkdownValueMapper
)
from .render_select import (
get_select_renderer, get_choice_renderer,
get_entitytype_renderer, get_view_choice_renderer,
SelectValueMapper
)
from .render_uri_link import get_uri_link_renderer, URILinkValueMapper
from .render_uri_import import get_uri_import_renderer, URIImportValueMapper
from .render_file_upload import get_file_upload_renderer, FileUploadValueMapper
from .render_repeatgroup import (
get_repeatgroup_renderer,
get_repeatgrouprow_renderer,
get_repeatlistrow_renderer,
)
from .render_fieldrow import get_fieldrow_renderer, FieldRowValueMapper
from .render_ref_multifields import get_ref_multifield_renderer, RefMultifieldValueMapper
# Render type mappings to templates and/or renderer access functions

# Cache of RenderFieldValue objects, keyed by render type; populated
# lazily by get_field_base_renderer.
_field_renderers = {} # renderer cache

# Render types implemented by view template files (see also
# _field_edit_files below for the corresponding edit templates).
_field_view_files = (
    { "Text":         "field/annalist_view_text.html"
    , "Showtext":     "field/annalist_view_text.html"
    , "Textarea":     "field/annalist_view_textarea.html"
    , "Codearea":     "field/annalist_view_codearea.html"
    , "EntityRef":    "field/annalist_view_entityref.html"
    , "EntityId":     "field/annalist_view_entityid.html"
    , "Identifier":   "field/annalist_view_identifier.html"
    , "Padding":      "field/annalist_view_padding.html"
    , "Placeholder":  "field/annalist_view_placeholder.html"
    })
# Render types implemented by edit template files.
# "Showtext" and "Placeholder" deliberately reuse view templates, so the
# value is displayed but not editable.
_field_edit_files = (
    { "Text":         "field/annalist_edit_text.html"
    , "Showtext":     "field/annalist_view_text.html"
    , "Textarea":     "field/annalist_edit_textarea.html"
    , "Codearea":     "field/annalist_edit_codearea.html"
    , "EntityRef":    "field/annalist_edit_entityref.html"
    , "EntityId":     "field/annalist_edit_entityid.html"
    , "Identifier":   "field/annalist_edit_identifier.html"
    , "Padding":      "field/annalist_edit_padding.html"
    , "Placeholder":  "field/annalist_view_placeholder.html"
    })
# Render types implemented by renderer factory functions (rather than
# template files); each value is a zero-argument function returning a
# RenderFieldValue object.
_field_get_renderer_functions = (
    { "LangText":         get_text_language_renderer
    , "Markdown":         get_text_markdown_renderer
    , "ShowMarkdown":     get_show_markdown_renderer
    , "Placement":        get_field_placement_renderer
    , "TokenSet":         get_field_tokenset_renderer
    , "CheckBox":         get_bool_checkbox_renderer
    , "RefAudio":         get_ref_audio_renderer
    , "RefImage":         get_ref_image_renderer
    , "URILink":          get_uri_link_renderer
    , "URIImport":        get_uri_import_renderer
    , "FileUpload":       get_file_upload_renderer
    , "EntityTypeId":     get_entitytype_renderer
    , "Enum":             get_select_renderer
    , "Enum_optional":    get_select_renderer
    , "Enum_choice":      get_choice_renderer
    , "Enum_choice_opt":  get_choice_renderer
    , "View_choice":      get_view_choice_renderer
    , "RefMultifield":    get_ref_multifield_renderer
    , "RepeatGroup":      get_repeatgroup_renderer
    , "Group_Seq":        get_repeatgroup_renderer
    , "Group_Set":        get_repeatgroup_renderer
    , "RepeatGroupRow":   get_repeatgrouprow_renderer
    , "Group_Seq_Row":    get_repeatgrouprow_renderer
    , "Group_Set_Row":    get_repeatgrouprow_renderer
    , "RepeatListRow":    get_repeatlistrow_renderer
    , "FieldRow":         get_fieldrow_renderer
    # Render types recognized for backward compatibility
    , "URIImage":         get_ref_image_renderer
    , "Type":             get_select_renderer
    , "View":             get_select_renderer
    , "List":             get_select_renderer
    , "Field":            get_select_renderer
    , "List_sel":         get_choice_renderer
    })
# Value mapper classes (encode/decode between stored and displayed forms)
# for render types needing more than the identity mapping.
_field_value_mappers = (
    { "LangText":         TextLanguageValueMapper
    , "TokenSet":         TokenSetValueMapper
    , "CheckBox":         BoolCheckboxValueMapper
    , "Markdown":         TextMarkdownValueMapper
    , "ShowMarkdown":     TextMarkdownValueMapper
    , "RefAudio":         RefAudioValueMapper
    , "RefImage":         RefImageValueMapper
    , "URILink":          URILinkValueMapper
    , "URIImport":        URIImportValueMapper
    , "FileUpload":       FileUploadValueMapper
    , "EntityId":         EntityIdValueMapper
    , "EntityTypeId":     SelectValueMapper
    , "Identifier":       IdentifierValueMapper
    , "Enum":             SelectValueMapper
    , "Enum_optional":    SelectValueMapper
    , "Enum_choice":      SelectValueMapper
    , "Enum_choice_opt":  SelectValueMapper
    , "View_choice":      SelectValueMapper
    , "RefMultifield":    RefMultifieldValueMapper
    , "FieldRow":         FieldRowValueMapper
    # Render types recognized for backward compatibility
    , "URIImage":         RefImageValueMapper
    , "Type":             SelectValueMapper
    , "View":             SelectValueMapper
    , "List":             SelectValueMapper
    , "Field":            SelectValueMapper
    , "List_sel":         SelectValueMapper
    })
def is_repeat_field_render_type(render_type):
    """
    Test whether the supplied render type identifies a repeating-value field.
    """
    return render_type in (
        "RepeatGroup", "RepeatGroupRow",
        "Group_Seq",   "Group_Seq_Row",
        "Group_Set",   "Group_Set_Row",
        )
def get_field_base_renderer(field_render_type):
    """
    Lookup and return base renderer for given field type.

    The renderer is constructed on first use (from template files or a
    factory function) and cached; returns None for unknown render types.
    """
    if field_render_type not in _field_renderers:
        # Create and cache renderer
        viewfile = _field_view_files.get(field_render_type, None)
        editfile = _field_edit_files.get(field_render_type, None)
        if (viewfile is not None) or (editfile is not None):
            _field_renderers[field_render_type] = RenderFieldValue(
                field_render_type,
                view_file=viewfile, edit_file=editfile
                )
        elif field_render_type in _field_get_renderer_functions:
            make_renderer = _field_get_renderer_functions[field_render_type]
            _field_renderers[field_render_type] = make_renderer()
    return _field_renderers.get(field_render_type, None)
def get_entityref_edit_renderer(renderer, field_render_type):
    """
    Returns an updated edit renderer, called for fields with an entity type reference:
    used to force a selection renderer for fields with other view render types.
    """
    selection_types = (
        "Enum", "Enum_optional", "Enum_choice", "Enum_choice_opt",
        "View_choice", "List_sel"
        )
    if field_render_type in selection_types:
        return renderer
    return get_field_base_renderer("Enum")
def get_uriimport_edit_renderer(renderer, field_render_type):
    """
    Returns an updated edit renderer for fields with a URI import value mode
    """
    if field_render_type == "URIImport":
        return renderer
    return get_field_base_renderer("URIImport")
def get_fileupload_edit_renderer(renderer, field_render_type):
    """
    Returns an updated edit renderer for fields with a file upload value mode
    """
    if field_render_type == "FileUpload":
        return renderer
    return get_field_base_renderer("FileUpload")
def get_field_edit_renderer(field_render_type, field_value_mode):
    """
    Get edit renderer for supplied field details, taking account of variations
    on the base renderer due to field reference and field value type.
    """
    # log.debug("Render field_render_type %s, field_value_mode %s"%(field_render_type, field_value_mode))
    renderer = get_field_base_renderer(field_render_type)
    # Value modes that override the base edit renderer
    mode_overrides = (
        { "Value_entity": get_entityref_edit_renderer
        , "Value_import": get_uriimport_edit_renderer
        , "Value_upload": get_fileupload_edit_renderer
        })
    override = mode_overrides.get(field_value_mode, None)
    if override is not None:
        renderer = override(renderer, field_render_type)
    return renderer
def get_label_renderer(field_render_type, field_value_mode):
    """
    Returns a field label renderer object that can be referenced in a
    Django template "{% include ... %}" element.
    """
    class _label_renderer(object):
        def render(self, context):
            # Return the field label, or a diagnostic marker if absent
            return context.get('field_label', "@@no 'field_label'@@")
    return _label_renderer()
def get_edit_renderer(field_render_type, field_value_mode):
    """
    Returns an field edit renderer object that can be referenced in a
    Django template "{% include ... %}" element.

    The returned value is an object with a `.render(context)` method that
    returns a string to be included in the resulting page (see
    https://docs.djangoproject.com/en/dev/ref/templates/builtins/#include).
    Unknown render types fall back to the simple "Text" renderer.
    """
    renderer = get_field_edit_renderer(field_render_type, field_value_mode)
    if renderer is None:
        # Default to simple text for unknown renderer type
        log.warning("get_edit_renderer: %s not found"%field_render_type)
        renderer = get_field_base_renderer("Text")
    return renderer.edit()
def get_view_renderer(field_render_type, field_value_mode):
    """
    Returns a field view renderer object that can be referenced in a
    Django template "{% include ... %}" element.

    The returned value is an object with a `.render(context)` method that
    returns a string to be included in the resulting page (see
    https://docs.djangoproject.com/en/dev/ref/templates/builtins/#include).
    Unknown render types fall back to the simple "Text" renderer.
    """
    renderer = get_field_base_renderer(field_render_type)
    if renderer is None:
        # Default to simple text for unknown renderer type
        log.warning("get_view_renderer: '%s' not found"%field_render_type)
        renderer = get_field_base_renderer("Text")
    return renderer.view()
def get_label_edit_renderer(field_render_type, field_value_mode):
    """
    Returns a field label+edit renderer object that can be referenced in a
    Django template "{% include ... %}" element.
    """
    renderer = get_field_edit_renderer(field_render_type, field_value_mode)
    if renderer:
        return renderer.label_edit()
    # Unknown render type: fall back to simple text rendering
    log.warning("get_label_edit_renderer: '%s' not found"%field_render_type)
    return get_field_base_renderer("Text").label_edit()
def get_label_view_renderer(field_render_type, field_value_mode):
    """
    Returns a field label+view renderer object that can be referenced in a
    Django template "{% include ... %}" element.
    """
    renderer = get_field_base_renderer(field_render_type)
    if renderer:
        return renderer.label_view()
    # Unknown render type: fall back to simple text rendering
    log.warning("get_label_view_renderer: '%s' not found"%field_render_type)
    return get_field_base_renderer("Text").label_view()
def get_col_head_renderer(field_render_type, field_value_mode):
    """
    Returns a field list heading renderer object that can be referenced in a
    Django template "{% include ... %}" element.
    """
    renderer = get_field_base_renderer(field_render_type)
    if not renderer:
        # No renderer found: use generic column-heading template
        log.debug("get_col_head_renderer: '%s' not found"%field_render_type)
        return "field/annalist_head_any.html"
    return renderer.col_head()
def get_col_head_view_renderer(field_render_type, field_value_mode):
    """
    Returns a field list heading renderer object that can be referenced in a
    Django template "{% include ... %}" element when viewing an entity.
    """
    renderer = get_field_base_renderer(field_render_type)
    if not renderer:
        # No renderer found: use generic column-heading template
        log.debug("get_col_head_view_renderer: '%s' not found"%field_render_type)
        return "field/annalist_head_any.html"
    return renderer.col_head_view()
def get_col_head_edit_renderer(field_render_type, field_value_mode):
    """
    Returns a field list heading renderer object that can be referenced in a
    Django template "{% include ... %}" element when editing an entity.
    """
    renderer = get_field_base_renderer(field_render_type)
    if not renderer:
        # No renderer found: use generic column-heading template
        log.debug("get_col_head_edit_renderer: '%s' not found"%field_render_type)
        return "field/annalist_head_any.html"
    return renderer.col_head_edit()
def get_col_edit_renderer(field_render_type, field_value_mode):
    """
    Returns a field list row-item renderer object that can be referenced in a
    Django template "{% include ... %}" element.
    """
    renderer = get_field_edit_renderer(field_render_type, field_value_mode)
    if not renderer:
        # No renderer found: use empty row-item template
        log.debug("get_col_edit_renderer: '%s' not found"%field_render_type)
        return "field/annalist_item_none.html"
    return renderer.col_edit()
def get_col_view_renderer(field_render_type, field_value_mode):
    """
    Returns a field list row-item renderer object that can be referenced in a
    Django template "{% include ... %}" element.
    """
    renderer = get_field_base_renderer(field_render_type)
    if not renderer:
        # No renderer found: use empty row-item template
        log.debug("get_col_view_renderer: '%s' not found"%field_render_type)
        return "field/annalist_item_none.html"
    return renderer.col_view()
def get_value_mapper(field_render_type):
    """
    Returns a value mapper class instance (with encode and decode methods)
    which is used to map values between entity fields and textual form fields.

    The default 'RenderText' object returned contains identity mappings.
    """
    mapper_class = _field_value_mappers.get(field_render_type, TextValueMapper)
    return mapper_class()
if __name__ == "__main__":
    # Run module doctests when executed directly.
    import doctest
    doctest.testmod()

# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/find_renderers.py
|
find_renderers.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for multiple fields displayed from a referenced entity.
The renderer displays a number of fields from the referenced entity as a single
row, wrapped in a row <div> to force the fields to a new row.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2016, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import traceback
import logging
log = logging.getLogger(__name__)
from django.http import HttpResponse
from django.template import Template, Context
# from annalist.exceptions import TargetIdNotFound_Error, TargetEntityNotFound_Error
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.bound_field import bound_field
from annalist.views.fields.render_fieldvalue import (
ModeWrapValueRenderer,
RenderFieldValue,
get_field_edit_value,
get_field_view_value
)
# ------------------------------------------------------------
# Local data values
# ------------------------------------------------------------
# Template fragments used by RenderFieldRow: 'head' and 'tail' bracket the
# row, and 'body' iterates over `row_bound_fields` (pushed onto the context
# by RenderFieldRow.render), wrapping the fields in a row <div>.
view_fieldrow = (
    { 'head':
        """\n"""+
        """<!-- views.fields.render_fieldrow.view_fieldrow head (mode:{{render_mode}}) -->\n"""
    , 'body':
        """<!-- views.fields.render_fieldrow.view_fieldrow body (mode:{{render_mode}}) -->\n"""+
        """<div class="small-12 columns">\n"""+
        """ <div class="view-fieldrow row">\n"""+
        """ {% for f in row_bound_fields %}"""+
        """ {% include f.render.mode with field=f %}"""+
        """ {% endfor %}"""+
        """ </div>\n"""+
        """</div>\n"""
    , 'tail':
        """<!-- views.fields.render_fieldrow.view_fieldrow tail (mode:{{render_mode}})-->
"""
    })
# edit_fieldrow = (
# { 'head':
# """\n"""+
# """<!-- views.fields.render_fieldrow.edit_fieldrow head (mode:{{render_mode}}) -->\n"""
# , 'body':
# """<!-- views.fields.render_fieldrow.edit_fieldrow body (mode:{{render_mode}}) -->\n"""+
# """<div class="small-12 columns">\n"""+
# """ <div class="view-fieldrow row">\n"""+
# """ {% for f in row_bound_fields %}"""+
# """ {% include f.render.mode with field=f %}"""+
# """ {% endfor %}"""+
# """ </div>\n"""+
# """</div>\n"""
# , 'tail':
# """<!-- views.fields.render_fieldrow.edit_fieldrow tail (mode:{{render_mode}})-->
# """
# })
# Spans substituted into the page when a reference target is blank or missing.
target_blank   = """<span class="value-blank">%s</span>"""
target_missing = """<span class="value-missing">%s</span>"""
# ----------------------------------------------------------------------------
# Multi-field reference field: value renderer for viewing or editing
# ----------------------------------------------------------------------------
class RenderFieldRow(object):
    """
    Render class for field values in a referenced entity.
    """

    def __init__(self, templates=None):
        """
        Creates a renderer object

        templates   dict with optional 'head', 'body' and 'tail' Django template
                    strings.  'head' and 'tail' default to the empty string; a
                    missing 'body' renders a "@@missing body@@" marker.
        """
        # log.info("RenderFieldRow: __init__ %r"%(templates))
        super(RenderFieldRow, self).__init__()
        assert templates is not None, "RenderFieldRow template must be supplied"
        self._template_head = Template(templates.get('head', ""))
        self._template_body = Template(templates.get('body', "@@missing body@@"))
        self._template_tail = Template(templates.get('tail', ""))
        return

    def __str__(self):
        return "RenderFieldRow %r"%(self._template_head)
        # return "RenderFieldRow %r, %s"%(self._template_head,self.render(context))

    def render(self, context):
        """
        Renders multiple fields in a row

        `context` is a dictionary-like object that provides information for the
        rendering operation.  `context['row_bound_fields']` is set up
        to provide a list of bound fields to be rendered.

        returns a string that is incorporated into the resulting web page.
        """
        # log.info("RenderFieldRow.render (mode: %s)"%context['render_mode'])
        # row_bound_fields = context['field']['row_bound_fields']
        # log.info("RenderFieldRow.render field: %r"%(context['field'],))
        # log.info("RenderFieldRow.render descs: %r"%(context['field']['row_bound_fields'],))
        try:
            row_field_descs = context['field'].description['row_field_descs']
            entity_vals     = context['field']['entity_value']
            extras          = context['field']['context_extra_values']
            # Bind each field description in the row to the same entity values.
            row_bound_fields = [
                bound_field(f, entity_vals, context_extra_values=extras)
                for f in row_field_descs
                ]
            # Expose the bound fields to the templates only for the duration
            # of this render.
            with context.push({'row_bound_fields': row_bound_fields}):
                response_parts = [self._template_head.render(context)]
                response_parts.append(self._template_body.render(context))
                response_parts.append(self._template_tail.render(context))
        except Exception as e:
            # Render diagnostic details into the page rather than failing the
            # whole request.
            log.exception("Exception in RenderFieldRow.render")
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            response_parts = (
                ["Exception in RenderFieldRow.render"]+
                [repr(e)]+
                traceback.format_exception(ex_type, ex, tb)+
                ["***RenderFieldRow.render***"]
                )
            del tb
        return "".join(response_parts)
# ----------------------------------------------------------------------------
# Field row reference value mapping
# ----------------------------------------------------------------------------
class FieldRowValueMapper(RenderBase):
    """
    Value mapper for field row values.

    The encode and decode methods inherited from RenderBase are identity
    mappings.
    """

    def __init__(self):
        """
        Creates a value mapper object for a field row.
        """
        super(FieldRowValueMapper, self).__init__()
def get_fieldrow_renderer():
    """
    Return field row renderer object.

    This renders multiple fields from a supplied entity as a single row, wrapping
    the entire set of fields in a row <div>.
    """
    r = RenderFieldValue("fieldrow",
        view_renderer=RenderFieldRow(view_fieldrow),
        edit_renderer=RenderFieldRow(view_fieldrow) # @@@@ change back to edit
        )
    # Suppress all modal rendering: just render field content
    # @@TODO: this is a hack - need to re-think how render modes are handled.
    # (Removed a second, redundant `r._render_label = None` assignment that
    # previously followed the col_edit wrapper.)
    r._render_label         = None
    r._render_view          = ModeWrapValueRenderer("view", r._view_renderer)
    r._render_edit          = ModeWrapValueRenderer("edit", r._edit_renderer)
    r._render_label_view    = ModeWrapValueRenderer("label_view", r._view_renderer)
    r._render_label_edit    = ModeWrapValueRenderer("label_edit", r._edit_renderer)
    r._render_col_head      = None
    r._render_col_head_view = ModeWrapValueRenderer("col_head_view", r._view_renderer)
    r._render_col_head_edit = ModeWrapValueRenderer("col_head_edit", r._edit_renderer)
    r._render_col_view      = ModeWrapValueRenderer("col_view", r._view_renderer)
    r._render_col_edit      = ModeWrapValueRenderer("col_edit", r._edit_renderer)
    return r
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_fieldrow.py
|
render_fieldrow.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for file upload used to upload a resource into
the local data store.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from utils.py3porting import is_string
from django.template import Template, Context
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_field_edit_value,
get_field_view_value
)
# ----------------------------------------------------------------------------
#
# Link URI value mapping
#
# ----------------------------------------------------------------------------
def upload_field_value(data_value):
    """
    Construct field value in expected format for remaining processing

    A bare string is treated as a previously uploaded filename; a false value
    (None, empty string, etc.) yields a structure with an empty filename.
    Any other (truthy, non-string) value is assumed to already be in the
    expected form and is returned unchanged.
    """
    if not data_value:
        # Also for empty string case
        return (
            { 'resource_name': "uploaded.data"
            , 'uploaded_file': ""
            })
    if is_string(data_value):
        return (
            { 'resource_name': "uploaded.data"
            , 'uploaded_file': data_value
            })
    return data_value
class FileUploadValueMapper(RenderBase):
    """
    Field rendering class for file upload field.

    Values are dictionaries of the form produced by `upload_field_value`,
    with 'resource_name' and 'uploaded_file' entries.
    """

    @classmethod
    def resource_name(cls, data_value):
        """
        Extracts import URL ref from value structure, for field display.
        """
        return upload_field_value(data_value).get('resource_name', "(@@resource_name not present)")

    @classmethod
    def uploaded_file(cls, data_value):
        """
        Extracts uploaded filename from value structure, for field display.
        """
        return upload_field_value(data_value).get('uploaded_file', "")

    @classmethod
    def encode(cls, data_value):
        """
        Extracts import URL ref from value structure, for field display.
        """
        # upload_field_value is idempotent, so this double application
        # (here and inside resource_name) is harmless.
        return cls.resource_name(upload_field_value(data_value))

    @classmethod
    def decode(cls, field_value):
        """
        Returns textual path value from file upload field value
        """
        return field_value or ""

    def decode_store(self, field_value, entityvals, property_uri):
        """
        Decodes a supplied value and uses it to update the 'upload_file'
        field of an URI import field.
        """
        # NOTE(review): the decoded form value `u` is not used below; the
        # stored value is just the existing entity value normalized by
        # upload_field_value (see commented-out code) — confirm intended.
        u = self.decode(field_value)
        v = entityvals.get(property_uri, {})
        # try:
        #     v['resource_name'] = u
        # except TypeError:
        #     v = {'resource_name': u} # Ignore non-updatable value
        entityvals[property_uri] = upload_field_value(v)
        # Returns the pre-normalization value read from entityvals.
        return v
# ----------------------------------------------------------------------------
#
# Import value templates
#
# ----------------------------------------------------------------------------
# NOTE: this is a minimal rendering. Enhancements might include additional information
# from the entity field, especially for the view (e.g. content-type, etc.)
# NOTE: The <a> element supports a `type` attribute
# (cf. https://developer.mozilla.org/en-US/docs/Web/HTML/Element/a)
# View template: link to the uploaded resource.
# The two %s substitutions are (link URL, link text).
view_upload = (
    """Uploaded file <a href="%s" target="_blank">%s</a>""")

# Edit template: file-selection input, plus a note of any previously
# uploaded file.  Rendered with `resource_name` and `uploaded_file`
# pushed onto the context by File_upload_edit_renderer.
edit_upload = (
    """<!-- fields.render_file_upload -->
<input type="file" name="{{repeat_prefix}}{{field.description.field_name}}"
placeholder="{{field.description.field_placeholder}}"
value="{{resource_name}}" />
{% if uploaded_file != "" %}
Previously uploaded: {{uploaded_file}}
{% endif %}
""")
# ----------------------------------------------------------------------------
#
# Link URI field renderers
#
# ----------------------------------------------------------------------------
class File_upload_view_renderer(object):

    def render(self, context):
        """
        Render import link for viewing.
        """
        field_val = get_field_view_value(context, None)
        link_text = FileUploadValueMapper.resource_name(field_val)
        file_text = FileUploadValueMapper.uploaded_file(field_val)
        return view_upload%(link_text, file_text)
class File_upload_edit_renderer(object):

    def __init__(self):
        # Compile the editing template once per renderer instance.
        self._template = Template(edit_upload)

    def render(self, context):
        """
        Render import link for editing
        """
        field_val = get_field_edit_value(context, None)
        # log.info("@@File_upload_edit_renderer.render: %s %s"%(resource_name, uploaded_file))
        with context.push(
            resource_name=FileUploadValueMapper.resource_name(field_val),
            uploaded_file=FileUploadValueMapper.uploaded_file(field_val)
            ):
            rendered = self._template.render(context)
        # log.info("@@File_upload_edit_renderer.render: %s"%(result,))
        return rendered
def get_file_upload_renderer():
    """
    Return field renderer object for file upload
    """
    renderer = RenderFieldValue("file_upload",
        edit_renderer=File_upload_edit_renderer(),
        view_renderer=File_upload_view_renderer(),
        )
    return renderer
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_file_upload.py
|
render_file_upload.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for text value rendered as Markdown.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import markdown
from annalist.views.displayinfo import apply_substitutions
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_field_edit_value,
get_field_view_value,
)
from django.template import Template, Context
# ----------------------------------------------------------------------------
#
# Markdown text value mapping
#
# ----------------------------------------------------------------------------
class TextMarkdownValueMapper(RenderBase):
    """
    Value mapper class for Markdown text
    """

    @classmethod
    def encode(cls, data_value):
        """
        Encodes supplied data value as string to appear in <textarea> form input.

        A None (or otherwise false) value is encoded as the empty string.
        """
        if data_value:
            return data_value
        return ""
# ----------------------------------------------------------------------------
#
# Markdown text field renderers
#
# ----------------------------------------------------------------------------
class text_markdown_view_renderer(object):

    def render(self, context):
        """
        Render Markdown text for viewing.

        The raw value has collection substitutions applied, is converted
        from Markdown to HTML, and is wrapped in a "markdown" span.
        """
        raw_text  = TextMarkdownValueMapper.encode(get_field_view_value(context, None))
        expanded  = apply_substitutions(context, raw_text)
        html_text = markdown.markdown(expanded)
        return """<span class="markdown">%s</span>"""%html_text
class text_markdown_edit_renderer(object):

    def __init__(self):
        # Compile the <textarea> editing template once per renderer instance.
        self._template = Template(
            '''<textarea cols="64" rows="6" name="{{repeat_prefix}}{{field.description.field_name}}" '''+
            '''class="small-rows-4 medium-rows-8" '''+
            '''placeholder="{{field.description.field_placeholder}}" '''+
            '''>{{encoded_field_value}}</textarea>'''
            )

    def render(self, context):
        """
        Render Markdown text for editing
        """
        encoded = TextMarkdownValueMapper.encode(get_field_edit_value(context, None))
        with context.push(encoded_field_value=encoded):
            return self._template.render(context)
def get_text_markdown_renderer():
    """
    Return field renderer object for Markdown text
    """
    renderer = RenderFieldValue("markdown",
        edit_renderer=text_markdown_edit_renderer(),
        view_renderer=text_markdown_view_renderer(),
        )
    return renderer
def get_show_markdown_renderer():
    """
    Return field renderer object for display-only (no input) of Markdown text

    Note: the view renderer is deliberately used for both view and edit modes,
    so the text is never editable.
    """
    renderer = RenderFieldValue("show_markdown",
        edit_renderer=text_markdown_view_renderer(),
        view_renderer=text_markdown_view_renderer(),
        )
    return renderer
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_text_markdown.py
|
render_text_markdown.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for multiple fields displayed from a referenced entity.
The edit renderer provides for selection of the referenced entity, and stores its
id as the field value.
The view renderer displays a number of fields from the referenced entity corresponding
to a field group specified in the field definition.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import traceback
import logging
log = logging.getLogger(__name__)
from django.http import HttpResponse
from django.template import Template, Context
from annalist.exceptions import TargetIdNotFound_Error, TargetEntityNotFound_Error
from annalist.views.fields.render_select import edit_select, Select_edit_renderer, SelectValueMapper
from annalist.views.fields.bound_field import bound_field
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_field_edit_value,
get_field_view_value
)
# ------------------------------------------------------------
# Local data values
# ------------------------------------------------------------
# Template fragments for the column-heading row of a multifield reference:
# one label per field in the referenced field group, iterating over
# `group_bound_fields` (pushed by RenderMultiFields_label.render).
col_head_view = (
    { 'head':
        """\n"""+
        """<!-- views.fields.render_ref_multifield.col_head_view head (mode: {{render_mode}}) -->\n"""
    , 'body':
        """<!-- views.fields.render_ref_multifield.col_head_view body (mode: {{render_mode}}) -->\n"""+
        """<div class="view-grouprow col-head row">\n"""+
        """ {% for f in group_bound_fields %}"""+
        """ {% include f.render.mode with field=f %}"""+
        """ {% endfor %}"""+
        """</div>\n"""+
        """<!-- views.fields.render_ref_multifield.col_head_view body (end) -->\n"""+
        """"""
    , 'tail':
        """<!-- views.fields.render_ref_multifield.col_head_view tail (mode: {{render_mode}}) -->\n"""
    })

# Template fragments for the value row of a multifield reference: one value
# per field in the referenced field group, iterating over
# `group_bound_fields` (pushed by RenderMultiFields_value.render).
view_multifield = (
    { 'head':
        """\n"""+
        """<!-- views.fields.render_ref_multifield.view_multifield head (mode: {{render_mode}}) -->\n"""
    , 'body':
        """<!-- views.fields.render_ref_multifield.view_multifield body (mode: {{render_mode}}) -->\n"""+
        """<div class="view-grouprow row">\n"""+
        """ {% for f in group_bound_fields %}"""+
        """ {% include f.render.mode with field=f %}"""+
        """ {% endfor %}"""+
        """</div>\n"""
    , 'tail':
        """<!-- views.fields.render_ref_multifield.view_multifield tail (mode: {{render_mode}}) -->
"""
    })

# Spans substituted into the page when a reference target is blank or missing.
target_blank   = """<span class="value-blank">%s</span>"""
target_missing = """<span class="value-missing">%s</span>"""
# ----------------------------------------------------------------------------
# Multi-field reference field label renderer for column label display
# ----------------------------------------------------------------------------
class RenderMultiFields_label(object):
    """
    Render class for a field group labels in a referenced entity.
    """

    def __init__(self, templates=None):
        # Later, may introduce a template_file= option to read from templates directory
        """
        Creates a renderer object

        templates   dict with optional 'head', 'body' and 'tail' Django template
                    strings.  'head' and 'tail' default to the empty string; a
                    missing 'body' renders a "@@missing body@@" marker.
        """
        # log.info("RenderMultiFields_label: __init__ %r"%(templates))
        super(RenderMultiFields_label, self).__init__()
        assert templates is not None, "RenderMultiFields_label template must be supplied"
        self._template_head = Template(templates.get('head', ""))
        self._template_body = Template(templates.get('body', "@@missing body@@"))
        self._template_tail = Template(templates.get('tail', ""))
        return

    def __str__(self):
        return "RenderMultiFields_label %r"%(self._template_head)
        # return "RenderMultiFields_label %r, %s"%(self._template_head,self.render(context))

    def render(self, context):
        """
        Renders column labels for multiple fields in a group

        `context` is a dictionary-like object that provides information for the
        rendering operation.  `context['field']` contains the group
        field descriptions.

        returns a string that is incorporated into the resulting web page.
        """
        group_field_descs = context['field'].description['group_field_descs']
        if group_field_descs is None:
            # No field group resolved for this reference field
            return (
                """<span class="value-missing">Missing field group for %(group_id)s</span>"""%
                context['field']
                )
        try:
            # Bind each group field to an empty value dict: only the field
            # descriptions (labels) are needed here, not entity values.
            group_fields = [ bound_field(f, {}) for f in group_field_descs ]
            group_dict = (
                { 'group_bound_fields': group_fields
                })
            #@@ log.info("RenderMultiFields_label.render group_dict: %r"%(group_dict))
            with context.push(group_dict):
                response_parts = [self._template_head.render(context)]
                response_parts.append(self._template_body.render(context))
                response_parts.append(self._template_tail.render(context))
        except TargetIdNotFound_Error as e:
            # Reference field has no target id: render blank-value span
            response_parts = [ target_blank%str(e) ]
        except TargetEntityNotFound_Error as e:
            # Referenced entity does not exist: render missing-value span
            response_parts = [ target_missing%str(e) ]
        except Exception as e:
            # Render diagnostic details into the page rather than failing the
            # whole request.
            log.exception("Exception in RenderMultiFields_label.render")
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            response_parts = (
                ["Exception in RenderMultiFields_label.render"]+
                [repr(e)]+
                traceback.format_exception(ex_type, ex, tb)+
                ["***RenderMultiFields_label.render***"]
                )
            del tb
        return "".join(response_parts)
# ----------------------------------------------------------------------------
# Multi-field reference field: value renderer for viewing or editing
# ----------------------------------------------------------------------------
class RenderMultiFields_value(object):
    """
    Render class for field values in a referenced entity.
    """

    def __init__(self, templates=None):
        # Later, may introduce a template_file= option to read from templates directory
        """
        Creates a renderer object

        templates   dict with optional 'head', 'body' and 'tail' Django template
                    strings.  'head' and 'tail' default to the empty string; a
                    missing 'body' renders a "@@missing body@@" marker.
        """
        # log.info("RenderMultiFields_value: __init__ %r"%(templates))
        super(RenderMultiFields_value, self).__init__()
        assert templates is not None, "RenderMultiFields_value template must be supplied (.edit or .view)"
        self._template_head = Template(templates.get('head', ""))
        self._template_body = Template(templates.get('body', "@@missing body@@"))
        self._template_tail = Template(templates.get('tail', ""))
        return

    def __str__(self):
        return "RenderMultiFields_value %r"%(self._template_head)
        # return "RenderMultiFields_value %r, %s"%(self._template_head,self.render(context))

    def render(self, context):
        """
        Renders column values for multiple fields in a group

        `context` is a dictionary-like object that provides information for the
        rendering operation.

        context['field'] is a bound_field combining the reference field
        description bound to an entity that contains a
        reference to some target entity.

        returns a string that is incorporated into the resulting web page.
        """
        # log.info("RenderMultiFields_value.render (mode: %s)"%context['render_mode'])
        group_field_descs = context['field'].description['group_field_descs']
        if group_field_descs is None:
            # No field group resolved for this reference field
            return (
                """<span class="value-missing">Missing field group for %(field_id)s</span>"""%
                context['field']
                )
        try:
            # get_targetvals may raise TargetIdNotFound_Error or
            # TargetEntityNotFound_Error, handled below.
            target_vals = context['field'].get_targetvals()
            extras      = context['field']['context_extra_values']
            # Bind each group field to the referenced (target) entity values.
            group_fields = [
                bound_field(f, target_vals, context_extra_values=extras)
                for f in group_field_descs
                ]
            group_dict = (
                { 'group_bound_fields': group_fields
                # @@TODO: is group_entity actually used anywhere??
                , 'group_entity': target_vals
                })
            # log.info("RenderMultiFields_value.render group_dict: %r"%(group_dict))
            # NOTE: unlike RenderMultiFields_label, the 'head' template is
            # rendered before the group bindings are pushed, so it sees only
            # the outer context.
            response_parts = [self._template_head.render(context)]
            with context.push(group_dict):
                response_parts.append(self._template_body.render(context))
                response_parts.append(self._template_tail.render(context))
        except TargetIdNotFound_Error as e:
            # Reference field has no target id: render blank-value span
            response_parts = [ target_blank%str(e) ]
        except TargetEntityNotFound_Error as e:
            # Referenced entity does not exist: render missing-value span
            response_parts = [ target_missing%str(e) ]
        except Exception as e:
            # Render diagnostic details into the page rather than failing the
            # whole request.
            log.exception("Exception in RenderMultiFields_value.render")
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            response_parts = (
                ["Exception in RenderMultiFields_value.render"]+
                [repr(e)]+
                traceback.format_exception(ex_type, ex, tb)+
                ["***RenderMultiFields_value.render***"]
                )
            del tb
        return "".join(response_parts)
# ----------------------------------------------------------------------------
# Multi-field reference value mapping
# ----------------------------------------------------------------------------
class RefMultifieldValueMapper(SelectValueMapper):
    """
    Value mapper class for multifield reference

    Inherits all logic from SelectValueMapper.
    """
    pass
# ----------------------------------------------------------------------------
# Render object factory functions
# ----------------------------------------------------------------------------
def get_ref_multifield_renderer():
    """
    Return multi-field renderer object

    Editing uses a selector for the referenced entity; viewing renders the
    group of fields from the referenced entity, with matching column heads.
    """
    renderer = RenderFieldValue("ref_multifields",
        edit_renderer=Select_edit_renderer(edit_select),
        view_renderer=RenderMultiFields_value(view_multifield),
        col_head_view_renderer=RenderMultiFields_label(col_head_view)
        )
    return renderer
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_ref_multifields.py
|
render_ref_multifields.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import traceback
from collections import OrderedDict, namedtuple
from django.conf import settings
from django.utils.html import escape
from utils.py3porting import is_string, to_unicode, urljoin
from annalist.exceptions import TargetIdNotFound_Error, TargetEntityNotFound_Error
from annalist.identifiers import RDFS, ANNAL
from annalist.util import split_type_entity_id
from annalist.models.entitytypeinfo import EntityTypeInfo
from annalist.models.entity import EntityRoot
from annalist.views.uri_builder import (
uri_params, uri_with_params, continuation_params
)
from annalist.views.form_utils.fieldchoice import FieldChoice
# -----------------------------------------------------------------------------
# Field description for doctests (avoids circular imports via FieldDescription)
# -----------------------------------------------------------------------------
class MockFieldDescription(object):
    """
    Simplified stand-in for a field description, used by local doctests.

    Wraps a plain dictionary of field description values and exposes the
    small subset of the FieldDescription interface used by bound_field.

    (Somehow, can't get it working with real FieldDescription...)
    (I think it may be a problem with import cycles...)
    """

    def __init__(self, coll, recordfield):
        # `coll` is accepted for interface compatibility but is not stored.
        self._field_desc = recordfield

    def __copy__(self):
        """
        Shallow copy: the copy shares the underlying description dictionary.
        """
        duplicate = type(self).__new__(type(self))
        duplicate._field_desc = self._field_desc
        return duplicate

    def __repr__(self):
        return repr(self._field_desc)

    def __getitem__(self, k):
        """
        Index directly into the underlying field description values.
        """
        return self._field_desc[k]

    def get(self, name, default):
        """
        Return named description value, or `default` if not present.
        """
        return self._field_desc.get(name, default)

    def get_field_id(self):
        """
        Return the field identifier.
        """
        return self._field_desc['field_id']

    def get_field_name(self):
        """
        Return the form field name for the described field
        (defaults to the field identifier).
        """
        return self.get('field_name', self.get_field_id())

    def get_field_property_uri(self):
        """
        Return the form field property URI for the described field.
        """
        return self._field_desc['field_property_uri']

    def get_field_subproperty_uris(self):
        """
        Return the list of possible subproperty URIs for the described field
        (may be empty).
        """
        return self._field_desc.get('field_subproperty_uris', [])

    def get_field_value_key(self, entityvals):
        """
        Return the field value key for the supplied entity values instance
        (currently always the field property URI).
        """
        return self.get_field_property_uri()
# -----------------------------------------------------------------------------
# Bound field
# -----------------------------------------------------------------------------
class bound_field(object):
"""
Class representing an entity bound to a field description,
which can be used as input for data driven rendering of a form field.
The field description contains information used to extract the field
value from the entity.
This class, and in particular its `__getattr__` method, is provided to
allow indirected access to an entity value fields to be performed using a
Django template using, e.g., "{{ field.field_value }}" (thus satisfying the
Django design goal that computation is handled in the python code rather
than the template, though in this case the computation us handled while
rather than before the page is rendered).
See also: http://docs.python.org/2/reference/datamodel.html#slots
--- from annalist.views.fields.field_description import FieldDescription
>>> coll = None
>>> entity = EntityRoot("entityuri", "entityuri", "entitydir", "entitydir")
>>> entity.set_id("testentity")
>>> vals = entity.set_values({"foo": "foo_val", "bar": "bar_val"})
>>> field_foo_desc = MockFieldDescription(coll, {"field_id": "foo_id", "field_property_uri": "foo", "field_type": "foo_type"})
>>> field_foo = bound_field(field_foo_desc, entity)
>>> field_foo._key == 'foo'
True
>>> field_foo.description['field_type'] == 'foo_type'
True
>>> field_foo.field_value == 'foo_val'
True
>>> field_bar_desc = MockFieldDescription(coll, {"field_id": "bar_id", "field_property_uri": "bar", "field_type": "bar_type"})
>>> field_bar = bound_field(field_bar_desc, entity)
>>> field_bar.description['field_type'] == 'bar_type'
True
>>> field_bar.field_value == 'bar_val'
True
>>> field_def_desc = MockFieldDescription(coll, {"field_id": "def_id", "field_property_uri": "def", "field_type": "def_type"})
>>> entityvals = entity.get_values()
>>> entityvals['entity_id'] = entity.get_id()
>>> entityvals['entity_type_id'] = entity.get_type_id()
>>> entityvals['entity_link'] = entity.get_url()
>>> field_def = bound_field(field_def_desc, entity)
>>> field_def.description['field_type'] == 'def_type'
True
>>> field_def.field_value == ""
True
>>> field_def = bound_field(field_def_desc, entity, context_extra_values={"def": "default"})
>>> field_def.description['field_type'] == 'def_type'
True
>>> field_def.field_value == 'default'
True
>>> field_def.entity_link == 'entityuri/'
True
"""
__slots__ = ("_field_description", "_entityvals", "_targetvals", "_key", "_extras")
def __init__(self, field_description, entityvals, context_extra_values=None):
"""
Initialize a bound_field object.
field_description is a dictionary-like object describing a display
field. See `FieldDescription` class for more details.
entityvals is an entity values dictionary from which a value to be
rendered is obtained. The specific field value used is
defined by the combination with `field_description`.
context_extra_values if supplied, a supplementary value dictionary that may be
probed for values that are not provided by the entity itself.
Can be used to specify default values for an entity.
"""
# if not isinstance(entityvals, dict):
# raise ValueError("bound_field entityvals is not dictionary (%r)"%(entityvals,))
self._field_description = field_description
self._entityvals = entityvals
self._targetvals = None
self._key = field_description.get_field_property_uri()
self._extras = context_extra_values
return
def __copy__(self):
"""
Shallow(-ish) copy of self.
(Tried code from http://stackoverflow.com/a/15774013, but hits recursion limit)
"""
cls = self.__class__
result = cls.__new__(cls)
result._field_description = self._field_description.copy()
result._entityvals = self._entityvals
result._targetvals = self._targetvals
result._key = self._key
result._extras = self._extras
return result
def __getattr__(self, name):
"""
Get a bound field description attribute. Broadly, if the attribute name is
"field_value" then the value corresponding to the field description is
retrieved from the entity, otherwise the named attribute is retrieved from
the field description.
There are also a number of other special cases handled here as needed to
support internally-generated hyperlinks and internal system logic.
"""
# log.info("self._key %s, __getattr__ %s"%(self._key, name))
# log.info("self._key %s"%self._key)
# log.info("self._entity %r"%self._entity)
if name in ["entity_id", "entity_link", "entity_type_id", "entity_type_link"]:
return self._entityvals.get(name, "")
elif name == "entity_value":
return self._entityvals
elif name in ["field_value", "field_edit_value"]:
return self.get_field_value()
elif name == "field_value_key":
return self.get_field_value_key()
elif name == "field_value_link":
return self.get_field_selection_link()
elif name in ["target_value", "field_view_value"]:
return self.get_target_value()
elif name == "target_value_link":
return self.get_target_link()
elif name == "continuation_url":
return self.get_continuation_url()
elif name == "continuation_param":
return self.get_continuation_param()
elif name == "field_id":
return self._field_description.get_field_id()
elif name == "field_name":
return self._field_description.get_field_name()
elif name == "field_label":
return self._field_description["field_label"]
elif name == "field_help":
return self.get_field_help_esc()
elif name == "field_tooltip":
return self.get_field_tooltip()
elif name == "field_tooltip_attr":
return self.get_field_tooltip_attr()
elif name == "render":
return self._field_description["field_renderer"]
elif name == "value_mapper":
return self._field_description["field_value_mapper"]
elif name == "description":
return self._field_description
elif name == "field_value_key":
return self._key
elif name == "context_extra_values":
return self._extras
elif name == "options":
return self.get_field_options()
elif name == "copy":
return self.__copy__
# elif name == "row_field_descs":
elif True: # For diagnosing failed accesses...
msg = "@@@@ Accessing bound_field.%s"%(name,)
log.error(msg)
log.debug("".join(traceback.format_stack()))
assert False, msg
return "@@bound_field.%s@@"%(name)
def get_field_value(self):
    """
    Return the entity value identified by the key from the field
    description, falling back in turn to the extra context values, the
    field's declared default value, and finally the empty string.
    """
    value_key = self.get_field_value_key()
    if hasattr(self._entityvals, "get"):
        value = self._entityvals.get(value_key, None)
    else:
        value = "@@ Cannot resolve: %s[%s]"%(self._entityvals, value_key)
    if value is None:
        # Fall back to `context_extra_values` (used by the view/list
        # choice fields to insert the current display selection).
        # Note: this lookup uses the declared property URI (self._key),
        # not the possibly-substituted subproperty key.
        if self._extras and self._key in self._extras:
            value = self._extras[self._key]
    if value is None:
        # No value present: use default from field definition, or blank
        value = self._field_description.get('field_default_value', None)
        if value is None:
            value = ""
    return value
def get_field_value_key(self):
    """
    Return the key under which this field's value appears in the current
    entity.  This takes account of possible use of subproperties of the
    declared property URI: if the declared URI is absent from the entity
    but a subproperty URI is present, the subproperty key is returned;
    otherwise the declared property URI is returned.
    """
    desc = self._field_description
    return desc.get_field_value_key(self._entityvals)
def get_field_selection_link(self):
    """
    Return the link associated with a field value that is a selection from
    an enumeration of entities (or some other value with an associated
    link), or None if no matching choice exists.
    """
    choices  = self._field_description['field_choices']  # OrderedDict or None
    selected = self.field_value
    if not choices:
        return None
    if selected not in choices:
        return None
    return choices[selected].link
def get_field_help_esc(self):
    """
    Return HTML-escaped help text from the field description, for use as
    a tooltip.  A diagnostic placeholder is generated when no help text
    is defined.
    """
    help_text = (
        self._field_description['field_help'] or
        "@@field help for %(field_label)s@@"%self._field_description
        )
    return escape(help_text)
def get_field_tooltip(self):
    """
    Return HTML-escaped text for the field popup tooltip: the declared
    tooltip text if present, else the field help text, else a diagnostic
    placeholder.
    """
    raw_text = (
        self._field_description['field_tooltip'] or
        self._field_description['field_help'] or
        "@@tooltip for %(field_label)s@@"%self._field_description
        )
    return escape(raw_text)
def get_field_tooltip_attr(self):
    """
    Return an HTML ` title="..."` attribute string carrying the field
    tooltip, or an empty string if there is no tooltip text.
    """
    tip = self.get_field_tooltip()
    if not tip:
        return ''
    return ''' title="%s"'''%tip
def get_target_value(self):
    """
    Return the value of this field for view display.  This may differ
    from field_value when the field references another entity, or a
    field of another entity.
    """
    refvals = self.get_targetvals()
    ref_key = self._field_description.get('field_ref_field', None)
    ref_key = ref_key and ref_key.strip()
    if refvals is None:
        if ref_key:
            # Referenced field of the current entity
            result = self._entityvals.get(ref_key, "(@%s)"%(ref_key))
        else:
            # Not a reference: just the field's own value
            result = self.field_value
    elif ref_key:
        # Referenced field of the target entity
        log.debug("bound_field.get_target_value: target_key %s"%(ref_key,))
        result = refvals.get(ref_key, "(@%s)"%(ref_key))
    else:
        # Whole target entity values
        result = refvals
    return result
def get_target_link(self):
    """
    Return link corresponding to target value, or None.

    The target value is treated as a relative reference, resolved against
    a base link.  If the target value is itself an absolute URI, it is
    used as-is (per urljoin semantics).

    If the target value is a dictionary structure created by a URIImport
    or FileUpload field, the resulting value links to the imported data
    object (via its 'resource_name').

    If the field is a reference to values of another type (i.e. a
    selection field), then the field value is used to determine the
    selected entity, and the entity link is used as the base URI against
    which the target value is resolved (the entity URI referencing a
    directory or container).
    """
    # Base: link of the selected entity for selection fields, else the
    # current entity's own link.
    target_base  = self.get_field_selection_link() or self.entity_link
    target_value = self.get_target_value()
    # log.debug("get_target_link: base %r, value %r"%(target_base, target_value))
    if target_base and target_value:
        if isinstance(target_value, dict) and 'resource_name' in target_value:
            # URIImport/FileUpload structure: link to the imported object
            target_ref = target_value['resource_name']
        elif is_string(target_value):
            target_ref = target_value
        else:
            # Unusable value type: warn and fall through with no reference.
            # NOTE(review): urljoin treats a falsy reference as empty, so
            # the base link itself is returned in this case — confirm that
            # is the intended fallback.
            log.warning(
                "bound_field.get_target_link: "+
                "target_value must be URI string or URIImport structure; got %r"%
                (target_value,)
                )
            target_ref = None
        return urljoin(target_base, target_ref)
    # No base or no value: return the (possibly empty/None) target value
    return target_value
def get_targetvals(self):
    """
    If field description is a reference to a target type entity or field,
    return a values dictionary for the referenced target entity, otherwise
    None.  The result is cached in self._targetvals after the first call.
    """
    # log.debug("@@ bound_field.get_targetvals: field_description %r"%(self._field_description,))
    target_type = self._field_description.get('field_ref_type', None)
    target_key  = self._field_description.get('field_ref_field', None)
    log.debug("bound_field.get_targetvals: target_type '%s', target_key '%s'"%(target_type, target_key))
    if self._targetvals is None:
        if target_type:
            # Extract entity_id and type_id; default to type id from field descr
            field_val = self.get_field_value()
            log.debug("field_val: %s"%(field_val,))
            type_id, entity_id = split_type_entity_id(self.get_field_value(), target_type)
            log.debug("bound_field.get_targetvals: type_id %s, entity_id %s"%(type_id, entity_id))
            # Get entity type info
            coll     = self._field_description._collection
            typeinfo = EntityTypeInfo(coll, type_id)
            # Check access permission required, assuming user has "VIEW" permission
            # in collection, and retrieve target values if permissions are OK.
            # This is primarily to prevent a loophole for accessing user
            # account details.
            #@@TODO: pass actual user permissions in to bound_field or
            #        field description or extra params
            user_permissions    = ["VIEW"]
            req_permissions_map = typeinfo.get_entity_permissions_map(entity_id)
            req_permissions     = list(set( req_permissions_map[a] for a in ["view", "list"] ))
            if all([ p in user_permissions for p in req_permissions]):
                # Permissions OK: locate and read the referenced entity.
                if entity_id is None or entity_id == "":
                    raise TargetIdNotFound_Error(value=(typeinfo.type_id, self._field_description["field_name"]))
                targetentity = typeinfo.get_entity(entity_id)
                if targetentity is None:
                    raise TargetEntityNotFound_Error(value=(target_type, entity_id))
                # Fill in inherited/implied values before extracting values
                targetentity     = typeinfo.get_entity_implied_values(targetentity)
                self._targetvals = get_entity_values(typeinfo, targetentity)
                log.debug("bound_field.get_targetvals: %r"%(self._targetvals,))
            else:
                # Insufficient permission: leave cached value as None
                log.warning(
                    "bound_field.get_targetvals: target value type %s requires %r permissions"%
                    (target_type, req_permissions)
                    )
    log.debug("bound_field.get_targetvals: targetvals %r"%(self._targetvals,))
    return self._targetvals
def get_link_continuation(self, link):
    """
    Return the supplied base link with a continuation parameter appended.
    A None or empty link value is returned unchanged.
    """
    if not link:
        return link
    return link + self.get_continuation_param()
def get_continuation_param(self):
    """
    Return a query-parameter string carrying a continuation URL back to
    the current request page, or "" if no continuation URL is available.
    """
    here = self.get_continuation_url()
    return uri_params({'continuation_url': here}) if here else ""
def get_continuation_url(self):
    """
    Return a continuation URL for return back to the current request
    page, or "" if it cannot be determined.
    """
    if self._extras is None:
        log.warning("bound_field.get_continuation_url() - no extra context provided")
        return ""
    request_url = self._extras.get("request_url", "")
    if not request_url:
        return ""
    # log.debug('bound_field.get_continuation_url %s'%(request_url,))
    return uri_with_params(request_url, continuation_params(self._extras))
def get_field_options(self):
    """
    Return a list of selectable options for the current field; a single
    "(no options)" placeholder choice is returned when none are defined.
    (In Python 3, OrderedDict.values() returns a view, hence the explicit
    list conversion.)
    """
    choices = self._field_description['field_choices']  # OrderedDict or None
    if choices is None:
        return [ FieldChoice('', label="(no options)") ]
    return list(choices.values())
def __getitem__(self, name):
return self.__getattr__(name)
def __iter__(self):
"""
Implement iterator protocol, returning accessible value keys.
"""
yield "entity_id"
yield "entity_type_id"
yield "entity_link"
yield "entity_type_link"
yield "field_edit_value"
yield "field_view_value"
yield "field_value"
yield "field_value_key"
yield "field_value_link"
yield "target_value"
yield "target_value_link"
yield "continuation_url"
yield "continuation_param"
yield "description"
yield "options"
# Direct access to selected field descrption attributes
yield "field_id"
yield "field_name"
yield "field_label"
yield "field_help"
yield "field_tooltip"
yield "field_tooltip_attr"
return
def as_dict(self):
    """
    Return a plain-dictionary summary of this bound field: the entity
    values, the field value, the extra context values and the value key.
    """
    summary = {}
    summary["entity"]               = dict(self._entityvals.items())
    summary["field_value"]          = self.field_value
    summary["context_extra_values"] = self._extras
    summary["key"]                  = self._key
    return summary
def __repr__(self):
    # Full, multi-value representation (e.g. for debugger/log output)
    return self.fullrepr()

def __str__(self):
    # Short representation, used by str() and string interpolation
    return self.shortrepr()

def __unicode__(self):
    # Python 2 text representation (ignored under Python 3)
    return self.shortrepr()
def shortrepr(self):
    """
    Return a short text representation showing the value key, the field
    value and the field description.
    """
    template = (
        "bound_field(\n"
        " { 'key': %r\n"
        " , 'val': %r\n"
        " , 'field_description': %r\n"
        " })"
        )
    return template%(self._key, self.field_value, self._field_description)
def fullrepr(self):
    """
    Return a full text representation including edit and view values,
    the field description and the entity values.  An error raised while
    evaluating the view value is captured and shown in its place.
    """
    try:
        view_value = self.field_view_value
    except Exception as e:
        view_value = str(e)
    template = (
        "bound_field(\n"
        " { 'key': %r\n"
        " , 'field_edit_value': %r\n"
        " , 'field_view_value': %r\n"
        " , 'description': %r\n"
        " , 'entity_vals': %r\n"
        " })\n"
        )
    return template%(
        self._key,
        self.field_edit_value,
        view_value,
        self._field_description,
        dict(self._entityvals.items())
        )
def htmlrepr(self):
    """
    Return an HTML (unordered list) representation of the value key,
    field value and field description.
    """
    items = (
        "<li>key: %s</li>"%(self._key,),
        "<li>val: %s</li>"%(self.field_value,),
        "<li>field_description: %r</li>"%(self._field_description,),
        )
    return "<ul>" + "".join(items) + "</ul>"
# -----------------------------------------------------------------------------
# Helper functions
# -----------------------------------------------------------------------------
def get_entity_values(typeinfo=None, entity=None, entity_id=None, action="view"):
    """
    Return an entity values dictionary for the supplied entity, suitable
    for use with a bound_field object.

    typeinfo    optional EntityTypeInfo; when supplied and it carries a
                record type, an 'entity_type_link' value is added.
    entity      entity whose values are returned.
    entity_id   optional identifier overriding the entity's own id.
    action      when "copy", values tied to the original entity identity
                (link and URI) are dropped and the (possibly new) id is
                recorded, so the copy gets a fresh identity.
    """
    if not entity_id:
        entity_id = entity.get_id()
    type_id    = entity.get_type_id()
    entityvals = entity.get_values().copy()
    entityvals['entity_id']      = entity_id
    entityvals['entity_link']    = entity.get_view_url_path()
    entityvals['entity_type_id'] = type_id
    if action == "copy":
        # Lose values based on the original Id when performing a copy
        entityvals.pop('entity_link', None)
        entityvals[ANNAL.CURIE.id] = entity_id
        entityvals.pop(ANNAL.CURIE.uri, None)
    if typeinfo and typeinfo.recordtype:
        entityvals['entity_type_link'] = typeinfo.recordtype.get_view_url_path()
    return entityvals
if __name__ == "__main__":
    # Run the embedded doctests when this module is executed directly
    import doctest
    doctest.testmod()
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/bound_field.py
|
bound_field.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for URI value displayed as a link.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from utils.py3porting import is_string, to_unicode
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_field_edit_value, get_field_view_value
)
from django.template import Template, Context
# ----------------------------------------------------------------------------
#
# Link URI value mapping
#
# ----------------------------------------------------------------------------
class URILinkValueMapper(RenderBase):
    """
    Value mapper class for a URI string presented for viewing as a
    clickable link.
    """

    @classmethod
    def encode(cls, data_value):
        """
        Encode a link value as a string; a missing value encodes as "".
        """
        return data_value if data_value else ""

    @classmethod
    def decode(cls, field_value):
        """
        Decode a URI string value as a link, stripping surrounding
        whitespace; a missing value decodes as "".
        """
        stripped = field_value.strip() if is_string(field_value) else field_value
        return stripped or ""
# ----------------------------------------------------------------------------
#
# Link URI field renderers
#
# ----------------------------------------------------------------------------
class uri_link_view_renderer(object):
    # Renders a URI value as a clickable HTML anchor for view display.

    def render(self, context):
        """
        Render link for viewing.

        The displayed text is the link value with any common URI scheme
        prefix removed.  A value of the form "prefix:path" is additionally
        resolved against the collection's vocabulary cache (CURIE-style
        expansion) to obtain the actual link target.  Note the display
        text is captured before vocabulary expansion, so it retains the
        prefixed form.
        """
        linkval = URILinkValueMapper.encode(get_field_view_value(context, ""))
        # log.info("uri_link_view_renderer: linkval %r (orig)"%(linkval,))
        common_prefixes = (
            [ "http://", "https://"
            , "file:///", "file://localhost/", "file://"
            , "mailto:"]
            )
        # Strip the first matching scheme prefix for display purposes
        textval = linkval
        for p in common_prefixes:
            if is_string(linkval) and linkval.startswith(p):
                textval = linkval[len(p):]
                break
        if ":" in linkval:
            # Possible CURIE: try to expand prefix via collection vocab cache
            link_pref, link_path = linkval.split(":", 1)
            if "collection" not in context:
                log.warning("uri_link_view_renderer: no key 'collection' in context")
                # log.error("@@@@")
                # for k in context.flatten():
                #     hidden_fields = (
                #         [ "fields", "row_bound_fields", "repeat_bound_fields"
                #         , "help_text", "help_markdown"
                #         , "forloop", "f", "block", "view_choices"
                #         , "LANGUAGES"
                #         ])
                #     if k not in hidden_fields:
                #         log.error("  @@ %s: %r"%(k, context[k]))
            else:
                link_vocab = context["collection"].cache_get_vocab(link_pref)
                if link_vocab:
                    linkval = link_vocab.get_uri() + link_path
        # log.info("uri_link_view_renderer: linkval %r (final)"%(linkval,))
        return '''<a href="%s" target="_blank">%s</a>'''%(linkval, textval)
class uri_link_edit_renderer(object):
    """
    Renderer presenting a URI link value as an editable text input.
    """

    def __init__(self):
        # Template interpolates the repeat prefix, field name, placeholder
        # and current edit value into a simple text input control.
        self._template = Template(
            '<input type="text" size="64" '
            'name="{{repeat_prefix}}{{field.description.field_name}}" '
            'placeholder="{{field.description.field_placeholder}}" '
            'value="{{field.field_edit_value}}" />'
            )
        return

    def render(self, context):
        """
        Render link for editing, using the supplied template context.
        """
        return self._template.render(context)
def get_uri_link_renderer():
    """
    Return field renderer object for URI link values.
    """
    view = uri_link_view_renderer()
    edit = uri_link_edit_renderer()
    return RenderFieldValue("uri_link", view_renderer=view, edit_renderer=edit)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_uri_link.py
|
render_uri_link.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for field consisting of a list of simple token values.
Simple tokens may not contain whitespace.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import traceback
import logging
log = logging.getLogger(__name__)
from utils.py3porting import is_string, to_unicode
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_context_value, get_context_field_value,
get_field_edit_value,
get_field_view_value
)
from django.template import Template, Context
# ----------------------------------------------------------------------------
#
# Token set render support functions
#
# ----------------------------------------------------------------------------
class TokenSetValueMapper(RenderBase):
    """
    Value mapper class for a list of simple (whitespace-free) tokens.
    """

    @classmethod
    def encode(cls, field_value):
        """
        Encode a token list as a string of space-separated values.
        A non-list value is logged and returned in string form.
        """
        if isinstance(field_value, (list, tuple)):
            return " ".join(field_value)
        log.warning("TokenSetValueMapper.encode tokenset: supplied value is not a list or tuple")
        log.warning("TokenSetValueMapper.encode value: %r"%(field_value,))
        return str(field_value)

    @classmethod
    def decode(cls, field_value):
        """
        Decode a string of space-separated tokens as a list of tokens.
        A non-string value is logged and returned wrapped in a list.
        """
        if not is_string(field_value):
            log.warning("TokenSetValueMapper.decode: %r"%(field_value,))
            # log.info("\n".join(traceback.format_stack()))
            return [field_value]
        return field_value.split()
# ----------------------------------------------------------------------------
#
# Token set field renderers
#
# ----------------------------------------------------------------------------
class tokenset_view_renderer(object):
    """
    Renderer presenting a token-list value for (read-only) viewing.
    """

    def render(self, context):
        """
        Render token list for viewing; a single space is returned for an
        empty or missing list.
        """
        tokens = get_field_view_value(context, None)
        if not tokens:
            return " "
        return TokenSetValueMapper.encode(tokens)
class tokenset_edit_renderer(object):
    """
    Renderer presenting a token-list value as an editable text input.
    """

    def __init__(self):
        # The encoded (space-separated) token string is supplied via the
        # extra context value pushed in `render` below.
        self._template = Template(
            '<input type="text" size="64" '
            'name="{{repeat_prefix}}{{field.description.field_name}}" '
            'placeholder="{{field.description.field_placeholder}}" '
            'value="{{encoded_field_value}}"/>'
            )

    def render(self, context):
        """
        Render token list for editing.
        """
        tokens  = get_field_edit_value(context, None)
        encoded = TokenSetValueMapper.encode(tokens)
        with context.push(encoded_field_value=encoded):
            return self._template.render(context)
def get_field_tokenset_renderer():
    """
    Return field renderer object for token list values.
    """
    view = tokenset_view_renderer()
    edit = tokenset_edit_renderer()
    return RenderFieldValue("tokenset", view_renderer=view, edit_renderer=edit)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_tokenset.py
|
render_tokenset.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
This module implements a class that is used for rendering a bound field, given
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2018, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import traceback
from annalist.views.fields.find_renderers import (
get_label_renderer,
get_view_renderer,
get_edit_renderer,
get_label_view_renderer,
get_label_edit_renderer,
get_col_head_renderer,
get_col_head_view_renderer,
get_col_head_edit_renderer,
get_col_view_renderer,
get_col_edit_renderer,
get_value_mapper
)
class FieldRenderer(object):
    """
    This class represents a value renderer that is bound to a specific field value.
    Methods are referenced from Django templates by a {% include ... %} directive.
    """

    def __init__(self, field_render_type, field_value_mode):
        """
        Initialize a bound field renderer object.

        field_render_type  render type id used to look up the renderers.
        field_value_mode   value mode (e.g. edit/view variant selection).
        """
        self._field_render_type = field_render_type
        self._field_value_mode  = field_value_mode
        self._value_mapper      = get_value_mapper(field_render_type)
        # Pre-resolve one renderer for each presentation mode
        self._value_renderer = (
            { 'label':         get_label_renderer(        field_render_type, field_value_mode)
            , 'view':          get_view_renderer(         field_render_type, field_value_mode)
            , 'edit':          get_edit_renderer(         field_render_type, field_value_mode)
            , 'label_view':    get_label_view_renderer(   field_render_type, field_value_mode)
            , 'label_edit':    get_label_edit_renderer(   field_render_type, field_value_mode)
            , 'col_head':      get_col_head_renderer(     field_render_type, field_value_mode)
            , 'col_head_view': get_col_head_view_renderer(field_render_type, field_value_mode)
            , 'col_head_edit': get_col_head_edit_renderer(field_render_type, field_value_mode)
            , 'col_view':      get_col_view_renderer(     field_render_type, field_value_mode)
            , 'col_edit':      get_col_edit_renderer(     field_render_type, field_value_mode)
            })
        return

    def __str__(self):
        return (
            "FieldRenderer(render_type=%s, value_mode=%s)"%
            (self._field_render_type, self._field_value_mode)
            )

    def value_mapper(self):
        # Value mapper associated with this renderer's render type
        return self._value_mapper

    def renderer(self, mode):
        """
        Return a renderer for a specified mode.

        The renderer returned is accessed from a template using a {% include %} directive,
        which can accept an object with a render method.  A context value is supplied by
        the Django template processor at the point of invocation.  This mechanism is used
        in the "render_fieldvalue" module, and when composing value renderers.

        See: https://docs.djangoproject.com/en/2.0/ref/templates/builtins/#std:templatetag-include
        """
        return self._value_renderer[mode]
        # return self.diagnostic_renderer(mode)

    def diagnostic_renderer(self, mode):
        """
        Return a diagnostic renderer for a specified mode.

        This is similar to the standard renderer, except that it allows for additional
        diagnostic information to be included to assist in debugging renderer call flows.
        """
        # Wrapper that logs each render call and captures render exceptions
        class _renderer(object):
            def __init__(self, field_renderer, mode):
                self._render_type = field_renderer._field_render_type
                self._value_mode  = field_renderer._field_value_mode
                self._mode        = mode
                self._renderer    = field_renderer._value_renderer[mode]
                return
            def render(self, context):
                try:
                    msg = (
                        "@@render(render_type=%s, value_mode=%s, mode=%s)"%
                        (self._render_type, self._value_mode, self._mode)
                        )
                    log.info(msg)
                    # Prefix rendered output with a diagnostic HTML comment
                    return "<!-- %s -->\n"%msg + self._renderer.render(context)
                except Exception as e:
                    log.error("Error in FieldRenderer.renderer: "+str(e))
                    log.info("\n".join(traceback.format_stack()))
                    return str(e)
        # Fallback renderer used when wrapper construction itself fails
        class _error_renderer(object):
            def __init__(self, field_renderer, mode, exc):
                self._render_type = field_renderer._field_render_type
                self._value_mode  = field_renderer._field_value_mode
                self._mode        = mode
                self._error       = str(exc)
                return
            def render(self, context):
                return (
                    "@@_error_renderer(render_type=%s, value_mode=%s, mode=%s, error=%s)"%
                    (self._render_type, self._value_mode, self._mode, self._error)
                    )
        try:
            r = _renderer(self, mode)
        except Exception as e:
            log.error("Error in FieldRenderer.renderer._renderer(): "+str(e))
            log.info("\n".join(traceback.format_stack()))
            r = _error_renderer(self, mode, e)
        return r

    # Fixed-mode renderer accessors (referenced from templates)
    def label(self):
        return self.renderer("label")
    def view(self):
        return self.renderer("view")
    def edit(self):
        return self.renderer("edit")
    def label_view(self):
        return self.renderer("label_view")
    def label_edit(self):
        return self.renderer("label_edit")
    def col_head(self):
        return self.renderer("col_head")
    def col_head_view(self):
        return self.renderer("col_head_view")
    def col_head_edit(self):
        return self.renderer("col_head_edit")
    def col_view(self):
        return self.renderer("col_view")
    def col_edit(self):
        return self.renderer("col_edit")

    def mode(self):
        # Return a renderer that selects its mode dynamically from the
        # template context value 'render_mode' at render time.
        # NOTE: the inner class names its own instance parameter `modeself`
        # so that `self` keeps referring to the enclosing FieldRenderer
        # (closure access to self._value_renderer).
        class _renderer(object):
            def __init__(modeself):
                pass
            def render(modeself, context):
                try:
                    r = self._value_renderer[context['render_mode']].render(context)
                except Exception as e:
                    # Render errors are reported inline in the output,
                    # along with a stack trace, rather than propagated.
                    log.error("Exception in FieldRenderer.mode, _renderer.render: %s"%(str(e),))
                    formatted_traceback = traceback.format_stack()
                    log.info("".join(formatted_traceback))
                    response_parts = (
                        ["Exception in FieldRenderer.mode, _renderer.render"]+
                        [repr(e)]+
                        formatted_traceback
                        )
                    return "\n".join(response_parts)
                return r
        return _renderer()
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/field_renderer.py
|
field_renderer.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import sys
import traceback
from collections import OrderedDict
from annalist import layout
from annalist import message
from annalist.identifiers import RDFS, ANNAL
from annalist.exceptions import Annalist_Error, EntityNotFound_Error, UnexpectedValue_Error
from annalist.util import extract_entity_id
# from annalist.models.recordfield import RecordField
from annalist.models.entitytypeinfo import EntityTypeInfo
from annalist.models.entityfinder import EntityFinder
from annalist.views.fields.field_renderer import FieldRenderer
from annalist.views.fields.find_renderers import (
is_repeat_field_render_type,
get_value_mapper
)
from annalist.views.fields.render_placement import (
get_placement_classes
)
from annalist.views.form_utils.fieldchoice import FieldChoice
class FieldDescription(object):
"""
Describes an entity view field, and methods to perform
manipulations involving the field description.
"""
__slots__ = ("_collection", "_field_desc", "_field_suffix_index", "_field_suffix")
def __init__(self,
        collection, recordfield, view_context=None,
        field_property=None, field_placement=None,
        field_list=None, field_ids_seen=None,
        field_placement_classes=None
        ):
    """
    Creates a field description value to use in a context value when
    rendering a form.  Values defined here are mentioned in field
    rendering templates.

    The FieldDescription object behaves as a dictionary containing the
    various field attributes.

    collection      is a collection from which data is being rendered.
    recordfield     is a RecordField value or dictionary containing details of
                    the field for which a descriptor is constructed.
    view_context    is a dictionary of additional values that may be used in assembling
                    values to be used when rendering the field.  In particular, a copy
                    of the view description record provides context for some enumeration
                    type selections.
    field_property  if supplied, overrides the field property URI from `recordfield`
    field_placement if supplied, overrides field placement from `recordfield`
    field_list      if the field itself contains or references a list of fields, this is
                    that list of fields.
    field_ids_seen  field ids expanded so far, used to check for recursive reference
                    (defaults to an empty list).
    field_placement_classes
                    if supplied, overrides field placement classes derived from value
                    for `field_placement` string.
    """
    # Fix: previously used a mutable default argument ([]); a fresh list is
    # now created per call.  (The list is only rebound below, never mutated
    # in place, so observable behaviour is unchanged.)
    if field_ids_seen is None:
        field_ids_seen = []
    self._collection = collection
    # log.debug("FieldDescription recordfield: %r"%(recordfield,))
    field_id          = recordfield.get(ANNAL.CURIE.id, "_missing_id_")
    field_name        = recordfield.get(ANNAL.CURIE.field_name, field_id)  # Field name in form
    field_label       = recordfield.get(RDFS.CURIE.label, "")
    field_help        = recordfield.get(RDFS.CURIE.comment, "")
    field_property    = field_property or recordfield.get(ANNAL.CURIE.property_uri, "")
    field_placement   = field_placement or recordfield.get(ANNAL.CURIE.field_placement, "")
    field_placement_c = field_placement_classes or get_placement_classes(field_placement)
    field_placeholder = recordfield.get(ANNAL.CURIE.placeholder, "")
    field_tooltip     = recordfield.get(ANNAL.CURIE.tooltip, "")
    field_render_type = extract_entity_id(recordfield.get(ANNAL.CURIE.field_render_type, ""))
    field_value_mode  = extract_entity_id(recordfield.get(ANNAL.CURIE.field_value_mode, "@@FieldDescription:value_mode@@"))
    field_ref_type    = extract_entity_id(recordfield.get(ANNAL.CURIE.field_ref_type, None))
    field_entity_type = recordfield.get(ANNAL.CURIE.field_entity_type, None)
    field_group_ref   = extract_entity_id(recordfield.get(ANNAL.CURIE.group_ref, None))
    self._field_desc = (
        { 'field_id':               field_id
        , 'field_name':             field_name
        , 'field_instance_name':    field_name
        , 'field_render_type':      field_render_type
        , 'field_value_mode':       field_value_mode
        , 'field_value_type':       recordfield.get(ANNAL.CURIE.field_value_type, "")
        , 'field_label':            field_label
        , 'field_help':             field_help
        , 'field_property_uri':     field_property
        , 'field_placement':        field_placement_c
        , 'field_placeholder':      field_placeholder
        , 'field_tooltip':          field_tooltip
        # NOTE(review): key name 'field_tooltip_test' looks like a typo for
        # 'field_tooltip_text' — confirm against consumers before renaming.
        , 'field_tooltip_test':     field_tooltip or (field_help) or ""
        , 'field_default_value':    recordfield.get(ANNAL.CURIE.default_value, None)
        , 'field_ref_type':         field_ref_type
        , 'field_ref_field':        recordfield.get(ANNAL.CURIE.field_ref_field, None)
        , 'field_ref_restriction':  recordfield.get(ANNAL.CURIE.field_ref_restriction, "ALL")
        , 'field_entity_type':      field_entity_type
        , 'field_choices':          None
        , 'field_group_ref':        field_group_ref
        , 'group_label':            None
        , 'group_add_label':        None
        , 'group_delete_label':     None
        , 'group_field_list':       None
        , 'group_field_descs':      None
        , 'field_renderer':         FieldRenderer(field_render_type, field_value_mode)
        , 'field_value_mapper':     get_value_mapper(field_render_type)  # Used by fieldvaluemap.py
        })
    self._field_suffix_index = 0  # No dup
    self._field_suffix       = ""
    # If field references type, pull in copy of type id and link values
    type_ref = self._field_desc['field_ref_type']
    if type_ref:
        restrict_values = self._field_desc['field_ref_restriction']
        entity_finder   = EntityFinder(collection, selector=restrict_values)
        entities        = entity_finder.get_entities_sorted(
            type_id=type_ref, context=view_context, altscope="select"
            )
        # Note: the options list may be used more than once, so the id generator
        # returned must be materialized as a list.
        # Uses collections.OrderedDict to preserve entity ordering.
        self._field_desc['field_choices'] = OrderedDict()
        if field_render_type in ["Enum_optional", "Enum_choice_opt"]:
            # Add blank choice for optional selections
            self._field_desc['field_choices'][''] = FieldChoice('', label=field_placeholder)
        for e in entities:
            eid = e.get_id()
            val = e.get_type_entity_id()
            if eid != layout.INITIAL_VALUES_ID:
                self._field_desc['field_choices'][val] = FieldChoice(
                    val, label=e.get_label(), link=e.get_view_url_path()
                    )
    # If field references or contains field list, pull in field details
    if field_list:
        if field_id in field_ids_seen:
            raise Annalist_Error(field_id, "Recursive field reference in field group")
        field_ids_seen = field_ids_seen + [field_id]
        group_label  = field_label
        add_label    = recordfield.get(ANNAL.CURIE.repeat_label_add, None) or "Add "+field_id
        remove_label = recordfield.get(ANNAL.CURIE.repeat_label_delete, None) or "Remove "+field_id
        group_field_descs = []
        for subfield in field_list:
            f = field_description_from_view_field(collection, subfield, view_context, field_ids_seen)
            group_field_descs.append(f)
        self._field_desc.update(
            { 'group_id':           field_id
            , 'group_label':        group_label
            , 'group_add_label':    add_label
            , 'group_delete_label': remove_label
            , 'group_field_list':   field_list         # Description from field/group
            , 'group_field_descs':  group_field_descs  # Resulting field description list
            })
    # log.debug("FieldDescription: %s"%field_id)
    # log.info("FieldDescription._field_desc %r"%(self._field_desc,))
    # log.info("FieldDescription.field_placement %r"%(self._field_desc['field_placement'],))
    return
def __copy__(self):
"""
Shallow copy of self.
(Tried code from http://stackoverflow.com/a/15774013, but got type error)
"""
cls = self.__class__
result = cls.__new__(cls)
result._collection = self._collection
result._field_desc = self._field_desc
result._field_suffix_index = self._field_suffix_index
result._field_suffix = self._field_suffix
return result
def copy(self):
return self.__copy__()
def resolve_duplicates(self, properties):
    """
    Resolve duplicate property URIs that appear in a common context corresponding to
    the supplied `properties` dictionary.  If there is a clash, assign a suffix that
    can be added to the field_id and field_property_uri to make them unique.

    The `properties` parameter should be initialized to None by the calling program,
    and updated to the return value of this method each time it is called.

    properties  is a pair of sets: (previously seen field names,
                previously seen field property URIs), or None on the first call.
    """
    if properties is None:
        properties = (set(), set())
    i = 0
    # Detect a clash with a previously seen field name or property URI
    if ( (self._field_desc['field_name'] in properties[0]) or
         (self._field_desc['field_property_uri'] in properties[1]) ):
        i = 1
    # Scan for an unused suffix.  NOTE(review): when a clash is found the
    # first suffix actually tried is "__2" (i is bumped before the suffix is
    # formatted) — presumably the earlier occurrence counts as the first.
    suffix = ""
    while ( (self._field_desc['field_name']+suffix in properties[0]) or
            (self._field_desc['field_property_uri']+suffix in properties[1]) ):
        i += 1
        suffix = "__%d"%i
    self._field_suffix_index = i
    self._field_suffix = suffix
    # Only use suffix for values that actually clash:
    if self._field_desc['field_name'] in properties[0]:
        self._field_desc['field_name'] += suffix
    if self._field_desc['field_property_uri'] in properties[1]:
        self._field_desc['field_property_uri'] += suffix
    # Record the (possibly suffixed) values as seen
    properties[0].add(self._field_desc['field_name'])
    properties[1].add(self._field_desc['field_property_uri'])
    return properties
def get_field_id(self):
    """Return the identifier of the described field."""
    return self._field_desc["field_id"]

def get_field_name(self):
    """Return the form field name used when rendering the described field."""
    return self._field_desc["field_name"]

def set_field_instance_name(self, instance_name):
    """
    Record the full instance name for this field (used when scanning field
    groups); this includes enclosing group ids and indexes.
    """
    self._field_desc["field_instance_name"] = instance_name

def get_field_instance_name(self):
    """Return the full instance name recorded for this field."""
    return self._field_desc["field_instance_name"]

def get_field_property_uri(self):
    """Return the property URI used to store the described field's value."""
    return self._field_desc["field_property_uri"]
def get_field_subproperty_uris(self):
    """
    Return a list of possible subproperty URIs for the described field,
    or an empty list when no collection is associated with this description.
    """
    if self._collection is None:
        return []
    return self._collection.cache_get_subproperty_uris(self.get_field_property_uri())

def get_field_value_key(self, entityvals):
    """
    Return the key under which this field's value appears in `entityvals`.

    This takes account of possible use of subproperties of the property URI
    specified in the field description:  if the declared property URI is not
    present in the entity and a subproperty URI is present, that subproperty
    URI is returned; otherwise the declared property URI is returned.
    """
    declared_uri = self.get_field_property_uri()
    if declared_uri not in entityvals:
        for sub_uri in self.get_field_subproperty_uris():
            if sub_uri in entityvals:
                return sub_uri
    return declared_uri
def group_ref(self):
    """
    Return a reference (a group_id) for the field group that the field
    itself contains or uses, or None.
    """
    return self._field_desc["field_group_ref"]

def group_view_fields(self):
    """
    Return the list of field references for the field group that the field
    itself contains or uses.

    Raises ValueError if no 'group_field_list' value is present.
    """
    subfields = self._field_desc.get("group_field_list", None)
    if subfields is None:
        msg = "Field %(field_id)s is missing 'group_field_list' value"%(self._field_desc)
        log.error(msg)
        raise ValueError(msg)
    return subfields

def group_field_descs(self):
    """
    Return the list of FieldDescription values for the field group that the
    field itself contains or uses.
    """
    return self._field_desc["group_field_descs"]
def is_repeat_group(self):
    """
    Test whether this is a repeating field, in which case the field value is
    assumed to be a list of values to be rendered, with buttons for adding
    and removing values.
    """
    return is_repeat_field_render_type(self._field_desc["field_render_type"])

def is_enum_field(self):
    """
    Test whether this is an enumerated-value field, in which case the
    'field_ref_type' value is assumed to be the type_id of the value type
    to be enumerated.
    """
    return self._field_desc["field_render_type"] in (
        "EntityTypeId",
        "Type", "View", "List", "Field",
        "Enum", "Enum_optional", "Enum_choice",
        )

def has_new_button(self):
    """
    Test whether this field has a control (a 'new' or '+' button) that
    invokes a new form to create a new entity.

    Strictly, this test includes 'Enum_choice', which does not have a '+'
    button, but since the absent button cannot be clicked the infidelity
    here is benign.
    """
    return self._field_desc["field_ref_type"] is not None

def is_import_field(self):
    """
    Test whether this field has a control (an 'import' button) that is used
    to request that additional external data is added to an entity.
    """
    return self._field_desc["field_value_mode"] == "Value_import"

def is_upload_field(self):
    """
    Test whether this field is a file-upload field (selected file contents
    are returned with the form response).
    """
    return self._field_desc["field_value_mode"] == "Value_upload"

def has_field_list(self):
    """
    Test whether this field contains or references a list of field
    descriptions.

    @@ (Currently, this function duplicates `is_repeat_group`.)
    @@ test for: group_ref, group_field_descs, and group_id
    """
    return is_repeat_field_render_type(self._field_desc["field_render_type"])
def __repr1__(self):
return (
"FieldDescription(\n"+
" { 'field_id': %r\n"%(self._field_desc["field_id"])+
" , 'field_name': %r\n"%(self.get_field_name())+
" , 'field_render_type': %r\n"%(self._field_desc["field_render_type"])+
" , 'field_property_uri': %r\n"%(self.get_field_property_uri())+
" , 'field_ref_type': %r\n"%(self._field_desc["field_ref_type"])+
" , 'group_field_list': %r\n"%(self._field_desc["group_field_list"])+
" , 'group_field_descs': %r\n"%(self._field_desc["group_field_descs"])+
" })"
)
def __repr2__(self):
return (
"FieldDescription("+repr(self._field_desc)+")"
)
def __repr3__(self):
return (
"FieldDescription("+repr(self._field_desc['field_id'])+")"
)
def __repr__(self):
return self.__repr2__()
# Define methods to facilitate access to values using dictionary operations
# on the FieldDescription object
#
# @@TODO: do we really need to return lists for .keys() and .items()?
# It would be more consistent with Python3 to just return an interator.
# Try returning iter(...) instead and see what happens?
def keys(self):
"""
Return collection metadata value keys
"""
return list(self._field_desc.keys())
def items(self):
"""
Return collection metadata value fields
"""
return list(self._field_desc.items())
def get(self, key, default):
"""
Equivalent to dict.get() function
"""
return self[key] if self._field_desc and key in self._field_desc else default
def __getitem__(self, k):
"""
Allow direct indexing to access collection metadata value fields
"""
return self._field_desc[k]
def __setitem__(self, k, v):
"""
Allow direct indexing to update collection metadata value fields
"""
self._field_desc[k] = v
return
def __iter__(self):
"""
Iterator over dictionary keys
"""
for k in self._field_desc:
yield k
return
def field_description_from_view_field(
        collection, field, view_context=None, field_ids_seen=None
        ):
    """
    Returns a field description value created using information from a field
    reference in a view description record (i.e. a dictionary containing a
    field id value and optional field property URI and placement values.
    The optional values, if not provided, are obtained from the referenced
    field description.)

    collection      is a collection from which data is being rendered.
    field           is a dictionary with the field description from a view or list
                    description, containing a field id and placement values.
    view_context    is a dictionary of additional values that may be used in assembling
                    values to be used when rendering the field.  In particular, a copy
                    of the view description record provides context for some enumeration
                    type selections.
    field_ids_seen  field ids expanded so far, to check for recursive reference.
                    (Defaults to an empty list; `None` is used as the declared
                    default to avoid the shared-mutable-default-argument pitfall.)
    """
    if field_ids_seen is None:
        field_ids_seen = []
    field_id    = extract_entity_id(field[ANNAL.CURIE.field_id])
    recordfield = collection.get_field(field_id)
    if recordfield is None:
        # Fall back to a placeholder field definition so rendering can proceed
        log.warning("Can't retrieve definition for field %s"%(field_id))
        recordfield = collection.get_field("Field_missing")
        recordfield[RDFS.CURIE.label] = message.MISSING_FIELD_LABEL%{ 'id': field_id }
    # If field references group, pull in group details
    field_list = recordfield.get(ANNAL.CURIE.field_fields, None)
    if not field_list:
        # Older-style group references are no longer supported
        group_ref = extract_entity_id(recordfield.get(ANNAL.CURIE.group_ref, None))
        if group_ref:
            raise UnexpectedValue_Error("Group %s used in field %s"%(group_ref, field_id))
    # If present, `field_property` and `field_placement` override values in the
    # field description
    return FieldDescription(
        collection, recordfield, view_context=view_context,
        field_property=field.get(ANNAL.CURIE.property_uri, None),
        field_placement=field.get(ANNAL.CURIE.field_placement, None),
        field_list=field_list,
        field_ids_seen=field_ids_seen
        )
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/field_description.py
|
field_description.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for a group of fields repeated over a list of values.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import traceback
import logging
log = logging.getLogger(__name__)
from django.http import HttpResponse
from django.template import Template, Context
from annalist.exceptions import Annalist_Error
from .bound_field import bound_field
from .render_fieldvalue import (
RenderFieldValue,
TemplateWrapValueRenderer,
ModeWrapValueRenderer
)
# ------------------------------------------------------------
# Local data values
# ------------------------------------------------------------

# The dictionaries below supply Django template fragments consumed by
# `RenderRepeatGroup`:  "head" is rendered once before the repeated values,
# "body" once per repeated value, and "tail" (where present) once after;
# "head_empty" (where present) replaces "head" when the value list is empty.

# Renders a field group in view (read-only) mode, one labelled sub-field
# per row.
view_group = (
{ "head":
"""
<!-- views.fields.render_repeatgroup.view_group (head) -->
<div class="small-12 columns">
<div class="row">
<div class="group-label small-2 columns">
<span>{{field.description.field_label}}</span>
</div>
<div class="group-placeholder small-10 columns">
{{field.description.field_placeholder}}
</div>
</div>
</div>"""
, "body":
"""
<div class="group-row small-12 columns">
<div class="row">
<div class="small-2 columns">
</div>
<div class="small-10 columns">
{% for f in repeat_bound_fields %}
<div class="view-group row">
{% include f.render.label_view with field=f %}
</div>
{% endfor %}
</div>
</div>
</div>"""
# , "tail":
# """
# """
})

# Renders a field group in edit mode, with selection checkboxes and
# add/remove/move buttons in the tail.
edit_group = (
{ "head":
"""<!-- views.fields.render_repeatgroup.edit_group (head) -->
<div class="small-12 columns"{{field.field_tooltip_attr|safe}}>
<div class="row">
<div class="group-label small-2 columns">
<span>{{field.description.field_label}}</span>
</div>
<div class="group-placeholder small-10 columns">
{{field.description.field_placeholder}}
</div>
</div>
</div>"""
, "body":
"""
<div class="group-row small-12 columns"{{field.field_tooltip_attr|safe}}>
<div class="row selectable">
<div class="small-2 columns checkbox-in-edit-padding">
{% if auth_config %}
<input type="checkbox" name="{{field.description.group_id}}__select_fields"
value="{{repeat_index}}" class="right" />
{% endif %}
</div>
<div class="small-10 columns">
{% for f in repeat_bound_fields %}
<div class="edit-group row">
{% include f.render.label_edit with field=f %}
</div>
{% endfor %}
</div>
</div>
</div>"""
, "tail":
"""
<div class="small-12 columns">
<div class="row">
<div class="small-2 columns">
</div>
<div class="small-10 columns">
<input type="submit" name="{{field.description.group_id}}__remove"
value="{{field.description.group_delete_label}}" />
<input type="submit" name="{{field.description.group_id}}__add"
value="{{field.description.group_add_label}}" />
<input type="submit" name="{{field.description.group_id}}__up"
value="Move ⬆" />
<input type="submit" name="{{field.description.group_id}}__down"
value="Move ⬇" />
</div>
</div>
</div>"""
})

# Renders a field group in view mode as a table-like row layout, with a
# column-heading row; has a separate "head_empty" form for empty lists.
view_grouprow = (
{ "head":
"""
<!-- views.fields.render_repeatgroup.view_grouprow (head) -->
<div class="small-12 columns">
<div class="grouprow row">
<div class="group-label small-12 medium-2 columns">
<span>{{field.description.field_label}}</span>
</div>
<div class="small-12 medium-10 columns hide-for-small-only">
<div class="row">
<div class="small-12 columns">
<div class="view-grouprow col-head row">
{% for f in group_head_fields %}
<!-- ===== renderer {{f.render}} -->
<!-- ===== {{f.description.field_render_type}}.{{f.description.field_value_mode}}/{{f.description.field_id}}:{{f.description.field_label}} ({{render_mode}}) -->
<!-- ===== f.render.col_head_view... -->
<!-- {_ include f.field_render_colhead_view with field=f %} -->
{% include f.render.col_head_view with field=f %}
<!-- ===== f.render.col_head_view end -->
{% endfor %}
</div>
</div>
</div>
</div>
</div>
</div>
"""
, "head_empty":
"""
<!-- views.fields.render_repeatgroup.view_grouprow (empty list) -->
<div class="small-12 columns">
<div class="grouprow row">
<div class="group-label small-12 medium-2 columns">
<span>{{field.description.field_label}}</span>
</div>
<div class="group-placeholder small-12 medium-10 columns">
<span>(None specified)</span>
</div>
</div>
</div>
"""
, "body":
"""
<div class="small-12 columns">
<div class="grouprow row">
<div class="small-12 medium-2 columns">
</div>
<div class="small-12 medium-10 columns">
<div class="row select-row">
<div class="small-12 columns">
<div class="view-grouprow row">
{% for f in repeat_bound_fields %}
{% include f.render.col_view with field=f %}
{% endfor %}
</div>
</div>
</div>
</div>
</div>
</div>
"""
# , "tail":
# """
# """
})

# Renders a field group in edit mode as a table-like row layout, with
# selection checkboxes and add/remove/move buttons in the tail.
edit_grouprow = (
{ "head":
"""
<!-- views.fields.render_repeatgroup.edit_grouprow (head) -->
<div class="small-12 columns"{{field.field_tooltip_attr|safe}}>
<div class="grouprow row">
<div class="group-label small-12 medium-2 columns">
<span>{{field.description.field_label}}</span>
</div>
<div class="small-12 medium-10 columns hide-for-small-only">
<div class="row">
<div class="small-1 columns">
</div>
<div class="small-11 columns">
<div class="edit-grouprow col-head row">
{% for f in group_head_fields %}
{% include f.render.col_head_edit with field=f %}
{% endfor %}
</div>
</div>
</div>
</div>
</div>
</div>
"""
, "body":
"""
<div class="small-12 columns">
<div class="grouprow row">
<div class="small-12 medium-2 columns hide-for-small-only">
</div>
<div class="small-12 medium-10 columns">
<div class="tbody row select-row">
<div class="small-1 columns checkbox-in-edit-padding">
<input type="checkbox" class="select-box right"
name="{{field.description.group_id}}__select_fields"
value="{{repeat_index}}" />
</div>
<div class="small-11 columns">
<div class="edit-grouprow row">
{% for f in repeat_bound_fields %}
{% include f.render.col_edit with field=f %}
{% endfor %}
</div>
</div>
</div>
</div>
</div>
</div>
"""
, "tail":
"""
<div class="small-12 columns">
<div class="grouprow row">
<div class="small-12 medium-2 columns">
</div>
<div class="group-buttons small-12 medium-10 columns">
<div class="row">
<div class="small-1 columns">
</div>
<div class="small-11 columns">
<input type="submit" name="{{field.description.group_id}}__remove"
value="{{field.description.group_delete_label}}" />
<input type="submit" name="{{field.description.group_id}}__add"
value="{{field.description.group_add_label}}" />
<input type="submit" name="{{field.description.group_id}}__up"
value="Move ⬆" />
<input type="submit" name="{{field.description.group_id}}__down"
value="Move ⬇" />
</div>
</div>
</div>
</div>
</div>"""
})

# Used for rendering lists of entities via the EntityList view class
view_listrow = (
{ "head":
"""
<!-- views.fields.render_repeatgroup.view_listrow (head) -->
<div class="thead row">
<div class="small-1 columns">
</div>
<div class="small-11 columns">
<div class="view-listrow col-head row">
{% for f in group_head_fields %}
{% include f.render.col_head with field=f %}
{% endfor %}
</div>
</div>
</div>
"""
, "body":
"""
<div class="tbody row select-row">
<div class="small-1 columns">
<input type="checkbox" class="select-box right" name="entity_select"
value="{{repeat_entity.entity_type_id}}/{{repeat_entity.entity_id}}" />
</div>
<div class="small-11 columns">
<div class="view-listrow row">
{% for f in repeat_bound_fields %}
{% include f.render.view with field=f %}
{% endfor %}
</div>
</div>
</div>
"""
})
# ------------------------------------------------------------
# Repeat group render class
# ------------------------------------------------------------
class RenderRepeatGroup(object):
    """
    Render class for a repeated field group.

    Instances are constructed with a dictionary of Django template strings
    ("head", "body", optional "tail" and "head_empty") used to render,
    respectively, the group heading, each repeated value, and any trailing
    matter (e.g. add/remove buttons).
    """

    def __init__(self, templates=None):
        """
        Creates a renderer object for a repeating group field.

        templates   is a dictionary of Django template strings:  "body" is
                    required; "head", "tail" and "head_empty" are optional.
        """
        # Later, may introduce a template_file= option to read from templates directory
        super(RenderRepeatGroup, self).__init__()
        assert templates is not None, "RenderRepeatGroup template must be supplied (.edit, .view or .item)"
        self._template_head  = Template(templates.get("head", ""))
        self._template_body  = Template(templates.get("body", "@@missing body@@"))
        self._template_tail  = Template(templates.get("tail", ""))
        # "head_empty", if present, replaces "head" when the value list is empty
        self._template_empty = self._template_head
        if "head_empty" in templates:
            self._template_empty = Template(templates["head_empty"])
        return

    def __str__(self):
        return "RenderRepeatGroup %r"%(self._template_head)

    def render(self, context):
        """
        Renders a repeating field group.

        `context`   is a dictionary-like object that provides information for the
                    rendering operation.

        returns a string that is incorporated into the resulting web page.

        `context["field"]` is a `bound_field` value that combines the field
        definition, entity values and additional context information.  The
        entity value is either the entire entity that is currently being
        rendered, or a sub-element containing a list of repeated values that
        are each formatted using the supplied body template.
        """
        try:
            group_id          = context["field"]["description"]["group_id"]
            group_field_descs = context["field"]["description"]["group_field_descs"]
            # Unbound field headings, used for column-heading templates
            h = [ bound_field(f, {}) for f in group_field_descs ]
            with context.push({ "group_head_fields": h }):
                value_list = context["field"]["field_value"]
                if value_list is None:
                    # Fix: a missing field value previously caused a TypeError
                    # in `len(value_list)` below, which was caught by the
                    # catch-all handler and rendered a traceback into the
                    # page.  Treat a missing value as an empty list instead.
                    value_list = []
                if value_list and not isinstance(value_list, list):
                    # This is to allow field changes from single to repeated values
                    # to be handled less confusingly.  String values were previously
                    # treated as lists of characters, which gave some pretty weird
                    # results.
                    value_list = [value_list]
                if len(value_list) > 0:
                    response_parts = [self._template_head.render(context)]
                    repeat_index   = 0
                    extras         = context["field"]["context_extra_values"]
                    for g in value_list:
                        # Bind each group field description to the current value
                        r = [ bound_field(f, g, context_extra_values=extras)
                              for f in group_field_descs
                            ]
                        repeat_id   = context.get("repeat_prefix", "") + group_id
                        repeat_dict = (
                            { "repeat_id":            repeat_id
                            , "repeat_index":         str(repeat_index)
                            , "repeat_prefix":        repeat_id+("__%d__"%repeat_index)
                            , "repeat_bound_fields":  r
                            , "repeat_entity":        g
                            })
                        with context.push(repeat_dict):
                            response_parts.append(self._template_body.render(context))
                        repeat_index += 1
                    response_parts.append(self._template_tail.render(context))
                else:
                    # Empty list
                    response_parts = [self._template_empty.render(context)]
                    response_parts.append(self._template_tail.render(context))
        except Exception as e:
            # Render the exception details into the response rather than
            # failing the whole page
            log.exception("Exception in RenderRepeatGroup.render")
            ex_type, ex, tb = sys.exc_info()
            traceback.print_tb(tb)
            response_parts = (
                ["Exception in RenderRepeatGroup.render"]+
                [repr(e)]+
                traceback.format_exception(ex_type, ex, tb)+
                ["***RenderRepeatGroup.render***"]
                )
            del tb
        return "".join(response_parts)
# ------------------------------------------------------------
# Repeat group renderer factory class and functions
# ------------------------------------------------------------
class RenderGroupFieldValue(RenderFieldValue):
    """
    Return field render factory for a repeated group based on
    the supplied render value objects.
    """
    def __init__(self, render_type, view_renderer=None, edit_renderer=None):
        """
        Creates a renderer factory for a repeating value field.

        render_type     render type name for the renderer factory
        view_renderer   is a render object that formats values for viewing
        edit_renderer   is a render object that formats value editing widgets

        Methods provided return composed renderers for a variety of contexts.
        """
        super(RenderGroupFieldValue, self).__init__(render_type,
            view_renderer=view_renderer,
            edit_renderer=edit_renderer
            )
        # Override view/edit renderers to not use wrapper.
        self._render_view       = ModeWrapValueRenderer("view", self._view_renderer)
        self._render_edit       = ModeWrapValueRenderer("edit", self._edit_renderer)
        #@@TODO: remove labelling from templates instead?
        # The label and column renderers reuse the same unwrapped view/edit
        # renderers (the group templates supply their own labelling).
        self._render_label_view = ModeWrapValueRenderer("view", self._view_renderer)
        self._render_label_edit = ModeWrapValueRenderer("edit", self._edit_renderer)
        self._render_col_view   = ModeWrapValueRenderer("view", self._view_renderer)
        self._render_col_edit   = ModeWrapValueRenderer("edit", self._edit_renderer)
        return
    # Previously, the following renderer accessors raised errors for
    # repeated groups; retained here for reference.
    # def label_view(self):
    #     log.warning("RepeatGroup renderer has no label_view method")
    #     raise Annalist_Error("RepeatGroup renderer has no label_view method")
    # def label_edit(self):
    #     log.warning("RepeatGroup renderer has no label_edit method")
    #     raise Annalist_Error("RepeatGroup renderer has no label_edit method")
    # def col_head(self):
    #     log.warning("RepeatGroup renderer has no col_head method")
    #     raise Annalist_Error("RepeatGroup renderer has no col_head method")
    # def col_head_view(self):
    #     log.warning("RepeatGroup renderer has no col_head_view method")
    #     raise Annalist_Error("RepeatGroup renderer has no col_head_view method")
    # def col_head_edit(self):
    #     log.warning("RepeatGroup renderer has no col_head_edit method")
    #     raise Annalist_Error("RepeatGroup renderer has no col_head_edit method")
    # def col_view(self):
    #     log.warning("RepeatGroup renderer has no col_view method")
    #     raise Annalist_Error("RepeatGroup renderer has no col_view method")
    # def col_edit(self):
    #     log.warning("RepeatGroup renderer has no col_edit method")
    #     raise Annalist_Error("RepeatGroup renderer has no col_edit method")
def get_repeatgroup_renderer():
    """
    Return field renderer object for RepeatGroup (labeled fields)
    """
    return RenderGroupFieldValue("repeatgroup",
        view_renderer=RenderRepeatGroup(view_group),
        edit_renderer=RenderRepeatGroup(edit_group)
        )

def get_repeatgrouprow_renderer():
    """
    Return field renderer object for RepeatGroup as row (col header labels)
    """
    return RenderGroupFieldValue("repeatgrouprow",
        view_renderer=RenderRepeatGroup(view_grouprow),
        edit_renderer=RenderRepeatGroup(edit_grouprow)
        )

def get_repeatlistrow_renderer():
    """
    Return field renderer object for RepeatGroup as list (col header labels)

    NOTE: the edit renderer here is a bare placeholder template:
    this renderer cannot be used for editing.
    """
    return RenderGroupFieldValue("repeatlistrow",
        view_renderer=RenderRepeatGroup(view_listrow),
        edit_renderer=Template("@@repeatlistrow_renderer cannot be used for editing@@")
        )
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_repeatgroup.py
|
render_repeatgroup.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for URI value used to import a resource into
the local data store.
NOTE: the actual import logic is handled by the edit form handler:
the renderer just ensures appropriate values are returned.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from utils.py3porting import is_string
from django.template import Template, Context
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_field_edit_value,
get_field_view_value
)
# ----------------------------------------------------------------------------
#
# Link URI value mapping
#
# ----------------------------------------------------------------------------
def import_field_value(data_value):
    """
    Coerce a stored field value into the dictionary format expected by the
    remaining import-field processing.

    A non-empty string is treated as an import URL; a false value (None,
    empty string, etc.) yields a structure with an empty import URL; any
    other value is passed through unchanged.
    """
    if not data_value:
        # Also for empty string case
        return (
            { 'resource_name': "imported.data"
            , 'import_url':    ""
            })
    if is_string(data_value):
        return (
            { 'resource_name': "imported.data"
            , 'import_url':    data_value
            })
    return data_value
class URIImportValueMapper(RenderBase):
    """
    Value mapper class for imported-resource URI values.
    """

    @classmethod
    def encode(cls, data_value):
        """
        Extracts import URL from value structure, for field display.
        """
        return import_field_value(data_value).get('import_url', "")

    @classmethod
    def decode(cls, field_value):
        """
        Returns textual link value from import URL field value.
        """
        return field_value or ""

    def decode_store(self, field_value, entityvals, property_uri):
        """
        Decodes a supplied value and uses it to update the 'import_url'
        entry of an URI import field stored in `entityvals`.
        """
        import_url = self.decode(field_value)
        stored     = entityvals.get(property_uri, {})
        if isinstance(stored, dict):
            stored['import_url'] = import_url
        else:
            # Previous value was not structured: replace it wholesale
            stored = {'import_url': import_url}
        entityvals[property_uri] = stored
        return stored
# ----------------------------------------------------------------------------
#
# Import value templates
#
# ----------------------------------------------------------------------------

# %-format string used to render an import URL as a hyperlink for viewing:
# interpolated values are (link URL, display text).
view_import = (
"""<a href="%s" target="_blank">%s</a>""")

# Django template used to render an import URL text input with an
# "Import" submit button for editing.
edit_import = (
"""<!-- fields.uri_import_edit_renderer -->
<div class="row">
<div class="small-10 columns view-value view-subfield less-import-button">
<input type="text" size="64" name="{{repeat_prefix}}{{field.description.field_name}}"
placeholder="{{field.description.field_placeholder}}"
value="{{encoded_field_value}}" />
</div>
<div class="small-2 columns view-value view-subfield import-button left small-text-right">
<input type="submit" name="{{repeat_prefix}}{{field.description.field_name}}__import" value="Import" />
</div>
</div>
""")
# ----------------------------------------------------------------------------
#
# Link URI field renderers
#
# ----------------------------------------------------------------------------
class uri_import_view_renderer(object):
    """
    Renderer that displays an import URL as a hyperlink for viewing.
    """

    def render(self, context):
        """
        Render import link for viewing.
        """
        linkval = URIImportValueMapper.encode(get_field_view_value(context, ""))
        common_prefixes = (
            [ "http://", "https://"
            , "file:///", "file://localhost/", "file://"
            , "mailto:"]
            )
        # Display text is the link with the first recognised scheme
        # prefix stripped; otherwise the full link value
        textval = next(
            (linkval[len(p):] for p in common_prefixes if linkval.startswith(p)),
            linkval
            )
        return view_import%(linkval, textval)
class uri_import_edit_renderer(object):
    """
    Renderer that presents an import URL field with an "Import" button
    for editing.
    """

    def __init__(self):
        # Compile the edit template once per renderer instance
        self._template = Template(edit_import)
        return

    def render(self, context):
        """
        Render import link for editing.
        """
        encoded = URIImportValueMapper.encode(get_field_edit_value(context, None))
        with context.push(encoded_field_value=encoded):
            rendered = self._template.render(context)
        return rendered
def get_uri_import_renderer():
    """
    Return field renderer object for uri import value.

    Combines the view and edit renderers defined above.
    """
    return RenderFieldValue("uri_import",
        view_renderer=uri_import_view_renderer(),
        edit_renderer=uri_import_edit_renderer(),
        )
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_uri_import.py
|
render_uri_import.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer for simple text field
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from django.http import HttpResponse
from django.template import Template
from annalist.views.fields.render_base import RenderBase
# ----------------------------------------------------------------------------
#
# Text value mapping
#
# ----------------------------------------------------------------------------
class TextValueMapper(RenderBase):
    """
    Value mapper for simple text entry field.

    The encode and decode methods default to those of RenderBase;
    i.e. identity mappings.
    """

    def __init__(self):
        """
        Creates a value mapper object for a simple text field.
        """
        super(TextValueMapper, self).__init__()
        return
# encode, decode methods default to RenderBase; i.e. identity mappings
# ----------------------------------------------------------------------------
#
# Text field renderers
#
# ----------------------------------------------------------------------------
class text_view_renderer(object):
    """
    Renderer for viewing a simple text field value.
    """

    def render(self, context):
        """
        Renders a simple text field for viewing
        """
        template = Template("""
<!-- views/fields/render_text.py:text_view_renderer -->
<span>{{ field.field_value|default:" " }}</span>
""")
        return template.render(context)
class text_edit_renderer(object):
    """
    Renderer for editing a simple text field value.
    """

    def render(self, context):
        """
        Renders a simple text field.

        See also:
            http://stackoverflow.com/questions/1480588/input-size-vs-width
        """
        template = Template("""
<!-- views/fields/render_text.py:text_edit_renderer -->
<input type="text" size="64" name="{{repeat_prefix}}{{field.description.field_name}}"
placeholder="{{field.description.field_placeholder}}"
value="{{field.field_value}}" />
""")
        return template.render(context)
def get_text_renderer():
    """
    Return field renderer object for text values.
    """
    # Fix: `RenderFieldValue` is not imported at module level in this file
    # (only `RenderBase` is), so this call would raise NameError when
    # invoked; import it locally here.
    from annalist.views.fields.render_fieldvalue import RenderFieldValue
    return RenderFieldValue("text",
        view_renderer=text_view_renderer(),
        edit_renderer=text_edit_renderer(),
        )
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_text.py
|
render_text.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for a short (1-line) text value with optional language code.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2019, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re
import collections.abc
from utils.py3porting import is_string, to_unicode
from annalist.views.fields.render_base import RenderBase
from django.conf import settings
from django.template import Template, Context
# ----------------------------------------------------------------------------
#
# Language-tagged text value mapping
#
# ----------------------------------------------------------------------------
class TextLanguageValueMapper(RenderBase):
    """
    Value mapper class for language-tagged text.
    """

    @classmethod
    def encode(cls, field_value):
        """
        Encodes a language-tagged string for display.

        >>> TextLanguageValueMapper.encode("text") == "text"
        True
        >>> TextLanguageValueMapper.encode({ "@value": "text" }) == "text"
        True
        >>> TextLanguageValueMapper.encode({ "@value": "text", "@language": "en" }) == "text (en)"
        True
        >>> TextLanguageValueMapper.encode({ "@value": "text", "@language": "" }) == "text"
        True
        >>> TextLanguageValueMapper.encode({ "@value": "text", "@language": None }) == "text"
        True
        """
        if is_string(field_value):
            return field_value.strip()
        if isinstance(field_value, collections.abc.Mapping):
            encoded  = field_value["@value"]
            language = field_value.get("@language", None)
            if language:
                encoded += (" ("+language+")")
            return encoded
        # Anything else encodes as an empty string
        return ""

    @classmethod
    def decode(cls, field_string):
        """
        Decodes a language-tagged string value to an internal JSON-based
        representation.

        >>> TextLanguageValueMapper.decode("text") == { "@value": "text" }
        True
        >>> TextLanguageValueMapper.decode("text (en)") == { "@value": "text", "@language": "en" }
        True
        >>> TextLanguageValueMapper.decode("text (en) more") == { "@value": "text (en) more" }
        True
        >>> TextLanguageValueMapper.decode("") == { "@value": "" }
        True
        """
        text  = field_string or ""
        match = re.match(r"^(.*)(\s+\((\w[\w-]+)\))$", text)
        if not match:
            return { "@value": text }
        decoded = { "@value": match.group(1).strip() }
        if match.group(2):
            decoded["@language"] = match.group(3).strip()
        return decoded
# ----------------------------------------------------------------------------
#
# Language-tagged text field renderers
#
# ----------------------------------------------------------------------------
class text_language_view_renderer(object):
    """
    Renderer for viewing a language-tagged text value.
    """

    def render(self, context):
        """
        Render language-tagged text for viewing.
        """
        from annalist.views.fields.render_fieldvalue import get_field_view_value
        raw_value = get_field_view_value(context, "")
        textval   = TextLanguageValueMapper.encode(raw_value)
        log.debug("text_language_view_renderer: textval %r"%(textval,))
        # NOTE(review): value is interpolated without HTML escaping — confirm
        # that upstream sanitization makes this safe.
        return "<span>%s</span>"%(textval)
class text_language_edit_renderer(object):
    """
    Renderer for editing a language-tagged text value as a single text input.
    """

    def __init__(self):
        # Django template for the field's <input> element; the bound value is
        # supplied via the 'encoded_field_value' context variable at render time.
        self._template = Template(
            '<input type="text" size="64" '
            'name="{{repeat_prefix}}{{field.description.field_name}}" '
            'placeholder="{{field.description.field_placeholder}}" '
            'value="{{encoded_field_value}}" />'
            )
        return

    def render(self, context):
        """
        Render language-tagged text for editing
        """
        from annalist.views.fields.render_fieldvalue import get_field_edit_value
        raw_value = get_field_edit_value(context, "")
        textval   = TextLanguageValueMapper.encode(raw_value)
        log.debug("text_language_edit_renderer: textval %r"%(textval,))
        with context.push(encoded_field_value=textval):
            return self._template.render(context)
def get_text_language_renderer():
    """
    Return a field renderer object for language-tagged text values.
    """
    from annalist.views.fields.render_fieldvalue import RenderFieldValue
    return RenderFieldValue(
        "text_language",
        view_renderer=text_language_view_renderer(),
        edit_renderer=text_language_edit_renderer()
        )
# Run the doctest examples embedded in this module's docstrings when the
# module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()

# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_text_language.py
|
render_text_language.py
|
# Field rendering templates
This directory contains templates for rendering different types of field value in different contexts.
The template name format is:
annalist_<context>_<value-type>
where `<context>` is one of:
* `edit` - a template used to render an element that can be used to display and edit a field value
* `view` - a template used for an element to display a value without options to change it
* [`grid` - used to render an element to display a field value in a grid NOT YET IMPLEMENTED]
and `<value-type>` is indicative of the type of value rendered by the template.
The selection of field templates for rendering is handled by module views.fields.find_renderers.
## Notes
* http://stackoverflow.com/questions/1480588/input-size-vs-width
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/templates/field/README.md
|
README.md
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, Graham Klyne, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import unittest
import logging
junitxml_present = False
try:
import junitxml
junitxml_present = True
except ImportError:
pass
def getTestSuite(testclass, testdict, select="unit"):
"""
Assemble test suite from supplied class, dictionary and selector
testclass is the test class whose methods are test cases
testdict is a dictionary of test cases in named test suite,
keyed by "unit", "component", etc., or by a named test.
select is the test suite selector:
"unit" return suite of unit tests only
"component" return suite of component tests
"integrate" return suite of integration tests
"pending" return suite of pending tests
"all" return suite of unit and component tests
name a single named test to be run
"""
suite = unittest.TestSuite()
# Named test only
if select[0:3] not in ["uni","com","all","int","pen"]:
if not hasattr(testclass, select):
print("%s: no test named '%s'"%(testclass.__name__, select))
return None
suite.addTest(testclass(select))
return suite
# Select test classes to include
if select[0:3] == "uni":
testclasses = ["unit"]
elif select[0:3] == "com":
testclasses = ["component"]
elif select[0:3] == "int":
testclasses = ["integration"]
elif select[0:3] == "pen":
testclasses = ["pending"]
elif select[0:3] == "all":
testclasses = ["unit", "component"]
else:
testclasses = ["unit"]
for c in testclasses:
for t in testdict.get(c,[]):
if not hasattr(testclass, t):
print("%s: in '%s' tests, no test named '%s'"%(testclass.__name__, c, t))
return None
suite.addTest(testclass(t))
return suite
def runTests(logname, getSuite, args):
"""
Run unit tests based on supplied command line argument values
logname name for logging output file, if used
getSuite function to retrieve test suite, given selector value
args command line arguments (or equivalent values)
"""
sel = "unit"
vrb = 1
if len(args) > 1:
sel = args[1]
if sel == "xml":
# Run with XML test output for use in Jenkins environment
if not junitxml_present:
print("junitxml module not available for XML test output")
raise ValueError("junitxml module not available for XML test output")
with open('xmlresults.xml', 'w') as report:
result = junitxml.JUnitXmlResult(report)
result.startTestRun()
try:
getSuite(select="unit").run(result)
finally:
result.stopTestRun()
else:
if sel[0:3] in ["uni","com","all","int","pen"]:
logging.basicConfig(level=logging.WARNING)
if sel[0:3] in ["com","all"]: vrb = 2
else:
# Run single test with elevated logging to file via new handler
logging.basicConfig(level=logging.DEBUG)
# Enable debug logging to a file
fileloghandler = logging.FileHandler(logname,"w")
fileloghandler.setLevel(logging.DEBUG)
# Use this formatter for shorter log records
###filelogformatter = logging.Formatter('%(levelname)s %(message)s', "%H:%M:%S")
# Use this formatter to display timing information:
filelogformatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(message)s', "%H:%M:%S")
fileloghandler.setFormatter(filelogformatter)
logging.getLogger('').addHandler(fileloghandler)
vrb = 2
runner = unittest.TextTestRunner(verbosity=vrb)
tests = getSuite(select=sel)
if tests: runner.run(tests)
return
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/miscutils/TestUtils.py
|
TestUtils.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import httpretty
from utils.py3porting import urljoin, pathname2url
from . import ScanDirectories
from .FileMimeTypes import FileMimeTypes
FileType_MimeType = dict([ (ft,ct) for (ct, fts) in FileMimeTypes
for ft in fts ])
def HttpContentType(filename):
    """
    Return the MIME content type corresponding to the supplied filename's
    extension, or "application/octet-stream" if the extension is absent
    or not recognized.
    """
    namebits = filename.rsplit(".", 1)
    if len(namebits) != 2:
        return "application/octet-stream"
    return FileType_MimeType.get(namebits[1], "application/octet-stream")
class MockHttpFileResources(object):
    """
    Context manager that uses httpretty to serve the files under a local
    directory as mock HTTP GET/HEAD resources beneath a given base URI.
    """

    def __init__(self, baseuri, path):
        # baseuri: URI prefix under which the files are exposed
        # path:    local directory whose contents are served
        self._baseuri = baseuri
        self._path = path
        return

    def __enter__(self):
        httpretty.enable()
        # Suppress noisy low-level logging from httpretty itself
        logging.getLogger("httpretty.core").setLevel(logging.WARNING)
        # Register a GET and a HEAD response for every file under self._path;
        # content type is inferred from each file's extension.
        refs = ScanDirectories.CollectDirectoryContents(self._path, baseDir=self._path,
            listDirs=False, listFiles=True, recursive=True)
        for r in refs:
            ru = self._baseuri + pathname2url(r)
            rt = HttpContentType(r)
            # log.info("MockHttpFileResource uri %s, file %s"%(ru, self._path+r))
            # NOTE(review): file is opened in text mode; binary content
            # (e.g. images) may be mangled on Python 3 — confirm intended.
            with open(self._path+r, 'r') as cf:
                httpretty.register_uri(httpretty.GET, ru, status=200, content_type=rt,
                    body=cf.read())
            httpretty.register_uri(httpretty.HEAD, ru, status=200, content_type=rt)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Never suppress exceptions raised in the with-body
        suppress_exc = False
        httpretty.disable()
        return suppress_exc
class MockHttpDictResources(object):
    """
    Context manager that uses httpretty to serve the members of a dictionary
    (URI reference -> body) as mock HTTP GET/HEAD resources under a base URI.
    """

    def __init__(self, baseuri, resourcedict):
        # baseuri:      URI prefix against which dictionary keys are resolved
        # resourcedict: mapping from URI reference to response body
        self._baseuri = baseuri
        self._dict    = resourcedict
        return

    def __enter__(self):
        httpretty.enable()
        # Suppress noisy low-level logging from httpretty itself
        logging.getLogger("httpretty.core").setLevel(logging.WARNING)
        # Register a GET and a HEAD response for each dictionary entry
        for resource_ref in self._dict.keys():
            resource_uri = urljoin(self._baseuri, resource_ref)
            content_type = HttpContentType(resource_ref)
            log.debug("MockHttpDictResources: registering: %s"%resource_uri)
            httpretty.register_uri(
                httpretty.GET, resource_uri, status=200, content_type=content_type,
                body=self._dict[resource_ref])
            httpretty.register_uri(
                httpretty.HEAD, resource_uri, status=200, content_type=content_type)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        httpretty.disable()
        # Never suppress exceptions raised in the with-body
        return False
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/miscutils/MockHttpResources.py
|
MockHttpResources.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, Graham Klyne, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
from os.path import join, isdir, normpath
import os
# Scan files matching pattern in a directory tree
#
# Exceptions are left to the calling program.
#
# srcdir directory to search, maybe including subdirectories
# pattern a compiled regex pattern, for filename selection
# FileFunc a function to be called for each selected filename
# as FileFunc( dir, name ). (NOTE: this can be an
# object method with access to the instance data of
# the object to which it belongs.)
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
def ScanFilesEx(srcdir, pattern, FileFunc, recursive=True):
    """
    Scan all files in a directory or directory tree matching a given pattern.

    srcdir      directory to search, maybe including subdirectories
    pattern     a compiled regex pattern, for filename selection
    FileFunc    a function to be called for each selected filename
                as FileFunc( dir, name ).  (NOTE: this can be an
                object method with access to the instance data of
                the object to which it belongs.)
    recursive   is True if directories are to be scanned recursively,
                otherwise only the named directory is scanned.

    Exceptions are thrown back to the calling program.
    """
    names = os.listdir(srcdir)
    for name in names:
        srcname = join(srcdir, name)
        if isdir(srcname):
            if recursive:
                # Fix: propagate the 'recursive' flag explicitly (it was
                # previously dropped, silently relying on the default).
                ScanFilesEx(srcname, pattern, FileFunc, recursive)
        elif pattern.match(name):
            FileFunc(srcdir, name)

# Scan files matching pattern in a directory tree
#
# This is just like 'ScanFilesEx' above, except that an error
# is reported if an I/O exception occurs.
#
def ScanFiles(srcdir, pattern, FileFunc, recursive=True):
    """
    Scan matching files as ScanFilesEx, but log a warning instead of
    propagating I/O errors.  Parameters are as for ScanFilesEx.
    """
    try:
        ScanFilesEx(srcdir, pattern, FileFunc, recursive)
    except (IOError, os.error) as why:
        # Fix: this module defines no 'log' name, so the previous
        # 'log.warning(...)' raised NameError whenever an I/O error
        # occurred.  Obtain a logger locally instead.
        import logging
        logging.getLogger(__name__).warning(
            "Can't scan %s: %s" % (repr(srcdir), str(why)))
    return
# Collect files matching pattern in a directory tree
#
# srcdir directory to search, maybe including subdirectories
# pattern a compiled regex pattern, for filename selection
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
# Returns a list of pairs of the form (directory,filename)
#
def CollectFiles(srcdir, pattern, recursive=True):
    """
    Return a list of (dir,name) pairs for matching files in a directory tree.

    srcdir      directory to search, maybe including subdirectories
    pattern     a compiled regex pattern, for filename selection
    recursive   is True if directories are to be scanned recursively,
                otherwise only the named directory is scanned.

    NOTE(review): results are accumulated through the module-level global
    'collection' (via the Collect callback), so this function is not
    re-entrant or thread-safe.
    """
    global collection
    collection = []
    ScanFilesEx(srcdir, pattern, Collect, recursive)
    return collection
def Collect(fdir,fnam):
    # Callback used by CollectFiles: appends one (dir,name) pair to the
    # shared module-level 'collection' list.
    global collection
    collection.append( (fdir,fnam) )
# Helper functions to read the contents of a file into a string
def joinDirName(fdir, fnam):
    """
    Return a normalized path name obtained by combining a named directory
    with a file name.  The first argument is presumed to name a directory,
    even when its trailing directory indicator is omitted.
    """
    combined = join(fdir, fnam)
    return normpath(combined)
def readDirNameFile(fdir, fnam):
    """
    Read a file from a specified directory, and return its content as a string.
    """
    fullname = joinDirName(fdir, fnam)
    return readFile(fullname)
def readFile(nam):
    """
    Read a file and return its content as a string.

    nam     is the name (path) of the file to read.
    """
    # Fix: use a 'with' block so the file handle is closed even if
    # read() raises (previously the handle leaked on error).
    with open(nam, "r") as f:
        return f.read()
# Test case / demo: when run as a script, list all Python source files
# found under the current directory.
if __name__ == "__main__":
    import re
    pattern = re.compile( r'^.+\.py$' )
    c = CollectFiles(".", pattern)
    for (d,f) in c:
        # NOTE(review): joins with a literal backslash (Windows-style
        # separator) regardless of platform — confirm intended.
        print(d+"\\"+f)

# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/miscutils/ScanFiles.py
|
ScanFiles.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re # Used for link header parsing
import httplib2
import rdflib
from utils.py3porting import urljoin, urlsplit
RDF_CONTENT_TYPES = (
{ "application/rdf+xml": "xml"
, "text/turtle": "n3"
, "text/n3": "n3"
, "text/nt": "nt"
, "application/json": "jsonld"
, "application/xhtml": "rdfa"
})
ACCEPT_RDF_CONTENT_TYPES = "application/rdf+xml, text/turtle"
def splitValues(txt, sep=",", lq='"<', rq='">'):
    """
    Helper function returns list of delimited values in a string,
    where delimiters in quotes are protected.

    sep     is string of separator
    lq      is string of opening quotes for strings within which separators are not recognized
    rq      is string of corresponding closing quotes
    """
    segments = []
    pos      = 0
    start    = 0
    while pos < len(txt):
        ch = txt[pos]
        if ch in lq:
            # Skip over a quoted or bracketed span; separators inside are ignored
            closech = rq[lq.index(ch)]      # matching closing quote/bracket
            pos += 1
            while pos < len(txt) and txt[pos] != closech:
                if txt[pos] == '\\':
                    pos += 1                # skip '\' quoted-pair
                pos += 1
            if pos < len(txt):
                pos += 1                    # step over closing quote/bracket
        elif ch in sep:
            # Separator found: emit the segment accumulated so far
            segments.append(txt[start:pos])
            pos += 1
            start = pos
        else:
            pos += 1
    # Append final segment
    segments.append(txt[start:pos])
    return segments
def testSplitValues():
    # Self-tests for splitValues: plain, quoted, escaped-quote, bracketed,
    # and custom-separator cases.  Raises AssertionError on failure.
    assert splitValues("a,b,c") == ['a','b','c']
    assert splitValues('a,"b,c",d') == ['a','"b,c"','d']
    assert splitValues('a, "b, c\\", c1", d') == ['a',' "b, c\\", c1"',' d']
    assert splitValues('a,"b,c",d', ";") == ['a,"b,c",d']
    assert splitValues('a;"b;c";d', ";") == ['a','"b;c"','d']
    assert splitValues('a;<b;c>;d', ";") == ['a','<b;c>','d']
    assert splitValues('"a;b";(c;d);e', ";", lq='"(', rq='")') == ['"a;b"','(c;d)','e']
def parseLinks(headerlist):
    """
    Helper function to parse 'link:' headers,
    returning a dictionary of links keyed by link relation type

    headerlist  is a list of header (name,value) pairs
    """
    linkheaders = [ value for (name, value) in headerlist if name.lower() == "link" ]
    log.debug("parseLinks linkheaders %s"%(repr(linkheaders)))
    links = {}
    for linkheader in linkheaders:
        # A single header value may carry several comma-separated links
        for link_text in splitValues(linkheader, ","):
            link_fields = splitValues(link_text, ";")
            uri_match   = re.match(r'''\s*<([^>]*)>\s*''', link_fields[0])
            if not uri_match:
                continue
            link_uri = uri_match.group(1)
            # Scan remaining ';'-separated parameters for a rel= value
            for param_text in link_fields[1:]:
                rel_match = re.match(r'''\s*rel\s*=\s*"?(.*?)"?\s*$''', param_text)  # .*? is non-greedy
                if rel_match:
                    link_rel = rel_match.group(1)
                    log.debug("parseLinks links[%s] = %s"%(link_rel, link_uri))
                    links[link_rel] = link_uri
    return links
def testParseLinks():
    # Self-tests for parseLinks: multiple links per header, whitespace
    # variations, quoted rel values, and rel-less links (which are
    # ignored).  Raises AssertionError on failure.
    links = (
        ('Link', '<http://example.org/foo>; rel=foo'),
        ('Link', ' <http://example.org/bar> ; rel = bar '),
        ('Link', '<http://example.org/bas>; rel=bas; par = zzz , <http://example.org/bat>; rel = bat'),
        ('Link', ' <http://example.org/fie> ; par = fie '),
        ('Link', ' <http://example.org/fum> ; rel = "http://example.org/rel/fum" '),
        ('Link', ' <http://example.org/fas;far> ; rel = "http://example.org/rel/fas" '),
        )
    assert str(parseLinks(links)['foo']) == 'http://example.org/foo'
    assert str(parseLinks(links)['bar']) == 'http://example.org/bar'
    assert str(parseLinks(links)['bas']) == 'http://example.org/bas'
    assert str(parseLinks(links)['bat']) == 'http://example.org/bat'
    assert str(parseLinks(links)['http://example.org/rel/fum']) == 'http://example.org/fum'
    assert str(parseLinks(links)['http://example.org/rel/fas']) == 'http://example.org/fas;far'
# Class for exceptions raised by HTTP session
class HTTP_Error(Exception):
    """
    Exception class for errors raised during an HTTP session.

    msg     short description of the error
    value   associated value (e.g. offending scheme or host)
    uri     URI of the session/request that failed
    """

    def __init__(self, msg="HTTP_Error", value=None, uri=None):
        self._msg   = msg
        self._value = value
        self._uri   = uri
        return

    def __str__(self):
        parts = [self._msg]
        if self._uri:
            parts.append(" for "+str(self._uri))
        if self._value:
            parts.append(": "+repr(self._value))
        return "".join(parts)

    def __repr__(self):
        return ( "HTTP_Error(%s, value=%s, uri=%s)"%
                 (repr(self._msg), repr(self._value), repr(self._uri)))
# Class for handling Access in an HTTP session
class HTTP_Session(object):
    """
    Client access class for HTTP session.

    Creates a session to access a single HTTP endpoint,
    and provides methods to issue requests on this session

    This class is primarily designed to access a specific endpoint, and
    by default refuses requests for different endpoints.  But the request
    methods accept an additional "exthost" parameter that can be used to
    override this behaviour.  Specifying "exthost=True" causes the request
    to allow URIs that use different scheme, hostname or port than the original
    request, but such requests are not issued using the access key of the HTTP
    session.
    """

    def __init__(self, baseuri, accesskey=None):
        """
        Initialize a session for the given base URI.  If 'accesskey' is
        supplied, it is sent as a bearer token with same-host requests.
        """
        log.debug("HTTP_Session.__init__: baseuri "+baseuri)
        self._baseuri = baseuri
        self._key     = accesskey
        parseduri     = urlsplit(baseuri)
        self._scheme  = parseduri.scheme
        self._host    = parseduri.netloc
        self._path    = parseduri.path
        self._http2   = httplib2.Http()
        return

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
        return

    def close(self):
        # Drop the access key and HTTP connection object
        self._key   = None
        self._http2 = None
        return

    def baseuri(self):
        """Return the base URI of this session."""
        return self._baseuri

    def getpathuri(self, uripath):
        """Resolve a URI reference against the session base URI."""
        # str used here so rdflib.URIRef values can be accepted
        return urljoin(self._baseuri, str(uripath))

    def error(self, msg, value=None):
        """Return an HTTP_Error exception value citing this session's base URI."""
        return HTTP_Error(msg=msg, value=value, uri=self._baseuri)

    def parseLinks(self, headers):
        """
        Parse link header(s), return dictionary of links keyed by link relation type
        """
        return parseLinks(headers["_headerlist"])

    def doRequest(self, uripath,
            method="GET", body=None, ctype=None, accept=None, reqheaders=None, exthost=False):
        """
        Perform HTTP request.

        Parameters:
        uripath     URI reference of resource to access, resolved against the base URI of
                    the current HTTP_Session object.
        method      HTTP method to use (default GET)
        body        request body to use (default none)
        ctype       content-type of request body (default none)
        accept      string containing list of content types for HTTP accept header
        reqheaders  dictionary of additional header fields to send with the HTTP request
        exthost     True if a request to a URI with a scheme and/or host different than
                    the session base URI is to be respected (default False).

        Return:     status, reason(text), response headers, response body

        A local failure (e.g. connection error) is reported as pseudo-status
        900 rather than raising, with the exception text as the reason.

        Note: relies on timeout or object deallocation to close connections:
        see http://stackoverflow.com/questions/16687033/is-this-a-bug-of-httplib2.
        Python's reference counting should ensure Http objects and referenced
        connections are deallocated promptly.
        """
        # Construct request path
        urifull  = self.getpathuri(uripath)
        uriparts = urlsplit(urifull)
        path     = uriparts.path
        if uriparts.query: path += ("?"+uriparts.query)
        # Decide whether the session access key may be sent: same-host only
        if ( (uriparts.scheme and uriparts.scheme != self._scheme) or
             (uriparts.netloc and uriparts.netloc != self._host) ):
            if exthost:
                # External request: do not send this session's access key
                usekey = None
            elif (uriparts.scheme and uriparts.scheme != self._scheme):
                raise HTTP_Error(
                    "URI scheme mismatch",
                    value=uriparts.scheme,
                    uri=self._baseuri)
            elif (uriparts.netloc and uriparts.netloc != self._host):
                raise HTTP_Error(
                    "URI host:port mismatch",
                    value=uriparts.netloc,
                    uri=self._baseuri)
        else:
            usekey = self._key
        # Assemble request headers
        if not reqheaders:
            reqheaders = {}
        if usekey:
            reqheaders["authorization"] = "Bearer "+usekey
        if ctype:
            reqheaders["content-type"] = ctype
        if accept:
            reqheaders["accept"] = accept
        # Execute request
        log.debug("HTTP_Session.doRequest method: "+method)
        log.debug("HTTP_Session.doRequest path: "+path)
        log.debug("HTTP_Session.doRequest reqheaders: "+repr(reqheaders))
        log.debug("HTTP_Session.doRequest body: "+repr(body))
        (resp, data) = self._http2.request(urifull,
            method=method, body=body, headers=reqheaders)
        # Pick out elements of response
        try:
            status     = resp.status
            reason     = resp.reason
            headerlist = [ (hn.lower(),resp[hn]) for hn in resp ]
            headers    = dict(headerlist)   # dict(...) keeps last result of multiple keys
            headers["_headerlist"] = headerlist
            log.debug("HTTP_Session.doRequest response: "+str(status)+" "+reason)
            log.debug("HTTP_Session.doRequest rspheaders: "+repr(headers))
        except Exception as e:
            # Local failure: synthesize a 9xx pseudo-status.
            # (Fix: use log.warning — log.warn is a deprecated alias.)
            log.warning("HTTP_Session error %r accessing %s with request headers %r"%(e, uripath, reqheaders))
            status  = 900
            reason  = str(e)
            headers = {"_headerlist": []}
            data    = None
        ###log.debug("HTTP_Session.doRequest data: "+repr(data))
        return (status, reason, headers, data)

    def doRequestFollowRedirect(self, uripath,
            method="GET", body=None, ctype=None, accept=None, reqheaders=None, exthost=False):
        """
        Perform HTTP request, following any redirect returned.

        Parameters:
        uripath     URI reference of resource to access, resolved against the base URI of
                    the current HTTP_Session object.
        method      HTTP method to use (default GET)
        body        request body to use (default none)
        ctype       content-type of request body (default none)
        accept      string containing list of content types for HTTP accept header
        reqheaders  dictionary of additional header fields to send with the HTTP request
        exthost     True if a request to a URI with a scheme and/or host different than
                    the session base URI is to be respected (default False).

        Return:     status, reason(text), response headers, final URI (or None), response body
        """
        (status, reason, headers, data) = self.doRequest(uripath,
            method=method, accept=accept,
            body=body, ctype=ctype, reqheaders=reqheaders,
            exthost=exthost)
        # Fix: use .get() — 'content-location' is absent from the headers
        # synthesized on local failure (status 900), where direct indexing
        # previously raised KeyError.
        return (status, reason, headers, headers.get('content-location'), data)

    def doRequestRDFFollowRedirect(self, uripath,
            method="GET", body=None, ctype=None, reqheaders=None, exthost=False, graph=None):
        """
        Perform HTTP request with RDF response, following any redirect returned
        If the request succeeds, return response as an RDF graph,
        or return fake 9xx status if RDF cannot be parsed.
        Otherwise return response and content per request.
        Thus, only 2xx responses include RDF data.

        Parameters:
        uripath     URI reference of resource to access, resolved against the base URI of
                    the current HTTP_Session object.
        method      HTTP method to use (default GET)
        body        request body to use (default none)
        ctype       content-type of request body (default none)
        reqheaders  dictionary of additional header fields to send with the HTTP request
        exthost     True if a request to a URI with a scheme and/or host different than
                    the session base URI is to be respected (default False).
        graph       an rdflib.Graph object to which any RDF read is added.  If not
                    provided, a new RDF graph is created and returned.

        Return:     status, reason(text), response headers, final URI, response graph or body
        """
        (status, reason, headers, finaluri, data) = self.doRequestFollowRedirect(uripath,
            method=method, body=body,
            ctype=ctype, accept=ACCEPT_RDF_CONTENT_TYPES, reqheaders=reqheaders,
            exthost=exthost)
        if status >= 200 and status < 300:
            # Strip any content-type parameters (e.g. charset) before lookup
            content_type = headers["content-type"].split(";",1)[0].strip().lower()
            if content_type in RDF_CONTENT_TYPES:
                rdfgraph   = graph if graph != None else rdflib.graph.Graph()
                baseuri    = self.getpathuri(uripath)
                bodyformat = RDF_CONTENT_TYPES[content_type]
                # log.debug("HTTP_Session.doRequestRDF data:\n----\n"+data+"\n------------")
                try:
                    # rdfgraph.parse(data=data, location=baseuri, format=bodyformat)
                    rdfgraph.parse(data=data, publicID=baseuri, format=bodyformat)
                    data = rdfgraph
                except Exception as e:
                    log.info("HTTP_Session.doRequestRDF: %s"%(e))
                    log.info("HTTP_Session.doRequestRDF parse failure: '%s', '%s'"%(content_type, bodyformat))
                    # log.debug("HTTP_Session.doRequestRDF data:\n----\n"+data[:200]+"\n------------")
                    status = 902
                    reason = "RDF (%s) parse failure"%bodyformat
            else:
                status = 901
                reason = "Non-RDF content-type returned"
        return (status, reason, headers, finaluri, data)

    def doRequestRDF(self, uripath,
            method="GET", body=None, ctype=None, reqheaders=None, exthost=False, graph=None):
        """
        Perform HTTP request with RDF response.
        If the request succeeds, return response as RDF graph,
        or return fake 9xx status if RDF cannot be parsed.
        Otherwise return response and content per request.
        Thus, only 2xx responses include RDF data.

        Parameters:
        uripath     URI reference of resource to access, resolved against the base URI of
                    the current HTTP_Session object.
        method      HTTP method to use (default GET)
        body        request body to use (default none)
        ctype       content-type of request body (default none)
        reqheaders  dictionary of additional header fields to send with the HTTP request
        exthost     True if a request to a URI with a scheme and/or host different than
                    the session base URI is to be respected (default False).
        graph       an rdflib.Graph object to which any RDF read is added.  If not
                    provided, a new RDF graph is created and returned.

        Return:     status, reason(text), response headers, response graph or body
        """
        # Fix: previously this call passed an unsupported 'accept=' keyword to
        # doRequestRDFFollowRedirect (raising TypeError on every call) and
        # silently dropped the caller-supplied 'graph'.  The RDF accept header
        # is added by doRequestRDFFollowRedirect itself.
        (status, reason, headers, finaluri, data) = self.doRequestRDFFollowRedirect(uripath,
            method=method, body=body, ctype=ctype, reqheaders=reqheaders,
            exthost=exthost, graph=graph)
        return (status, reason, headers, data)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/miscutils/HttpSessionRDF.py
|
HttpSessionRDF.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
# Collection of (mime-type, extensions) pairs, where 'extensions' is a tuple
# of filename extensions (without the leading '.') conventionally associated
# with the given MIME content type.
#
# NOTE: the enclosing { ... } braces make this a Python *set* of tuples
# (wrapped in redundant parentheses); iteration order is unspecified, and
# lookup by type or extension requires scanning the collection.
# NOTE(review): some extensions appear under more than one type
# (e.g. "wmz" under both application/x-ms-wmz and application/x-msmetafile);
# confirm which mapping should win where a unique reverse lookup is needed.
FileMimeTypes = (
    { ("application/andrew-inset", ("ez",))
    , ("application/applixware", ("aw",))
    , ("application/atom+xml", ("atom",))
    , ("application/atomcat+xml", ("atomcat",))
    , ("application/atomsvc+xml", ("atomsvc",))
    , ("application/ccxml+xml", ("ccxml",))
    , ("application/cdmi-capability", ("cdmia",))
    , ("application/cdmi-container", ("cdmic",))
    , ("application/cdmi-domain", ("cdmid",))
    , ("application/cdmi-object", ("cdmio",))
    , ("application/cdmi-queue", ("cdmiq",))
    , ("application/cu-seeme", ("cu",))
    , ("application/davmount+xml", ("davmount",))
    , ("application/docbook+xml", ("dbk",))
    , ("application/dssc+der", ("dssc",))
    , ("application/dssc+xml", ("xdssc",))
    , ("application/ecmascript", ("ecma",))
    , ("application/emma+xml", ("emma",))
    , ("application/epub+zip", ("epub",))
    , ("application/exi", ("exi",))
    , ("application/font-tdpfr", ("pfr",))
    , ("application/gml+xml", ("gml",))
    , ("application/gpx+xml", ("gpx",))
    , ("application/gxf", ("gxf",))
    , ("application/hyperstudio", ("stk",))
    , ("application/inkml+xml", ("ink","inkml",))
    , ("application/ipfix", ("ipfix",))
    , ("application/java-archive", ("jar",))
    , ("application/java-serialized-object", ("ser",))
    , ("application/java-vm", ("class",))
    , ("application/javascript", ("js",))
    , ("application/json", ("json",))
    , ("application/jsonml+json", ("jsonml",))
    , ("application/lost+xml", ("lostxml",))
    , ("application/mac-binhex40", ("hqx",))
    , ("application/mac-compactpro", ("cpt",))
    , ("application/mads+xml", ("mads",))
    , ("application/marc", ("mrc",))
    , ("application/marcxml+xml", ("mrcx",))
    , ("application/mathematica", ("ma","nb","mb",))
    , ("application/mathml+xml", ("mathml",))
    , ("application/mbox", ("mbox",))
    , ("application/mediaservercontrol+xml", ("mscml",))
    , ("application/metalink+xml", ("metalink",))
    , ("application/metalink4+xml", ("meta4",))
    , ("application/mets+xml", ("mets",))
    , ("application/mods+xml", ("mods",))
    , ("application/mp21", ("m21","mp21",))
    , ("application/mp4", ("mp4s",))
    , ("application/msword", ("doc","dot",))
    , ("application/mxf", ("mxf",))
    , ("application/octet-stream", ("bin","dms","lrf","mar","so","dist","distz","pkg","bpk","dump","elc","deploy",))
    , ("application/oda", ("oda",))
    , ("application/oebps-package+xml", ("opf",))
    , ("application/ogg", ("ogx",))
    , ("application/omdoc+xml", ("omdoc",))
    , ("application/onenote", ("onetoc","onetoc2","onetmp","onepkg",))
    , ("application/oxps", ("oxps",))
    , ("application/patch-ops-error+xml", ("xer",))
    , ("application/pdf", ("pdf",))
    , ("application/pgp-encrypted", ("pgp",))
    , ("application/pgp-signature", ("asc","sig",))
    , ("application/pics-rules", ("prf",))
    , ("application/pkcs10", ("p10",))
    , ("application/pkcs7-mime", ("p7m","p7c",))
    , ("application/pkcs7-signature", ("p7s",))
    , ("application/pkcs8", ("p8",))
    , ("application/pkix-attr-cert", ("ac",))
    , ("application/pkix-cert", ("cer",))
    , ("application/pkix-crl", ("crl",))
    , ("application/pkix-pkipath", ("pkipath",))
    , ("application/pkixcmp", ("pki",))
    , ("application/pls+xml", ("pls",))
    , ("application/postscript", ("ai","eps","ps",))
    , ("application/prs.cww", ("cww",))
    , ("application/pskc+xml", ("pskcxml",))
    , ("application/rdf+xml", ("rdf",))
    , ("application/reginfo+xml", ("rif",))
    , ("application/relax-ng-compact-syntax", ("rnc",))
    , ("application/resource-lists+xml", ("rl",))
    , ("application/resource-lists-diff+xml", ("rld",))
    , ("application/rls-services+xml", ("rs",))
    , ("application/rpki-ghostbusters", ("gbr",))
    , ("application/rpki-manifest", ("mft",))
    , ("application/rpki-roa", ("roa",))
    , ("application/rsd+xml", ("rsd",))
    , ("application/rss+xml", ("rss",))
    , ("application/rtf", ("rtf",))
    , ("application/sbml+xml", ("sbml",))
    , ("application/scvp-cv-request", ("scq",))
    , ("application/scvp-cv-response", ("scs",))
    , ("application/scvp-vp-request", ("spq",))
    , ("application/scvp-vp-response", ("spp",))
    , ("application/sdp", ("sdp",))
    , ("application/set-payment-initiation", ("setpay",))
    , ("application/set-registration-initiation", ("setreg",))
    , ("application/shf+xml", ("shf",))
    , ("application/smil+xml", ("smi","smil",))
    , ("application/sparql-query", ("rq",))
    , ("application/sparql-results+xml", ("srx",))
    , ("application/srgs", ("gram",))
    , ("application/srgs+xml", ("grxml",))
    , ("application/sru+xml", ("sru",))
    , ("application/ssdl+xml", ("ssdl",))
    , ("application/ssml+xml", ("ssml",))
    , ("application/tei+xml", ("tei","teicorpus",))
    , ("application/thraud+xml", ("tfi",))
    , ("application/timestamped-data", ("tsd",))
    , ("application/voicexml+xml", ("vxml",))
    , ("application/widget", ("wgt",))
    , ("application/winhlp", ("hlp",))
    , ("application/wsdl+xml", ("wsdl",))
    , ("application/wspolicy+xml", ("wspolicy",))
    , ("application/x-7z-compressed", ("7z",))
    , ("application/x-abiword", ("abw",))
    , ("application/x-ace-compressed", ("ace",))
    , ("application/x-apple-diskimage", ("dmg",))
    , ("application/x-authorware-bin", ("aab","x32","u32","vox",))
    , ("application/x-authorware-map", ("aam",))
    , ("application/x-authorware-seg", ("aas",))
    , ("application/x-bcpio", ("bcpio",))
    , ("application/x-bittorrent", ("torrent",))
    , ("application/x-blorb", ("blb","blorb",))
    , ("application/x-bzip", ("bz",))
    , ("application/x-bzip2", ("bz2","boz",))
    , ("application/x-cbr", ("cbr","cba","cbt","cbz","cb7",))
    , ("application/x-cdlink", ("vcd",))
    , ("application/x-cfs-compressed", ("cfs",))
    , ("application/x-chat", ("chat",))
    , ("application/x-chess-pgn", ("pgn",))
    , ("application/x-conference", ("nsc",))
    , ("application/x-cpio", ("cpio",))
    , ("application/x-csh", ("csh",))
    , ("application/x-debian-package", ("deb","udeb",))
    , ("application/x-dgc-compressed", ("dgc",))
    , ("application/x-director", ("dir","dcr","dxr","cst","cct","cxt","w3d","fgd","swa",))
    , ("application/x-doom", ("wad",))
    , ("application/x-dtbncx+xml", ("ncx",))
    , ("application/x-dtbook+xml", ("dtb",))
    , ("application/x-dtbresource+xml", ("res",))
    , ("application/x-dvi", ("dvi",))
    , ("application/x-envoy", ("evy",))
    , ("application/x-eva", ("eva",))
    , ("application/x-font-bdf", ("bdf",))
    , ("application/x-font-ghostscript", ("gsf",))
    , ("application/x-font-linux-psf", ("psf",))
    , ("application/x-font-otf", ("otf",))
    , ("application/x-font-pcf", ("pcf",))
    , ("application/x-font-snf", ("snf",))
    , ("application/x-font-ttf", ("ttf","ttc",))
    , ("application/x-font-type1", ("pfa","pfb","pfm","afm",))
    , ("application/font-woff", ("woff",))
    , ("application/x-freearc", ("arc",))
    , ("application/x-futuresplash", ("spl",))
    , ("application/x-gca-compressed", ("gca",))
    , ("application/x-glulx", ("ulx",))
    , ("application/x-gnumeric", ("gnumeric",))
    , ("application/x-gramps-xml", ("gramps",))
    , ("application/x-gtar", ("gtar",))
    , ("application/x-hdf", ("hdf",))
    , ("application/x-install-instructions", ("install",))
    , ("application/x-iso9660-image", ("iso",))
    , ("application/x-java-jnlp-file", ("jnlp",))
    , ("application/x-latex", ("latex",))
    , ("application/x-lzh-compressed", ("lzh","lha",))
    , ("application/x-mie", ("mie",))
    , ("application/x-mobipocket-ebook", ("prc","mobi",))
    , ("application/x-ms-application", ("application",))
    , ("application/x-ms-shortcut", ("lnk",))
    , ("application/x-ms-wmd", ("wmd",))
    , ("application/x-ms-wmz", ("wmz",))
    , ("application/x-ms-xbap", ("xbap",))
    , ("application/x-msaccess", ("mdb",))
    , ("application/x-msbinder", ("obd",))
    , ("application/x-mscardfile", ("crd",))
    , ("application/x-msclip", ("clp",))
    , ("application/x-msdownload", ("exe","dll","com","bat","msi",))
    , ("application/x-msmediaview", ("mvb","m13","m14",))
    , ("application/x-msmetafile", ("wmf","wmz","emf","emz",))
    , ("application/x-msmoney", ("mny",))
    , ("application/x-mspublisher", ("pub",))
    , ("application/x-msschedule", ("scd",))
    , ("application/x-msterminal", ("trm",))
    , ("application/x-mswrite", ("wri",))
    , ("application/x-netcdf", ("nc","cdf",))
    , ("application/x-nzb", ("nzb",))
    , ("application/x-pkcs12", ("p12","pfx",))
    , ("application/x-pkcs7-certificates", ("p7b","spc",))
    , ("application/x-pkcs7-certreqresp", ("p7r",))
    , ("application/x-rar-compressed", ("rar",))
    , ("application/x-research-info-systems", ("ris",))
    , ("application/x-sh", ("sh",))
    , ("application/x-shar", ("shar",))
    , ("application/x-shockwave-flash", ("swf",))
    , ("application/x-silverlight-app", ("xap",))
    , ("application/x-sql", ("sql",))
    , ("application/x-stuffit", ("sit",))
    , ("application/x-stuffitx", ("sitx",))
    , ("application/x-subrip", ("srt",))
    , ("application/x-sv4cpio", ("sv4cpio",))
    , ("application/x-sv4crc", ("sv4crc",))
    , ("application/x-t3vm-image", ("t3",))
    , ("application/x-tads", ("gam",))
    , ("application/x-tar", ("tar",))
    , ("application/x-tcl", ("tcl",))
    , ("application/x-tex", ("tex",))
    , ("application/x-tex-tfm", ("tfm",))
    , ("application/x-texinfo", ("texinfo","texi",))
    , ("application/x-tgif", ("obj",))
    , ("application/x-ustar", ("ustar",))
    , ("application/x-wais-source", ("src",))
    , ("application/x-x509-ca-cert", ("der","crt",))
    , ("application/x-xfig", ("fig",))
    , ("application/x-xliff+xml", ("xlf",))
    , ("application/x-xpinstall", ("xpi",))
    , ("application/x-xz", ("xz",))
    , ("application/x-zmachine", ("z1","z2","z3","z4","z5","z6","z7","z8",))
    , ("application/xaml+xml", ("xaml",))
    , ("application/xcap-diff+xml", ("xdf",))
    , ("application/xenc+xml", ("xenc",))
    , ("application/xhtml+xml", ("xhtml","xht",))
    , ("application/xml", ("xml","xsl",))
    , ("application/xml-dtd", ("dtd",))
    , ("application/xop+xml", ("xop",))
    , ("application/xproc+xml", ("xpl",))
    , ("application/xslt+xml", ("xslt",))
    , ("application/xspf+xml", ("xspf",))
    , ("application/xv+xml", ("mxml","xhvml","xvml","xvm",))
    , ("application/yang", ("yang",))
    , ("application/yin+xml", ("yin",))
    , ("application/zip", ("zip",))
    , ("audio/adpcm", ("adp",))
    , ("audio/basic", ("au","snd",))
    , ("audio/midi", ("mid","midi","kar","rmi",))
    , ("audio/mp4", ("mp4a",))
    , ("audio/mpeg", ("mpga","mp2","mp2a","mp3","m2a","m3a",))
    , ("audio/ogg", ("oga","ogg","spx",))
    , ("audio/s3m", ("s3m",))
    , ("audio/silk", ("sil",))
    , ("audio/vnd.dece.audio", ("uva","uvva",))
    , ("audio/vnd.digital-winds", ("eol",))
    , ("audio/vnd.dra", ("dra",))
    , ("audio/vnd.dts", ("dts",))
    , ("audio/vnd.dts.hd", ("dtshd",))
    , ("audio/vnd.lucent.voice", ("lvp",))
    , ("audio/vnd.ms-playready.media.pya", ("pya",))
    , ("audio/vnd.nuera.ecelp4800", ("ecelp4800",))
    , ("audio/vnd.nuera.ecelp7470", ("ecelp7470",))
    , ("audio/vnd.nuera.ecelp9600", ("ecelp9600",))
    , ("audio/vnd.rip", ("rip",))
    , ("audio/webm", ("weba",))
    , ("audio/x-aac", ("aac",))
    , ("audio/x-aiff", ("aif","aiff","aifc",))
    , ("audio/x-caf", ("caf",))
    , ("audio/x-flac", ("flac",))
    , ("audio/x-matroska", ("mka",))
    , ("audio/x-mpegurl", ("m3u",))
    , ("audio/x-ms-wax", ("wax",))
    , ("audio/x-ms-wma", ("wma",))
    , ("audio/x-pn-realaudio", ("ram","ra",))
    , ("audio/x-pn-realaudio-plugin", ("rmp",))
    , ("audio/x-wav", ("wav",))
    , ("audio/xm", ("xm",))
    , ("chemical/x-cdx", ("cdx",))
    , ("chemical/x-cif", ("cif",))
    , ("chemical/x-cmdf", ("cmdf",))
    , ("chemical/x-cml", ("cml",))
    , ("chemical/x-csml", ("csml",))
    , ("chemical/x-xyz", ("xyz",))
    , ("image/bmp", ("bmp",))
    , ("image/cgm", ("cgm",))
    , ("image/g3fax", ("g3",))
    , ("image/gif", ("gif",))
    , ("image/ief", ("ief",))
    , ("image/jpeg", ("jpeg","jpg","jpe",))
    , ("image/ktx", ("ktx",))
    , ("image/png", ("png",))
    , ("image/prs.btif", ("btif",))
    , ("image/sgi", ("sgi",))
    , ("image/svg+xml", ("svg","svgz",))
    , ("image/tiff", ("tiff","tif",))
    , ("image/webp", ("webp",))
    , ("image/x-3ds", ("3ds",))
    , ("image/x-cmu-raster", ("ras",))
    , ("image/x-cmx", ("cmx",))
    , ("image/x-freehand", ("fh","fhc","fh4","fh5","fh7",))
    , ("image/x-icon", ("ico",))
    , ("image/x-mrsid-image", ("sid",))
    , ("image/x-pcx", ("pcx",))
    , ("image/x-pict", ("pic","pct",))
    , ("image/x-portable-anymap", ("pnm",))
    , ("image/x-portable-bitmap", ("pbm",))
    , ("image/x-portable-graymap", ("pgm",))
    , ("image/x-portable-pixmap", ("ppm",))
    , ("image/x-rgb", ("rgb",))
    , ("image/x-tga", ("tga",))
    , ("image/x-xbitmap", ("xbm",))
    , ("image/x-xpixmap", ("xpm",))
    , ("image/x-xwindowdump", ("xwd",))
    , ("message/rfc822", ("eml","mime",))
    , ("model/iges", ("igs","iges",))
    , ("model/mesh", ("msh","mesh","silo",))
    , ("model/vrml", ("wrl","vrml",))
    , ("model/x3d+binary", ("x3db","x3dbz",))
    , ("model/x3d+vrml", ("x3dv","x3dvz",))
    , ("model/x3d+xml", ("x3d","x3dz",))
    , ("text/cache-manifest", ("appcache",))
    , ("text/calendar", ("ics","ifb",))
    , ("text/css", ("css",))
    , ("text/csv", ("csv",))
    , ("text/html", ("html","htm",))
    , ("text/n3", ("n3",))
    , ("text/plain", ("txt","text","conf","def","list","log","in",))
    , ("text/prs.lines.tag", ("dsc",))
    , ("text/richtext", ("rtx",))
    , ("text/sgml", ("sgml","sgm",))
    , ("text/tab-separated-values", ("tsv",))
    , ("text/troff", ("t","tr","roff","man","me","ms",))
    , ("text/turtle", ("ttl",))
    , ("text/uri-list", ("uri","uris","urls",))
    , ("text/vcard", ("vcard",))
    , ("text/x-asm", ("s","asm",))
    , ("text/x-c", ("c","cc","cxx","cpp","h","hh","dic",))
    , ("text/x-fortran", ("f","for","f77","f90",))
    , ("text/x-java-source", ("java",))
    , ("text/x-opml", ("opml",))
    , ("text/x-pascal", ("p","pas",))
    , ("text/x-nfo", ("nfo",))
    , ("text/x-setext", ("etx",))
    , ("text/x-sfv", ("sfv",))
    , ("text/x-uuencode", ("uu",))
    , ("text/x-vcalendar", ("vcs",))
    , ("text/x-vcard", ("vcf",))
    , ("video/3gpp", ("3gp",))
    , ("video/3gpp2", ("3g2",))
    , ("video/h261", ("h261",))
    , ("video/h263", ("h263",))
    , ("video/h264", ("h264",))
    , ("video/jpeg", ("jpgv",))
    , ("video/jpm", ("jpm","jpgm",))
    , ("video/mj2", ("mj2","mjp2",))
    , ("video/mp4", ("mp4","mp4v","mpg4",))
    , ("video/mpeg", ("mpeg","mpg","mpe","m1v","m2v",))
    , ("video/ogg", ("ogv",))
    , ("video/quicktime", ("qt","mov",))
    , ("video/webm", ("webm",))
    , ("video/x-f4v", ("f4v",))
    , ("video/x-fli", ("fli",))
    , ("video/x-flv", ("flv",))
    , ("video/x-m4v", ("m4v",))
    , ("video/x-matroska", ("mkv","mk3d","mks",))
    , ("video/x-mng", ("mng",))
    , ("video/x-ms-asf", ("asf","asx",))
    , ("video/x-ms-vob", ("vob",))
    , ("video/x-ms-wm", ("wm",))
    , ("video/x-ms-wmv", ("wmv",))
    , ("video/x-ms-wmx", ("wmx",))
    , ("video/x-ms-wvx", ("wvx",))
    , ("video/x-msvideo", ("avi",))
    , ("video/x-sgi-movie", ("movie",))
    , ("video/x-smv", ("smv",))
    , ("x-conference/x-cooltalk", ("ice",))
    })
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/miscutils/FileMimeTypes.py
|
FileMimeTypes.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import sys
import os
import os.path
import re
from utils.py3porting import (
    urljoin, urlsplit, urlunsplit, SplitResult,
    pathname2url, url2pathname, urlopen, Request, HTTPConnection
    )
# URI scheme prefix used to recognize and construct file: URIs
fileuribase = "file://"

def isFileUri(uri):
    """
    Return True if the supplied URI string begins with the file: scheme prefix.
    """
    return uri[:len(fileuribase)] == fileuribase
def resolveUri(uriref, base, path=""):
    """
    Resolve a URI reference against a supplied base URI and path (supplied as strings).
    (The path is a local file system path, and may need converting to use URI conventions)
    """
    upath = pathname2url(path)
    # A directory path must end in '/' so that relative resolution
    # does not discard its final segment
    if not upath.endswith('/') and os.path.isdir(path):
        upath += '/'
    pathbase = urljoin(base, upath)
    return urljoin(pathbase, uriref)
def resolveFileAsUri(path):
    """
    Resolve a filename reference against the current working directory, and return the
    corresponding file:// URI.

    If the supplied string is already a URI, it is returned unchanged
    (for idempotency and non-file URIs)
    """
    if urlsplit(path).scheme != "":
        # Already a URI: return as-is
        return path
    return resolveUri("", fileuribase, os.path.abspath(path))
def getFilenameFromUri(uri):
    """
    Convert a file:// URI into a local file system reference
    """
    parts = urlsplit(uri)
    assert parts.scheme == "file", "RO %s is not in local file system"%uri
    # Rebuild the URI with scheme and authority removed, keeping
    # path, query and fragment, then map it to a local pathname
    stripped = urlunsplit(("", "", parts.path, parts.query, parts.fragment))
    return url2pathname(stripped)
def isLiveUri(uriref):
    """
    Test URI reference to see if it refers to an accessible resource.

    Relative URI references are assumed to be local file system references,
    relative to the current working directory.

    Returns True if the resource exists (file: URIs) or responds to a
    HEAD request with a 2xx status (other URIs), otherwise False.
    """
    islive  = False
    fileuri = resolveFileAsUri(uriref)
    if isFileUri(fileuri):
        islive = os.path.exists(getFilenameFromUri(fileuri))
    else:
        parseduri = urlsplit(uriref)
        host      = parseduri.netloc
        path      = parseduri.path
        if parseduri.query: path += "?"+parseduri.query
        httpcon   = HTTPConnection(host, timeout=5)
        # Extra request headers
        # ... none for now
        # Execute request; treat any failure as "not live" (status 900)
        try:
            httpcon.request("HEAD", path)
            response = httpcon.getresponse()
            status   = response.status
        except Exception:
            # Was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit
            status = 900
        finally:
            # Close the connection explicitly to avoid leaking the socket
            httpcon.close()
        # Pick out elements of response
        islive = (status >= 200) and (status <= 299)
    return islive
def retrieveUri(uriref):
    """
    Retrieve the content of the indicated resource, resolving the reference
    against the current working directory as a file: URI.

    Returns the response body, or None if the resource cannot be retrieved.
    """
    # @@TODO: revise to use httplib2, or delete this method
    uri     = resolveUri(uriref, fileuribase, os.getcwd())
    request = Request(uri)
    try:
        response = urlopen(request)
        try:
            result = response.read()
        finally:
            # Close the response explicitly rather than leaking it
            response.close()
    except Exception:
        # Was a bare 'except:'; best-effort retrieval returns None on failure
        result = None
    return result
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/miscutils/uriutils.py
|
uriutils.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, Graham Klyne, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
from os.path import join, isdir, normpath
import os
import logging
logger = logging.getLogger("ScanDirectories")
#logger.setLevel(logging.INFO)
# Scan the sub-directory structure in a given directory
#
# Exceptions are left to the calling program.
#
# srcdir directory to search, maybe including sub-directories
# DirFunc a function to be called for each selected directory name
# as DirFunc( dir ). (NOTE: this can be an
# object method with access to the instance data of
# the object to which it belongs.)
# FileFunc a function to be called for each selected file name
# as FileFunc( file ). (NOTE: this can be an
# object method with access to the instance data of
# the object to which it belongs.)
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
def ScanDirectoriesEx(srcdir, DirFunc, FileFunc=None, recursive=True):
    """
    Scan all sub-directories in a given source directory.
    Exceptions are thrown back to the calling program.

    DirFunc is called for each directory found; FileFunc (if supplied)
    is called for each plain file found.
    """
    if not srcdir.endswith(os.path.sep):
        srcdir += os.path.sep
    for name in os.listdir(srcdir):
        entry = srcdir + name
        if isdir(entry):
            DirFunc(entry)
            if recursive:
                logger.debug("Adding Directory %s " % (entry))
                ScanDirectoriesEx(entry, DirFunc, FileFunc, recursive)
        elif FileFunc:
            FileFunc(entry)
    return
# Scan the sub-directory structure in a given directory
#
# This is just like 'ScanDirectoriesEx' above, except that an error
# is reported if an I/O exception occurs.
#
# srcdir directory to search, maybe including sub-directories
# DirFunc a function to be called for each selected directory name
# as DirFunc( dir ). (NOTE: this can be an
# object method with access to the instance data of
# the object to which it belongs.)
# listFiles indicates whether files are also to be processed
# (default False)
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
#
def ScanDirectories(srcdir, DirFunc, listFiles=False, recursive=True):
    """
    Scan sub-directories like ScanDirectoriesEx, but report (rather than
    propagate) any I/O error arising during the scan.

    listFiles may be supplied as a per-file callback function; boolean
    values map to "no file callback".  (Previously the raw value was passed
    through as FileFunc, so listFiles=True made ScanDirectoriesEx attempt
    to call True(path), raising TypeError.)
    """
    filefunc = listFiles if callable(listFiles) else None
    try:
        ScanDirectoriesEx(srcdir, DirFunc, filefunc, recursive)
    except (IOError, os.error) as why:
        logger.warning("Can't scan %s: %s" % (repr(srcdir), str(why)))
    return
# Collect directories/sub-directories found under the source directory
#
# srcdir directory to search, maybe including sub-directories
# baseDir a base directory that is removed from all results returned.
# listFiles is True if files are to be included in the listing returned
# recursive is True if directories are to be scanned recursively,
# otherwise only the named directory is scanned.
# appendSep is True if path separator character is to be appended to directory names
#
# Returns a list of directory contents
#
def CollectDirectoryContents(srcDir, baseDir="",
        listDirs=True, listFiles=False, recursive=True, appendSep=False):
    """
    Return a list of directory contents found under the source directory.

    Each returned entry has the baseDir prefix removed; directory entries
    have a path separator appended when appendSep is True.
    """
    logger.debug("CollectDirectories: %s, %s, %s"%(srcDir,baseDir,str(os.path.sep)))
    dirsuffix  = os.path.sep if appendSep else ""
    collection = []
    # Ensure baseDir ends with a separator so prefix removal is clean
    if baseDir and not baseDir.endswith(os.path.sep):
        baseDir += os.path.sep
    def CollectDir(path):
        logger.debug("- CollectDir base: %s, path: %s"%(baseDir, path))
        if listDirs:
            collection.append(path.replace(baseDir,"",1) + dirsuffix)
    def CollectFile(path):
        logger.debug("- CollectFile base: %s, path: %s"%(baseDir, path))
        if listFiles:
            collection.append(path.replace(baseDir,"",1))
    ScanDirectoriesEx(srcDir, CollectDir, CollectFile, recursive)
    return collection
# Ad hoc test/demo: when run as a script, list files (not directories)
# found under the current directory, relative to the current directory.
if __name__ == "__main__":
    directoryCollection = CollectDirectoryContents(".", baseDir=".",
        listFiles=True, listDirs=False, appendSep=True)
    print("\n".join(directoryCollection))
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/miscutils/ScanDirectories.py
|
ScanDirectories.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re # Used for link header parsing
import httplib2
from utils.py3porting import urljoin, urlsplit
def splitValues(txt, sep=",", lq='"<', rq='">'):
    """
    Helper function returns list of delimited values in a string,
    where delimiters in quotes are protected.

    sep     is string of separator characters
    lq      is string of opening quotes for strings within which
            separators are not recognized
    rq      is string of corresponding closing quotes
    """
    result = []
    pos    = 0
    start  = 0          # start of current segment
    while pos < len(txt):
        ch = txt[pos]
        if ch in lq:
            # Skip over a quoted or bracketed string
            endch = rq[lq.index(ch)]    # matching close quote/bracket
            pos += 1
            while pos < len(txt) and txt[pos] != endch:
                if txt[pos] == '\\':
                    pos += 1            # skip '\' quoted-pair
                pos += 1
            if pos < len(txt):
                pos += 1                # skip closing quote/bracket
        elif ch in sep:
            result.append(txt[start:pos])
            pos  += 1
            start = pos
        else:
            pos += 1
    # Append final segment
    result.append(txt[start:pos])
    return result
def testSplitValues():
    """
    Self-test for splitValues: exercises plain splitting, quoted and
    bracketed values containing separators, escaped quotes, alternative
    separator characters, and alternative quote/bracket pairs.
    (Raises AssertionError if any case fails.)
    """
    assert splitValues("a,b,c") == ['a','b','c']
    assert splitValues('a,"b,c",d') == ['a','"b,c"','d']
    assert splitValues('a, "b, c\\", c1", d') == ['a',' "b, c\\", c1"',' d']
    assert splitValues('a,"b,c",d', ";") == ['a,"b,c",d']
    assert splitValues('a;"b;c";d', ";") == ['a','"b;c"','d']
    assert splitValues('a;<b;c>;d', ";") == ['a','<b;c>','d']
    assert splitValues('"a;b";(c;d);e', ";", lq='"(', rq='")') == ['"a;b"','(c;d)','e']
def parseLinks(headerlist):
    """
    Helper function to parse 'link:' headers,
    returning a dictionary of links keyed by link relation type

    headerlist  is a list of header (name,value) pairs
    """
    linkheaders = [ v for (h,v) in headerlist if h.lower() == "link" ]
    log.debug("parseLinks linkheaders %s"%(repr(linkheaders)))
    links = {}
    for linkheader in linkheaders:
        # Each header value may carry several comma-separated link-values
        for linkval in splitValues(linkheader, ","):
            linkparts = splitValues(linkval, ";")
            urimatch  = re.match(r'''\s*<([^>]*)>\s*''', linkparts[0])
            if not urimatch:
                continue
            linkuri = urimatch.group(1)
            # Scan remaining parameters for rel=... values
            for linkparam in linkparts[1:]:
                relmatch = re.match(r'''\s*rel\s*=\s*"?(.*?)"?\s*$''', linkparam) # .*? is non-greedy
                if relmatch:
                    linkrel = relmatch.group(1)
                    log.debug("parseLinks links[%s] = %s"%(linkrel, linkuri))
                    links[linkrel] = linkuri
    return links
def testParseLinks():
    """
    Self-test for parseLinks: checks bare and quoted rel values, multiple
    link-values in one header, parameters other than rel, and link URIs
    containing the ';' separator.  (Raises AssertionError on failure.)
    """
    links = (
        ('Link', '<http://example.org/foo>; rel=foo'),
        ('Link', ' <http://example.org/bar> ; rel = bar '),
        ('Link', '<http://example.org/bas>; rel=bas; par = zzz , <http://example.org/bat>; rel = bat'),
        ('Link', ' <http://example.org/fie> ; par = fie '),
        ('Link', ' <http://example.org/fum> ; rel = "http://example.org/rel/fum" '),
        ('Link', ' <http://example.org/fas;far> ; rel = "http://example.org/rel/fas" '),
        )
    assert str(parseLinks(links)['foo']) == 'http://example.org/foo'
    assert str(parseLinks(links)['bar']) == 'http://example.org/bar'
    assert str(parseLinks(links)['bas']) == 'http://example.org/bas'
    assert str(parseLinks(links)['bat']) == 'http://example.org/bat'
    assert str(parseLinks(links)['http://example.org/rel/fum']) == 'http://example.org/fum'
    assert str(parseLinks(links)['http://example.org/rel/fas']) == 'http://example.org/fas;far'
# Class for exceptions raised by HTTP session
class HTTP_Error(Exception):
    """
    Exception raised for errors in an HTTP session, carrying a message
    and optional associated value and request URI.
    """
    def __init__(self, msg="HTTP_Error", value=None, uri=None):
        self._msg   = msg
        self._value = value
        self._uri   = uri
        return
    def __str__(self):
        parts = [self._msg]
        if self._uri:
            parts.append(" for "+str(self._uri))
        if self._value:
            parts.append(": "+repr(self._value))
        return "".join(parts)
    def __repr__(self):
        return ( "HTTP_Error(%s, value=%s, uri=%s)"%
                 (repr(self._msg), repr(self._value), repr(self._uri)))
# Class for handling Access in an HTTP session
class HTTP_Session(object):
"""
Client access class for HTTP session.
Creates a session to access a single HTTP endpoint,
and provides methods to issue requests on this session
This class is primarily designed to access a specific endpoint, and
by default refuses requests for different endpoints. But the request
methods accept an additional "exthost" parameter that can be used to
override this behaviour. Specifying "exthost=True" causes the request
to allow URIs that use different scheme, hostname or port than the original
request, but such requests are not issued using the access key of the HTTP
session.
"""
def __init__(self, baseuri, accesskey=None):
log.debug("HTTP_Session.__init__: baseuri "+baseuri)
self._baseuri = baseuri
self._key = accesskey
parseduri = urlsplit(baseuri)
self._scheme = parseduri.scheme
self._host = parseduri.netloc
self._path = parseduri.path
self._http2 = httplib2.Http()
return
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
return
def close(self):
self._key = None
self._http2 = None
return
def baseuri(self):
return self._baseuri
def getpathuri(self, uripath):
# str used here so rdflib.URIRef values can be accepted
return urljoin(self._baseuri, str(uripath))
def error(self, msg, value=None):
return HTTP_Error(msg=msg, value=value, uri=self._baseuri)
def parseLinks(self, headers):
"""
Parse link header(s), return dictionary of links keyed by link relation type
"""
return parseLinks(headers["_headerlist"])
def doRequest(self, uripath,
method="GET", body=None, ctype=None, accept=None, reqheaders=None, exthost=False):
"""
Perform HTTP request.
Parameters:
uripath URI reference of resource to access, resolved against the base URI of
the current HTTP_Session object.
method HTTP method to use (default GET)
body request body to use (default none)
ctype content-type of request body (default none)
accept string containing list of content types for HTTP accept header
reqheaders dictionary of additional header fields to send with the HTTP request
exthost True if a request to a URI with a scheme and/or host different than
the session base URI is to be respected (default False).
Return:
status, reason(text), response headers, response body
Note: relies on to,meout or object deallocation to close connections:
see http://stackoverflow.com/questions/16687033/is-this-a-bug-of-httplib2.
Python's reference counting should ensure Http objects amnd referenced connections
are deallocated promptly.
"""
# Construct request path
urifull = self.getpathuri(uripath)
uriparts = urlsplit(urifull)
path = uriparts.path
if uriparts.query: path += ("?"+uriparts.query)
# Sort out HTTP connection to use: session or new
if ( (uriparts.scheme and uriparts.scheme != self._scheme) or
(uriparts.netloc and uriparts.netloc != self._host) ):
if exthost:
usescheme = uriparts.scheme
usekey = None
elif (uriparts.scheme and uriparts.scheme != self._scheme):
raise HTTP_Error(
"URI scheme mismatch",
value=uriparts.scheme,
uri=self._baseuri)
elif (uriparts.netloc and uriparts.netloc != self._host):
raise HTTP_Error(
"URI host:port mismatch",
value=uriparts.netloc,
uri=self._baseuri)
else:
usescheme = self._scheme
usekey = self._key
# Assemble request headers
if not reqheaders:
reqheaders = {}
if usekey:
reqheaders["authorization"] = "Bearer "+usekey
if ctype:
reqheaders["content-type"] = ctype
if accept:
reqheaders["accept"] = accept
# Execute request
log.debug("HTTP_Session.doRequest method: "+method)
log.debug("HTTP_Session.doRequest path: "+path)
log.debug("HTTP_Session.doRequest reqheaders: "+repr(reqheaders))
log.debug("HTTP_Session.doRequest body: "+repr(body))
(resp, data) = self._http2.request(urifull,
method=method, body=body, headers=reqheaders)
# Pick out elements of response
try:
status = resp.status
reason = resp.reason
headerlist = [ (hn.lower(),resp[hn]) for hn in resp ]
headers = dict(headerlist) # dict(...) keeps last result of multiple keys
headers["_headerlist"] = headerlist
log.debug("HTTP_Session.doRequest response: "+str(status)+" "+reason)
log.debug("HTTP_Session.doRequest rspheaders: "+repr(headers))
except Exception as e:
log.warn("HTTP_Session error %r accessing %s with request headers %r"%(e, uripath, reqheaders))
status = 900
reason = str(e)
headers = {"_headerlist": []}
data = None
###log.debug("HTTP_Session.doRequest data: "+repr(data))
return (status, reason, headers, data)
def doRequestFollowRedirect(self, uripath,
        method="GET", body=None, ctype=None, accept=None, reqheaders=None, exthost=False):
    """
    Perform HTTP request, following any redirect returned.

    Parameters:
    uripath     URI reference of resource to access, resolved against the base URI of
                the current HTTP_Session object.
    method      HTTP method to use (default GET)
    body        request body to use (default none)
    ctype       content-type of request body (default none)
    accept      string containing list of content types for HTTP accept header
    reqheaders  dictionary of additional header fields to send with the HTTP request
    exthost     True if a request to a URI with a scheme and/or host different than
                the session base URI is to be respected (default False).
    Return:
    status, reason(text), response headers, final URI, response body

    The final URI is taken from the "content-location" response header; None is
    returned in that position when the header is not present (e.g. when the
    underlying request failed and doRequest returned a minimal header dict).
    """
    (status, reason, headers, data) = self.doRequest(uripath,
        method=method, accept=accept,
        body=body, ctype=ctype, reqheaders=reqheaders,
        exthost=exthost)
    # Use .get(...) so a missing "content-location" header yields None
    # instead of raising KeyError (the error path in doRequest returns
    # headers containing only "_headerlist").
    return (status, reason, headers, headers.get('content-location'), data)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/miscutils/HttpSession.py
|
HttpSession.py
|
# annalist_site.test
Contains data used when running Django tests.
empty/ contains a copy of the site data used to initialize a new (empty) site.
testinit/ contains a copy of the site data used to initialize the site used for testing.
data/ contains the site that is actually modified by the tests
The test suite also copies data from annalist/data/sitedata/ into the data/ directory
when running tests (this contains definitions of built-in forms, etc., which may
in principle be overridden by local definitions).
The actual test data is created by annalist.tests.test_createsitedata.CreateSiteData.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/sampledata/README.md
|
README.md
|
/Users/graham/workspace/github/gklyne/annalist/src/annalist_root/sampledata/data/annalist_test/
This directory contains Annalist site data for http://test.example.com/testsite/.
Directory layout:
/Users/graham/workspace/github/gklyne/annalist/src/annalist_root/sampledata/data/annalist_test/
c/
_annalist_site/ (site-wide definitions)
d/
coll_meta.jsonld (site metadata)
coll_context.jsonld (JSON-LD context for site definitions)
_enum_field_placement/
(field-placement-value)/
enum_meta.jsonld
:
_enum_list_type/
(list-type-id)/
enum_meta.jsonld
:
_enum_render_type/
(render-type-id)/
enum_meta.jsonld
:
_enum_value_type/
(value-type-id)/
enum_meta.jsonld
:
_enum_value_mode/
(value-mode-id)/
enum_meta.jsonld
:
_field/
(view-field definitions)
:
_list/
(entity list definitions)
:
_type/
(type definitions)
:
_user/
(user permissions)
:
_view/
(entity view definitions)
:
_vocab/
(vocabulary namespace definitions)
:
(collection-id)/ (user-created data collection)
d/
coll_meta.jsonld (collection metadata)
coll_context.jsonld (JSON-LD context for collection data)
_type/ (collection type definitions)
(type-id)/
type_meta.jsonld
:
_list/ (collection list definitions)
(list-id)/
list_meta.jsonld
:
_view/ (collection view definitions)
(view-id)/
view_meta.jsonld
:
_field/ (collection field definitions)
(field-id)/
field_meta.jsonld
:
_group/ (collection field group definitions)
(group-id)/
group_meta.jsonld
:
_user/ (collection user permissions)
(user-id)/
user_meta.jsonld
:
(type-id)/ (contains all entity data for identified type)
(entity-id)/ (contains data for identified type/entity)
entity_data.jsonld (entity data)
entity_prov.jsonld (entity provenance @@TODO)
(attachment files) (uploaded/imported attachments)
: (repeat for entities of this type)
: (repeat for types in collection)
: (repeat for collections in site)
Created by annalist.models.site.py
for Annalist 0.5.18 at 2022-04-09 09:35:10 (UTC)
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/sampledata/empty/annalist_site/README.md
|
README.md
|
# Values of annal:Enum_render_type property
Obtained by:
grep -rh annal:Enum_render_type ./annalist | awk '{print $3}' | sort | uniq
Results (reorganized and "attic" values removed)
"annal:Enum_render_type/EntityId"
"annal:Enum_render_type/EntityTypeId"
"annal:Enum_render_type/EntityRef"
"annal:Enum_render_type/Text"
"annal:Enum_render_type/Textarea"
"annal:Enum_render_type/Identifier"
"annal:Enum_render_type/Enum"
"annal:Enum_render_type/Field"
"annal:Enum_render_type/List"
"annal:Enum_render_type/Type"
"annal:Enum_render_type/View"
"annal:Enum_render_type/Placement"
"annal:Enum_render_type/View_sel"
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/sampledata/empty/annalist_site/c/_annalist_site/d/_enum_render_type/Render-types.md
|
Render-types.md
|
src/annalist_root/annalist/sampledata/data/site/
This directory contains sample Annalist site data used for testing
Directory layout:
$BASE_DATA_DIR
annalist-site/
_annalist-site/
site_meta.json_ld
<collection-id>/
_annalist_collection/
coll_meta.jsonld
types/
<type-id>/
type_meta.jsonld
:
views/
<view-id>/
view_meta.jsonld
:
lists/
<list-id>/
list_meta.jsonld
:
bridges/
(bridge-description (incl path mapping in collection) - @@TBD)
:
user-groups/ @@TBD
group-description
:
access/ @@TBD
default-access
(more details to work through - keep it simple for starters)
<type-id>/
<entity-id>/
entity-data.jsonld
entity-prov.jsonld
:
:
:
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/sampledata/testinit/annalist_test/README.md
|
README.md
|
# Values of annal:Enum_render_type property
Obtained by:
grep -rh annal:Enum_render_type ./annalist | awk '{print $3}' | sort | uniq
Results (reorganized and "attic" values removed)
"annal:Enum_render_type/EntityId"
"annal:Enum_render_type/EntityTypeId"
"annal:Enum_render_type/EntityRef"
"annal:Enum_render_type/Text"
"annal:Enum_render_type/Textarea"
"annal:Enum_render_type/Identifier"
"annal:Enum_render_type/Enum"
"annal:Enum_render_type/Field"
"annal:Enum_render_type/List"
"annal:Enum_render_type/Type"
"annal:Enum_render_type/View"
"annal:Enum_render_type/Placement"
"annal:Enum_render_type/View_sel"
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/sampledata/testinit/annalist_test/c/_annalist_site/d/_enum_render_type/Render-types.md
|
Render-types.md
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2016, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import os
import re
import traceback
import json
import markdown
import copy
import uuid
import urllib
from importlib import import_module
from django.urls import resolve, reverse
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.template import loader
from django.views import generic
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from utils.http_errors import error400values
from utils.py3porting import urlparse, urljoin
from . import login_message
from .auth_django_client import django_flow_from_user_id
from .auth_oidc_client import oauth2_flow_from_provider_data
from .login_utils import (
HttpResponseRedirectWithQuery,
HttpResponseRedirectLogin
)
PROVIDER_FILES = None
PROVIDER_DETAILS = None
settings = import_module(os.environ["DJANGO_SETTINGS_MODULE"])
def collect_provider_data():
    """
    Scan the configured providers directory and cache identity-provider details.

    Populates the module-level dictionaries:
      PROVIDER_FILES    provider name -> path of the JSON description file
      PROVIDER_DETAILS  provider name -> contents of that file's "web" member
                        (with a "provider_label" defaulted to the provider name)

    The scan runs at most once: subsequent calls are no-ops once
    PROVIDER_DETAILS has been populated.
    """
    global PROVIDER_FILES, PROVIDER_DETAILS
    if PROVIDER_DETAILS is None:
        PROVIDER_DETAILS = {}
        PROVIDER_FILES = {}
        clientsecrets_dirname = os.path.join(settings.CONFIG_BASE, "providers/")
        if os.path.isdir(clientsecrets_dirname):
            clientsecrets_files = os.listdir(clientsecrets_dirname)
            for filename in clientsecrets_files:
                if filename.endswith(".json"):
                    provider_path = os.path.join(clientsecrets_dirname, filename)
                    # Use a distinct name for the file handle: the original
                    # code rebound the loop variable here, which was confusing.
                    with open(provider_path, "r") as provider_file:
                        provider_data = json.load(provider_file)
                    provider_name = provider_data['web']['provider']
                    PROVIDER_FILES[provider_name] = provider_path
                    PROVIDER_DETAILS[provider_name] = provider_data['web']
                    if 'provider_label' not in PROVIDER_DETAILS[provider_name]:
                        PROVIDER_DETAILS[provider_name]['provider_label'] = provider_name
                    log.debug("login_views: collect_provider_data %s"%(provider_name,))
                    # For debugging only: don't log details in running system...
                    # log.debug(json.dumps(
                    #     PROVIDER_DETAILS[provider_name],
                    #     sort_keys=True,
                    #     indent=4,
                    #     separators=(',', ': ')
                    #     ))
    return
def confirm_authentication(view,
        login_form_url=None, login_post_url=None, login_done_url=None,
        user_profile_url=None, continuation_url=None,
        help_path="annalist/views/help/"):
    """
    Check for an authenticated user, starting the login flow if there is none.

    Returns None if the required authentication is present, otherwise a
    login redirection response to the supplied URI.

    view.credential is set to credential that can be used to access resource

    Five URL parameters are passed in from the calling application:
    login_form_url      Page to gather information to initiate login process
    login_post_url      URL to which login information is posted
    login_done_url      URL retrieved with additional parameters when authentication
                        is complete (maybe failed).  In the OAuth2 flow, this
                        triggers retrieval of user profile information.  Not used
                        for local authentication.
    user_profile_url    URL retrieved when user profile details have been set up.
    continuation_url    URL from which the login process was initiated.
    """
    if view.request.user.is_authenticated:
        return None
    # A login form and a completion URL are both required to run the sequence
    if not login_form_url:
        return error400values(view, "No login form URI specified")
    if not login_done_url:
        return error400values(view, "No login completion URI specified")
    # Optional URLs fall back to sensible defaults
    login_post_url = login_post_url or login_form_url
    continuation_url = continuation_url or view.request.path
    # Stash flow values in the session for later steps of the login sequence
    # See: https://docs.djangoproject.com/en/2.0/topics/http/sessions/
    session_values = (
        { 'login_form_url':   login_form_url
        , 'login_post_url':   login_post_url
        , 'login_done_url':   login_done_url
        , 'user_profile_url': user_profile_url
        , 'continuation_url': continuation_url
        , 'help_dir':         os.path.join(settings.SITE_SRC_ROOT, help_path)
        })
    for session_key, session_value in session_values.items():
        view.request.session[session_key] = session_value
    # Resolve the user id: POST value, then GET value, then most recent login
    request = view.request
    userid = request.POST.get("userid", None)
    if userid is None:
        userid = request.GET.get("userid", None)
    if userid is None:
        userid = request.session.get('login_recent_userid', "")
    # Redirect to initiate login sequence
    query_params = (
        { "userid":           userid
        , "continuation_url": continuation_url
        })
    query_params.update(view.get_message_data())
    return HttpResponseRedirectWithQuery(login_form_url, query_params)
class LoginUserView(generic.View):
    """
    View class to present login form to gather user id and other login information.

    The login page solicits a user id and an identity provider.

    The login page supports the following request parameters:
      continuation_url={uri}
      - a URL for a page that is displayed when the login process is complete.
    """

    def get(self, request):
        """
        Display the login form, or redirect to the continuation URL if the
        session does not hold the values needed to run the login sequence.
        """
        # Ensure the provider caches (PROVIDER_FILES/PROVIDER_DETAILS) are loaded
        collect_provider_data()
        # @@TODO: check PROVIDER_FILES, report error if none here
        # Retrieve request parameters
        continuation_url = request.GET.get("continuation_url", "/no-login-continuation/")
        # Check required values in session - if missing, restart sequence from original URI
        # This is intended to avoid problems if this view is invoked out of sequence
        login_post_url = request.session.get("login_post_url", None)
        login_done_url = request.session.get("login_done_url", None)
        user_profile_url = request.session.get("user_profile_url", None)
        help_dir = request.session.get("help_dir", None)
        recent_userid = request.session.get("login_recent_userid", "")
        if ( (login_post_url is None) or
             (login_done_url is None) or
             (user_profile_url is None) or
             (help_dir is None) ):
            log.warning(
                "LoginUserView: missing details "+
                "login_post_url %s, login_done_url %s, user_profile_url %s, help_dir %s"%
                (login_post_url, login_done_url, user_profile_url, help_dir)
                )
            return HttpResponseRedirect(continuation_url)
        # Display login form
        default_provider = ""
        # Build (sort-order, (key, label, image)) tuples so providers can be
        # presented in their configured 'provider_order' (default 5)
        provider_tuples = (
            [ ( p.get('provider_order', 5),
                (k, p.get('provider_label', k), p.get('provider_image', None))
              ) for k, p in PROVIDER_DETAILS.items()
            ])
        provider_labels = [ p[1] for p in sorted(provider_tuples) ]
        for p in PROVIDER_DETAILS:
            if "default" in PROVIDER_DETAILS[p]:
                # NOTE(review): uses the *value* of the "default" member as the
                # default provider name - confirm against provider data files.
                default_provider = PROVIDER_DETAILS[p]["default"]
        logindata = (
            { "login_post_url": login_post_url
            , "login_done_url": login_done_url
            , "user_profile_url": user_profile_url
            , "continuation_url": continuation_url
            , "provider_keys": list(PROVIDER_DETAILS)
            , "provider_labels": provider_labels
            , "provider": default_provider
            , "suppress_user": True
            , "help_filename": "login-help"
            , "userid": request.GET.get("userid", recent_userid)
            , "info_head": request.GET.get("info_head", None)
            , "info_message": request.GET.get("info_message", None)
            , "error_head": request.GET.get("error_head", None)
            , "error_message": request.GET.get("error_message", None)
            })
        # Load help text if available (markdown file in the session's help_dir)
        if "help_filename" in logindata:
            help_filepath = help_dir + "%(help_filename)s.md"%(logindata)
            if os.path.isfile(help_filepath):
                with open(help_filepath, "r") as helpfile:
                    logindata["help_markdown"] = helpfile.read()
        if "help_markdown" in logindata:
            logindata["help_text"] = markdown.markdown(logindata["help_markdown"])
        # Render form & return control to browser
        template = loader.get_template("login.html")
        return HttpResponse(template.render(logindata, request=self.request))
class LoginPostView(generic.View):
    """
    View class to initiate an authentication flow, typically on POST
    of the login form.

    It saves the supplied user id in a session value, and redirects the user to the
    identity provider, which in due course returns control to the application along
    with a suitable authorization grant.

    The login form provides the following values:
      userid={string}
      - a user identifying string that will be associated with the external service
        login credentials.
      provider={string}
      - a string that identifies a provider selected to perform authentication
        of the indicated user.  This string is an index to PROVIDER_FILES,
        which in turn contains filenames for client secrets to use when accessing
        the indicated identity provider.
      login_done={uri}
      - a URI that is retrieved, with a suitable authorization grant as a parameter,
        when appropriate permission has been confirmed by an authenticated user.
        Used to obtain user information following completion of authentication.
        Communicated via a hidden form value.
      user_profile_url={uri}
      - a URI that is retrieved, when user information has been obtained.  Expected
        use is to display user information, then continue to the page from which the
        login sequence was invoked.  Communicated via a hidden form value.
      continuation_url={uri}
      - URL of page from which logon sequence was invoked, and to which control is
        eventually returned.  Communicated via a hidden form value.
    """

    def post(self, request):
        """
        Handle POST of the login form: dispatch to post_main, converting any
        uncaught exception into an error response rather than a bare 500.
        """
        log.info("LoginPostView.post")
        try:
            response = self.post_main(request)
        except Exception as e:
            # -- This should be redundant, but...
            log.error("Exception in LoginPostView.post (%r)"%(e))
            log.error("".join(traceback.format_stack()))
            # --
            log.exception(str(e))
            response = self.error(
                dict(self.error500values(),
                    message=str(e)+" - see server log for details"
                    )
                )
        log.info(
            "LoginPostView.post complete %d %s"%
            (response.status_code, response.reason_phrase)
            )
        return response

    def post_main(self, request):
        """
        Process the login form: save login details in the session and initiate
        the authentication flow appropriate for the selected provider mechanism
        ("OIDC" or "django"); redirect to the continuation URL if the login was
        cancelled (no "login" value in the POST data).
        """
        # Retrieve request parameters
        userid = request.POST.get("userid", "")
        provider = request.POST.get("provider", "No_provider")
        # The "login" button value, when present, selects the provider directly
        provider = request.POST.get("login", provider)
        login_done_url = request.POST.get("login_done_url", "/no_login_done_url_in_form/")
        user_profile_url = request.POST.get("user_profile_url", "/no_user_profile_url_in_form/")
        continuation_url = request.POST.get("continuation_url", "/no_continuation_url_in_form/")
        if request.POST.get("login", None):
            collect_provider_data()
            provider_data = PROVIDER_DETAILS[provider]
            # NOTE(review): PROVIDER_FILES maps a provider name to the *path* of
            # its description file; the value is used here for error reporting.
            provider_name = PROVIDER_FILES[provider]
            provider_mechanism = provider_data.get("mechanism", "OIDC")
            # User ids are restricted to "word" characters
            if userid and not re.match(r"\w+$", userid):
                return HttpResponseRedirectLogin(
                    request,
                    login_message.USER_ID_SYNTAX%(userid)
                    )
            request.session['login_recent_userid'] = userid
            request.session['login_provider_data'] = provider_data
            request.session['login_continuation_url'] = continuation_url
            if provider_mechanism == "OIDC":
                # Create and initialize flow object
                log.debug("LoginPostView.post: SECURE_PROXY_SSL_HEADER %r"%(settings.SECURE_PROXY_SSL_HEADER,))
                log.debug("LoginPostView.post: scheme %s"%(request.scheme,))
                log.debug("LoginPostView.post: headers %r"%(request.META,))
                flow = oauth2_flow_from_provider_data(
                    provider_data,
                    redirect_uri=request.build_absolute_uri(login_done_url)
                    )
                # Save state token and user id for checking on flow completion
                request.session['oauth2_state'] = flow.step1_get_state_token()
                request.session['oauth2_userid'] = userid
                # Initiate OAuth2 dance
                # The response is handled by auth_oidc_client.OIDC_AuthDoneView
                return HttpResponseRedirect(flow.step1_get_authorize_url())
            if provider_mechanism == "django":
                flow = django_flow_from_user_id(
                    provider_data,
                    userid=userid,
                    auth_uri=reverse("LocalUserPasswordView"),
                    redirect_uri=request.build_absolute_uri(user_profile_url)
                    )
                # Initiate django authentication
                auth_uri = flow.step1_get_authorize_url()
                return HttpResponseRedirect(auth_uri)
            return HttpResponseRedirectLogin(
                request,
                login_message.UNRECOGNIZED_PROVIDER%(provider_mechanism, provider_name)
                )
        # Login cancelled: redirect to continuation
        # (which may just redisplay the login page)
        return HttpResponseRedirect(continuation_url)
class LogoutUserView(generic.View):
    """
    View class that logs out the current user, then redirects.
    """

    def get(self, request):
        # Remember the most recent user id across logout: logout() clears
        # the session, so the value is re-saved afterwards (used to pre-fill
        # the login form next time).
        remembered_userid = request.session.get('login_recent_userid', "")
        logout(request)
        request.session['login_recent_userid'] = remembered_userid
        # Default continuation is the parent of the current request path
        default_continuation = urljoin(urlparse(request.path).path, "../")
        continuation_url = request.GET.get("continuation_url", default_continuation)
        return HttpResponseRedirect(continuation_url)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/login/login_views.py
|
login_views.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2016, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import copy
from utils.py3porting import quote, unquote
from django.urls import resolve, reverse
from django.http import HttpResponse
from django.http import HttpResponseRedirect
def HttpResponseRedirectWithQuery(redirect_uri, query_params):
    """
    Returns an HTTP response object that redirects to the supplied URL with
    the supplied query parameters applied.

    Parameters whose values are false-y (e.g. None or "") are omitted.
    """
    query_fragments = [
        pname + "=" + quote(pvalue)
        for pname, pvalue in query_params.items()
        if pvalue
        ]
    if query_fragments:
        redirect_uri += "?" + "&".join(query_fragments)
    return HttpResponseRedirect(redirect_uri)
def HttpResponseRedirectLogin(request, message=None):
    """
    Returns an HTTP response object that is used at the end of an
    authentication flow.

    It redirects to the user_profile_url stored in the current session,
    with continuation to the session's continuation_url, and with the userid
    for the (attempted) authentication as a further query parameter.
    If a message is supplied, it is passed along as a login error report.
    """
    session = request.session
    target_url = session.get('user_profile_url', reverse("AnnalistSiteView"))
    query_params = {}
    # Propagate values from the session, where present
    for session_key in ('continuation_url', 'recent_userid'):
        if session_key in session:
            query_params[session_key] = session[session_key]
    if message:
        query_params["error_head"] = "Login failed"
        query_params["error_message"] = message
    return HttpResponseRedirectWithQuery(target_url, query_params)
def object_to_dict(obj, strip):
    """
    Utility function that creates a dictionary representation of an object.

    Args:
        strip: an array of names of members to not include in the dict.

    Returns:
        dictionary, with non-excluded values that can be used to reconstruct
        an instance of the object via its constructor (assuming an appropriate
        constructor form, as used below for oauth2_dict_to_flow, etc.)
    """
    obj_type = type(obj)
    excluded = set(strip)
    # Shallow copy of the instance dictionary, minus the excluded members
    result = { name: value
               for name, value in obj.__dict__.items()
               if name not in excluded
             }
    result['_class'] = obj_type.__name__
    result['_module'] = obj_type.__module__
    return result
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/login/login_utils.py
|
login_utils.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import json
import copy
import re
import logging
log = logging.getLogger(__name__)
import httplib2
from django.contrib.auth.models import User
from utils.py3porting import is_string, to_unicode
class OAuth2CheckBackend(object):
    """
    Django authentication backend: authenticate using results of an OAuth2
    (OpenID Connect) exchange.

    username is a local user id that keys the local user database
    password is a Credential object obtained via the OAuth2 dance
    profile  is a user profile information dictionary

    NOTE: when this method returns a User record on completion of a third
    party authentication process, it does not guarantee that it is the same
    as any record that may have been previously associated with the supplied
    username.  It becomes the responsibility of the calling view code to check
    that the user details match any previously associated with the user id.

    The returned user object is created or copied from the Django user base,
    but if it already exists the email address is replaced with the one
    returned by the OIDC authentication exchange.
    """

    def authenticate(self, username=None, profile=None, password=None):
        """
        Authenticate a user from the results of an OAuth2/OIDC exchange.

        username    local user id keying the local user database (may be None).
        profile     user profile dictionary returned by the identity provider.
        password    optional OAuth2 credential object; only its
                    id_token["email"] member is used, and only when no
                    profile dictionary is supplied.
                    (FIX: the original code referenced an undefined name
                    "password" in the no-profile branch, raising NameError;
                    it is now an optional keyword parameter.)

        Returns a saved Django User on success, or None if authentication
        cannot be completed from the supplied information.
        """
        log.debug(
            "OAuth2CheckBackend.authenticate: username %s, profile %r"%
            (username, profile)
            )
        if is_string(profile):
            # Not oauth2 exchange:
            # @TODO: can we be more specific about what type this should be?
            return None
        auth_username   = None
        auth_email      = None
        auth_url        = None
        return_user     = None
        create_username = None
        verified_email  = False
        verified_url    = False
        if profile:
            # Analyze profile returned for verified identifier
            # (email address or URL)
            #
            # Google returns a "verified_email" flag
            # (It looks like this was changed in Google profile...)
            log.info("login user profile: %r"%(profile,))
            verified_email = (
                profile.get("verified_email", False) or
                profile.get("email_verified", False)
                )
            if verified_email:
                # Use access token to retrieve profile information
                # Construct authenticated user ID from email local part
                auth_email = profile["email"]
                email_local_part = auth_email.split('@', 1)[0]
                auth_username = re.sub(r"\.", "_", email_local_part)
                auth_username = re.sub(r"[^a-zA-Z0-9_]", "", auth_username)
                auth_username = auth_username[:32]
            # GitHub returns a URL that is implicitly verified
            # NOTE: the GitHub email does not include a separate flag to say
            # it's verified, though apparently it is.  But the GitHub
            # email address is optional in the profile.
            if profile.get("html_url", "").startswith("https://github.com/"):
                verified_url = True
                auth_url = profile["html_url"]
                auth_domain = "github.com"
                auth_username = profile["login"]
                # This is a hack to fit Django's User structure:
                # would prefer to store and use authenticated URL instead.
                # Maybe this is possible?
                # See https://stackoverflow.com/questions/6085025/django-user-profile
                auth_email = (
                    profile.get("email", None) or
                    auth_username+"."+auth_domain+"@user.annalist.net"
                    )
        # Locate existing user record, or note the username to create
        if username:
            try:
                return_user = User.objects.get(username=username)
                return_user.email = auth_email
            except User.DoesNotExist:
                create_username = username
        elif auth_username:
            try:
                return_user = User.objects.get(username=auth_username)
            except User.DoesNotExist:
                create_username = auth_username
        if create_username:
            # NOTE: when a new User record is created, it is important
            # that it is saved to the local Django database before
            # returning it to the caller, to ensure that a suitable
            # primary key value is created.  The authentication return
            # path makes further changes to the User record which cause
            # the Django ORM to force an update rather than insert of the
            # new record, which in turn generates an error if no primary
            # key is defined.
            log.info("Create new user record for %s"%(create_username))
            return_user = User(username=create_username, password='Not specified')
            if profile is not None:
                return_user.is_staff = True
                return_user.is_superuser = False
                #@@ For testing: fake old-style Google profile
                # if ("given_name" in profile) and ("family_name" in profile):
                #     profile["name"] = profile["given_name"] + " " + profile["family_name"]
                #     del profile["given_name"]
                #     del profile["family_name"]
                #@@
                if ("given_name" in profile) and ("family_name" in profile):
                    given_name = profile["given_name"]
                    family_name = profile["family_name"]
                elif ("name" in profile) and profile["name"]:
                    # Older Google profiles have just "name" value, apparently
                    # GitHub profiles may include a "name" value, apparently
                    n = profile["name"].split(None, 1)
                    given_name = ""
                    family_name = ""
                    if len(n) >= 1:
                        given_name = n[0]
                    if len(n) >= 2:
                        family_name = n[1]
                else:
                    given_name = auth_username
                    family_name = ""
                return_user.first_name = given_name
                return_user.last_name = family_name
                return_user.email = auth_email
            elif password is not None and password.id_token:
                # No profile provided: Try to load email address from id_token
                # (FIX: previously "password" was an undefined name here)
                return_user.is_staff = True
                return_user.is_superuser = False
                return_user.first_name = ""
                return_user.last_name = ""
                return_user.email = password.id_token["email"]
            else:
                return_user = None
            if return_user:
                return_user.save()
        if return_user:
            log.info("user.username: %s"%(return_user.username,))
            log.info("user.first_name: %s"%(return_user.first_name,))
            log.info("user.last_name: %s"%(return_user.last_name,))
            log.info("user.email: %s"%(return_user.email,))
            return return_user
        # No username or credentials provided
        log.info("No user id or no credentials provided")
        return None

    def get_user(self, user_id):
        # Required Django backend method: map primary key -> User, or None
        try:
            return User.objects.get(pk=user_id)
        except User.DoesNotExist:
            return None
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/login/OAuth2CheckBackend.py
|
OAuth2CheckBackend.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2016, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os.path
import urllib
import logging
log = logging.getLogger(__name__)
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.template import loader
from django.views import generic
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from utils.uri_builder import uri_with_params
from . import login_message
from .login_utils import (
HttpResponseRedirectWithQuery,
HttpResponseRedirectLogin
)
class LocalUserPasswordView(generic.View):
    """
    View class to present a form for entering a local user id and password.

    The local user password page supports the following request parameters:
      userid={string}
      - a local user id that is the default user id for which a password is solicited.
      continuation_url={uri}
      - a URL of page to be displayed when the authentication process is complete.
      message={string}
      - a message to be displayed on the page
    """

    def get(self, request):
        """
        Display the local user password page with values as supplied.
        """
        userid = request.GET.get("userid", "")
        continuation_url = request.GET.get("continuation_url", "/no-login-continuation_url/")
        # Check required values in session - if missing, restart sequence
        login_post_url = request.session.get("login_post_url", None)
        user_profile_url = request.session.get("user_profile_url", None)
        help_dir = request.session.get("help_dir", None)
        if (login_post_url is None) or (user_profile_url is None) or (help_dir is None):
            log.warning(
                "LocalUserPasswordView: missing session details "+
                "login_post_url %s, user_profile_url %s, help_dir %s"%
                (login_post_url, user_profile_url, help_dir)
                )
            return HttpResponseRedirect(continuation_url)
        # Display login form
        localdata = (
            { "userid": userid
            , "help_filename": "local-help"
            , "user_profile_url": user_profile_url
            , "continuation_url": continuation_url
            , "info_head": request.GET.get("info_head", None)
            , "info_message": request.GET.get("info_message", None)
            , "error_head": request.GET.get("error_head", None)
            , "error_message": request.GET.get("error_message", None)
            })
        # Load help text if available
        if 'help_filename' in localdata:
            help_filepath = help_dir + "%(help_filename)s.md"%(localdata)
            if os.path.isfile(help_filepath):
                with open(help_filepath, "r") as helpfile:
                    localdata['help_markdown'] = helpfile.read()
        if 'help_markdown' in localdata:
            # FIX: this module did not import "markdown" at file level, so
            # rendering help text previously raised NameError.  Import it
            # locally, where it is needed.
            import markdown
            localdata['help_text'] = markdown.markdown(localdata['help_markdown'])
        # Render form & return control to browser
        template = loader.get_template('local_password.html')
        return HttpResponse(template.render(localdata, request=self.request))

    def post(self, request):
        """
        Handle POST of local user id and password: authenticate against the
        local Django user database, complete the login on success, or redirect
        back to the login flow with a suitable error message.  A cancelled
        login redirects to the continuation URL.
        """
        userid = request.POST.get("userid", "")
        password = request.POST.get("password", "")
        user_profile_url = request.POST.get("user_profile_url", "/no_user_profile_url_in_form/")
        continuation_url = request.POST.get("continuation_url", "/no_continuation_url_in_form/")
        if request.POST.get("login", None) == "Login":
            if not userid:
                log.info("No User ID specified")
                return HttpResponseRedirectLogin(request, "No User ID specified")
            log.info("djangoauthclient: userid %s"%userid)
            authuser = authenticate(username=userid, password=password)
            if authuser is None:
                return HttpResponseRedirectLogin(request,
                    login_message.USER_WRONG_PASSWORD%(userid))
            if not authuser.is_active:
                return HttpResponseRedirectLogin(request,
                    login_message.USER_ACCOUNT_DISABLED%(userid))
            if not authuser.email:
                return HttpResponseRedirectLogin(request,
                    login_message.USER_NO_EMAIL%(userid))
            # Complete the login
            login(request, authuser)
            # Copy required values to new session object (cf. HttpResponseRedirectLogin)
            request.session['recent_userid'] = userid
            request.session['user_profile_url'] = user_profile_url
            request.session['continuation_url'] = continuation_url
            log.info("LocalUserPasswordView: user.username: "+authuser.username)
            log.info("LocalUserPasswordView: user.first_name: "+authuser.first_name)
            log.info("LocalUserPasswordView: user.last_name: "+authuser.last_name)
            log.info("LocalUserPasswordView: user.email: "+authuser.email)
            return HttpResponseRedirectLogin(request)
        # Login cancelled: redirect to continuation
        # (which may just redisplay the login page)
        return HttpResponseRedirect(continuation_url)
class DjangoWebServerFlow(object):
    """
    This class presents an interface similar to "oauth2client" for initiating
    a login using a local Django user id and password.
    """

    def __init__(self, userid="", auth_uri=None, redirect_uri=None, **kwargs):
        """
        Initialize a new authentication flow object.

        userid          is the (default) user id for which login is being performed
        redirect_uri    is the URI to which control is redirected
                        when authentication is complete.

        Any additional keyword arguments are merged into the flow parameters
        (overriding the values above on key collision).
        """
        super(DjangoWebServerFlow, self).__init__()
        flow_params = (
            { 'userid':           userid
            , 'auth_uri':         auth_uri
            , 'redirect_uri':     redirect_uri
            , 'continuation_url': redirect_uri
            })
        flow_params.update(kwargs)
        self.params = flow_params
        return

    def step1_get_authorize_url(self):
        """
        Return a URL for a page that initiates the Django authentication process.
        """
        return uri_with_params(
            self.params['auth_uri'],
            { 'userid':           self.params['userid']
            , 'continuation_url': self.params['continuation_url']
            })
def django_flow_from_user_id(provider_details, userid="", auth_uri=None, redirect_uri=None):
    """
    Initialize and return a new local-Django authentication flow object.

    provider_details    is a dictionary of values and parameters that control
                        the authentication flow (accepted for interface
                        compatibility with the OAuth2 factory; not used here).
    redirect_uri        is the URI to which control is redirected
                        when authentication is complete.
    """
    flow = DjangoWebServerFlow(
        userid=userid,
        auth_uri=auth_uri,
        redirect_uri=redirect_uri
        )
    return flow
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/login/auth_django_client.py
|
auth_django_client.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2016, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import os
import json
import re
import traceback
import logging
log = logging.getLogger(__name__)
from requests_oauthlib import OAuth2Session
from django.urls import resolve, reverse
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.views import generic
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from utils.http_errors import error400values
from . import login_message
from .login_utils import (
HttpResponseRedirectWithQuery,
HttpResponseRedirectLogin
)
SCOPE_DEFAULT = ["openid", "profile", "email"]
# ---------------------------------------------------------------------------
#
# Factory function
#
# ---------------------------------------------------------------------------
def oauth2_flow_from_provider_data(provider_data, redirect_uri=None, state=None):
    """
    Create an OpenId Connect OAuth2 flow object from supplied provider details.

    provider_data
        dictionary containing provider details (including oauth2 client secrets).
    redirect_uri
        URI to which control is transferred when the OAuth2 authentication dance
        is completed.  If specified, overrides value from provider-file.
    state
        OAuth2 state value carried through the exchange, if any.
    """
    flow = oauth2_flow(provider_data, redirect_uri=redirect_uri, state=state)
    return flow
# ---------------------------------------------------------------------------
#
# Oauth2 flow object (based loosely on oauth2client flow object)
#
# ---------------------------------------------------------------------------
class oauth2_flow(object):
    """
    Choreographs the oauth2 dance used to obtain a user authentication credential.

    Used in three steps:
      1. `step1_get_authorize_url` returns the provider URL to which the user's
         browser is redirected for authentication;
      2. `step2_exchange` swaps the resulting authorization response for an
         access token;
      3. `step3_get_profile` retrieves the authenticated user's profile.
    """
    def __init__(self, provider_data, scope=None, state=None, redirect_uri=None):
        """
        Initialize a flow object with supplied provider data (provided as a dictionary).

        provider_data  dictionary with (at least) "client_id" and "auth_uri";
                       may also supply a default "scope".
        scope          OAuth2 scopes to request; defaults to the provider's
                       "scope" value, then SCOPE_DEFAULT.
        state          OAuth2 state token from a previous request, if any.
        redirect_uri   URI to which the provider redirects when done (required).
        """
        self._provider_data = provider_data
        # Scope precedence: explicit argument, then provider data, then default
        self._scope = scope or provider_data.get("scope", SCOPE_DEFAULT)
        assert redirect_uri, "Redirect URI not specified"
        self._redirect_uri = redirect_uri
        session = OAuth2Session(
            client_id=provider_data["client_id"],
            scope=self._scope,
            state=state,
            redirect_uri=self._redirect_uri
            )
        # authorization_url returns the URL to visit and a (possibly fresh) state token
        auth_uri, state = session.authorization_url(provider_data["auth_uri"])
        self._session  = session
        self._auth_uri = auth_uri
        self._state    = state
        # log.debug("oauth2_flow: provider_data %r"%(self._provider_data,))
        log.debug("oauth2_flow: scope        %r"%(self._scope,))
        log.debug("oauth2_flow: redirect_uri %r"%(self._redirect_uri,))
        log.debug("oauth2_flow: session      %r"%(self._session,))
        log.debug("oauth2_flow: auth_uri     %r"%(self._auth_uri,))
        log.debug("oauth2_flow: state        %r"%(self._state,))
        return
    def step1_get_state_token(self):
        # State token to be saved by the caller and checked on redirect-back.
        return self._state
    def step1_get_authorize_url(self):
        # Provider authorization URL to which the user's browser is redirected.
        log.info("step1_get_authorize_url: auth_uri %r", self._auth_uri)
        return self._auth_uri
    def step2_exchange(self, request):
        """
        Using a credentials provided in the supplied redirect request value,
        requests an access token for user authentication.

        Raises on failure (after logging the error); returns the token on success.
        """
        auth_resp     = request.build_absolute_uri()
        token_uri     = self._provider_data['token_uri']
        client_secret = self._provider_data['client_secret']
        log.debug("step2_exchange: token_uri %r", token_uri)
        log.debug("step2_exchange: auth_resp %r", auth_resp)
        # For debugging onlky.  Don't log this in a running system!
        # log.debug("step2_exchange: client_secret %r", client_secret)
        try:
            token = self._session.fetch_token(token_uri,
                client_secret=client_secret,
                authorization_response=auth_resp,
                timeout=5
                )
        except Exception as e:
            log.error("Failed to fetch token: %s"%(e,))
            # log.info(json.dumps(
            #     self._provider_data,
            #     sort_keys=True,
            #     indent=4,
            #     separators=(',', ': ')
            #     ))
            raise
        return token
    def step3_get_profile(self, token):
        """
        Uses saved credentials from `step2_exchange` to access the user profile,
        which is returned as a dictionary.  The content is determined by the identity
        provider service, but is expected to contain at least:
            { "verified_email": true,
              "email": "...",
              "given_name": "...",
              "family_name": "...",
            }
        """
        r = self._session.get(self._provider_data["profile_uri"])
        profile = json.loads(r.content)
        return profile
class OIDC_AuthDoneView(generic.View):
    """
    View class used to complete login process with authorization grant provided by
    OAuth2 authorization server.

    The calling application must set up the URL routing for this handler to be invoked.
    """
    def get(self, request):
        """
        Handle the OAuth2 redirect-back request: exchange the authorization
        grant for an access token, retrieve the user profile, authenticate
        the user, and complete the Django login.

        All failure paths redirect back to the login page via
        HttpResponseRedirectLogin with a diagnostic message.
        """
        # Look for authorization grant
        provider_data = request.session['login_provider_data']
        state         = request.session['oauth2_state']
        userid        = request.session['oauth2_userid']
        # session value "login_done_url" is set by login_views.confirm_authentication
        if 'login_done_url' not in request.session:
            # Session expired or was interrupted between login steps
            return HttpResponseRedirectLogin(request, login_message.SESSION_INTERRUPTED)
        login_done_url = request.build_absolute_uri(request.session['login_done_url'])
        provider       = provider_data['provider']
        flow = oauth2_flow_from_provider_data(
            provider_data,
            redirect_uri=login_done_url,
            state=state
            )
        # Get authenticated user details
        try:
            credential = flow.step2_exchange(request)
            profile    = flow.step3_get_profile(credential)
            log.debug("auth_oidc_client: userid %s, profile %r"%(userid, profile))
            authuser   = authenticate(
                username=userid, profile=profile
                )
        except Exception as e:
            log.error("Exception %r"%(e,))
            # For debugging only: don't log in running system
            # log.error("provider_data %r"%(provider_data,))
            ex_type, ex, tb = sys.exc_info()
            log.error("".join(traceback.format_exception(ex_type, ex, tb)))
            # log.error("".join(traceback.format_stack()))
            return HttpResponseRedirectLogin(request, str(e))
        # Check authenticated details for user id match any previous values.
        #
        # The user id is entered by the user on the login form, and is used as a key to
        # access authenticated user details in the Django user database.  The user id
        # itself is not checked by the Oauth2 login flow, other than for checking that
        # it contains only work characters
        #
        # Instead, we trust that the associated email address has been confirmed by the
        # OAuth2 provider, and don't allow login where the email address differs from any
        # currently saved email address for the user id used..  This aims to prevent a
        # new set of OAuth2 credentials being used for a previously created Django user id.
        #
        if not authuser:
            return HttpResponseRedirectLogin(request,
                login_message.USER_NOT_AUTHENTICATED%(userid, provider)
                )
        if not userid:
            # Get generated username
            userid = authuser.username
        if not re.match(r"\w+$", userid):
            # Reject user ids that are not purely "word" characters
            return HttpResponseRedirectLogin(
                request,
                login_message.USER_ID_SYNTAX%(userid)
                )
        if not authuser.email:
            return HttpResponseRedirectLogin(request,
                login_message.USER_NO_EMAIL%(userid)
                )
        try:
            olduser = User.objects.get(username=userid)
        except User.DoesNotExist:
            olduser = None
        if olduser:
            # Existing Django user: email must match to prevent account takeover
            if authuser.email != olduser.email:
                return HttpResponseRedirectLogin(request,
                    login_message.USER_WRONG_EMAIL%(userid, authuser.email, olduser.email)
                    )
        # Complete the login and save details
        authuser.save()
        login(request, authuser)
        request.session['login_recent_userid'] = userid
        log.info("OIDC_AuthDoneView: user.username:   "+authuser.username)
        log.info("OIDC_AuthDoneView: user.first_name: "+authuser.first_name)
        log.info("OIDC_AuthDoneView: user.last_name:  "+authuser.last_name)
        log.info("OIDC_AuthDoneView: user.email:      "+authuser.email)
        return HttpResponseRedirectLogin(request)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/login/auth_oidc_client.py
|
auth_oidc_client.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
try:
# Python3
from urllib.parse import (
urlparse, urljoin,
urlsplit, urlunsplit,
quote, unquote
)
from urllib.request import urlopen, Request, pathname2url
from urllib.error import HTTPError
except ImportError:
# Python2
from urlparse import urlparse, urljoin, urlsplit, urlunsplit
from urllib2 import urlopen, Request, HTTPError
from urllib import quote, unquote, pathname2url
def is_string(val):
    """
    Test whether the supplied value is a string (or, on Python 2, unicode) value.

    See: https://stackoverflow.com/a/33699705/324122
    """
    string_types = (str, u"".__class__)
    return isinstance(val, string_types)
def to_unicode(val):
    """
    Convert a supplied string value to Unicode text.

    Under Python 3 this is the identity function.

    @@deprecate this -- just use val
    """
    return val
def text_to_str(ustr):
    """
    Return native string value for supplied Unicode text.

    @@deprecate this -- just use value
    """
    result = str(ustr)
    return result
def bytes_to_str(bstr):
    """
    Return string value for supplied bytes.

    Non-ASCII bytes are silently dropped ('ignore' error handling).
    """
    # return bstr
    return bstr.decode('ascii', 'ignore')
def text_to_bytes(ustr):
    """
    Return bytes value for supplied string, encoded as UTF-8.

    The intent is that the string may be an ASCII or unicode string, but not
    something that has already been encoded.
    """
    encoded = ustr.encode('utf-8', 'ignore')
    return encoded
def bytes_to_unicode(bstr):
    """
    Return Unicode value for supplied (UTF-8 encoded) bytes.

    Raises UnicodeDecodeError if the bytes are not valid UTF-8.
    """
    return bstr.decode('utf-8')
# Single space as a native `str` value; used by `isoformat_space` below as
# the date/time separator.
str_space = text_to_str(' ')
def write_bytes(file, text):
    """
    Encode the supplied string as UTF-8 and write the resulting bytes
    to `file` (which must be open in binary mode).
    """
    encoded = text_to_bytes(text)
    file.write(encoded)
    return
def isoformat_space(datetime):
    """
    Return ISO-formatted date/time with a space separating date and time
    (rather than the default "T").
    """
    separator = str_space
    return datetime.isoformat(separator)
def get_message_type(msg_info):
    """
    Return content type of result returned by urlopen.

    The message info value returned by Python2's urllib2.urlopen has long been
    deprecated.  The newer methods return info() as an `email.message.Message`
    value, whose corresponding content-type method is `get_content_type`.
    """
    content_type = msg_info.get_content_type()
    return content_type
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/utils/py3porting.py
|
py3porting.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
from django.http import HttpResponse
def error(values):
    """
    Default error method using errorvalues

    `values` is a dictionary with 'status', 'reason', 'message' and
    'request_uri' keys (as produced by `errorvalues` below); the values are
    interpolated into a minimal HTML error page.

    Returns an HttpResponse carrying the page and the supplied status/reason.
    """
    responsebody = """
        <html>
        <head>
            <title>Error %(status)s: %(reason)s</title>
        </head>
        <body>
            <h1>Error %(status)s: %(reason)s</h1>
            <p>%(message)s</p>
            <p>Request URI: %(request_uri)s</p>
        </body>
        </html>
        """ % values
    # NOTE: requires Django 1.6 or later to allow reason parameter
    return HttpResponse(responsebody, status=values['status'], reason=values['reason'])
# Define values for display with common error cases.
def errorvalues(view, status, reason, message):
    """
    Returns a dictionary of values ('status', 'reason', 'message') for
    building an error response, with request-derived %-substitutions
    ('method', 'request_uri', 'accept_types', 'content_type') applied to
    the supplied message template.
    """
    request = view.request
    substitutions = {
        'method':       request.method,
        'request_uri':  request.build_absolute_uri(),
        'accept_types': request.META.get('HTTP_ACCEPT', "default_type"),
        'content_type': request.META.get('CONTENT_TYPE', "application/octet-stream"),
        }
    return {
        'status':  status,
        'reason':  reason,
        'message': message % substitutions,
        }
def error400values(view, message="Bad request to %(request_uri)s"):
    """Return error values for HTTP 400 (Bad request)."""
    return errorvalues(view, 400, "Bad request", message)
# Thin wrappers over `errorvalues`, one per HTTP status code.
def error401values(view):
    return errorvalues(view, 401, "Unauthorized",
        "Resource %(request_uri)s requires authentication for access"
        )
def error402values(view):
    return errorvalues(view, 402, "Payment required",
        "Resource %(request_uri)s: payment required"
        )
def error403values(view):
    """Return error values for HTTP 403 (Forbidden)."""
    # Fixed: previously returned status 401 with reason "Forbidden";
    # 403 is the correct status code for a forbidden request.
    return errorvalues(view, 403, "Forbidden",
        "Forbidden %(method)s access to resource %(request_uri)s"
        )
# Thin wrappers over `errorvalues`, one per HTTP status code.
def error404values(view):
    return errorvalues(view, 404, "Not found",
        "Resource %(request_uri)s not found"
        )
def error405values(view):
    return errorvalues(view, 405, "Method not allowed",
        "Method %(method)s is not recognized for %(request_uri)s"
        )
def error406values(view):
    return errorvalues(view, 406, "Not acceptable",
        "%(method)s returning %(accept_types)s not supported for %(request_uri)s"
        )
def error407values(view):
    return errorvalues(view, 407, "Proxy authentication required",
        "Resource %(request_uri)s: Proxy authentication required"
        )
def error408values(view):
    return errorvalues(view, 408, "Request timeout",
        "Resource %(request_uri)s: Request timeout"
        )
def error409values(view):
    """Return error values for HTTP 409 (Conflict)."""
    # Fixed: reason was "Requedst timeout" (a typo'd copy/paste of the 408
    # wrapper); the HTTP 409 reason phrase is "Conflict" (RFC 7231).
    return errorvalues(view, 409, "Conflict",
        "Resource %(request_uri)s: Conflict"
        )
# Thin wrappers over `errorvalues`, one per HTTP status code.
def error410values(view):
    return errorvalues(view, 410, "Gone",
        "Resource %(request_uri)s: Gone"
        )
def error411values(view):
    return errorvalues(view, 411, "Length required",
        "Resource %(request_uri)s: Length required"
        )
def error412values(view):
    return errorvalues(view, 412, "Precondition failed",
        "Resource %(request_uri)s: Precondition failed"
        )
def error413values(view):
    return errorvalues(view, 413, "Request entity too large",
        "Resource %(request_uri)s: Request entity too large"
        )
def error414values(view):
    return errorvalues(view, 414, "Request URI too long",
        "Resource %(request_uri)s: Request URI too long"
        )
def error415values(view):
    return errorvalues(view, 415, "Unsupported Media Type",
        "%(method)s with %(content_type)s not supported for %(request_uri)s"
        )
def error416values(view):
    return errorvalues(view, 416, "Requested range not satisfiable",
        "Resource %(request_uri)s: Requested range not satisfiable"
        )
def error417values(view):
    return errorvalues(view, 417, "Expectation failed",
        "Resource %(request_uri)s: Expectation failed"
        )
def error426values(view):
    return errorvalues(view, 426, "Upgrade required",
        "Resource %(request_uri)s: Upgrade required"
        )
def error428values(view):
    return errorvalues(view, 428, "Precondition required",
        "Resource %(request_uri)s: Precondition required"
        )
def error429values(view):
    return errorvalues(view, 429, "Too many requests",
        "Resource %(request_uri)s: Too many requests"
        )
def error431values(view):
    return errorvalues(view, 431, "Request header fields too large",
        "Resource %(request_uri)s: Request header fields too large"
        )
def error451values(view):
    return errorvalues(view, 451, "Unavailable for legal reasons",
        "Resource %(request_uri)s: Unavailable for legal reasons"
        )
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/utils/http_errors.py
|
http_errors.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2014, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
from utils.py3porting import is_string, to_unicode
class SwitchStdout:
    """
    Context handler class that switches standard output to a named file or
    supplied stream.

    If a string is supplied, it is treated as a filename that is opened for
    writing and closed on exit; otherwise it is assumed to be an open stream,
    which is left open.  `__enter__` returns the active output stream.

    See also http://code.activestate.com/recipes/577564-context-manager-for-low-level-redirection-of-stdou/
    I didn't use this because I wanted to be able to catch output to a StringIO stream for testing.
    """
    def __init__(self, fileorstr):
        self.fileorstr = fileorstr
        self.opened    = False
        return
    def __enter__(self):
        # Inlined string test (str or, on Python 2, unicode) so this class
        # does not depend on utils.py3porting.is_string
        if isinstance(self.fileorstr, (str, u"".__class__)):
            self.outstr = open(self.fileorstr, "w")
            self.opened = True
        else:
            self.outstr = self.fileorstr
        self.savestdout = sys.stdout
        sys.stdout      = self.outstr
        # Fixed: previously returned None, so `with SwitchStdout(...) as f:`
        # did not bind the active stream
        return self.outstr
    def __exit__(self, exctype, excval, exctraceback):
        if self.opened:
            self.outstr.close()
        sys.stdout = self.savestdout
        return False
class SwitchStderr(object):
    """
    Context handler class that switches standard error to a named file or
    supplied stream.

    (Same as SwitchStdout, but switches stderr.)  If a string is supplied, it
    is treated as a filename opened for writing and closed on exit; otherwise
    it is assumed to be an open stream.  `__enter__` returns the active stream.
    """
    def __init__(self, fileorstr):
        self.fileorstr = fileorstr
        self.opened    = False
        return
    def __enter__(self):
        # Inlined string test (str or, on Python 2, unicode) so this class
        # does not depend on utils.py3porting.is_string
        if isinstance(self.fileorstr, (str, u"".__class__)):
            self.outstr = open(self.fileorstr, "w")
            self.opened = True
        else:
            self.outstr = self.fileorstr
        self.savestderr = sys.stderr
        sys.stderr      = self.outstr
        # Fixed: previously returned None, so `with SwitchStderr(...) as f:`
        # did not bind the active stream
        return self.outstr
    def __exit__(self, exctype, excval, exctraceback):
        if self.opened:
            self.outstr.close()
        sys.stderr = self.savestderr
        return False
class SwitchStdin(object):
    """
    Context handler class that switches standard input to a named file or
    supplied stream.

    If a string is supplied, it is treated as a filename opened for reading
    and closed on exit; otherwise it is assumed to be an open stream.
    `__enter__` returns the active input stream.
    """
    def __init__(self, fileorstr):
        self.fileorstr = fileorstr
        self.opened    = False
        return
    def __enter__(self):
        # Inlined string test (str or, on Python 2, unicode) so this class
        # does not depend on utils.py3porting.is_string
        if isinstance(self.fileorstr, (str, u"".__class__)):
            # Fixed: was open(..., "w"), which truncated the input file and
            # yielded a write-only handle
            self.instr  = open(self.fileorstr, "r")
            self.opened = True
        else:
            self.instr = self.fileorstr
        self.savestdin = sys.stdin
        sys.stdin      = self.instr
        # Fixed: previously returned None, so `with SwitchStdin(...) as f:`
        # did not bind the active stream
        return self.instr
    def __exit__(self, exctype, excval, exctraceback):
        if self.opened:
            # Fixed: was `self.outin.close()` -- no such attribute, raising
            # AttributeError whenever a named file had been opened
            self.instr.close()
        sys.stdin = self.savestdin
        return False
if __name__ == "__main__":
    # Self-test: capture print output via a StringIO stream.
    # Fixed: `import StringIO` is Python-2-only and fails under Python 3
    # (despite this module's __future__ imports); use io.StringIO instead.
    import io
    outstr = io.StringIO()
    with SwitchStdout(outstr) as mystdout:
        print("Hello, ")
        print("world", file=mystdout)
    outtxt = outstr.getvalue()
    print(repr(outtxt))
    assert outtxt == "Hello, \nworld\n"
    outstr.close()
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/utils/StdoutContext.py
|
StdoutContext.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne and University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
from django.http import HttpResponse
from django.views import generic
class ContentNegotiationView(generic.View):
    """
    Generic view class with content negotiation decorators and generic error value methods

    Note: generic.View dispatcher assigns HTTPRequest object to self.request.
    """

    @staticmethod
    def accept_types(types):
        """
        Decorator to use associated function to render the indicated content types

        Invokes decorated method and returns its result if accept content type matches,
        otherwise returns None.
        """
        def decorator(func):
            def guard(self, *values, **kwargs):
                # Compare bare media types; parameters (e.g. ";q=0.9") are ignored
                accept_header = self.request.META.get('HTTP_ACCEPT', "*/*")
                accept_types  = [ a.split(';')[0].strip().lower()
                                  for a in accept_header.split(',') ]
                for t in types:
                    if t in accept_types:
                        return func(self, *values, **kwargs)
                return None
            return guard
        return decorator

    @staticmethod
    def content_types(types):
        """
        Decorator to use associated function when supplied with (one of)
        the indicated content types in a POST request.

        Invokes decorated method and returns its result if content type matches,
        otherwise returns None.
        """
        def decorator(func):
            def guard(self, *values, **kwargs):
                content_type = self.request.META.get('CONTENT_TYPE', "application/octet-stream")
                if content_type.split(';')[0].strip().lower() in types:
                    return func(self, *values, **kwargs)
                return None
            return guard
        return decorator

    def get_request_uri(self):
        """
        Utility function returns URI of current request
        (useful when building new URIs with POST, etc.)

        Cf. https://docs.djangoproject.com/en/dev/ref/request-response/#methods
        """
        return self.request.build_absolute_uri()

    def get_request_host(self):
        """
        Utility function returns base URI with HOST part of current request

        @@TODO: return scheme part of the request.  request.scheme is introduced in recent Django

        Cf. https://docs.djangoproject.com/en/dev/ref/request-response/#methods
        """
        scheme = "https" if self.request.is_secure() else "http"
        return "%s://%s"%(scheme, self.request.get_host())

    def get_request_path(self):
        """
        Utility function returns path of current request URI.

        Cf. https://docs.djangoproject.com/en/dev/ref/request-response/#methods
        """
        return self.request.get_full_path()

    # Define values for display with common error cases.
    #
    # @@TODO: This should really be a separate mixin.
    #         Use http_errors module

    def error(self, values):
        """
        Default error method using errorvalues
        """
        responsebody = """
            <html>
            <head>
                <title>Error %(status)s: %(reason)s</title>
            </head>
            <body>
                <h1>Error %(status)s: %(reason)s</h1>
                <p>%(message)s</p>
            </body>
            </html>
            """ % values
        # NOTE: requires Django 1.6 or later to allow reason parameter
        return HttpResponse(responsebody, status=values['status'], reason=values['reason'])

    def errorvalues(self, status, reason, message):
        """
        Returns a dictionary of values ('status', 'reason', 'message') for
        building an error response, with request-derived %-substitutions
        applied to the supplied message template.
        """
        return (
            { 'status':   status
            , 'reason':   reason
            , 'message':  message%
                { 'method':         self.request.method
                , 'request_uri':    self.request.build_absolute_uri()
                , 'accept_types':   self.request.META.get('HTTP_ACCEPT',"default_type")
                , 'content_type':   self.request.META.get('CONTENT_TYPE', "application/octet-stream")
                }
            })

    def error400values(self, message="Bad request to %(request_uri)s"):
        return self.errorvalues(400, "Bad request", message)

    def error401values(self, scope="%(method)s"):
        msg = "Resource %s requires authentication for %s access"%("%(request_uri)s", scope)
        return self.errorvalues(401, "Unauthorized", msg)

    def error402values(self):
        return self.errorvalues(402, "Payment required",
            "Resource %(request_uri)s: payment required"
            )

    def error403values(self, scope="%(method)s"):
        msg = "No %s access permission for resource %s"%(scope, "%(request_uri)s")
        return self.errorvalues(403, "Forbidden", msg)

    def error404values(self):
        return self.errorvalues(404, "Not found",
            "Resource %(request_uri)s not found"
            )

    def error405values(self):
        return self.errorvalues(405, "Method not allowed",
            "Method %(method)s is not recognized for %(request_uri)s"
            )

    def error406values(self):
        return self.errorvalues(406, "Not acceptable",
            "%(method)s returning %(accept_types)s not supported for %(request_uri)s"
            )

    def error407values(self):
        return self.errorvalues(407, "Proxy authentication required",
            "Resource %(request_uri)s: Proxy authentication required"
            )

    def error408values(self):
        return self.errorvalues(408, "Request timeout",
            "Resource %(request_uri)s: Request timeout"
            )

    def error409values(self):
        # Fixed: reason was "Requedst timeout" (a typo'd copy/paste of the
        # 408 method); the HTTP 409 reason phrase is "Conflict" (RFC 7231).
        return self.errorvalues(409, "Conflict",
            "Resource %(request_uri)s: Conflict"
            )

    def error410values(self):
        return self.errorvalues(410, "Gone",
            "Resource %(request_uri)s: Gone"
            )

    def error411values(self):
        return self.errorvalues(411, "Length required",
            "Resource %(request_uri)s: Length required"
            )

    def error412values(self):
        return self.errorvalues(412, "Precondition failed",
            "Resource %(request_uri)s: Precondition failed"
            )

    def error413values(self):
        return self.errorvalues(413, "Request entity too large",
            "Resource %(request_uri)s: Request entity too large"
            )

    def error414values(self):
        return self.errorvalues(414, "Request URI too long",
            "Resource %(request_uri)s: Request URI too long"
            )

    def error415values(self):
        return self.errorvalues(415, "Unsupported Media Type",
            "%(method)s with %(content_type)s not supported for %(request_uri)s"
            )

    def error416values(self):
        return self.errorvalues(416, "Requested range not satisfiable",
            "Resource %(request_uri)s: Requested range not satisfiable"
            )

    def error417values(self):
        return self.errorvalues(417, "Expectation failed",
            "Resource %(request_uri)s: Expectation failed"
            )

    def error426values(self):
        return self.errorvalues(426, "Upgrade required",
            "Resource %(request_uri)s: Upgrade required"
            )

    def error428values(self):
        return self.errorvalues(428, "Precondition required",
            "Resource %(request_uri)s: Precondition required"
            )

    def error429values(self):
        return self.errorvalues(429, "Too many requests",
            "Resource %(request_uri)s: Too many requests"
            )

    def error431values(self):
        return self.errorvalues(431, "Request header fields too large",
            "Resource %(request_uri)s: Request header fields too large"
            )

    def error451values(self):
        return self.errorvalues(451, "Unavailable for legal reasons",
            "Resource %(request_uri)s: Unavailable for legal reasons"
            )

    def error500values(self, message="Server error from request to %(request_uri)s"):
        return self.errorvalues(500, "Server error", message)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/utils/ContentNegotiationView.py
|
ContentNegotiationView.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
import re
try:
# Python3
from urllib.parse import (
urlparse, urljoin,
urlsplit, urlunsplit,
quote, unquote
)
from urllib.request import urlopen, Request
from urllib.error import HTTPError
except ImportError:
# Python2
from urlparse import urlparse, urljoin, urlsplit, urlunsplit
from urllib2 import urlopen, Request, HTTPError
from urllib import quote, unquote
# From RFC 3986:
gen_delims = ":/?#[]@"
sub_delims = "!$&'()*+,;="
unreserved = "-._~"
# subset of above safe in query string (no "?", "&" or #")
query_safe = re.sub('[?&#]', '', gen_delims + sub_delims + unreserved)
def uri_base(uri):
    """
    Get the base URI from the supplied URI by removing any parameters and/or fragments.
    """
    for separator in ("#", "?"):
        uri = uri.split(separator, 1)[0]
    return uri
def uri_query_key_val(p):
    """
    Returns a key-value pair for a supplied query parameter string.

    The value part returned has %-escaping unapplied.
    If no '=' is present, the value part returned is an empty string.
    """
    if "=" in p:
        key, val = p.split("=", 1)
    else:
        key, val = p, ""
    return (key, unquote(val))
def uri_param_dict(uri):
    """
    Extract a dictionary of query parameters from the supplied URI.

    >>> uri_param_dict("base:path?q1=p1&q2=p2#frag") == { 'q1': "p1", 'q2': "p2"}
    True
    >>> uri_param_dict("base:path?q1=p1=p1&q2=p2%26p2&q3") == { 'q1': "p1=p1", 'q2': "p2&p2", 'q3': "" }
    True
    """
    without_fragment = uri.split("#", 1)[0]
    query = (without_fragment.split("?", 1) + [""])[1]
    result = {}
    for param in query.split("&"):
        key, val = uri_query_key_val(param)
        result[key] = val
    return result
def build_dict(*param_dicts, **param_dict):
    """
    Create a merged dictionary from the supplied dictionaries and keyword parameters.

    Positional dictionaries are merged in order (later ones win), and all of
    them override same-named keyword parameters.  None entries are skipped.
    """
    merged = dict(param_dict)
    for extra in param_dicts:
        if extra is not None:
            # log.info("param_dicts %r"%(extra,))
            merged.update(extra)
    return merged
def uri_params(*param_dicts, **param_dict):
    """
    Construct a URI parameter string from the supplied dictionary values
    (or values which are convertible to a dictionary using `dict()`).

    Entries with empty/falsy values are omitted; returns "" when there is
    nothing to encode, otherwise a string beginning with "?".
    """
    uri_param_dict = build_dict(*param_dicts, **param_dict)
    encoded_parts = []
    for pnam in uri_param_dict:
        pval = uri_param_dict[pnam]
        if pval:
            # log.info("pnam %s, pval %s, uri_param_dict %r"%(pnam, pval, uri_param_dict))
            encoded_parts.append(pnam + "=" + quote(pval, query_safe))
    if not encoded_parts:
        return ""
    return "?" + "&".join(encoded_parts)
def uri_with_params(base_uri, *param_dicts, **param_dict):
    """
    Construct a URI from the supplied base URI (with any parameters and/or fragment removed)
    and URI parameters created using the supplied dictionary values.
    """
    stripped_uri = uri_base(base_uri)
    query_string = uri_params(*param_dicts, **param_dict)
    return stripped_uri + query_string
# Run the doctests embedded in this module's docstrings when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/utils/uri_builder.py
|
uri_builder.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2013-2014, Graham Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
class Annalist_Manager_Error(Exception):
    """
    annalist-manager error.

    errno   optional numeric status code (see AM_* values below)
    value   optional value associated with the error
    msg     human-readable message
    """
    def __init__(self, errno=None, value=None, msg="Annalist manager error"):
        self._msg   = msg
        self._errno = errno
        self._value = value
        return
    def __str__(self):
        txt = self._msg
        if self._value: txt += ": "+repr(self._value)
        return txt
    def __repr__(self):
        # Fixed: the format string previously had three placeholders but the
        # tuple supplied only two values, so repr() raised TypeError and the
        # errno was never shown.
        return ( "Annalist_Manager_Error(%r, %r, value=%r)"%
                 (self._msg, self._errno, self._value))
# Status return codes (used as annalist-manager process exit codes)
AM_SUCCESS          = 0         # Success
AM_BADCMD           = 2         # Command error
AM_EXISTS           = 5         # directory already exists
AM_NOTEXISTS        = 6         # Directory does not exist
AM_NOSETTINGS       = 7         # No configuration settings found (e.g. personal, shared, devel, etc.)
AM_UNEXPECTEDARGS   = 8         # Unexpected arguments supplied
AM_NOUSERPASS       = 9         # No username or password for createuser or creaeadminuser
AM_MISSINGEMAIL     = 10        # No email address for createuser or creaeadminuser
AM_UNKNOWNCMD       = 11        # Unknown command name for help
AM_USEREXISTS       = 12        # Username for creation already exists
AM_USERNOTEXISTS    = 13        # Username for deletion does not exist
AM_NOCOLLECTION     = 14        # Identified collection not found
AM_COLLECTIONEXISTS = 14        # Identified collection already exists
                                # NOTE(review): duplicates AM_NOCOLLECTION (14),
                                # probably a copy/paste slip -- confirm the intended
                                # value before renumbering, as these are externally
                                # visible exit codes.
AM_INSTALLCOLLFAIL  = 15        # Failed to install collection
AM_COPYCOLLFAIL     = 16        # Failed to copy collection
AM_MIGRATECOLLFAIL  = 17        # Failed to migrate collection
AM_COPYENTITYFAIL   = 18        # Failed to copy entity
AM_NOSERVERPIDFILE  = 19        # Could not find server PID file
AM_PIDNOTFOUND      = 20        # Could not find process with PID
AM_SERVERALREADYRUN = 21        # Server already run (saved PID found)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_manager/am_errors.py
|
am_errors.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2013-2014, Graham Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import os
import os.path
import re
import argparse
import logging
import errno
log = logging.getLogger(__name__)
# if __name__ == "__main__":
dirhere = os.path.dirname(os.path.realpath(__file__))
annroot = os.path.dirname(os.path.join(dirhere))
sys.path.insert(0, annroot)
# sys.path.insert(0, dirhere)
import annalist
from . import am_errors
from .am_runtests import am_runtests
from .am_initialize import am_initialize, am_collectstatic
from .am_createsite import am_createsite, am_updatesite
from .am_runserver import (
am_runserver, am_stopserver, am_pidserver, am_rundevserver,
am_serverlog, am_accesslog, am_errorlog, am_sitedirectory,
am_settingsmodule, am_settingsfile, am_settingsdir,
am_version
)
from .am_createuser import (
am_createlocaluser, am_createadminuser, am_defaultadminuser, am_updateadminuser,
am_setuserpermissions, am_setdefaultpermissions, am_setpublicpermissions,
am_deleteuser
)
from .am_managecollections import (
am_installcollection, am_copycollection,
am_migrationreport, am_migratecollection, am_migrateallcollections
)
from .am_help import am_help, command_summary_help
VERSION = annalist.__version__
def progname(args):
    """Return the program name: the basename of argv[0]."""
    invoked_as = args[0]
    return os.path.basename(invoked_as)
def parseCommandArgs(argv):
    """
    Parse command line arguments

    argv            argument list from command line (excluding the program name)

    Returns the parsed options value (argparse Namespace) when a command was
    supplied, otherwise prints a usage message to stderr and returns None.
    """
    # create a parser for the command line options
    parser = argparse.ArgumentParser(
                description="Annalist site management utility",
                formatter_class=argparse.RawDescriptionHelpFormatter,
                epilog=command_summary_help
                )
    parser.add_argument('--version', action='version', version='%(prog)s '+VERSION)
    parser.add_argument("-c", "--configuration",
                        action='store',
                        dest="configuration", metavar="CONFIG",
                        default="personal",
                        #choices=['personal', 'shared', 'devel', 'runtests'],
                        help="Select site configuration by name (e.g. personal, shared, devel, runtests.")
    parser.add_argument("-p", "--personal",
                        action='store_true',
                        dest="config_p", # metavar="PERSONAL",
                        help="Select personal site configuration.")
    parser.add_argument("-d", "--development",
                        action='store_true',
                        dest="config_d", # metavar="DEVELOPMENT",
                        help="Select development site configuration.")
    parser.add_argument("-s", "--shared",
                        action='store_true',
                        dest="config_s", # metavar="SHARED",
                        help="Select shared site configuration.")
    parser.add_argument("-f", "--force",
                        action='store_true',
                        dest="force",
                        help="Force overwrite of existing site data or collection.")
    parser.add_argument("--debug",
                        action="store_true",
                        dest="debug",
                        default=False,
                        help="Run with full debug output enabled. "+
                             "Also creates log file 'annalist-manager.log' in the working directory"
                        )
    # nargs=None: exactly one (required) command argument
    parser.add_argument("command", metavar="COMMAND",
                        nargs=None,
                        help="sub-command, one of the options listed below."
                        )
    parser.add_argument("args", metavar="ARGS",
                        nargs="*",
                        help="Additional arguments, depending on the command used."
                        )
    # parse command line now
    options = parser.parse_args(argv)
    # Shortcut flags (-d/-p/-s) override any -c/--configuration value;
    # note -p takes precedence over -d, and -s over both, when combined.
    if options:
        if options.config_d: options.configuration = "devel"
        if options.config_p: options.configuration = "personal"
        if options.config_s: options.configuration = "shared"
    if options and options.command:
        return options
    print("No valid usage option given.", file=sys.stderr)
    parser.print_usage()
    return None
def run(userhome, userconfig, options, progname):
    """
    Command dispatcher.

    Matches the supplied sub-command against an ordered table of
    distinguishing prefixes, and invokes the corresponding handler.
    The table order reproduces the original chain of prefix tests,
    so matching behaviour is unchanged.
    """
    command_table = (
        # (prefix,       handler)
        ( "runt",       lambda: am_runtests(annroot, options) )                        # runtests
        , ( "init",       lambda: am_initialize(annroot, userhome, userconfig, options) )   # initialize (installation, django database)
        , ( "collect",    lambda: am_collectstatic(annroot, userhome, userconfig, options) ) # collect static data
        , ( "createl",    lambda: am_createlocaluser(annroot, userhome, options) )      # createlocaluser
        , ( "createa",    lambda: am_createadminuser(annroot, userhome, options) )      # createadminuser
        , ( "defaulta",   lambda: am_defaultadminuser(annroot, userhome, options) )     # defaultadminuser
        , ( "updatea",    lambda: am_updateadminuser(annroot, userhome, options) )      # updateadminuser
        , ( "setuse",     lambda: am_setuserpermissions(annroot, userhome, options) )   # setuserpermissions
        , ( "setdef",     lambda: am_setdefaultpermissions(annroot, userhome, options) ) # setdefaultpermissions
        , ( "setpub",     lambda: am_setpublicpermissions(annroot, userhome, options) ) # setpublicpermissions
        , ( "deleteu",    lambda: am_deleteuser(annroot, userhome, options) )           # deleteuser
        , ( "creates",    lambda: am_createsite(annroot, userhome, options) )           # createsitedata
        , ( "updates",    lambda: am_updatesite(annroot, userhome, options) )           # updatesitedata
        , ( "installc",   lambda: am_installcollection(annroot, userhome, options) )    # installcollection
        , ( "copyc",      lambda: am_copycollection(annroot, userhome, options) )       # copycollection
        , ( "migrationr", lambda: am_migrationreport(annroot, userhome, options) )      # migrationreport
        , ( "migratec",   lambda: am_migratecollection(annroot, userhome, options) )    # migratecollection
        , ( "migratea",   lambda: am_migrateallcollections(annroot, userhome, options) ) # migrateallcollections
        , ( "runs",       lambda: am_runserver(annroot, userhome, options) )            # runserver
        , ( "stop",       lambda: am_stopserver(annroot, userhome, options) )           # stopserver
        , ( "pid",        lambda: am_pidserver(annroot, userhome, options) )            # pidserver
        , ( "rund",       lambda: am_rundevserver(annroot, userhome, options) )         # rundevserver
        , ( "serv",       lambda: am_serverlog(annroot, userhome, options) )            # serverlog
        , ( "acc",        lambda: am_accesslog(annroot, userhome, options) )            # accesslog
        , ( "err",        lambda: am_errorlog(annroot, userhome, options) )             # errorlog
        , ( "site",       lambda: am_sitedirectory(annroot, userhome, options) )        # sitedirectory
        , ( "settingsm",  lambda: am_settingsmodule(annroot, userhome, options) )       # settingsmodule
        , ( "settingsf",  lambda: am_settingsfile(annroot, userhome, options) )         # settingsfile
        , ( "settingsd",  lambda: am_settingsdir(annroot, userhome, options) )          # settingsdir
        , ( "ver",        lambda: am_version(annroot, userhome, options) )              # version
        , ( "help",       lambda: am_help(options, progname) )                          # help
        )
    for prefix, handler in command_table:
        if options.command.startswith(prefix):
            return handler()
    print("Un-recognised sub-command: %s"%(options.command), file=sys.stderr)
    print("Use '%s --help' to see usage summary"%(progname), file=sys.stderr)
    return am_errors.AM_BADCMD
def runCommand(userhome, userconfig, argv):
    """
    Run program with supplied configuration base directory,
    configuration directory and command arguments.

    This is called by main function (below), and also by test suite routines.

    Returns exit status.
    """
    options = parseCommandArgs(argv[1:])
    if options and options.debug:
        # NOTE: file-based logging here doesn't always work as expected -
        # some kind of interference with Django log settings?  See:
        # https://stackoverflow.com/questions/20240464/python-logging-file-is-not-working-when-using-logging-basicconfig
        logging.basicConfig(
            level=logging.DEBUG,
            filename="annalist-manager.log", filemode="w",
            force=True      # 'force' option added in Python 3.8
            )
    else:
        logging.basicConfig(level=logging.INFO, force=True)
    log.debug("runCommand: argv %s"%(repr(argv)))
    log.debug("Options: %s"%(repr(options)))
    if options:
        status = run(userhome, userconfig, options, os.path.basename(argv[0]))
    else:
        status = am_errors.AM_BADCMD
    return status
def runMain():
    """
    Main program transfer function for setup.py console script
    """
    home = os.path.expanduser("~")
    config = os.path.join(home, ".annalist")
    return runCommand(home, config, sys.argv)
if __name__ == "__main__":
    # Program invoked from the command line:
    # make the package's parent directory importable, then run.
    parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    sys.path.insert(0, parent_dir)
    sys.exit(runMain())
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_manager/am_main.py
|
am_main.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import sys
import importlib
import logging
log = logging.getLogger(__name__)
import django
from utils.SuppressLoggingContext import SuppressLogging
from annalist.layout import Layout
from annalist.models.site import Site
from annalist_manager import am_errors
from annalist_manager.am_errors import Annalist_Manager_Error
class AnnalistSettings(object):
    """
    Access Annalist settings indicated by command line options.

    annroot     is the root directory for the annalist software installation.
    userhome    is the home directory for the current user.
    options     contains options parsed from the command line.
    """

    def __init__(self, annroot, userhome, options):
        """
        Initialise AnnalistSettings object.

        Raises Annalist_Manager_Error (AM_NOSETTINGS) if the settings file
        named by the configuration option does not exist.
        """
        config = options.configuration
        self.configname = config
        self.filename   = os.path.join(annroot, "annalist_site/settings", "%s.py"%(config))
        self.modulename = "annalist_site.settings.%s"%(config)
        log.debug("annalist_root %s"%annroot)
        log.debug("settings module filename %s"%self.filename)
        if not os.path.isfile(self.filename):
            raise Annalist_Manager_Error(
                errno=am_errors.AM_NOSETTINGS, value=config,
                msg="Annalist settings not found"
                )
        return
def am_get_settings(annroot, userhome, options):
    """
    Access Annalist settings indicated by command line options.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the current user.
    options     contains options parsed from the command line.

    Returns an AnnalistSettings object if settings are successfully located,
    otherwise None.
    """
    try:
        return AnnalistSettings(annroot, userhome, options)
    except Annalist_Manager_Error:
        # Settings file for the requested configuration does not exist
        return None
def am_get_site_settings(annroot, userhome, options):
    """
    Access site settings, set up corresponding django configuration and return the settings module
    """
    settings = am_get_settings(annroot, userhome, options)
    if settings is None:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return None
    with SuppressLogging(logging.INFO):
        # Point Django at the selected settings module, import it, and
        # make sure the site base directory exists before django.setup()
        os.environ['DJANGO_SETTINGS_MODULE'] = settings.modulename
        settings_module = importlib.import_module(settings.modulename)
        if not os.path.exists(settings_module.BASE_SITE_DIR):
            os.makedirs(settings_module.BASE_SITE_DIR)
        django.setup()
    return settings_module
def am_get_site(sitesettings):
    """
    Get site object corresponding to supplied settings
    """
    layout = Layout(sitesettings.BASE_DATA_DIR, sitesettings.SITE_DIR_NAME)
    # "annalist_site:" is the fixed site base URI reference
    return Site("annalist_site:", layout.SITE_PATH)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_manager/am_settings.py
|
am_settings.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import sys
import importlib
import re
import logging
# import subprocess
log = logging.getLogger(__name__)
#@@ import django
from annalist.identifiers import ANNAL, RDFS
from annalist.models.annalistuser import AnnalistUser
from utils.SuppressLoggingContext import SuppressLogging
from . import am_errors
from .am_settings import am_get_settings, am_get_site_settings, am_get_site
from .am_getargvalue import getarg, getargvalue, getsecret
def create_user_permissions(site, user_id, user_uri, user_name, user_comment, user_permissions):
    """
    Create (or update) an Annalist user-permissions record in the site data
    collection, and return the resulting AnnalistUser entity.
    """
    values = (
        { ANNAL.CURIE.type:             ANNAL.CURIE.User
        , RDFS.CURIE.label:             user_name
        , RDFS.CURIE.comment:           user_comment
        , ANNAL.CURIE.user_uri:         "%s"%(user_uri)
        , ANNAL.CURIE.user_permissions: user_permissions
        })
    return AnnalistUser.create(site.site_data_collection(), user_id, values)
def delete_user_permissions(site, user_id):
    """
    Remove the Annalist user-permissions record for the given user id
    from the site data collection.
    """
    AnnalistUser.remove(site.site_data_collection(), user_id)
def get_user_name(options, prompt_prefix):
    """
    Obtain a user name from the first command argument, or by prompting,
    re-prompting until the value matches the permitted username syntax.
    """
    name_pattern = r"^[a-zA-Z0-9@.+_-]+$"
    prompt = "%s name: "%prompt_prefix
    name = getargvalue(getarg(options.args, 0), prompt)
    while not re.match(name_pattern, name):
        print("Invalid username %s - re-enter"%name, file=sys.stderr)
        name = getargvalue(None, prompt)
    return name
def get_user_details(user_name, options, prompt_prefix):
    """
    Get user details (email, first name, last name)

    Values are taken from positional command arguments 1..3 where supplied,
    otherwise solicited interactively; the email value is re-prompted until
    it matches a basic address syntax.

    Returns a dictionary of user details, including the supplied user_name.
    """
    email_pattern     = r"^[A-Za-z0-9.+_-]+@([A-Za-z0-9_-]+)(\.[A-Za-z0-9_-]+)*$"
    email_prompt      = "%s email: "%prompt_prefix
    first_name_prompt = "%s first name: "%prompt_prefix
    last_name_prompt  = "%s last name: "%prompt_prefix
    # Get other values
    email = getargvalue(getarg(options.args, 1), email_prompt)
    while not re.match(email_pattern, email):
        print("Invalid email address %s - re-enter"%email, file=sys.stderr)
        email = getargvalue(None, email_prompt)
    first_name = getargvalue(getarg(options.args, 2), first_name_prompt)
    last_name  = getargvalue(getarg(options.args, 3), last_name_prompt)
    return (
        { 'name':       user_name
        , 'email':      email
        , 'uri':        "mailto:%s"%email
        , 'first_name': first_name
        , 'last_name':  last_name
        })
def get_user_permissions(options, pos, prompt_prefix):
    """
    Get user permissions to apply.

    options         parsed command line options; permissions may appear at
                    options.args[pos] as a whitespace-separated string.
    pos             index of the command argument that may hold permissions.
    prompt_prefix   string used in the prompt soliciting the permissions.

    Returns the permissions as a list of strings.
    """
    perms_pattern = r"^([A-Za-z0-9_-]+(\s+[A-Za-z0-9_-]+)*)?$"
    perms_prompt  = "%s permissions: "%prompt_prefix
    perms = getargvalue(getarg(options.args, pos), perms_prompt)
    while not re.match(perms_pattern, perms):
        print("Invalid permissions %s - re-enter"%perms, file=sys.stderr)
        perms = getargvalue(None, perms_prompt)
    return perms.split()
def create_django_user(user_type, user_details):
    """
    Create Django user (prompts for password)

    user_type       is "superuser", "staff" or "normal"
    user_details    dict with 'name', 'email', 'first_name' and 'last_name' keys.

    Returns am_errors.AM_SUCCESS, or AM_USEREXISTS if a Django account with
    the given name already exists.
    """
    from django.contrib.auth.models import User # import deferred until after sitesettings import
    # Check user does not already exist
    if User.objects.filter(username=user_details['name']):
        print("Django user %s already exists"%user_details['name'], file=sys.stderr)
        return am_errors.AM_USEREXISTS
    # Prompt for password, repeating until the two entries agree
    password   = getsecret("Password: ")
    password_c = getsecret("Re-enter password: ")
    while password != password_c:
        print("Password values mismatch - try again", file=sys.stderr)
        password   = getsecret("Password: ")
        password_c = getsecret("Re-enter password: ")
    # Have all the details - create the user in the Django user database; see:
    #   https://docs.djangoproject.com/en/1.7/ref/contrib/auth/#django.contrib.auth.models.User
    #   https://docs.djangoproject.com/en/1.7/ref/contrib/auth/#manager-methods
    user = User.objects.create_user(user_details['name'], user_details['email'], password)
    user.first_name   = user_details['first_name']
    user.last_name    = user_details['last_name']
    user.is_active    = True
    user.is_staff     = user_type in ["staff", "superuser"]
    user.is_superuser = user_type in ["superuser"]
    user.save()
    return am_errors.AM_SUCCESS
def read_django_user(user_name):
    """
    Read details of the indicated Django user and return a user record object,
    otherwise return None.
    """
    from django.contrib.auth.models import User # import deferred until after sitesettings import
    matches = User.objects.filter(username=user_name)
    return matches[0] if matches else None
def make_django_user_details(user_name, django_user):
    """
    Assemble details of the indicated Django user and return a user_details structure.
    """
    email = django_user.email
    return (
        { 'name':       user_name
        , 'email':      email
        , 'uri':        "mailto:%s"%email
        , 'first_name': django_user.first_name
        , 'last_name':  django_user.last_name
        })
def read_django_user_details(user_name):
    """
    Read details of the indicated Django user and return a user_details structure,
    otherwise return None.
    """
    record = read_django_user(user_name)
    if record is None:
        return None
    return make_django_user_details(user_name, record)
def create_site_permissions(sitesettings, user_details, permissions):
    """
    Create a site-wide permissions record for the indicated user.

    sitesettings    Django settings module for the target site.
    user_details    dict with at least 'name' and 'uri' keys; 'label' and
                    'comment' are synthesized from the name fields if not
                    already present (NOTE: fills them in in-place).
    permissions     list of permission tokens (e.g. ["VIEW", "CREATE"]).

    Returns am_errors.AM_SUCCESS.
    """
    site = am_get_site(sitesettings)
    # Use idiomatic membership tests; fill in display label/comment defaults
    if 'label' not in user_details:
        user_details['label'] = (
            "%(first_name)s %(last_name)s"%user_details
            )
    if 'comment' not in user_details:
        user_details['comment'] = (
            "User %(name)s: site permissions for %(first_name)s %(last_name)s"%user_details
            )
    create_user_permissions(
        site, user_details['name'], user_details['uri'],
        user_details['label'],
        user_details['comment'],
        permissions
        )
    return am_errors.AM_SUCCESS
def am_createlocaluser(annroot, userhome, options,
        prompt_prefix="Local user",
        user_type="other",
        user_perms=["VIEW"]
        ):
    """
    Create Annalist/Django local user account.

    annroot         is the root directory for the Annalist software installation.
    userhome        is the home directory for the host system user issuing the command.
    options         contains options parsed from the command line.
    prompt_prefix   string used in prompts to solicit user details.
    user_type       "superuser", "staff" or "other".
                    ("staff" can access django admin site, but doesn't bypass permissions.)
    user_perms      site permissions granted to the new user.

    Returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    if len(options.args) > 4:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    site_settings = am_get_site_settings(annroot, userhome, options)
    if not site_settings:
        return am_errors.AM_NOSETTINGS
    name = get_user_name(options, prompt_prefix)
    # Bail out early if a Django account with this name already exists
    from django.contrib.auth.models import User # import deferred until after sitesettings import
    if User.objects.filter(username=name):
        print("Django user %s already exists"%name, file=sys.stderr)
        return am_errors.AM_USEREXISTS
    details = get_user_details(name, options, prompt_prefix)
    status = create_django_user(user_type, details)
    if status == am_errors.AM_SUCCESS:
        status = create_site_permissions(site_settings, details, user_perms)
    return status
def am_createadminuser(annroot, userhome, options):
    """
    Create Annalist/Django admin/superuser account.  Once created, this can
    be used to create additional users through the site 'admin' link.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    Returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    admin_perms = ["VIEW", "CREATE", "UPDATE", "DELETE", "CONFIG", "ADMIN"]
    return am_createlocaluser(
        annroot, userhome, options,
        prompt_prefix="Admin user",
        user_type="superuser",
        user_perms=admin_perms
        )
def am_defaultadminuser(annroot, userhome, options):
    """
    Create default Annalist/Django admin/superuser account.

    Creates an admin account with default values:
        username:   admin
        email:      admin@localhost
        firstname:  Admin
        lastname:   User
    Prompts for a password, as for `createadminuser`.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    Returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    if len(options.args) > 0:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    site_settings = am_get_site_settings(annroot, userhome, options)
    if not site_settings:
        return am_errors.AM_NOSETTINGS
    default_admin = (
        { 'name':       "admin"
        , 'email':      "admin@localhost"
        , 'uri':        "mailto:admin@localhost"
        , 'first_name': "Admin"
        , 'last_name':  "User"
        })
    print("Creating user %(name)s"%default_admin, file=sys.stderr)
    status = create_django_user("superuser", default_admin)
    if status == am_errors.AM_SUCCESS:
        status = create_site_permissions(
            site_settings, default_admin,
            ["VIEW", "CREATE", "UPDATE", "DELETE", "CONFIG", "ADMIN"]
            )
    return status
def am_updateadminuser(annroot, userhome, options):
    """
    Update existing Django user to admin status.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    Returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    # see:
    # https://docs.djangoproject.com/en/1.7/ref/contrib/auth/#django.contrib.auth.models.User
    prompt_prefix = "Update user"
    if len(options.args) > 1:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    site_settings = am_get_site_settings(annroot, userhome, options)
    if not site_settings:
        return am_errors.AM_NOSETTINGS
    name = get_user_name(options, prompt_prefix)
    django_user = read_django_user(name)
    if not django_user:
        print("User %s does not exist"%name, file=sys.stderr)
        return am_errors.AM_USERNOTEXISTS
    # Promote the Django account to staff + superuser status
    django_user.is_staff = True
    django_user.is_superuser = True
    django_user.save()
    # Create site permissions record for admin user
    details = make_django_user_details(name, django_user)
    return create_site_permissions(
        site_settings, details,
        ["VIEW", "CREATE", "UPDATE", "DELETE", "CONFIG", "ADMIN"]
        )
def am_setuserpermissions(annroot, userhome, options):
    """
    Set Annalist permissions for designated user.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    Returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    prompt_prefix = "User "
    if len(options.args) > 2:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    site_settings = am_get_site_settings(annroot, userhome, options)
    if not site_settings:
        return am_errors.AM_NOSETTINGS
    name = get_user_name(options, prompt_prefix)
    details = read_django_user_details(name)
    if not details:
        print("User %s does not exist"%name, file=sys.stderr)
        return am_errors.AM_USERNOTEXISTS
    perms = get_user_permissions(options, 1, prompt_prefix)
    return create_site_permissions(site_settings, details, perms)
def am_setdefaultpermissions(annroot, userhome, options):
    """
    Set site-wide default permissions for logged in users.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    Returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    prompt_prefix = "Default "
    if len(options.args) > 1:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    site_settings = am_get_site_settings(annroot, userhome, options)
    if not site_settings:
        return am_errors.AM_NOSETTINGS
    # Built-in record id/URI used to hold default (authenticated) permissions
    perms_record = (
        { 'name':       "_default_user_perms"
        , 'uri':        "annal:User/_default_user_perms"
        , 'label':      "Default permissions"
        , 'comment':    "Default permissions for authenticated user."
        })
    perms = get_user_permissions(options, 0, prompt_prefix)
    return create_site_permissions(site_settings, perms_record, perms)
def am_setpublicpermissions(annroot, userhome, options):
    """
    Set site-wide default permissions for unauthenticated public access.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    Returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    prompt_prefix = "Public access "
    if len(options.args) > 1:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    site_settings = am_get_site_settings(annroot, userhome, options)
    if not site_settings:
        return am_errors.AM_NOSETTINGS
    # Built-in record id/URI used to hold unauthenticated-user permissions
    perms_record = (
        { 'name':       "_unknown_user_perms"
        , 'uri':        "annal:User/_unknown_user_perms"
        , 'label':      "Unknown user"
        , 'comment':    "Permissions for unauthenticated user."
        })
    perms = get_user_permissions(options, 0, prompt_prefix)
    return create_site_permissions(site_settings, perms_record, perms)
def am_deleteuser(annroot, userhome, options):
    """
    Delete Annalist/Django user account.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    Returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    prompt_prefix = "Delete user"
    if len(options.args) > 1:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    site_settings = am_get_site_settings(annroot, userhome, options)
    if not site_settings:
        return am_errors.AM_NOSETTINGS
    name = get_user_name(options, prompt_prefix)
    # Remove Django user record, if present
    from django.contrib.auth.models import User # import deferred until after sitesettings import
    matches = User.objects.filter(username=name)
    if not matches:
        print("User %s does not exist"%name, file=sys.stderr)
        return am_errors.AM_USERNOTEXISTS
    matches.delete()
    # Remove the site permissions record for the user
    delete_user_permissions(am_get_site(site_settings), name)
    return am_errors.AM_SUCCESS
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_manager/am_createuser.py
|
am_createuser.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os, os.path
import sys
import importlib
import subprocess
import signal
import time
import shutil
import logging
log = logging.getLogger(__name__)
from utils.SetcwdContext import ChangeCurrentDir
from utils.SuppressLoggingContext import SuppressLogging
from annalist import __version__
from . import am_errors
from .am_settings import (
am_get_settings, am_get_site_settings, am_get_site
)
from .am_getargvalue import getarg, getargvalue
def am_runserver(annroot, userhome, options):
    """
    Run Annalist production server asynchronously; writes process id to stdout.

    This uses the gunicorn HTTP/WSGI server.
    Provide HTTPS access by proxying via Apache or Nginx.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    Returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if len(options.args) > 0:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    with SuppressLogging(logging.INFO):
        sitesettings = importlib.import_module(settings.modulename)
        sitedirectory = sitesettings.BASE_SITE_DIR
        pidfilename = os.path.join(sitedirectory, "annalist.pid")
    # If a pid file exists, a server may already be running
    try:
        with open(pidfilename, "r") as pidfile:
            pid = int(pidfile.readline())
        print("Server already started with pid %d"%(pid,), file=sys.stderr)
        if options.force:
            print("Stopping pid %d"%(pid,), file=sys.stderr)
            os.kill(pid, signal.SIGTERM)
        else:
            print("Use '--force' to force restart", file=sys.stderr)
            return am_errors.AM_SERVERALREADYRUN
    except IOError:
        # No saved process id - continue
        pass
    except OSError:
        # Process id not found for kill - continue
        print("Process pid %d not found"%(pid,), file=sys.stderr)
    status = am_errors.AM_SUCCESS
    with ChangeCurrentDir(annroot):
        gunicorn_command = (
            "gunicorn --workers=1 --threads=%d "%(sitesettings.SERVER_THREADS)+
            " --bind=0.0.0.0:8000 "+
            " --env DJANGO_SETTINGS_MODULE=%s "%(settings.modulename,)+
            " --env ANNALIST_KEY=%s "%(sitesettings.SECRET_KEY,)+
            " --access-logfile %s "%(sitesettings.ACCESS_LOG_PATH,)+
            " --error-logfile %s "%(sitesettings.ERROR_LOG_PATH,)+
            " --timeout 300 "+
            " annalist_site.wsgi:application"+
            "")
        log.debug("am_runserver subprocess: %s"%gunicorn_command)
        server = subprocess.Popen(gunicorn_command.split())
        pid = server.pid
        with open(pidfilename, "w") as pidfile:
            pidfile.write(str(pid)+"\n")
        time.sleep(1.0)     # Allow server to start and log initial messages
        print(str(pid), file=sys.stdout)
        log.debug("am_runserver subprocess pid: %s"%pid)
    return status
def am_stopserver(annroot, userhome, options):
    """
    Stop Annalist production server.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    Returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if len(options.args) > 0:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    with SuppressLogging(logging.INFO):
        sitesettings = importlib.import_module(settings.modulename)
        pidfilename = os.path.join(sitesettings.BASE_SITE_DIR, "annalist.pid")
    status = am_errors.AM_SUCCESS
    try:
        with open(pidfilename, "r") as pidfile:
            pid = int(pidfile.readline())
        print("Stopping pid %d"%(pid,), file=sys.stderr)
        os.kill(pid, signal.SIGTERM)
        os.remove(pidfilename)
    except IOError as e:
        # No pid file: no server is running
        print("No server running", file=sys.stderr)
        return am_errors.AM_NOSERVERPIDFILE
    except OSError as e:
        # Saved pid does not correspond to a live process
        print("Process %d not found (%s)"%(pid, e), file=sys.stderr)
        return am_errors.AM_PIDNOTFOUND
    return status
def am_pidserver(annroot, userhome, options):
    """
    Display running Annalist server PID on stdout.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    Returns 0 if PID is displayed, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if len(options.args) > 0:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    with SuppressLogging(logging.INFO):
        sitesettings = importlib.import_module(settings.modulename)
        pidfilename = os.path.join(sitesettings.BASE_SITE_DIR, "annalist.pid")
    status = am_errors.AM_SUCCESS
    try:
        with open(pidfilename, "r") as pidfile:
            pid = int(pidfile.readline())
        print("%d"%(pid,), file=sys.stdout)
    except IOError:
        # No pid file: no server is running
        print("No server running", file=sys.stderr)
        return am_errors.AM_NOSERVERPIDFILE
    return status
def am_rundevserver(annroot, userhome, options):
    """
    Run Annalist development server.

    This uses the Django development server (via django-admin runserver).
    For production deployment, use `runserver`.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    Returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if len(options.args) > 0:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    status = am_errors.AM_SUCCESS
    with ChangeCurrentDir(annroot):
        cmd = "runserver 0.0.0.0:8000"
        subprocess_command = "django-admin %s --pythonpath=%s --settings=%s"%(cmd, annroot, settings.modulename)
        log.debug("am_rundevserver subprocess: %s"%subprocess_command)
        status = subprocess.call(subprocess_command.split())
        log.debug("am_rundevserver subprocess status: %s"%status)
    return status
def am_serverlog(annroot, userhome, options):
    """
    Print the file name of the Annalist server log on standard output.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if options.args:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    # Import the site settings module quietly to pick up the log path
    with SuppressLogging(logging.INFO):
        site_settings = importlib.import_module(settings.modulename)
    print(site_settings.ANNALIST_LOG_PATH)
    return am_errors.AM_SUCCESS
def am_accesslog(annroot, userhome, options):
    """
    Print the file name of the WSGI access log on standard output.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if options.args:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    # Import the site settings module quietly to pick up the log path
    with SuppressLogging(logging.INFO):
        site_settings = importlib.import_module(settings.modulename)
    print(site_settings.ACCESS_LOG_PATH)
    return am_errors.AM_SUCCESS
def am_errorlog(annroot, userhome, options):
    """
    Print name of WSGI error log to standard output.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if len(options.args) > 0:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    status = am_errors.AM_SUCCESS
    # Import site settings module quietly to pick up the error log path
    with SuppressLogging(logging.INFO):
        sitesettings = importlib.import_module(settings.modulename)
    logfilename = sitesettings.ERROR_LOG_PATH
    print(logfilename)
    return status
def am_sitedirectory(annroot, userhome, options):
    """
    Print name of Annalist site directory to standard output.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if options.args:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    # Import the site settings module quietly to pick up the site directory
    with SuppressLogging(logging.INFO):
        site_settings = importlib.import_module(settings.modulename)
    print(site_settings.BASE_SITE_DIR)
    return am_errors.AM_SUCCESS
def am_settingsmodule(annroot, userhome, options):
    """
    Print name of Annalist settings module to standard output.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if options.args:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    print(settings.modulename)
    return am_errors.AM_SUCCESS
def am_settingsfile(annroot, userhome, options):
    """
    Print name of Annalist settings file (without extension) to standard output.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if options.args:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    # Import the settings module quietly, then strip the file extension
    with SuppressLogging(logging.INFO):
        site_settings = importlib.import_module(settings.modulename)
    base_path, _ext = os.path.splitext(site_settings.__file__)
    print(base_path)
    return am_errors.AM_SUCCESS
def am_settingsdir(annroot, userhome, options):
    """
    Print name of Annalist settings directory to standard output.

    (Docstring previously said "settings file", but this function prints
    the directory containing the settings module.)

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if len(options.args) > 0:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    status = am_errors.AM_SUCCESS
    with SuppressLogging(logging.INFO):
        sitesettings = importlib.import_module(settings.modulename)
    # Renamed second element from `file` (shadowed the Python 2 builtin);
    # only the directory part is used.
    settingsdir, _filename = os.path.split(sitesettings.__file__)
    print(settingsdir)
    return status
def am_version(annroot, userhome, options):
    """
    Print software version string to standard output.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if options.args:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    # Import the site settings module quietly; it carries the version string
    with SuppressLogging(logging.INFO):
        site_settings = importlib.import_module(settings.modulename)
    print(site_settings.ANNALIST_VERSION)
    return am_errors.AM_SUCCESS
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_manager/am_runserver.py
|
am_runserver.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2016, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import sys
import logging
import subprocess
import importlib
import shutil
import datetime
log = logging.getLogger(__name__)
from annalist import layout
from annalist.identifiers import ANNAL, RDFS
from annalist.util import valid_id, extract_entity_id, make_type_entity_id
from annalist.collections_data import installable_collections
from annalist.models.site import Site
from annalist.models.collection import Collection
from annalist.models.recordtype import RecordType
from annalist.models.recordview import RecordView
from annalist.models.recordlist import RecordList
from annalist.models.recordfield import RecordField
from annalist.models.recordgroup import RecordGroup
from annalist.models.collectiondata import initialize_coll_data, copy_coll_data, migrate_coll_data
from . import am_errors
from .am_settings import (
am_get_settings, am_get_site_settings, am_get_site
)
from .am_getargvalue import getarg, getargvalue
# Collection access helpers
def get_settings_site(annroot, userhome, options):
    """
    Get settings and site data based on command line options provided.

    returns:
        (status, settings, site)
    where 'settings' and/or 'site' are None if not found, and status is
    AM_NOSETTINGS in that case, otherwise AM_SUCCESS.
    """
    settings = am_get_settings(annroot, userhome, options)
    if not settings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return (am_errors.AM_NOSETTINGS, settings, None)
    site_settings = am_get_site_settings(annroot, userhome, options)
    if not site_settings:
        print("Site settings not found (%s)"%(options.configuration), file=sys.stderr)
        return (am_errors.AM_NOSETTINGS, settings, None)
    return (am_errors.AM_SUCCESS, settings, am_get_site(site_settings))
def coll_type(coll, type_id):
    """
    Return identified type in collection, or None.

    Looks in the collection and inherited definitions (altscope "all").

    BUG FIX: previously returned `RecordField.load(coll, field_id, ...)`,
    referencing the undefined name `field_id` (copy-paste from `coll_field`),
    which raised NameError whenever this helper was called.  Loads the
    named RecordType instead, in line with coll_view/coll_list/coll_field.
    """
    return RecordType.load(coll, type_id, altscope="all")
def coll_types(coll):
    """
    Return iterator over type records defined in a collection.

    Delegates entirely to the collection's own `types()` method.
    """
    return coll.types()
def coll_view(coll, view_id):
    """
    Return identified view in collection, or None.

    Looks in the collection and inherited definitions (altscope "all").
    """
    return RecordView.load(coll, view_id, altscope="all")
def coll_views(coll):
    """
    Generate the view records in a collection, omitting the
    "_initial_values" placeholder entry.
    """
    for view_id in coll._children(RecordView, altscope="all"):
        view = coll_view(coll, view_id)
        if not view:
            continue
        if view.get_id() != "_initial_values":
            yield view
    return
def coll_list(coll, list_id):
    """
    Return identified list in collection, or None.

    Looks in the collection and inherited definitions (altscope "all").
    """
    return RecordList.load(coll, list_id, altscope="all")
def coll_lists(coll):
    """
    Generate the list records in a collection, omitting the
    "_initial_values" placeholder entry.
    """
    for list_id in coll._children(RecordList, altscope="all"):
        list_rec = coll_list(coll, list_id)
        if not list_rec:
            continue
        if list_rec.get_id() != "_initial_values":
            yield list_rec
    return
def coll_field(coll, field_id):
    """
    Return identified field in collection, or None.

    Looks in the collection and inherited definitions (altscope "all").
    """
    return RecordField.load(coll, field_id, altscope="all")
def coll_fields(coll):
    """
    Generate the field records in a collection, omitting the
    "_initial_values" placeholder entry.
    """
    for field_id in coll._children(RecordField, altscope="all"):
        field = coll_field(coll, field_id)
        if not field:
            continue
        if field.get_id() != "_initial_values":
            yield field
    return
def coll_group(coll, group_id):
    """
    Return identified group in collection, or None.

    Looks in the collection and inherited definitions (altscope "all").
    """
    return RecordGroup.load(coll, group_id, altscope="all")
def coll_groups(coll):
    """
    Generate the field-group records in a collection, omitting the
    "_initial_values" placeholder entry.
    """
    for group_id in coll._children(RecordGroup, altscope="all"):
        group = coll_group(coll, group_id)
        if not group:
            continue
        if group.get_id() != "_initial_values":
            yield group
    return
# Common logic for View, List and Group field lists
def add_to_set(value, values):
    """
    Add a non-empty (truthy) value to a set of values.

    Returns the same set object, enabling the result to be passed along.
    """
    if not value:
        return values
    values.add(value)
    return values
def field_in_field_list(field_list, field_id, property_uri):
    """
    Test whether a field reference list mentions the given field id
    or the given property URI.
    """
    return any(
        extract_entity_id(fref.get(ANNAL.CURIE.field_id, "")) == field_id or
        fref.get(ANNAL.CURIE.property_uri, "") == property_uri
        for fref in field_list
        )
def group_in_field_list(field_list, coll, group_ids):
    """
    Test whether any field in a field reference list refers to a group
    whose id is in `group_ids`.
    """
    for field_ref in field_list:
        ref_id = extract_entity_id(field_ref.get(ANNAL.CURIE.field_id, ""))
        field_def = coll_field(coll, ref_id)
        if field_def.get(ANNAL.CURIE.group_ref, "") in group_ids:
            return True
    return False
def types_using_field(coll, field_id, property_uri):
    """
    Returns a set of type ids that may use a specified field or property URI.

    coll          collection to scan.
    field_id      id of the field of interest.
    property_uri  property URI associated with the field.
    """
    type_ids = set()
    type_uris = set()
    group_ids = set()
    # Look at field definition: its declared entity type (if any) may use the field
    f = coll_field(coll, field_id)
    add_to_set(f.get(ANNAL.CURIE.field_entity_type, ""), type_uris)
    # Look at groups that reference field (directly or via property URI)
    for g in coll_groups(coll):
        if field_in_field_list(g[ANNAL.CURIE.group_fields], field_id, property_uri):
            add_to_set(g.get_id(), group_ids)
            add_to_set(extract_entity_id(g.get(ANNAL.CURIE.record_type, "")), type_uris)
    # Look at views that reference field or any of the groups found above
    for v in coll_views(coll):
        if ( field_in_field_list(v[ANNAL.CURIE.view_fields], field_id, property_uri) or
             group_in_field_list(v[ANNAL.CURIE.view_fields], coll, group_ids) ):
            add_to_set(extract_entity_id(v.get(ANNAL.CURIE.record_type, "")), type_uris)
    # Look at lists that reference field or groups
    for l in coll_lists(coll):
        if ( field_in_field_list(l[ANNAL.CURIE.list_fields], field_id, property_uri) or
             group_in_field_list(l[ANNAL.CURIE.list_fields], coll, group_ids) ):
            add_to_set(extract_entity_id(l.get(ANNAL.CURIE.record_type, "")), type_uris)
            add_to_set(extract_entity_id(l.get(ANNAL.CURIE.default_type, "")), type_uris)
    # Collect ids of types whose URI (or any supertype URI) was noted above
    for t in coll_types(coll):
        type_uri = t.get(ANNAL.CURIE.uri, "")
        supertype_uris = set( s[ANNAL.CURIE.supertype_uri] for s in t.get(ANNAL.CURIE.supertype_uris,[]) )
        if (type_uri in type_uris) or (supertype_uris & type_uris):
            add_to_set(t.get_id(), type_ids)
    return type_ids
def compare_field_list(old_coll, new_coll, old_field_list, new_field_list, reporting_prefix):
    """
    Report property URI changes between field lists, as seen in group,
    view and list definitions.

    old_coll          collection holding the old field definitions.
    new_coll          collection holding the new field definitions.
    old_field_list    field reference list from the old definition.
    new_field_list    field reference list from the new definition.
    reporting_prefix  text identifying the context in report lines.
    """
    old_len = len(old_field_list)
    new_len = len(new_field_list)
    if new_len != old_len:
        print("* %s, field count changed from %d to %d"%(reporting_prefix, old_len, new_len))
    for i in range(new_len):
        for j in range(old_len):
            # Look for field in old group.
            # If not found, ignore it - we're looking for URI changes
            # @@TODO: ... or are we?
            new_f = new_field_list[i]
            old_f = old_field_list[j]
            field_id = extract_entity_id(new_f[ANNAL.CURIE.field_id])
            if field_id == extract_entity_id(old_f[ANNAL.CURIE.field_id]):
                # Field found - check for incompatible URI override.
                # Where a list entry does not override the property URI,
                # fill it in from the field definition itself.
                old_uri = old_f.get(ANNAL.CURIE.property_uri, "")
                new_uri = new_f.get(ANNAL.CURIE.property_uri, "")
                if (not old_uri) and new_uri:
                    old_field = coll_field(old_coll, field_id)
                    old_uri = old_field[ANNAL.CURIE.property_uri]
                if old_uri and (not new_uri):
                    new_field = coll_field(new_coll, field_id)
                    new_uri = new_field[ANNAL.CURIE.property_uri]
                if old_uri != new_uri:
                    print(
                        "* %s, field %s, property URI changed from '%s' to '%s'"%
                        (reporting_prefix, field_id, old_uri, new_uri)
                        )
                    # BUG FIX: removed a stray "Consider adding supertype ..."
                    # print here that referenced undefined names `type_id` and
                    # `new_coll_id` (copy-paste from am_migrationreport) and
                    # raised NameError whenever a URI change was reported.
                    report_property_references(new_coll, old_uri, "URI '%s'"%(old_uri))
                break
    return
def report_property_references(coll, property_uri, reporting_prefix):
    """
    Report all references to a specified property URI within a collection.

    coll              collection to scan.
    property_uri      property URI to look for.
    reporting_prefix  text prefixed to each report line (describes the URI).
    """
    # References from types (field alias sources)
    for t in coll_types(coll):
        type_id = t.get_id()
        alias_value_uris = [ a[ANNAL.CURIE.alias_source] for a in t.get(ANNAL.CURIE.field_aliases,[]) ]
        if property_uri in alias_value_uris:
            print("%s appears as an alias value of type '%s'"%(reporting_prefix, type_id))
    # References from views
    for v in coll_views(coll):
        view_id = v.get_id()
        report_property_references_in_field_list(
            coll, property_uri, v[ANNAL.CURIE.view_fields],
            reporting_prefix, "fields for view %s"%(view_id)
            )
    # References from lists
    for l in coll_lists(coll):
        list_id = l.get_id()
        if property_uri in l.get(ANNAL.CURIE.list_entity_selector, ""):
            print("%s appears in selector for list '%s'"%(reporting_prefix, list_id))
        # BUG FIX: previously scanned `v[ANNAL.CURIE.list_fields]` - a stale
        # variable left over from the views loop above - so list fields were
        # read from the wrong record (or NameError if there were no views).
        report_property_references_in_field_list(
            coll, property_uri, l[ANNAL.CURIE.list_fields],
            reporting_prefix, "fields for list %s"%(list_id)
            )
    # References from fields (direct property URI and value restriction)
    for f in coll_fields(coll):
        field_id = f.get_id()
        if property_uri == f.get(ANNAL.CURIE.property_uri, ""):
            print("%s appears as property URI for field '%s'"%(reporting_prefix, field_id))
        if property_uri in f.get(ANNAL.CURIE.field_ref_restriction, ""):
            print("%s appears in value restriction for field '%s'"%(reporting_prefix, field_id))
    # References from groups
    for g in coll_groups(coll):
        group_id = g.get_id()
        report_property_references_in_field_list(
            coll, property_uri, g[ANNAL.CURIE.group_fields],
            reporting_prefix, "fields for group %s"%(group_id)
            )
    return
def report_property_references_in_field_list(
        coll, property_uri, field_list,
        reporting_prefix, reporting_suffix):
    """
    Report occurrences of a property URI appearing in a field reference list.

    One line is printed for each field reference whose property URI matches.
    """
    for field_ref in field_list:
        if field_ref.get(ANNAL.CURIE.property_uri, "") == property_uri:
            print("%s appears in %s"%(reporting_prefix, reporting_suffix))
    return
def report_type_references(coll, type_uri, reporting_prefix):
    """
    Report all references to a specified type URI within a collection.

    coll              collection to scan.
    type_uri          type URI to look for.
    reporting_prefix  text prefixed to each report line (describes the URI).
    """
    # References from types (supertype declarations)
    for t in coll_types(coll):
        type_id = t.get_id()
        supertype_uris = [ u[ANNAL.CURIE.supertype_uri] for u in t.get(ANNAL.CURIE.supertype_uris,[]) ]
        if type_uri in supertype_uris:
            print("%s appears as a supertype of type '%s'"%(reporting_prefix, type_id))
    # References from views (declared entity type)
    for v in coll_views(coll):
        view_id = v.get_id()
        if type_uri == v.get(ANNAL.CURIE.record_type, ""):
            print("%s appears as entity type for view '%s'"%(reporting_prefix, view_id))
    # References from lists (declared entity type and selector expression)
    for l in coll_lists(coll):
        list_id = l.get_id()
        if type_uri == l.get(ANNAL.CURIE.record_type, ""):
            print("%s appears as entity type for list '%s'"%(reporting_prefix, list_id))
        if type_uri in l.get(ANNAL.CURIE.list_entity_selector, ""):
            print("%s appears in selector for list '%s'"%(reporting_prefix, list_id))
    # References from fields (value type, entity type, value restriction)
    for f in coll_fields(coll):
        field_id = f.get_id()
        if type_uri == f.get(ANNAL.CURIE.field_value_type, ""):
            print("%s appears as value type for field '%s'"%(reporting_prefix, field_id))
        if type_uri == f.get(ANNAL.CURIE.field_entity_type, ""):
            print("%s appears as entity type for field '%s'"%(reporting_prefix, field_id))
        if type_uri in f.get(ANNAL.CURIE.field_ref_restriction, ""):
            print("%s appears in value restriction for field '%s'"%(reporting_prefix, field_id))
    # References from groups (declared entity type)
    for g in coll_groups(coll):
        group_id = g.get_id()
        if type_uri == g.get(ANNAL.CURIE.record_type, ""):
            print("%s appears as entity type for group %s"%(reporting_prefix, group_id))
    return
# Migration helper functions
def am_migrationreport(annroot, userhome, options):
    """
    Collection migration report helper.

        annalist_manager migrationreport old_coll new_coll

    Generates a report of changes to data needed to match type and property
    URI changes moving from old_coll to new_coll.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    status, settings, site = get_settings_site(annroot, userhome, options)
    if status != am_errors.AM_SUCCESS:
        return status
    if len(options.args) > 2:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    # Load old and new collections (prompting for ids if not supplied)
    old_coll_id = getargvalue(getarg(options.args, 0), "Old collection Id: ")
    old_coll = Collection.load(site, old_coll_id)
    if not (old_coll and old_coll.get_values()):
        print("Old collection not found: %s"%(old_coll_id), file=sys.stderr)
        return am_errors.AM_NOCOLLECTION
    new_coll_id = getargvalue(getarg(options.args, 1), "New collection Id: ")
    new_coll = Collection.load(site, new_coll_id)
    if not (new_coll and new_coll.get_values()):
        print("New collection not found: %s"%(new_coll_id), file=sys.stderr)
        return am_errors.AM_NOCOLLECTION
    status = am_errors.AM_SUCCESS
    print("# Migration report from collection '%s' to '%s' #"%(old_coll_id, new_coll_id))
    print("")
    # Scan and report on type URI changes
    for new_type in coll_types(new_coll):
        type_id = new_type.get_id()
        old_type = old_coll.get_type(type_id)
        if old_type:
            old_uri = old_type[ANNAL.CURIE.uri]
            new_uri = new_type[ANNAL.CURIE.uri]
            if old_uri != new_uri:
                print("* Type %s, URI changed from '%s' to '%s'"%(type_id, old_uri, new_uri))
                # Suggest declaring the old URI as a supertype so that
                # existing data remains accessible under the new type
                supertype_uris = [ u[ANNAL.CURIE.supertype_uri] for u in new_type.get(ANNAL.CURIE.supertype_uris,[]) ]
                if old_uri not in supertype_uris:
                    print(
                        " Consider adding supertype '%s' to type '%s' in collection '%s'"%
                        (old_uri, type_id, new_coll_id)
                        )
                report_type_references(new_coll, old_uri, " URI '%s'"%(old_uri))
    # Scan and report on property URI changes in field definitions
    for new_field in coll_fields(new_coll):
        field_id = new_field.get_id()
        old_field = coll_field(old_coll, field_id)
        if old_field:
            old_uri = old_field[ANNAL.CURIE.property_uri]
            new_uri = new_field[ANNAL.CURIE.property_uri]
            if old_uri != new_uri:
                print("* Field %s, property URI changed from '%s' to '%s'"%(field_id, old_uri, new_uri))
                # Suggest property aliases on every type that may use this field
                type_ids = types_using_field(new_coll, field_id, old_uri)
                for tid in type_ids:
                    print(
                        " Consider adding property alias for '%s' to type %s in collection '%s'"%
                        (old_uri, tid, new_coll_id)
                        )
    # Scan and report on property URI changes in group definitions
    for new_group in coll_groups(new_coll):
        group_id = new_group.get_id()
        old_group = coll_group(old_coll, group_id)
        if old_group:
            compare_field_list(
                old_coll, new_coll,
                old_group[ANNAL.CURIE.group_fields],
                new_group[ANNAL.CURIE.group_fields],
                "Group %s"%group_id)
    # Scan and report on property URI changes in view definitions
    for new_view in coll_views(new_coll):
        view_id = new_view.get_id()
        old_view = coll_view(old_coll, view_id)
        if old_view:
            compare_field_list(
                old_coll, new_coll,
                old_view[ANNAL.CURIE.view_fields],
                new_view[ANNAL.CURIE.view_fields],
                "View %s"%view_id)
    # Scan and report on property URI changes in list definitions
    for new_list in coll_lists(new_coll):
        list_id = new_list.get_id()
        old_list = coll_list(old_coll, list_id)
        if old_list:
            compare_field_list(
                old_coll, new_coll,
                old_list[ANNAL.CURIE.list_fields],
                new_list[ANNAL.CURIE.list_fields],
                "List %s"%list_id)
    print("")
    return status
# Collection management functions
def am_installcollection(annroot, userhome, options):
    """
    Install software-defined collection data.

        annalist_manager installcollection coll_id

    Copies data from a collection data directory supplied with the software
    to a new collection in the local site.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    status, settings, site = get_settings_site(annroot, userhome, options)
    if status != am_errors.AM_SUCCESS:
        return status
    if len(options.args) > 1:
        print(
            "Unexpected arguments for %s: (%s)"%
            (options.command, " ".join(options.args)),
            file=sys.stderr
            )
        return am_errors.AM_UNEXPECTEDARGS
    # Check collection Id against the known installable collections
    coll_id = getargvalue(getarg(options.args, 0), "Collection Id to install: ")
    if coll_id in installable_collections:
        src_dir_name = installable_collections[coll_id]['data_dir']
    else:
        print("Collection name to install not known: %s"%(coll_id), file=sys.stderr)
        print("Available collection Ids are: %s"%(",".join(installable_collections.keys())))
        return am_errors.AM_NOCOLLECTION
    # Check if collection already exists; remove it only when --force given
    coll = Collection.load(site, coll_id)
    if (coll and coll.get_values()):
        if options.force:
            print("Existing collection %s will be removed ('--force' specified)"%(coll_id), file=sys.stderr)
            Collection.remove(site, coll_id)
        else:
            print("Collection already exists: %s"%(coll_id), file=sys.stderr)
            return am_errors.AM_COLLECTIONEXISTS
    # Install collection now
    src_dir = os.path.join(annroot, "annalist/data", src_dir_name)
    print("Installing collection '%s' from data directory '%s'"%(coll_id, src_dir))
    # Record installation time in the collection comment metadata
    coll_metadata = installable_collections[coll_id]['coll_meta']
    date_time_now = datetime.datetime.now().replace(microsecond=0)
    coll_metadata[ANNAL.CURIE.comment] = (
        "Initialized at %s by `annalist-manager installcollection`"%
        date_time_now.isoformat()
        )
    coll = site.add_collection(coll_id, coll_metadata)
    msgs = initialize_coll_data(src_dir, coll)
    if msgs:
        # Any messages from initialization indicate failure
        for msg in msgs:
            print(msg)
        status = am_errors.AM_INSTALLCOLLFAIL
    return status
def am_copycollection(annroot, userhome, options):
    """
    Copy collection data.

        annalist_manager copycollection old_coll_id new_coll_id

    Copies data from an existing collection to a new collection.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    status, settings, site = get_settings_site(annroot, userhome, options)
    if status != am_errors.AM_SUCCESS:
        return status
    if len(options.args) > 2:
        print(
            "Unexpected arguments for %s: (%s)"%
            (options.command, " ".join(options.args)),
            file=sys.stderr
            )
        return am_errors.AM_UNEXPECTEDARGS
    # Source collection must exist
    old_coll_id = getargvalue(getarg(options.args, 0), "Old collection Id: ")
    old_coll = Collection.load(site, old_coll_id)
    if not (old_coll and old_coll.get_values()):
        print("Old collection not found: %s"%(old_coll_id), file=sys.stderr)
        return am_errors.AM_NOCOLLECTION
    # Target collection must not already exist
    new_coll_id = getargvalue(getarg(options.args, 1), "New collection Id: ")
    new_coll = Collection.load(site, new_coll_id)
    if (new_coll and new_coll.get_values()):
        print("New collection already exists: %s"%(new_coll_id), file=sys.stderr)
        return am_errors.AM_COLLECTIONEXISTS
    # Copy collection now
    print("Copying collection '%s' to '%s'\n"%(old_coll_id, new_coll_id))
    new_coll = site.add_collection(new_coll_id, old_coll.get_values())
    msgs = copy_coll_data(old_coll, new_coll)
    if msgs:
        # Any messages from the copy indicate failure
        for msg in msgs:
            print(msg)
        status = am_errors.AM_COPYCOLLFAIL
    print("")
    return status
def am_check_site_updated(coll):
    """
    Check that site data has been updated before performing data migration.

    Data migration is performed incompletely if the "_field" type is not
    visible, so that is the test used here.

    Returns AM_SUCCESS if the site data is up to date, otherwise prints
    guidance and returns AM_MIGRATECOLLFAIL.
    """
    if layout.FIELD_TYPEID in coll._children(RecordType, altscope="all"):
        return am_errors.AM_SUCCESS
    print("Perform 'annalist-manager updatesitedata' before collection data migration.")
    print("Collection data not migrated.")
    return am_errors.AM_MIGRATECOLLFAIL
def am_migratecollection(annroot, userhome, options):
    """
    Apply migrations for a specified collection.

        annalist_manager migratecollection coll

    Reads and writes every entity in a collection, thereby applying data
    migrations and saving them in the stored data.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    status, settings, site = get_settings_site(annroot, userhome, options)
    if status != am_errors.AM_SUCCESS:
        return status
    coll_id = getargvalue(getarg(options.args, 0), "Collection Id: ")
    coll = Collection.load(site, coll_id)
    if not (coll and coll.get_values()):
        print("Collection not found: %s"%(coll_id), file=sys.stderr)
        return am_errors.AM_NOCOLLECTION
    # Site data must be up to date, or migration would be incomplete
    status = am_check_site_updated(coll)
    if status != am_errors.AM_SUCCESS:
        return status
    print("Apply data migrations in collection '%s'"%(coll_id,))
    msgs = migrate_coll_data(coll)
    if msgs:
        # Any messages from migration indicate failure
        for msg in msgs:
            print(msg)
        status = am_errors.AM_MIGRATECOLLFAIL
    else:
        # Migration succeeded: record software compatibility version
        coll.update_software_compatibility_version()
    return status
def am_migrateallcollections(annroot, userhome, options):
    """
    Apply migrations to all collections.

        annalist_manager migrateallcollections

    Reads and writes every entity in all collections, thereby
    applying data migrations and saving them in the stored data.

    annroot     is the root directory for the Annalist software installation.
    userhome    is the home directory for the host system user issuing the command.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    status, settings, site = get_settings_site(annroot, userhome, options)
    if status != am_errors.AM_SUCCESS:
        return status
    print("Apply data migrations in all collections:")
    for coll in site.collections():
        # NOTE(review): this reassignment overwrites any AM_MIGRATECOLLFAIL
        # status recorded for a previous collection in the loop, so only the
        # last collection's failure is reported -- confirm whether intended.
        status = am_check_site_updated(coll)
        if status != am_errors.AM_SUCCESS:
            return status
        coll_id = coll.get_id()
        # The site data collection itself is not migrated here
        if coll_id != layout.SITEDATA_ID:
            log.debug("========== Processing '%s' =========="%(coll_id,))
            print("---- Processing '%s'"%(coll_id,))
            msgs = migrate_coll_data(coll)
            if msgs:
                # Any messages from migration indicate failure
                for msg in msgs:
                    print(msg)
                status = am_errors.AM_MIGRATECOLLFAIL
    print("Data migrations complete.")
    return status
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_manager/am_managecollections.py
|
am_managecollections.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import sys
import logging
import importlib
import subprocess
log = logging.getLogger(__name__)
from utils.SuppressLoggingContext import SuppressLogging
from utils.py3porting import bytes_to_str
from annalist.util import ensure_dir
from . import am_errors
from .am_settings import am_get_settings
def am_initialize(annroot, userhome, userconfig, options):
    """
    Initialize Annalist server data, database, etc.

    annroot     is the root directory for the annalist software installation.
    userhome    is the home directory for the host system user issuing the initialize command.
    userconfig  is the directory used for user-specific configuration files.
    options     contains options parsed from the command line.

    returns     0 if all is well, or a non-zero status code.
                This value is intended to be used as an exit status code
                for the calling program.
    """
    settings_obj = am_get_settings(annroot, userhome, options)
    if not settings_obj:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if len(options.args) != 0:
        print("Unexpected arguments for initialize: (%s)"%(" ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    # Get config base directory from settings, and make sure it exists
    with SuppressLogging(logging.INFO):
        sitesettings = importlib.import_module(settings_obj.modulename)
    # For unknown reason, the database path in DATABASES gets zapped,
    # so code changed to use separately saved DATABASE_PATH.
    providersdir = os.path.join(sitesettings.CONFIG_BASE, "providers")
    databasedir = os.path.dirname(sitesettings.DATABASE_PATH)
    ensure_dir(providersdir)
    ensure_dir(databasedir)
    # Initialize the database (Django "migrate")
    status = am_errors.AM_SUCCESS
    subprocess_command = (
        "django-admin migrate --pythonpath=%s --settings=%s"%
        (annroot, settings_obj.modulename)
        )
    log.debug("am_initialize subprocess: %s"%subprocess_command)
    # status = subprocess.call(
    #     subprocess_command.split(),
    #     #stdout=sys.stdout, stderr=sys.stderr
    #     )
    # Allow stdout and stderr to be captured for testing
    p = subprocess.Popen(
        subprocess_command.split(),
        stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
    out, err = p.communicate(None)
    status = p.returncode
    sys.stdout.write(bytes_to_str(out))
    sys.stderr.write(bytes_to_str(err))
    log.debug("am_initialize subprocess status: %s"%status)
    return status
def am_collectstatic(annroot, userhome, userconfig, options):
    """
    Collect Annalist static data to (e.g.) `annalist_site/static`

    See: https://docs.djangoproject.com/en/1.11/howto/static-files/

    annroot     is the root directory for the annalist software installation.
    userhome    is the home directory for the host system user issuing the
                initialize command.
    userconfig  is the directory used for user-specific configuration files.
    options     contains options parsed from the command line.

    Returns 0 if all is well, or a non-zero status code.  This value is
    intended to be used as an exit status code for the calling program.
    """
    settings_obj = am_get_settings(annroot, userhome, options)
    if not settings_obj:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if options.args:
        print("Unexpected arguments for collectstatic: (%s)"%(" ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    # Import the settings module (quietly) so the admin command below runs
    # against the selected configuration.
    with SuppressLogging(logging.INFO):
        sitesettings = importlib.import_module(settings_obj.modulename)
    # Assemble static data using Django's 'collectstatic' admin command.
    subprocess_command = (
        "django-admin collectstatic --pythonpath=%s --settings=%s --clear --noinput"%
        (annroot, settings_obj.modulename)
        )
    log.debug("am_collectstatic subprocess: %s"%subprocess_command)
    print("Collect static data: "+subprocess_command)
    # Run as a subprocess with stdout/stderr captured, so that test
    # harnesses can intercept the output.
    p = subprocess.Popen(
        subprocess_command.split(),
        stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
    out, err = p.communicate(None)
    sys.stdout.write(bytes_to_str(out))
    sys.stderr.write(bytes_to_str(err))
    status = p.returncode
    log.debug("am_collectstatic subprocess status: %s"%status)
    return status
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_manager/am_initialize.py
|
am_initialize.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import sys
import logging
import subprocess
log = logging.getLogger(__name__)
from . import am_errors
# Summary of all annalist-manager sub-commands, displayed by the 'help'
# command when no command argument is given.  '%(prog)s' is interpolated
# with the invoking program name when the text is printed.
command_summary_help = ("\n"+
    "Commands:\n"+
    "\n"+
    " %(prog)s help [command]\n"+
    " %(prog)s runtests [testlabel]\n"+
    " %(prog)s initialize [ CONFIG ]\n"+
    " %(prog)s collectstatic [ CONFIG ]\n"+
    #@@ " %(prog)s idprovider ...\n"+ #@@ TODO
    " %(prog)s createlocaluser [ username [ email [ firstname [ lastname ] ] ] ] [ CONFIG ]\n"+
    " %(prog)s createadminuser [ username [ email [ firstname [ lastname ] ] ] ] [ CONFIG ]\n"+
    " %(prog)s defaultadminuser [ CONFIG ]\n"+
    " %(prog)s updateadminuser [ username ] [ CONFIG ]\n"+
    " %(prog)s setuserpermissions [ username ] [ permissions ] [ CONFIG ]\n"+
    " %(prog)s setdefaultpermissions [ permissions ] [ CONFIG ]\n"+
    " %(prog)s setpublicpermissions [ permissions ] [ CONFIG ]\n"+
    " %(prog)s deleteuser [ username ] [ CONFIG ]\n"+
    " %(prog)s createsitedata [ CONFIG ]\n"+
    " %(prog)s updatesitedata [ CONFIG ]\n"+
    " %(prog)s installcollection coll_id [--force] [ CONFIG ]\n"+
    " %(prog)s copycollection old_coll_id new_coll_id [ CONFIG ]\n"+
    " %(prog)s migrationreport old_coll_id new_coll_id [ CONFIG ]\n"+
    " %(prog)s migratecollection coll_id [ CONFIG ]\n"+
    " %(prog)s migrateallcollections [ CONFIG ]\n"+
    " %(prog)s runserver [ CONFIG ]\n"+
    " %(prog)s stopserver [ CONFIG ]\n"+
    " %(prog)s pidserver [ CONFIG ]\n"+
    " %(prog)s rundevserver [ CONFIG ]\n"+
    " %(prog)s sitedirectory [ CONFIG ]\n"+
    " %(prog)s settingsmodule [ CONFIG ]\n"+
    " %(prog)s settingsdir [ CONFIG ]\n"+
    " %(prog)s settingsfile [ CONFIG ]\n"+
    " %(prog)s serverlog [ CONFIG ]\n"+
    " %(prog)s accesslog [ CONFIG ]\n"+
    " %(prog)s errorlog [ CONFIG ]\n"+
    " %(prog)s version\n"+
    "")
# Shared help text describing the configuration-selection options (CONFIG),
# appended to the per-command help texts in 'am_help' below.
config_options_help = (
    "Annalist can be run in a number of configurations, notably\n"+
    "'development', 'personal' and 'shared'.\n"+
    "\n"+
    "A configuration can be selected by using one of the following options:\n"+
    "--devel selects the 'development' configuration, which stores all site data\n"+
    " within the source code tree, and configuration data in the user's\n"+
    " home directory ('~/.annalist/')\n"+
    "--personal selects the 'personal' configuration, which stores all site data\n"+
    " and configuration data in the activating user's home directory\n"+
    " ('~/annalist_site/' and '~/.annalist/')\n"+
    "--shared selects the 'shared' configuration, which stores all site and configuration\n"+
    " data in system directories '/var/annalist_site', and configuration.\n"+
    " data in '/etc/annalist/'\n"+
    "--configuration=NAME\n"+
    " allows selection of any named configuration, where configuration files\n"+
    " are stored in the Annalist source tree as '.../annalist_site/settings/NAME.py'\n"+
    "\n"+
    "The above options may be abbreviated as '-d', '-p', '-s' and '-c' respectively.\n"+
    "If no configuration is explicitly specified, '--personal' is used.\n"+
    "")
# Shared help text describing the 'permissions' command line parameter,
# appended to the help for the permission-setting commands in 'am_help' below.
permissions_help = (
    "The 'permissions' parameter is a list of space-separated permission keywords,\n"+
    "or may be empty. If multiple permissions are specified, some form of command-line\n"+
    "quoting should be used so they are presented as a single argument (e.g. enclose\n"+
    "the list of keywords in double quotes).\n"+
    "\n"+
    "If not specified on the command line, the user will be prompted for default permissions.\n"+
    "\n"+
    "Initially defined permissions are:\n"+
    "CREATE_COLLECTION site-level permission required to create new collection (or ADMIN).\n"+
    "DELETE_COLLECTION site-level permission required to delete a collection (or ADMIN).\n"+
    "VIEW permission to view or list data in a collection\n"+
    "CREATE permission to create new data in a collection\n"+
    "UPDATE permission to update existing data in a collection\n"+
    "DELETE permission to delete data from a collection\n"+
    "CONFIG permission to add or modify configuration data for a collection\n"+
    " (i.e. types, views, lists, fields, and field groups)\n"+
    "ADMIN permission to add or modify user permissions\n"+
    "")
def am_help(options, progname):
    """
    Display annalist-manager command help

    options contains options parsed from the command line;
            options.args[0], if present, names the command to describe.
    progname is the name of the invoking program, interpolated into the
            displayed text wherever '%(prog)s' appears.

    returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    if len(options.args) > 1:
        print("Unexpected arguments for %s: (%s)"%(options.command, " ".join(options.args)), file=sys.stderr)
        return am_errors.AM_UNEXPECTEDARGS
    status = am_errors.AM_SUCCESS
    # With no command argument, show the overall command summary; otherwise
    # select the help text by matching a prefix of the supplied command name.
    if len(options.args) == 0:
        help_text = (
            command_summary_help+
            "\n"+
            "For more information about command options, use:\n"+
            "\n"+
            " %(prog)s --help\n"+
            "")
    elif options.args[0].startswith("runt"):
        help_text = ("\n"+
            " %(prog)s runtests [testlabel]\n"+
            "\n"+
            "Runs annalist test suite using installed software\n"+
            "\n"+
            "If 'testlabel' is specified, only the named test or test suite is run, and\n"+
            "the full path name of the log file is displayed after the tests have run.\n"+
            "\n"+
            "")
    elif options.args[0].startswith("init"):
        help_text = ("\n"+
            " %(prog)s initialize [ CONFIG ]\n"+
            "\n"+
            "Initializes the installed software for an indicated configuration.\n"+
            "Mainly, this involves creating the internal database used to manage users, etc.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("collect"):
        help_text = ("\n"+
            " %(prog)s collectstatic [ CONFIG ]\n"+
            "\n"+
            "Copies Annalist static data to a location where is can be served directly\n"+
            "by a front-end server. The location is generally subdirectory 'static'\n"+
            "in the Annalist site base directory (see 'annalist-manager sitedirectory').\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("createl"):
        help_text = ("\n"+
            " %(prog)s createlocaluser [ username [ email [ firstname [ lastname ] ] ] ] [ CONFIG ]\n"+
            "\n"+
            "Creates an Annalist local user.\n"+
            "\n"+
            "Prompts for a username, email address and password,\n"+
            "where these are not provided on the command line.\n"+
            "\n"+
            "The local user details can be used to log in to Annalist using\n"+
            "the 'Local user' login provider button on the login page.\n"+
            "\n"+
            "Annalist is intended to be used with a federated authentication service,\n"+
            "such as Google+, but setting up such a service can be tricky, and for evaluation\n"+
            "or personal-only use it may be quicker to use locally managed user credentials\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("createa"):
        help_text = ("\n"+
            " %(prog)s createadminuser [ username [ email [ firstname [ lastname ] ] ] ] [ CONFIG ]\n"+
            "\n"+
            "Creates an Annalist administrative user.\n"+
            "\n"+
            "Prompts for a username, email address and password,\n"+
            "where these are not provided on the command line.\n"+
            "\n"+
            "The administrative user details can be used to log in to Annalist using\n"+
            "the 'Local user' login provider button on the login page.\n"+
            "An administrative user can then use the 'Admin' link at the bottom of other\n"+
            "Annalist pages to create, modify or delete other local user credentials.\n"+
            "\n"+
            "Annalist is intended to be used with a federated authentication service,\n"+
            "such as Google+, but setting up such a service can be tricky, and for evaluation\n"+
            "or personal-only use it may be quicker to use locally managed user credentials\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("defaulta"):
        help_text = ("\n"+
            " %(prog)s defaultadminuser [ CONFIG ]\n"+
            "\n"+
            "Creates a default Annalist administrative user.\n"+
            "\n"+
            "Uses default values for username and email address, prompts for\n"+
            "a password, and creates a new admin user with username 'admin'.\n"+
            "\n"+
            "The administrative user details can be used to log in to Annalist using\n"+
            "the 'Local user' login provider button on the login page.\n"+
            "An administrative user can then use the 'Admin' link at the bottom of other\n"+
            "Annalist pages to create, modify or delete other local user credentials.\n"+
            "\n"+
            "Annalist is intended to be used with a federated authentication service,\n"+
            "such as Google+, but setting up such a service can be tricky, and for evaluation\n"+
            "or personal-only use it may be quicker to use locally managed user credentials\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("updatea"):
        help_text = ("\n"+
            " %(prog)s updateadminuser [ username ] [ CONFIG ]\n"+
            "\n"+
            "Updates an existing Django user to admin status; i.e. they are assigned 'staff'\n"+
            "and 'superuser' attributes in the Django user database, and assigned site-wide\n"+
            "ADMIN permissions in the Annalist site indicated by CONFIG.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("setuse"):
        help_text = ("\n"+
            " %(prog)s setuserpermissions [ username ] [ permissions ] [ CONFIG ]\n"+
            "\n"+
            "Sets site permissions for designated user in the Annalist site indicated by CONFIG.\n"+
            "The designated user must already exist in the local Django database.\n"+
            "\n"+
            permissions_help+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("setdef"):
        help_text = ("\n"+
            " %(prog)s setdefaultpermissions [ permissions ] [ CONFIG ]\n"+
            "\n"+
            "Sets site-wide default permissions for logged-in users in the\n"+
            "Annalist site indicated by CONFIG. These permissions are superseded by\n"+
            "any permissions defined specifically for a logged-in user, or by\n"+
            "user '_default_user_perms' entry defined for any collection.\n"+
            "\n"+
            permissions_help+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("setpub"):
        help_text = ("\n"+
            " %(prog)s setpublicpermissions [ permissions ] [ CONFIG ]\n"+
            "\n"+
            "Sets site-wide public access permissions (i.e. for requests where there is no active login)\n"+
            "in the Annalist site indicated by CONFIG. These permissions may be superseded by\n"+
            "'_unknown_user_perms' permissions defined for any specific collection.\n"+
            "\n"+
            permissions_help+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("deleteu"):
        help_text = ("\n"+
            " %(prog)s deleteuser [ username ] [ CONFIG ]\n"+
            "\n"+
            "Deletes the specified Django user, and also removes any site-wide permissions defined\n"+
            "for that user in he Annalist site indicated by CONFIG.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("creates"):
        help_text = ("\n"+
            " %(prog)s createsite [ CONFIG ] [ --force | -f ]\n"+
            "\n"+
            "Creates Annalist empty site data.\n"+
            "\n"+
            "Creates empty site data (i.e. with no collections) for an Annalist service.\n"+
            "\n"+
            "If the site already exists, the command is refused unless the '--force' or '-f'\n"+
            "option is given.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("updates"):
        help_text = ("\n"+
            " %(prog)s updatesite [ CONFIG ]\n"+
            "\n"+
            "Updates the site-wide data in an existing annalist site.\n"+
            "\n"+
            "\nExisting collection data is left untouched.\n"+
            "\n"+
            "If the site does not exist, the command fails.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("copyc"):
        help_text = ("\n"+
            " %(prog)s copycollection old_coll_id new_coll_id [ CONFIG ]\n"+
            "\n"+
            "Copy collection 'old_coll_id' to a new collection called 'new_coll_id'\n"+
            "\n"+
            "Existing collection data in 'old_coll_id' is left untouched.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("installc"):
        help_text = ("\n"+
            " %(prog)s installcollection coll_id [--force] [ CONFIG ]\n"+
            "\n"+
            "Install collection 'coll_id' from site data included in software distribution.\n"+
            "\n"+
            "If the collection already exists, it will not be overwritten unless\n"+
            "the '--force' option is specified\n"+
            "\n"+
            "Annalist software ships with a number of predefined collections that are part of\n"+
            "the annalist software installation. These collections can be used as starting\n"+
            "points for defining a new collection.\n"+
            "\n"+
            "Available collections include:\n"+
            " bibdata: BiblioGraphic data definitions, creating structures similar to BibJSON.\n"+
            " namedata: defines some additional vocabulary namespaces beyond those that are part\n"+
            " of a standard Annalistr installation.\n"+
            " RDF_Schema_defs: for creating RDF schema in an Annalist collection.\n"+
            " Journal_defs: definitions for creating a journal with web and media resources.\n"+
            " Provenance_defs: @@to be added@@\n"+
            " Annalist_schema: defines RDF schema for terms in Annalist namespace.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("migrationr"):
        help_text = ("\n"+
            " %(prog)s migrationreport old_coll_id new_coll_id [ CONFIG ]\n"+
            "\n"+
            "This data migration helper generates report of changes needed to move data\n"+
            "from collection 'old_coll_id' to 'new_coll_id', based on the type, view and\n"+
            "field definitions in those collections.\n"+
            "\n"+
            "Existing collection data in 'old_coll_id' is left untouched.\n"+
            "\n"+
            "@@NOTE: this is exploratoty code.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("migratec"):
        help_text = ("\n"+
            " %(prog)s migratecollection coll_id [ CONFIG ]\n"+
            "\n"+
            "This command applies migrations to data for all entities in\n"+
            "collection 'coll_id', by updating older forms of collection\n"+
            "configuration data, and reading and rewriting data for each entity.\n"+
            "The entity migrations applied are defined by supertypes and field\n"+
            "aliases defined for types used by the collection, along with any\n"+
            "Annalist software version data migrations that may be applicable.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("migratea"):
        help_text = ("\n"+
            " %(prog)s migrateallcollections [ CONFIG ]\n"+
            "\n"+
            "This command applies migrations to data for all entities in all collections\n"+
            "See 'migratecollection' for information about the migrations applied.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("runs"):
        help_text = ("\n"+
            " %(prog)s runserver [ CONFIG ]\n"+
            "\n"+
            "Starts an Annalist server running asynchronously, and\n"+
            "writes its process id to stdout.\n"+
            "\n"+
            "Normally run behind an Apache or Nginx HTTPS proxy. Use:\n"+
            " OAUTHLIB_INSECURE_TRANSPORT=1 %(prog)s runserver [ CONFIG ]\n"+
            "for local testing under HTTP\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("stop"):
        help_text = ("\n"+
            " %(prog)s stopserver [ CONFIG ]\n"+
            "\n"+
            "Stops a running Annalist server.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("pid"):
        help_text = ("\n"+
            " %(prog)s pidserver [ CONFIG ]\n"+
            "\n"+
            "Displays PID of a running Annalist server.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("rund"):
        help_text = ("\n"+
            " %(prog)s rundevserver [ CONFIG ]\n"+
            "\n"+
            "Starts an Annalist development server running. "+
            "(Not recommended for production use.)\n"+
            "\n"+
            "Use:\n"+
            " OAUTHLIB_INSECURE_TRANSPORT=1 %(prog)s rundevserver [ CONFIG ]\n"+
            "to allow OpenId Connect login under HTTP\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("site"):
        help_text = ("\n"+
            " %(prog)s sitedirectory [ CONFIG ]\n"+
            "\n"+
            "Sends the name of Annalist site directory to standard output.\n"+
            "\n"+
            "This is a convenience function to locate the site data directory, which\n"+
            "may be buried deep in the Python virtual environment files.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("settingsm"):
        help_text = ("\n"+
            " %(prog)s settingsmodule [ CONFIG ]\n"+
            "\n"+
            "Sends the name of Annalist settings module to standard output.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("settingsd"):
        help_text = ("\n"+
            " %(prog)s settingsdir [ CONFIG ]\n"+
            "\n"+
            "Sends the name of Annalist settings directory to standard output.\n"+
            "\n"+
            "This is a convenience function to locate the settings data, which\n"+
            "may be buried deep in the Python virtual environment files.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("settingsf"):
        help_text = ("\n"+
            " %(prog)s settingsfile [ CONFIG ]\n"+
            "\n"+
            "Sends the name of Annalist settings file name (without extension) to standard output.\n"+
            "\n"+
            "This is a convenience function to locate the settings data, which\n"+
            "may be buried deep in the Python virtual environment files.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("ser"):
        help_text = ("\n"+
            " %(prog)s serverlog [ CONFIG ]\n"+
            "\n"+
            "Sends the Annalist log filename to standard output.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("acc"):
        help_text = ("\n"+
            " %(prog)s accesslog [ CONFIG ]\n"+
            "\n"+
            "Sends the WSGI (HTTP server) access log filename to standard output.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("err"):
        help_text = ("\n"+
            " %(prog)s errorlog [ CONFIG ]\n"+
            "\n"+
            "Sends the WSGI (HTTP server) error log filename to standard output.\n"+
            "\n"+
            config_options_help+
            "\n"+
            "")
    elif options.args[0].startswith("ver"):
        help_text = ("\n"+
            " %(prog)s version\n"+
            "\n"+
            "Sends the Annalist software version string to standard output.\n"+
            "\n"+
            "")
    else:
        # Unrecognized command name: report it and return an error status.
        help_text = "Unrecognized command for %s: (%s)"%(options.command, options.args[0])
        status = am_errors.AM_UNKNOWNCMD
    # Interpolate the program name and write the selected text to stderr.
    print(help_text%{'prog': progname}, file=sys.stderr)
    return status
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_manager/am_help.py
|
am_help.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import sys
import logging
import subprocess
import importlib
# import shutil
log = logging.getLogger(__name__)
from annalist.identifiers import ANNAL, RDFS
from annalist import layout
from annalist.util import removetree, replacetree, updatetree, ensure_dir
from annalist.models.site import Site
from . import am_errors
from .am_settings import am_get_settings, am_get_site_settings
def am_createsite(annroot, userhome, options):
    """
    Create Annalist empty site data.

    annroot is the root directory for the Annalist software installation.
    userhome is the home directory for the host system user issuing the command.
    options contains options parsed from the command line.

    returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    status = am_errors.AM_SUCCESS
    sitesettings = am_get_site_settings(annroot, userhome, options)
    if not sitesettings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if len(options.args) > 0:
        print(
            "Unexpected arguments for %s: (%s)"%
            (options.command, " ".join(options.args)),
            file=sys.stderr
            )
        return am_errors.AM_UNEXPECTEDARGS
    site_layout = layout.Layout(sitesettings.BASE_DATA_DIR, sitesettings.SITE_DIR_NAME)
    sitebasedir = site_layout.SITE_PATH
    #@@@@@ sitebaseurl = "/annalist/" # @@TODO: figure more robust way to define this
    sitebaseurl = sitesettings.STATIC_URL
    # --- If old site exists and --force option given, remove it
    if os.path.exists(os.path.join(sitebasedir, site_layout.SITEDATA_DIR)):
        if options.force:
            print("Removing old Annalist site at %s"%(sitebasedir))
            log.info("rmtree: %s"%(sitebasedir))
            removetree(sitebasedir)
        else:
            # Refuse to overwrite existing site data without --force
            print(
                "Old data already exists at %s (use '--force' or '-f' to overwrite)."%
                (sitebasedir), file=sys.stderr
                )
            print(
                "NOTE: using '--force' or '-f' "+
                "removes old site user permissions and namespace data "+
                "and requires re-initialization of Django database with local usernames; "+
                "consider using 'annalist-manager updatesite'."
                )
            return am_errors.AM_EXISTS
    # --- Initialize empty site data in target directory
    print("Initializing Annalist site in %s"%(sitebasedir))
    site = Site.create_site_metadata(
        sitebaseurl, sitebasedir,
        label="Annalist site (%s configuration)"%options.configuration,
        description="Annalist %s site metadata and site-wide values."%options.configuration
        )
    sitedata = site.site_data_collection()
    Site.create_site_readme(site)
    site_data_src = os.path.join(annroot, "annalist/data/sitedata") # @@TODO: more robust definition
    site_data_tgt, site_data_file = sitedata._dir_path()
    print("Copy Annalist site data")
    print("from %s"%site_data_src)
    # Copy each collection data directory from the software distribution
    # into the new site's site-data collection.
    for sdir in layout.COLL_DIRS:
        print("- %s -> %s"%(sdir, site_data_tgt))
        Site.replace_site_data_dir(sitedata, sdir, site_data_src)
    # @@TODO: filename logic copied from EntityRoot and Collection - create separate method for getting this
    (sitedata_dir, sitedata_file) = sitedata._dir_path()
    context_dir = os.path.join(sitedata_dir, layout.META_COLL_BASE_REF)
    context_file = os.path.join(context_dir, layout.COLL_CONTEXT_FILE)
    #@@
    print("Generating %s"%(context_file))
    sitedata.generate_coll_jsonld_context()
    # --- Copy provider data to site config provider directory
    provider_dir_src = os.path.join(annroot, "annalist/data/identity_providers")
    provider_dir_tgt = os.path.join(sitesettings.CONFIG_BASE, "providers")
    print("Copy identity provider data:")
    print("- from: %s"%(provider_dir_src,))
    print("- to: %s"%(provider_dir_tgt,))
    ensure_dir(provider_dir_tgt)
    updatetree(provider_dir_src, provider_dir_tgt)
    # --- Copy sample system configuration files to config directory
    config_dir_src = os.path.join(annroot, "annalist/data/config_examples")
    config_dir_tgt = os.path.join(sitesettings.CONFIG_BASE, "config")
    print("Copy system configuration sample files:")
    print("- from: %s"%(config_dir_src,))
    print("- to: %s"%(config_dir_tgt,))
    ensure_dir(config_dir_tgt)
    updatetree(config_dir_src, config_dir_tgt)
    # --- Created
    print("Now run 'annalist-manager initialize' to create site admin database")
    return status
def am_updatesite(annroot, userhome, options):
    """
    Update site data, leaving user data alone

    annroot is the root directory for the Annalist software installation.
    userhome is the home directory for the host system user issuing the command.
    options contains options parsed from the command line.

    returns 0 if all is well, or a non-zero status code.
    This value is intended to be used as an exit status code
    for the calling program.
    """
    status = am_errors.AM_SUCCESS
    sitesettings = am_get_site_settings(annroot, userhome, options)
    if not sitesettings:
        print("Settings not found (%s)"%(options.configuration), file=sys.stderr)
        return am_errors.AM_NOSETTINGS
    if len(options.args) > 0:
        print(
            "Unexpected arguments for %s: (%s)"%
            (options.command, " ".join(options.args)),
            file=sys.stderr
            )
        return am_errors.AM_UNEXPECTEDARGS
    site_layout = layout.Layout(sitesettings.BASE_DATA_DIR, sitesettings.SITE_DIR_NAME)
    sitebasedir = site_layout.SITE_PATH
    sitebaseurl = "/annalist/" # @@TODO: figure more robust way to define this
    site = Site(sitebaseurl, site_layout.SITE_PATH)
    sitedata = site.site_data_collection(test_exists=False)
    if sitedata is None:
        # No site metadata present: create it (migration to new layout).
        print("Initializing Annalist site metadata in %s (migrating to new layout)"%(sitebasedir))
        site = Site.create_site_metadata(
            sitebaseurl, sitebasedir,
            label="Annalist site (%s configuration)"%options.configuration,
            description="Annalist %s site metadata and site-wide values."%options.configuration
            )
        sitedata = site.site_data_collection()
    site_data_src = os.path.join(annroot, "annalist/data/sitedata") # @@TODO: more robust definition
    site_data_tgt, site_data_file = sitedata._dir_path()
    # --- Migrate old site data to new site directory
    # _annalist_site/
    site_data_old1 = os.path.join(sitebasedir, site_layout.SITEDATA_OLD_DIR1)
    old_site_metadata = os.path.join(site_data_old1, site_layout.SITE_META_FILE)
    old_site_database = os.path.join(site_data_old1, site_layout.SITE_DATABASE_FILE)
    old_users1 = os.path.join(site_data_old1, layout.USER_DIR_PREV)
    old_vocabs1 = os.path.join(site_data_old1, layout.VOCAB_DIR_PREV)
    if os.path.isfile(old_site_metadata):
        print("Move old site metadata: %s -> %s"%(old_site_metadata, sitebasedir))
        new_site_metadata = os.path.join(sitebasedir, site_layout.SITE_META_FILE)
        os.rename(old_site_metadata, new_site_metadata)
    if os.path.isfile(old_site_database):
        print("Move old site database: %s -> %s"%(old_site_database, sitebasedir))
        new_site_database = os.path.join(sitebasedir, site_layout.SITE_DATABASE_FILE)
        os.rename(old_site_database, new_site_database)
    if os.path.isdir(old_users1) or os.path.isdir(old_vocabs1):
        print("Copy Annalist old user and/or vocab data from %s"%site_data_old1)
        migrate_old_data(site_data_old1, layout.USER_DIR_PREV, site_data_tgt, layout.USER_DIR )
        migrate_old_data(site_data_old1, layout.VOCAB_DIR_PREV, site_data_tgt, layout.VOCAB_DIR)
    # c/_annalist_site/_annalist_collection/ - using new dir names
    site_data_old2 = os.path.join(sitebasedir, site_layout.SITEDATA_OLD_DIR2)
    old_users2 = os.path.join(site_data_old2, layout.USER_DIR)
    old_vocabs2 = os.path.join(site_data_old2, layout.VOCAB_DIR)
    if os.path.isdir(old_users2) or os.path.isdir(old_vocabs2):
        print("Copy Annalist old user and/or vocab data from %s"%site_data_old2)
        migrate_old_data(site_data_old2, layout.USER_DIR_PREV, site_data_tgt, layout.USER_DIR )
        migrate_old_data(site_data_old2, layout.VOCAB_DIR_PREV, site_data_tgt, layout.VOCAB_DIR)
    # --- Archive old site data so it's not visible next time
    if os.path.isdir(site_data_old1):
        archive_old_data(site_data_old1, "")
    if os.path.isdir(site_data_old2):
        archive_old_data(site_data_old2, "")
    # --- Copy latest site data to target directory
    print("Copy Annalist site data")
    print("from %s"%site_data_src)
    for sdir in layout.DATA_DIRS:
        # Replace these directories outright with the distributed versions
        print("- %s -> %s"%(sdir, site_data_tgt))
        Site.replace_site_data_dir(sitedata, sdir, site_data_src)
    for sdir in (layout.USER_DIR, layout.VOCAB_DIR):
        # Merge distributed data into these, preserving local additions
        print("- %s +> %s"%(sdir, site_data_tgt))
        Site.update_site_data_dir(sitedata, sdir, site_data_src)
    for sdir in (layout.INFO_DIR,):
        print("- %s ~> %s"%(sdir, site_data_tgt))
        Site.expand_site_data_dir(sitedata, sdir, site_data_src)
    for sdir in layout.COLL_DIRS_PREV:
        remove_old_data(site_data_tgt, sdir)
    print("Generating context for site data")
    sitedata.generate_coll_jsonld_context()
    # --- Copy provider data to site config provider directory
    provider_dir_src = os.path.join(annroot, "annalist/data/identity_providers")
    provider_dir_tgt = os.path.join(sitesettings.CONFIG_BASE, "providers")
    print("Copy identity provider data:")
    print("- from: %s"%(provider_dir_src,))
    print("- to: %s"%(provider_dir_tgt,))
    ensure_dir(provider_dir_tgt)
    updatetree(provider_dir_src, provider_dir_tgt)
    # --- Copy sample system configuration files to config directory
    config_dir_src = os.path.join(annroot, "annalist/data/config_examples")
    config_dir_tgt = os.path.join(sitesettings.CONFIG_BASE, "config")
    print("Copy system configuration sample files:")
    print("- from: %s"%(config_dir_src,))
    print("- to: %s"%(config_dir_tgt,))
    ensure_dir(config_dir_tgt)
    # Fix: previously this copied into 'provider_dir_tgt' (the identity-
    # providers directory) rather than the config directory announced above;
    # cf. the corresponding (correct) logic in am_createsite.
    updatetree(config_dir_src, config_dir_tgt)
    return status
def migrate_old_data(old_site_dir, old_data_dir, new_site_dir, new_data_dir):
    """
    Migrate data from a single old-site directory to the new site.

    Copies the contents of old_site_dir/old_data_dir (if it exists) into
    new_site_dir/new_data_dir, then archives the old directory.
    """
    src_dir = os.path.join(old_site_dir, old_data_dir)
    dst_dir = os.path.join(new_site_dir, new_data_dir)
    if not os.path.isdir(src_dir):
        # Nothing to migrate
        return
    print("- %s +> %s (migrating)"%(src_dir, dst_dir))
    updatetree(src_dir, dst_dir)
    archive_old_data(old_site_dir, old_data_dir)
    return
def archive_old_data(site_dir, data_dir):
    """
    Archive old data no longer required, by renaming the directory with a
    '.saved' suffix.  A non-existent directory is silently ignored.
    """
    # print("@@ site_dir %s, data_dir %s"%(site_dir, data_dir))
    target = os.path.join(site_dir, data_dir)
    if not os.path.isdir(target):
        return
    # Drop a single trailing path separator so the '.saved' suffix attaches
    # to the directory name itself.
    if target.endswith("/"):
        target = target[:-1]
    archived = target+".saved"
    print("- %s >> %s (rename)"%(target, archived))
    os.rename(target, archived)
    return
def remove_old_data(site_dir, data_dir):
    """
    Remove old data no longer required.

    Deletes directory site_dir/data_dir and its contents if it exists;
    otherwise does nothing.
    """
    obsolete_dir = os.path.join(site_dir, data_dir)
    if not os.path.isdir(obsolete_dir):
        return
    print("- %s (remove)"%(obsolete_dir,))
    removetree(obsolete_dir)
    return
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_manager/am_createsite.py
|
am_createsite.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import os
import django
import sys
import string
import random
import logging
import logging.handlers
from annalist import __version__
from annalist import layout
# Filesystem locations derived from the installed django package and from
# this settings module's own position within the source tree.
DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
SETTINGS_DIR = os.path.dirname(os.path.realpath(__file__)) # src/annalist_root/annalist_site/settings
SITE_CONFIG_DIR = os.path.dirname(SETTINGS_DIR) # src/annalist_root/annalist_site
SITE_SRC_ROOT = os.path.dirname(SITE_CONFIG_DIR) # src/annalist_root
SAMPLEDATA_DIR = SITE_SRC_ROOT+"/sampledata/data" # src/annalist_root/sampledata
# URL path components under which the Annalist site is served.
ANNALIST_SITE_SEG = "annalist" # Base URL path segment for Annalist site
ANNALIST_SITE_REF = ANNALIST_SITE_SEG+"/" # Base URL path for Annalist site
# Log file names (the containing directory comes from other settings).
ANNALIST_LOG_FILE = "annalist.log"
ACCESS_LOG_FILE = "annalist-wsgi-access.log"
ERROR_LOG_FILE = "annalist-wsgi-error.log"
# Number of gunicorn server threads to use
# A value of less than 2 causes a deadlock when accessing data as Turtle.
# This can be overridden by specific configuration settings files.
SERVER_THREADS = 2
class RotatingNewFileHandler(logging.handlers.RotatingFileHandler):
    """
    Rotating file logging handler that additionally starts a fresh log file
    the first time any instance is created in a run of the containing program.

    NOTE: if multiple file handlers are used within an application, only the
          first one instantiated is allocated a new file at startup.  The
          class variable '_newfile' might be replaced with a dictionary
          indexed by the (fully expanded) filename.
    """

    # Class-wide flag: set True once any instance has forced a rollover.
    _newfile = False

    def __init__(self, *args, **kwargs):
        super(RotatingNewFileHandler, self).__init__(*args, **kwargs)
        if RotatingNewFileHandler._newfile:
            return
        # First handler created in this process: roll over to a new file.
        self.doRollover()
        RotatingNewFileHandler._newfile = True
        return
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# Define secret key for various Django functions
# See:
# https://stackoverflow.com/a/15383766/324122
# https://stackoverflow.com/a/23728630/324122
#
# SECURITY WARNING: keep the secret key used in production secret!
# By default, generate a fresh 32-character random key on every startup,
# using the OS-provided CSPRNG (random.SystemRandom).
KEY_CHARS = string.ascii_letters + string.digits + string.punctuation
SECRET_KEY = ''.join(
    random.SystemRandom().choice(KEY_CHARS)
    for _ in range(32)
)
# See also: https://stackoverflow.com/a/23728630/324122
# SECRET_KEY = '@-+h*%@h+0yj(^c9y-=1a@9l^@xzub200ofq2@a$gm2k_l*$pf'
# UPDATE: running under 'gunicorn', I've found that session logins get dumped
# periodically when the worker process periodically restarts.
# Hence I'm trying to find an alternative that attempts to be more "sticky",
# without depending on a predefined secret key.
#
# Options I'm considering:
# (a) use an environment variable set randomly when starting the server,
# (b) use the file system to persist a randomly generated value
#
# Currently, I prefer the environment variable approach:
# For this to work, set the environment variable in 'annalist-manager' when
# running under 'gunicorn'. If not defined, use random key (from above).
if "ANNALIST_KEY" in os.environ:
    SECRET_KEY = os.environ["ANNALIST_KEY"]
# SECURITY WARNING: don't run with debug turned on in production!
# (overrides in settings.devel and settings.runtests)
DEBUG = False
# Logging level used by selected log statements whose output may be useful
# for tracing field values displayed in Annalist edit/view forms.
# Suggested use is to raise level to logging.INFO when running a single named
# test, when trying to understand how values end up in a form.
TRACE_FIELD_VALUE = logging.INFO
ALLOWED_HOSTS = []
ROOT_URLCONF = 'annalist_site.urls'
WSGI_APPLICATION = 'annalist_site.wsgi.application'
# Trust the front-end proxy's forwarded-protocol header to detect HTTPS.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTOCOL", "https")
# Customize authentication backends
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend', # default
    'login.OAuth2CheckBackend.OAuth2CheckBackend'
)
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'annalist',
    'login',
)
MIDDLEWARE = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            "templates"
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.template.context_processors.request',
                'django.contrib.messages.context_processors.messages',
            ],
            'debug': False,
        },
    },
]
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# (DATABASES is defined by each deployment-specific settings module.)
# DATABASE_PATH = os.path.join(SAMPLEDATA_DIR, 'annalist_site/db.sqlite3')
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': DATABASE_PATH,
# }
# }
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATIC_SEG = 'static'
STATIC_URL = '/'+STATIC_SEG+"/"
STATICFILES_DIRS = (
    ("", SITE_SRC_ROOT+"/annalist/data/static"),
    ("images", SITE_SRC_ROOT+"/annalist/data/identity_providers/images"),
)
ANNALIST_VERSION = __version__
ANNALIST_VERSION_MSG = "Annalist version %s (common configuration)"%(ANNALIST_VERSION)
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_site/settings/common.py
|
common.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
from .common import *
# Settings for a shared-service deployment: site data under /var/annalist_site,
# configuration under /etc/annalist/, logs under /var/log/annalist/.
ANNALIST_VERSION_MSG = "Annalist version %s (shared service configuration)"%(ANNALIST_VERSION)
SETTINGS_MODULE = __name__
SITE_DIR_NAME = "annalist_site"
BASE_DATA_DIR = "/var"
BASE_SITE_DIR = os.path.join(BASE_DATA_DIR, SITE_DIR_NAME)
CONFIG_BASE = "/etc/annalist/"
STATIC_ROOT = os.path.join(BASE_SITE_DIR, 'static')
BASE_LOG_DIR = "/var/log/annalist/"
ANNALIST_LOG_PATH = BASE_LOG_DIR+ANNALIST_LOG_FILE
ACCESS_LOG_PATH = BASE_LOG_DIR+ACCESS_LOG_FILE
ERROR_LOG_PATH = BASE_LOG_DIR+ERROR_LOG_FILE
DATABASE_PATH = os.path.join(BASE_SITE_DIR, 'db.sqlite3')
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': DATABASE_PATH,
    }
}
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['.annalist.net'] # @@FIXME
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        # Include the default Django email handler for errors
        # This is what you'd get without configuring logging at all.
        'mail_admins': {
            'class': 'django.utils.log.AdminEmailHandler',
            'level': 'ERROR',
            # But the emails are plain text by default - HTML is nicer
            'include_html': True,
        },
        # Log to a text file that can be rotated by logrotate
        'logfile': {
            'class': 'logging.handlers.WatchedFileHandler',
            'filename': ANNALIST_LOG_PATH
        },
    },
    'loggers': {
        # Again, default Django configuration to email unhandled exceptions
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        # Might as well log any errors anywhere else in Django
        'django': {
            'handlers': ['logfile'],
            'level': 'ERROR',
            'propagate': False,
        },
        'annalist': {
            'handlers': ['logfile'],
            'level': 'INFO', # Or maybe INFO or DEBUG
            'propagate': False
        },
        'login': {
            'handlers': ['logfile'],
            'level': 'INFO', # Or maybe INFO or DEBUG
            'propagate': False
        },
    },
}
import logging
log = logging.getLogger(__name__)
# Record effective configuration at startup (debug lines appear only if the
# logging level for this module permits them).
log.info(ANNALIST_VERSION_MSG)
log.debug("SETTINGS_MODULE: "+SETTINGS_MODULE)
log.debug("BASE_DATA_DIR: "+BASE_DATA_DIR)
log.debug("CONFIG_BASE: "+CONFIG_BASE)
log.debug("DJANGO_ROOT: "+DJANGO_ROOT)
log.debug("SITE_CONFIG_DIR: "+SITE_CONFIG_DIR)
log.debug("SITE_SRC_ROOT: "+SITE_SRC_ROOT)
log.debug("STATICFILES_DIRS: "+repr(STATICFILES_DIRS))
log.debug("DB PATH: "+DATABASES['default']['NAME'])
log.debug("ALLOWED_HOSTS: "+",".join(ALLOWED_HOSTS))
log.debug("ANNALIST_LOG_PATH: "+ANNALIST_LOG_PATH)
log.debug("TRACE_FIELD_VALUE: "+str(TRACE_FIELD_VALUE))
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_site/settings/shared.py
|
shared.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
from .common import *
# Settings for a personal deployment with debugging enabled: site data under
# ~/annalist_site, configuration under ~/.annalist/, DEBUG on.
ANNALIST_VERSION_MSG = "Annalist version %s (personal_debug configuration)"%(ANNALIST_VERSION)
SETTINGS_MODULE = __name__
SITE_DIR_NAME = "annalist_site"
BASE_DATA_DIR = os.path.expanduser("~")
BASE_SITE_DIR = os.path.join(BASE_DATA_DIR, SITE_DIR_NAME)
CONFIG_BASE = os.path.join(os.path.expanduser("~"), ".annalist/")
STATIC_ROOT = os.path.join(BASE_SITE_DIR, 'static')
BASE_LOG_DIR = BASE_SITE_DIR+"/"
ANNALIST_LOG_PATH = BASE_LOG_DIR+ANNALIST_LOG_FILE
ACCESS_LOG_PATH = BASE_LOG_DIR+ACCESS_LOG_FILE
ERROR_LOG_PATH = BASE_LOG_DIR+ERROR_LOG_FILE
DATABASE_PATH = os.path.join(BASE_SITE_DIR, 'db.sqlite3')
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': DATABASE_PATH,
    }
}
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*'] # Insecure: use e.g. ['.annalist.net']
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'timed': {
            'format': '%(levelname)s %(asctime)s %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        # Include the default Django email handler for errors
        # This is what you'd get without configuring logging at all.
        'mail_admins': {
            'class': 'django.utils.log.AdminEmailHandler',
            'level': 'ERROR',
            # But the emails are plain text by default - HTML is nicer
            'include_html': True,
            'formatter': 'verbose'
        },
        # Log to a text file that can be rotated by logrotate
        'logfile': {
            # 'class': 'logging.handlers.WatchedFileHandler',
            # 'class': 'logging.handlers.RotatingFileHandler',
            'class': 'annalist_site.settings.common.RotatingNewFileHandler',
            'filename': ANNALIST_LOG_PATH,
            'maxBytes': 2*1024*1024, # 2Mb
            'backupCount': 9, # Keep 9 files
            'level': 'DEBUG',
            'formatter': 'timed'
        },
    },
    'loggers': {
        # Again, default Django configuration to email unhandled exceptions
        # 'django.request': {
        # 'handlers': ['mail_admins'],
        # 'level': 'ERROR',
        # 'propagate': True,
        # },
        'django.request': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': True,
        },
        # Might as well log any errors anywhere else in Django
        'django': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': False,
        },
        'annalist_root': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': False
        },
        'annalist_site': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': False
        },
        'annalist': {
            'handlers': ['logfile'],
            'level': 'DEBUG',
            'propagate': False
        },
        'login': {
            'handlers': ['logfile'],
            'level': 'DEBUG',
            'propagate': False
        },
    },
}
import logging
log = logging.getLogger(__name__)
log.info("Annalist starting...")
# Force new log files for any rotating file log handlers
# NOTE(review): handlers configured via the LOGGING dict are usually not
# attached directly to this module's logger, so this loop may find nothing
# to roll over - confirm against the logging configuration actually applied.
for h in log.handlers:
    log.info("@@ log handler %r"%(h,))
    if isinstance(h, logging.handlers.RotatingFileHandler):
        log.info("@@ log rollover")
        h.doRollover()
# log.info("Annalist version %s (personal configuration)"%(ANNALIST_VERSION))
log.info(ANNALIST_VERSION_MSG)
log.debug("SETTINGS_MODULE: "+SETTINGS_MODULE)
log.debug("BASE_DATA_DIR: "+BASE_DATA_DIR)
log.debug("CONFIG_BASE: "+CONFIG_BASE)
log.debug("DJANGO_ROOT: "+DJANGO_ROOT)
log.debug("SITE_CONFIG_DIR: "+SITE_CONFIG_DIR)
log.debug("SITE_SRC_ROOT: "+SITE_SRC_ROOT)
log.debug("STATICFILES_DIRS: "+repr(STATICFILES_DIRS))
log.debug("DB PATH: "+DATABASES['default']['NAME'])
log.debug("ALLOWED_HOSTS: "+",".join(ALLOWED_HOSTS))
log.debug("ANNALIST_LOG_PATH: "+ANNALIST_LOG_PATH)
log.debug("TRACE_FIELD_VALUE: "+str(TRACE_FIELD_VALUE))
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_site/settings/personal_debug.py
|
personal_debug.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
from .common import *
# Settings for the FAST project deployment: site data under ~/annalist_fast,
# configuration under ~/.annalist/, DEBUG off.
ANNALIST_VERSION_MSG = "Annalist version %s (FAST project configuration)"%(ANNALIST_VERSION)
SETTINGS_MODULE = __name__
SITE_DIR_NAME = "annalist_fast"
BASE_DATA_DIR = os.path.expanduser("~")
BASE_SITE_DIR = os.path.join(BASE_DATA_DIR, SITE_DIR_NAME)
CONFIG_BASE = os.path.join(os.path.expanduser("~"), ".annalist/")
STATIC_ROOT = os.path.join(BASE_SITE_DIR, 'static')
BASE_LOG_DIR = BASE_SITE_DIR+"/"
ANNALIST_LOG_PATH = BASE_LOG_DIR+ANNALIST_LOG_FILE
ACCESS_LOG_PATH = BASE_LOG_DIR+ACCESS_LOG_FILE
ERROR_LOG_PATH = BASE_LOG_DIR+ERROR_LOG_FILE
DATABASE_PATH = os.path.join(BASE_SITE_DIR, 'db.sqlite3')
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': DATABASE_PATH,
    }
}
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*'] # Insecure: use e.g. ['.annalist.net']
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'timed': {
            'format': '%(levelname)s %(asctime)s %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        # Include the default Django email handler for errors
        # This is what you'd get without configuring logging at all.
        'mail_admins': {
            'class': 'django.utils.log.AdminEmailHandler',
            'level': 'ERROR',
            # But the emails are plain text by default - HTML is nicer
            'include_html': True,
            'formatter': 'verbose'
        },
        # Log to a text file that can be rotated by logrotate
        'logfile': {
            # 'class': 'logging.handlers.WatchedFileHandler',
            # 'class': 'logging.handlers.RotatingFileHandler',
            'class': 'annalist_site.settings.common.RotatingNewFileHandler',
            'filename': ANNALIST_LOG_PATH,
            'maxBytes': 2*1024*1024, # 2Mb
            'backupCount': 9, # Keep 9 files
            'level': TRACE_FIELD_VALUE,
            'formatter': 'timed'
        },
    },
    'loggers': {
        # Again, default Django configuration to email unhandled exceptions
        # 'django.request': {
        # 'handlers': ['mail_admins'],
        # 'level': 'ERROR',
        # 'propagate': True,
        # },
        'django.request': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': True,
        },
        # Might as well log any errors anywhere else in Django
        'django': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': False,
        },
        'annalist_root': {
            'handlers': ['logfile'],
            'level': 'INFO', # Or maybe INFO or DEBUG
            'propagate': False
        },
        'annalist_site': {
            'handlers': ['logfile'],
            'level': 'INFO', # Or maybe INFO or DEBUG
            'propagate': False
        },
        'annalist': {
            'handlers': ['logfile'],
            'level': TRACE_FIELD_VALUE, # Or maybe INFO or DEBUG
            'propagate': False
        },
        'login': {
            'handlers': ['logfile'],
            'level': 'INFO', # Or maybe INFO or DEBUG
            'propagate': False
        },
    },
}
import logging
log = logging.getLogger(__name__)
log.info("Annalist starting...")
# Force new log files for any rotating file log handlers
for h in log.handlers:
    log.info("@@ log handler %r"%(h,))
    if isinstance(h, logging.handlers.RotatingFileHandler):
        log.info("@@ log rollover")
        h.doRollover()
# log.info("Annalist version %s (FAST project configuration)"%(ANNALIST_VERSION))
log.info(ANNALIST_VERSION_MSG)
log.debug("SETTINGS_MODULE: "+SETTINGS_MODULE)
log.debug("BASE_DATA_DIR: "+BASE_DATA_DIR)
log.debug("CONFIG_BASE: "+CONFIG_BASE)
log.debug("DJANGO_ROOT: "+DJANGO_ROOT)
log.debug("SITE_CONFIG_DIR: "+SITE_CONFIG_DIR)
log.debug("SITE_SRC_ROOT: "+SITE_SRC_ROOT)
log.debug("STATICFILES_DIRS: "+repr(STATICFILES_DIRS))
log.debug("DB PATH: "+DATABASES['default']['NAME'])
log.debug("ALLOWED_HOSTS: "+",".join(ALLOWED_HOSTS))
log.debug("ANNALIST_LOG_PATH: "+ANNALIST_LOG_PATH)
# BUG FIX: TRACE_FIELD_VALUE is an int (a logging level); concatenating it
# to a str raised TypeError at import time.  Wrap in str() as the sibling
# settings modules do.
log.debug("TRACE_FIELD_VALUE: "+str(TRACE_FIELD_VALUE))
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_site/settings/fast_project.py
|
fast_project.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
from .common import *
# Settings for the demo deployment: site data under ~/annalist_demo,
# configuration under ~/.annalist/demo/, DEBUG off.
# FIX: the startup message previously said "personal configuration", copied
# from personal.py; this is the demo settings module (cf. SITE_DIR_NAME and
# CONFIG_BASE below).
ANNALIST_VERSION_MSG = "Annalist version %s (demo configuration)"%(ANNALIST_VERSION)
SETTINGS_MODULE = __name__
SITE_DIR_NAME = "annalist_demo"
BASE_DATA_DIR = os.path.expanduser("~")
BASE_SITE_DIR = os.path.join(BASE_DATA_DIR, SITE_DIR_NAME)
CONFIG_BASE = os.path.join(os.path.expanduser("~"), ".annalist/demo/")
STATIC_ROOT = os.path.join(BASE_SITE_DIR, 'static')
BASE_LOG_DIR = BASE_SITE_DIR+"/"
ANNALIST_LOG_PATH = BASE_LOG_DIR+ANNALIST_LOG_FILE
ACCESS_LOG_PATH = BASE_LOG_DIR+ACCESS_LOG_FILE
ERROR_LOG_PATH = BASE_LOG_DIR+ERROR_LOG_FILE
DATABASE_PATH = os.path.join(BASE_SITE_DIR, 'db.sqlite3')
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': DATABASE_PATH,
    }
}
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*'] # Insecure: use e.g. ['.annalist.net']
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'timed': {
            'format': '%(levelname)s %(asctime)s %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        # Include the default Django email handler for errors
        # This is what you'd get without configuring logging at all.
        'mail_admins': {
            'class': 'django.utils.log.AdminEmailHandler',
            'level': 'ERROR',
            # But the emails are plain text by default - HTML is nicer
            'include_html': True,
            'formatter': 'verbose'
        },
        # Log to a text file that can be rotated by logrotate
        'logfile': {
            # 'class': 'logging.handlers.WatchedFileHandler',
            # 'class': 'logging.handlers.RotatingFileHandler',
            'class': 'annalist_site.settings.common.RotatingNewFileHandler',
            'filename': ANNALIST_LOG_PATH,
            'maxBytes': 2*1024*1024, # 2Mb
            'backupCount': 9, # Keep 9 files
            'level': TRACE_FIELD_VALUE,
            'formatter': 'timed'
        },
    },
    'loggers': {
        # Again, default Django configuration to email unhandled exceptions
        # 'django.request': {
        # 'handlers': ['mail_admins'],
        # 'level': 'ERROR',
        # 'propagate': True,
        # },
        'django.request': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': True,
        },
        # Might as well log any errors anywhere else in Django
        'django': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': False,
        },
        'annalist_root': {
            'handlers': ['logfile'],
            'level': 'INFO', # Or maybe INFO or DEBUG
            'propagate': False
        },
        'annalist_site': {
            'handlers': ['logfile'],
            'level': 'INFO', # Or maybe INFO or DEBUG
            'propagate': False
        },
        'annalist': {
            'handlers': ['logfile'],
            'level': TRACE_FIELD_VALUE, # Or maybe INFO or DEBUG
            'propagate': False
        },
        'login': {
            'handlers': ['logfile'],
            'level': 'INFO', # Or maybe INFO or DEBUG
            'propagate': False
        },
    },
}
import logging
log = logging.getLogger(__name__)
log.info("Annalist starting...")
# Force new log files for any rotating file log handlers
for h in log.handlers:
    log.info("@@ log handler %r"%(h,))
    if isinstance(h, logging.handlers.RotatingFileHandler):
        log.info("@@ log rollover")
        h.doRollover()
# log.info("Annalist version %s (demo configuration)"%(ANNALIST_VERSION))
log.info(ANNALIST_VERSION_MSG)
# For development/testing: don't log SECRET_KEY in production!
# log.info("SECRET_KEY: "+SECRET_KEY)
log.debug("SETTINGS_MODULE: "+SETTINGS_MODULE)
log.debug("BASE_DATA_DIR: "+BASE_DATA_DIR)
log.debug("BASE_SITE_DIR: "+BASE_SITE_DIR)
log.debug("CONFIG_BASE: "+CONFIG_BASE)
log.debug("DJANGO_ROOT: "+DJANGO_ROOT)
log.debug("SITE_CONFIG_DIR: "+SITE_CONFIG_DIR)
log.debug("SITE_SRC_ROOT: "+SITE_SRC_ROOT)
log.debug("STATICFILES_DIRS: "+repr(STATICFILES_DIRS))
log.debug("DB PATH: "+DATABASES['default']['NAME'])
log.debug("ALLOWED_HOSTS: "+",".join(ALLOWED_HOSTS))
log.debug("ANNALIST_LOG_PATH: "+ANNALIST_LOG_PATH)
log.debug("TRACE_FIELD_VALUE: "+str(TRACE_FIELD_VALUE))
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_site/settings/demo.py
|
demo.py
|
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
from .common import *
# Settings for a personal deployment: site data under ~/annalist_site,
# configuration under ~/.annalist/, DEBUG off.
ANNALIST_VERSION_MSG = "Annalist version %s (personal configuration)"%(ANNALIST_VERSION)
SETTINGS_MODULE = __name__
SITE_DIR_NAME = "annalist_site"
BASE_DATA_DIR = os.path.expanduser("~")
BASE_SITE_DIR = os.path.join(BASE_DATA_DIR, SITE_DIR_NAME)
CONFIG_BASE = os.path.join(os.path.expanduser("~"), ".annalist/")
STATIC_ROOT = os.path.join(BASE_SITE_DIR, 'static')
BASE_LOG_DIR = BASE_SITE_DIR+"/"
ANNALIST_LOG_PATH = BASE_LOG_DIR+ANNALIST_LOG_FILE
ACCESS_LOG_PATH = BASE_LOG_DIR+ACCESS_LOG_FILE
ERROR_LOG_PATH = BASE_LOG_DIR+ERROR_LOG_FILE
DATABASE_PATH = os.path.join(BASE_SITE_DIR, 'db.sqlite3')
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': DATABASE_PATH,
    }
}
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*'] # Insecure: use e.g. ['.annalist.net']
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'timed': {
            'format': '%(levelname)s %(asctime)s %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
    },
    'handlers': {
        # Include the default Django email handler for errors
        # This is what you'd get without configuring logging at all.
        'mail_admins': {
            'class': 'django.utils.log.AdminEmailHandler',
            'level': 'ERROR',
            # But the emails are plain text by default - HTML is nicer
            'include_html': True,
            'formatter': 'verbose'
        },
        # Log to a text file that can be rotated by logrotate
        'logfile': {
            # 'class': 'logging.handlers.WatchedFileHandler',
            # 'class': 'logging.handlers.RotatingFileHandler',
            'class': 'annalist_site.settings.common.RotatingNewFileHandler',
            'filename': ANNALIST_LOG_PATH,
            'maxBytes': 2*1024*1024, # 2Mb
            'backupCount': 9, # Keep 9 files
            'level': TRACE_FIELD_VALUE,
            'formatter': 'timed'
        },
    },
    'loggers': {
        # Again, default Django configuration to email unhandled exceptions
        # 'django.request': {
        # 'handlers': ['mail_admins'],
        # 'level': 'ERROR',
        # 'propagate': True,
        # },
        'django.request': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': True,
        },
        # Might as well log any errors anywhere else in Django
        'django': {
            'handlers': ['logfile'],
            'level': 'INFO',
            'propagate': False,
        },
        'annalist_root': {
            'handlers': ['logfile'],
            'level': 'INFO', # Or maybe INFO or DEBUG
            'propagate': False
        },
        'annalist_site': {
            'handlers': ['logfile'],
            'level': 'INFO', # Or maybe INFO or DEBUG
            'propagate': False
        },
        'annalist': {
            'handlers': ['logfile'],
            'level': TRACE_FIELD_VALUE, # Or maybe INFO or DEBUG
            'propagate': False
        },
        'login': {
            'handlers': ['logfile'],
            'level': 'INFO', # Or maybe INFO or DEBUG
            'propagate': False
        },
    },
}
import logging
log = logging.getLogger(__name__)
log.info("Annalist starting...")
# Force new log files for any rotating file log handlers
for h in log.handlers:
    log.info("@@ log handler %r"%(h,))
    if isinstance(h, logging.handlers.RotatingFileHandler):
        log.info("@@ log rollover")
        h.doRollover()
# log.info("Annalist version %s (personal configuration)"%(ANNALIST_VERSION))
log.info(ANNALIST_VERSION_MSG)
# For development/testing: don't log SECRET_KEY in production!
# log.info("SECRET_KEY: "+SECRET_KEY)
log.debug("SETTINGS_MODULE: "+SETTINGS_MODULE)
log.debug("BASE_DATA_DIR: "+BASE_DATA_DIR)
log.debug("BASE_SITE_DIR: "+BASE_SITE_DIR)
log.debug("CONFIG_BASE: "+CONFIG_BASE)
log.debug("DJANGO_ROOT: "+DJANGO_ROOT)
log.debug("SITE_CONFIG_DIR: "+SITE_CONFIG_DIR)
log.debug("SITE_SRC_ROOT: "+SITE_SRC_ROOT)
log.debug("STATICFILES_DIRS: "+repr(STATICFILES_DIRS))
log.debug("DB PATH: "+DATABASES['default']['NAME'])
log.debug("ALLOWED_HOSTS: "+",".join(ALLOWED_HOSTS))
log.debug("ANNALIST_LOG_PATH: "+ANNALIST_LOG_PATH)
log.debug("TRACE_FIELD_VALUE: "+str(TRACE_FIELD_VALUE))
# End.
|
Annalist
|
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist_site/settings/personal.py
|
personal.py
|
# AnnoMate
A package for using and creating interactive dashboards for manual review.

# Quick Start
## Install
### Set up Conda Environment
This is _highly_ recommended to manage different dependencies required by different reviewers.
1. Install conda
Credit to Raymond Chu this article: https://medium.com/google-cloud/set-up-anaconda-under-google-cloud-vm-on-windows-f71fc1064bd7
```
sudo apt-get update
sudo apt-get install bzip2 libxml2-dev
wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
bash Miniconda3-latest-Linux-x86_64.sh
rm Miniconda3-latest-Linux-x86_64.sh
source .bashrc
conda install scikit-learn pandas jupyter ipython
```
2. Create a conda environment
If you do not already have a designated environment:
```
conda create --name <your_env> python==<py_version>
```
`<your_env>` is the name of your environment (ie purity_review_env). Check the corresponding reviewer's `setup.py` file to get the proper python version for `py_version`.
3. Add conda environment to ipykernel
Credit to Nihal Sangeeth from StackOverflow: https://stackoverflow.com/questions/53004311/how-to-add-conda-environment-to-jupyter-lab.
```
conda activate <your_env>
conda install ipykernel
ipython kernel install --user --name=<your_env>
conda deactivate
```
When you open a jupyter notebook, you can change the environment the notebook cells are run in to `<your_env>`
### Install AnnoMate with pip
If you are developing a brand new reviewer, you can install from PyPi
```
conda activate <your_env>
pip install AnnoMate
```
### Install with Git
AnnoMate and most prebuilt reviewers can be downloaded with git.
```
git clone [email protected]:getzlab/AnnoMate.git
cd AnnoMate
conda activate <your_env>
pip install -r requirements.txt
pip install -e .
```
### Tutorials and Documentation
See a more detailed tutorial in `tutorial_notebooks/Intro_to_Reviewers.ipynb`.
View the catalog of existing reviewers at [catalog/ReviewerCatalog.ipynb](https://github.com/getzlab/AnnoMate/blob/master/catalog/ReviewerCatalog.ipynb).
For developers, see `tutorial_notebooks/Developer_Jupyter_Reviewer_Tutorial.ipynb`.
## Why Jupyter Reviewer
### Why and how we review data
Part of any study is ensuring data are consistent and drawing conclusions about the data from multiple sources. Studies are often novel, so frequently there are steps along the way that do not have existing, validated automation techniques. Therefore, we must perform manual review.
Typically, the person reviewing all this data opens a bunch of windows to view data from different places (a clinical information spreadsheet from a collaborator, a few outputs from a Terra workflow, and/or previous notes from another reviewer, etc.). Next they look at all the data and keep notes in yet a separate document, such as a spreadsheet or digital/physical notes. Then, they go row by row, sample by sample, until they finish.
### Why we need something better
While straightforward to do in theory, this review method is very brittle, error prone, and very time consuming.
Reviewing can take a very long time, such as reviewing large datasets on the order of hundreds to thousands of data points, or if the review needs to be repeated multiple times due to changes in processes upstream.
Some review processes are iterative, or new information is gained from some other source to inform the review process, or we need to pass off the review process to someone else. We should be able to easily incorporate old data with new data, and share that history and information with others.
Some reviews require calculations, or exploring the data in ways that a static plot cannot provide. Some Terra workflows do produce some interactive html files, but this is rare. Sometimes, a reviewer realizes mid-way through the review process that a different kind of plot could be very informative. It should be easy to generate such a plot on the fly without having to modify or create a new Terra workflow, or opening a new notebook to calculate manually.
Lastly, humans are humans, and we make mistakes. It can be very tedious to maintain and update a large spreadsheet with hundreds of rows and multiple columns to annotate. Annotations are difficult to enforce in this setting, and changes (intentional or accidental) are difficult to track.
### The Solution: Jupyter notebook and Plotly-Dash!
Most ACBs use jupyter notebooks for their analysis. So why not keep the review process in jupyter notebooks too? Additionally, there already exist great tools for making interactive figures and dashboards. We can use these packages to help automatically consolidate information and create figures that will make it easier to review, enforce annotation standards, and track changes over time.
The `AnnoMate` package makes it simple to create dashboards for reviewing data. Developers and users can easily customize their dashboards to incorporate any data they like, and automatically provides a reviewer an easy way to annotate their data, track changes, and share their annotations with others.
### Get Started
See `tutorial_notebooks/` for documentation and tutorials.
|
AnnoMate
|
/AnnoMate-0.0.2.tar.gz/AnnoMate-0.0.2/README.md
|
README.md
|
# AnnoSpat automatically annotates cell types and quantifies inter-cellular arrangements from spatial proteomics
Aanchal Mongia, Diane Sauders, Alvin C. Powers, Ali Naji, Gregory W. Schwartz and Robert B. Faryabi
## Introduction
`Annospat` is a tool for annotating and inferring patterns in single cells from spatial
proteomics. It uses neural network and point process algorithms to automatically identify cell types and quantify cell-cell spatial relationships in the absence of manual annotation. Using the canonical markers for each protein in the antibody panel , it predicts the cell type labels by rendering the similarity between the cells in the proteomic space.
## Dependencies
Python 3.6 and libraries:
> numpy, scikit-learn, pandas
## Implementation
`Annospat` can be used as a python library or as a command line tool using the argument `generateLabels` which takes as input the path to the raw proteomics matrix (with cells on rows and proteins as columns) and a signature file holding the canonical protein markers with other data related inputs.
```
Usage: AnnoSpat generateLabels [OPTIONS]
Generate cell type annotations
Options:
-i, --inputfile TEXT [required] #path to input proteomics file
-m, --markerfile TEXT [required] #path to marker file
-o, --outputdir TEXT [required] #path to output dir
-f, --firstprotein TEXT [required] #first protein to pick in the proteomics file
-l, --lastprotein TEXT [required] #last protein to pick in the proteomics file
-r, --roicol TEXT [required] #ROI column
-n, --colsToNeglect TEXT [default: ] #proteins to neglect in the proteomics file
-d, --diseasecol TEXT [default: ] # T1D/control
-s, --suffix TEXT [default: _IMC_T1D_AnnoSpat] #suffix to be given
-c, --classifier TEXT [default: ELM] #classifier to use
-t, --thresholdmaxandunknown TEXT
[default: [99.999, 70]] #thresholds for each protein default: b/w 99 and 99.9
-a, --thresholdadaptive TEXT [default: [99.5,99.5,99.5,99.5,99.9,
99,99.5, 99,99, 99.5,99.9,
99.9,99.9,99.9, 99.5,99.5]] #adaptive thresholds for each protein
-b, --fileseparator TEXT [default: ,] #file separator
```
Sample run:
```bash
pip install -i https://test.pypi.org/simple/ AnnoSpat
```
From your working directory, execute:
```
mkdir outputdir
```
<!-- AnnoSpat generateLabels -i /mnt/data2/aanchal/data/IMC_T1D/raw_data/mgDF.csv -m /mnt/data2/aanchal/data/IMC_T1D/signatures_T1D.csv -o outputdir -f 'HLA.ABC' -l 'Ghrelin' -r 'TIFFfilename'
python3 run.py generateLabels -i /mnt/data2/aanchal/data/IMC_T1D/raw_data/mgDF.csv -m /mnt/data2/aanchal/data/IMC_T1D/signatures_T1D_withImmuneCelltypes_withNegMarkers_d.csv -o delete_outputdir -f 'HLA.ABC' -l 'Ghrelin' -r 'Status'
-->
```
AnnoSpat generateLabels -i <path_to_proteomics_matrix> -m <path_to_marker_file> -o outputdir -f <first_protein_name> -l <last_protein_name> -r <name_of_col_holding_diseaseStatus>
```
Please replace the arguments to --inputfile/-i argument, --markerfile/-m argument and other arguments as per your own paths to proteomics and marker files and data.
|
AnnoSpat
|
/AnnoSpat-1.0.0.tar.gz/AnnoSpat-1.0.0/README.md
|
README.md
|
import argparse
import re,sys,os,math,gc
import numpy as np
import pandas as pd
import matplotlib as mpl
import copy
import math
from math import pi
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from scipy import sparse
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import copy
import math
import seaborn as sns
from iced import normalization
from scipy.interpolate import BSpline, make_interp_spline
# Global matplotlib configuration for all figures produced by this script.
plt.rcParams.update({'figure.max_open_warning': 100000})
plt.style.use('seaborn-colorblind')
mpl.rcParams['ytick.direction'] = 'out'
mpl.rcParams['savefig.dpi'] = 300 # saved-figure resolution (dpi)
mpl.rcParams['figure.dpi'] = 300
# fonttype 42 embeds TrueType fonts so text stays editable in PDF/PS output
mpl.rcParams['pdf.fonttype']=42
mpl.rcParams['ps.fonttype']=42
__author__ ='赵玥'
__mail__ ='[email protected]'
# NOTE(review): '_data__' looks like a typo for '__date__' - confirm before renaming
_data__ ='20191101'
def draw_boundaries(ax,Boundary_dict,start,end,samplelist,str_x,sam_x):
    """
    Draw boundary midpoint tick marks for two samples on a shared axis.

    ax            : matplotlib Axes to draw on (modified in place; returns None)
    Boundary_dict : dict mapping sample name -> DataFrame with a 'mid' column
                    (as produced by cut_boundaries)
    start, end    : x-axis limits (genomic coordinates)
    samplelist    : sequence of at least two sample names; the first is drawn
                    in red on the upper half, the second in green on the lower
    str_x, sam_x  : x positions (axes-fraction coordinates) for the row label
                    and the per-sample labels
    """
    # Hide ticks and frame: only the vlines and text labels should be visible.
    # FIX: tick_params string values 'on'/'off' are not accepted by current
    # matplotlib; use booleans (equivalent behavior).
    ax.tick_params(top=False, bottom=False, left=True, right=False)
    for loc in ['top', 'left', 'right', 'bottom']:
        ax.spines[loc].set_visible(False)
    # FIX: Axes.set_axis_bgcolor() was removed in matplotlib 2.x; prefer
    # set_facecolor() and fall back for very old matplotlib versions.
    if hasattr(ax, 'set_facecolor'):
        ax.set_facecolor('w')
    else:
        ax.set_axis_bgcolor('w')
    ax.set(xticks=[])
    ax.set(yticks=[])
    sample1 = samplelist[0]
    sample2 = samplelist[1]
    boundary_mid1 = Boundary_dict[sample1]['mid'].tolist()
    boundary_mid2 = Boundary_dict[sample2]['mid'].tolist()
    # Sample 1 marks occupy the upper band (y in 1.25-1.75),
    # sample 2 marks the lower band (y in 0.25-0.75).
    bound_y1min = [1.25 for i in boundary_mid1]
    bound_y1max = [1.75 for i in boundary_mid1]
    bound_y2min = [0.25 for i in boundary_mid2]
    bound_y2max = [0.75 for i in boundary_mid2]
    ax.set_ylim(0,2)
    ax.vlines(boundary_mid1,bound_y1min,bound_y1max,lw=2,color='red')
    ax.vlines(boundary_mid2,bound_y2min,bound_y2max,lw=2,color='green')
    ax.set_xlim(start,end)
    ax.text(str_x,0.5,'bound',horizontalalignment='right',verticalalignment='center',rotation='vertical',transform=ax.transAxes,fontsize=8)
    ax.text(sam_x,0.75,sample1,horizontalalignment='right',verticalalignment='center',rotation='horizontal',transform=ax.transAxes,color="red",fontsize=8)
    ax.text(sam_x,0.25,sample2,horizontalalignment='right',verticalalignment='center',rotation='horizontal',transform=ax.transAxes,color="green",fontsize=8)
def cut_boundaries(Boundary_dict,sample,boundaryPath,chrom,start,end):
    """Load boundary midpoints for one sample restricted to [start, end].

    Reads a tab-separated file with 'start'/'end' columns, computes the
    midpoint of each interval, keeps midpoints inside the window, stores
    the result under `sample` in `Boundary_dict` and returns the dict.
    Note: `chrom` is accepted for interface compatibility but the file is
    assumed to be single-chromosome (no 'chrom' filtering is done here).
    """
    Boundary_df = pd.read_table(boundaryPath,header=0,index_col=None,encoding='utf-8')
    Boundary_df = Boundary_df.fillna(0)
    Boundary_df = Boundary_df[['start','end']]
    Boundary_df['mid'] = (Boundary_df['start'] + Boundary_df['end'])/2
    Boundary_df = Boundary_df[Boundary_df['mid']>=start]
    Boundary_df = Boundary_df[Boundary_df['mid']<=end]
    # Bug fix: reset_index() returns a new DataFrame; the original call
    # discarded the result, leaving gaps in the filtered index.
    Boundary_df = Boundary_df.reset_index(drop=True)
    Boundary_dict[sample] = Boundary_df
    return Boundary_dict
def draw_insulation(ax,insu,chrs,start,end,color):
    """Plot an insulation-score line track from a 4-column BED-like file.

    `insu` is a tab-separated file (chrs/start/end/insu); `chrs` is kept
    for interface compatibility (no per-chromosome filtering here).
    """
    #df_insu=cut_insulation(insu,chrs,start,end)
    df_insu=pd.read_table(insu,sep='\t',names=['chrs','start','end','insu'])
    ax.tick_params(top='off',bottom='off',left='on',right='off')
    ax.plot(df_insu['start'],df_insu['insu'], color=color, linewidth=0.8, label="insulation")
    ax.set_xlim(start,end)
    ax.set_xticks([])
    ax.set_ylim(df_insu['insu'].min(),df_insu['insu'].max())
    #ax.set_yticks([df_insu['insu'].min(),df_insu['insu'].max()])
    # Bug fix (cleanup): the original set spine colors via a leftover loop
    # variable after the loop ended, recoloring 'bottom' twice.  Net effect
    # is unchanged: all four spines hidden (linewidth 0), left/top/bottom
    # colored black.
    for loc in ['left','top','bottom','right']:
        ax.spines[loc].set_linewidth(0)
    for loc in ['left','top','bottom']:
        ax.spines[loc].set_color('black')
def draw_SV(files,ax,chrom,start,end,sample,color,types):
    """Draw structural-variant intervals of one `types` value as unit-height
    bars across chrom:start-end on `ax`.

    `files` is a tab-separated table with (at least) 'types', 'chrs',
    'start' and 'end' columns; `sample` labels the track on the left.
    """
    markdf=pd.read_table(files,sep='\t')
    markdf=markdf[markdf['types']==types]
    markdf=markdf[markdf['chrs']==chrom]
    markdf=markdf[markdf['start']>start]
    markdf=markdf[markdf['end']<end]
    ax.tick_params(left='on',right='off',top='off',bottom='on')
    markdf['width'] = markdf['end'] - markdf['start']
    # Every interval is drawn with height 1 (presence/absence track).
    markdf['sign']=[1]*len(markdf)
    #vectorf = np.vectorize(np.float)
    #vectori = np.vectorize(np.int)
    #starts=list(markdf['start'])
    #hight=list(markdf['sign'])
    #width=(markdf['width'])
    ax.bar(x=list(markdf['start']),height=list(markdf['sign']),bottom=0, width = list(markdf['width']),color=color,linewidth=0,align='edge')
    ax.set_xlim([start,end])
    ax.set_ylim([0,1])
    xts = np.linspace(start,end,2)
    yts = np.linspace(0,1,2)
    xtkls = ['{:,}'.format(int(i)) for i in xts]
    ytkls = ['{:,}'.format(int(j)) for j in yts]
    ax.tick_params(direction='out',pad=1)
    ax.set_yticks([])
    #ax.set_yticklabels(ytkls,fontsize=5)
    ax.text(-0.11,0.0,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
    #ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
    ax.spines['bottom'].set_linewidth(0)
    ax.spines['left'].set_linewidth(0)
    ax.spines['right'].set_linewidth(0)
    ax.spines['top'].set_linewidth(0)
    # NOTE(review): `type` here is the Python *builtin*, not a parameter of
    # this function (the sibling draw_* functions take a `type` argument for
    # exactly this check).  The comparison is therefore always False and the
    # else branch always runs — looks like a dropped parameter; confirm
    # intent before changing, since callers may rely on the current output.
    if type =='bottom':
        ax.set_xticks(xts)
        ax.set_xticklabels(xtkls,fontsize=12)
        ax.spines['bottom'].set_linewidth(0.5)
        ax.spines['bottom'].set_color('k')
        ax.text(-0.11,-0.7,chrom,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
    else:
        ax.set_xticks([])
        ax.set_xticklabels('')
    # Drop the frame reference and nudge GC (many tracks per figure).
    markdf = pd.DataFrame()
    gc.collect()
def cut_insulation(insu,chrs,start,end):
    """Load the insulation track for one chromosome restricted to (start, end).

    `insu` is a text file listing one insulation-file path per line, where
    the chromosome name is the path's parent directory (".../chr1/file").
    Returns a DataFrame with chrs/start/end/insu/mid columns.
    """
    # Bug fix: the original left the list-file handle open; a context
    # manager guarantees it is closed.
    with open(insu) as fh:
        file_list=[line.strip() for line in fh]
    insu_list=[]
    for path in file_list:
        parts=path.split('/')
        # parts[-2] is the parent directory, i.e. the chromosome name.
        insu_list.append([parts[-2],path])
    list_df=pd.DataFrame(insu_list,columns=['chrs','insu'])
    list_df=list_df[list_df['chrs']==chrs]
    list_df=list_df.reset_index(drop=True)
    # comment='t' skips header lines such as UCSC "track ..." lines.
    df_insu=pd.read_table(list_df['insu'][0],sep='\t',names=['chrs','start','end','insu'],comment='t')
    df_insu['mid']=(df_insu['start']+df_insu['end'])/2
    df_insu=df_insu.fillna(0)
    df_insu=df_insu[(df_insu['start']>start)&(df_insu['end']<end)]
    return df_insu
def draw_AB(files,res,chrom,start,end,sample,ax):
    """Render an A/B compartment eigenvector as signed bars.

    Bins with eigen1 > 0 are drawn red (A compartment), the rest blue (B).
    `res` is accepted for interface compatibility but not used here.
    """
    track = pd.read_table(files,sep='\t',names=['chrom','start','end','eigen1'])
    track = track[track['chrom']==chrom]
    track = track.reset_index(drop=True)
    track = track[track['end']>=start]
    track = track[track['start']<=end]
    track = track.reset_index(drop=True)
    ax.tick_params(top='off',bottom='on',left='off',right='off')
    for side in ['left','right','top','bottom']:
        ax.spines[side].set_visible(False)
    track['width'] = track['end'] - track['start']
    #ax.axis([start, end, min,max])
    for row in track.itertuples(index=False):
        shade = '#E7605B' if row.eigen1 > 0 else '#3B679E'
        ax.bar(x=row.start, height=row.eigen1, bottom=0, width=row.width,
               color=shade, linewidth=0, align='edge')
    ax.set_ylim(-0.1,0.1)
    ax.set_ylabel(sample)
    ax.set_yticks([])
    ax.set_xticks([])
def Express_Swith(Epipath,chrom,start,end):
    """Load a 4-column tab-separated signal file and keep rows that lie
    entirely inside [start, end] on chromosome `chrom`."""
    signal = pd.read_table(Epipath,header=None,index_col=False,sep='\t')
    signal.columns = ['chrom','start','end','sign']
    in_window = (signal['chrom'] == chrom) \
        & (signal['start'] >= int(start)) \
        & (signal['end'] <= int(end))
    return signal[in_window].reset_index(drop=True)
def draw_epigenetic(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
    """Draw one epigenetic signal track (chrs/start/end/sign bedGraph-like
    file) as bars on `ax`, restricted to chrom:start-end.

    MaxYlim / mins: y-axis max/min; the *string* 'None' means autoscale to
    the data (values arrive as text from the caller).  type: 'bottom'
    draws the shared x axis (ticks, spine, chromosome label); any other
    value hides it.
    """
    markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
    markdf=markdf[markdf['chrs']==chrom]
    markdf=markdf[markdf['start']>start]
    markdf=markdf[markdf['end']<end]
    ax.tick_params(left='on',right='off',top='off',bottom='on')
    markdf['width'] = markdf['end'] - markdf['start']
    recs = ax.bar(x=list(markdf['start']),height=list(markdf['sign']),bottom=0, width = list(markdf['width']),color=color,linewidth=0,align='edge')
    if MaxYlim == 'None':
        ymaxlim = markdf['sign'].max()
        yminlim = markdf['sign'].min()
    else:
        ymaxlim = float(MaxYlim)
        yminlim = float(mins)
    ax.set_xlim([start,end])
    ax.set_ylim([yminlim,ymaxlim])
    xts = np.linspace(start,end,5)
    yts = np.linspace(yminlim,ymaxlim,2)
    xtkls = ['{:,}'.format(int(i)) for i in xts]
    # Unlike the sibling draw_* functions, y labels keep the float value.
    ytkls = ['{:,}'.format(float(j)) for j in yts]
    ax.tick_params(direction='out',pad=1)
    ax.set_yticks(yts)
    ax.set_yticklabels(ytkls,fontsize=5)
    ax.text(-0.11,0.4,sample,fontsize=6,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
    ax.spines['bottom'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(0)
    ax.spines['top'].set_linewidth(0)
    #ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
    # `type` intentionally shadows the builtin (project-wide convention).
    if type =='bottom':
        ax.set_xticks(xts)
        ax.set_xticklabels(xtkls,fontsize=8)
        ax.spines['bottom'].set_linewidth(0.5)
        ax.spines['bottom'].set_color('k')
        ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
    else:
        ax.set_xticks([])
        ax.set_xticklabels('')
    # Drop the frame reference and nudge GC (many tracks per figure).
    markdf = pd.DataFrame()
    gc.collect()
def draw_epigenetic2(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
    """Draw a smoothed, filled epigenetic signal track.

    Same inputs as draw_epigenetic, but the signal is smoothed with a
    cubic B-spline sampled at 1/8 of the number of bins and drawn as a
    filled curve rather than bars.

    MaxYlim / mins: y-axis max/min; the string 'None' means autoscale.
    type: 'bottom' draws the shared x axis; anything else hides it.
    """
    markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
    #print (markdf.head())
    markdf=markdf[markdf['chrs']==chrom]
    markdf=markdf[markdf['start']>start]
    markdf=markdf[markdf['end']<end]
    ax.tick_params(left='on',right='off',top='off',bottom='on')
    # Bug fix: this column was computed twice in a row; once is enough.
    markdf['width'] = markdf['end'] - markdf['start']
    # NOTE(review): make_interp_spline requires strictly increasing x — this
    # assumes the input file is position-sorted with unique starts; confirm.
    x = np.linspace(start,end,int(len(markdf)/8))
    a_BSpline=make_interp_spline(markdf['start'],markdf['sign'],k=3)
    y_new=a_BSpline(x)
    ax.plot(x, y_new, color=color,linewidth=2)
    ax.fill_between(x,y_new ,0,facecolor=color,linewidth=0,label=sample)
    if MaxYlim == 'None':
        ymaxlim = markdf['sign'].max()
        yminlim = markdf['sign'].min()
    else:
        ymaxlim = float(MaxYlim)
        yminlim = float(mins)
    ax.set_xlim([start,end])
    ax.set_ylim([yminlim,ymaxlim])
    xts = np.linspace(start,end,4)
    yts = np.linspace(yminlim,ymaxlim,2)
    xtkls = ['{:,}'.format(int(i)) for i in xts]
    ytkls = ['{:,}'.format(int(j)) for j in yts]
    ax.spines['bottom'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(0)
    ax.spines['top'].set_linewidth(0)
    ax.tick_params(top=False,right=False,width=1,colors='black',direction='out')
    ax.set_yticks(yts)
    ax.set_yticklabels(ytkls,fontsize=12)
    ax.text(-0.11,0.0,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
    #ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
    if type =='bottom':
        ax.set_xticks(xts)
        ax.set_xticklabels(xtkls,fontsize=12)
        ax.spines['bottom'].set_linewidth(0.5)
        ax.spines['bottom'].set_color('k')
        ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
    else:
        ax.set_xticks([])
        ax.set_xticklabels('')
    # Drop the frame reference and nudge GC (many tracks per figure).
    markdf = pd.DataFrame()
    gc.collect()
def draw_RNA(file,ax,chrom,start,end,sample,color,MaxYlim,type,mins):
    """Draw an RNA coverage track (chrs/start/end/sign file) as bars.

    MaxYlim / mins: y-axis max/min; the string 'None' means autoscale.
    type: 'bottom' draws the shared x axis; anything else hides it.
    """
    markdf=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
    #print (markdf.head())
    markdf=markdf[markdf['chrs']==chrom]
    markdf=markdf[markdf['start']>start]
    markdf=markdf[markdf['end']<end]
    ax.tick_params(left='on',right='off',top='off',bottom='on')
    markdf['width'] = markdf['end'] - markdf['start']
    # Bug fix: np.vectorize(np.float)/np.vectorize(np.int) crash on
    # NumPy >= 1.24 where the deprecated np.float/np.int aliases were
    # removed.  Series.astype(...).to_numpy() produces the same arrays.
    starts = markdf['start'].astype(int).to_numpy()
    hight = markdf['sign'].astype(float).to_numpy()
    width = markdf['width'].astype(int).to_numpy()
    ax.bar(x=starts,height=hight,bottom=0,width=width,color=color,linewidth=0,align='edge')
    if MaxYlim == 'None':
        ymaxlim = markdf['sign'].max()
        yminlim = markdf['sign'].min()
    else:
        ymaxlim = float(MaxYlim)
        yminlim = float(mins)
    ax.set_xlim([start,end])
    ax.set_ylim([yminlim,ymaxlim])
    xts = np.linspace(start,end,5)
    yts = np.linspace(yminlim,ymaxlim,2)
    xtkls = ['{:,}'.format(int(i)) for i in xts]
    ytkls = ['{:,}'.format(int(j)) for j in yts]
    ax.tick_params(direction='out',pad=1)
    ax.spines['bottom'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(0)
    ax.spines['top'].set_linewidth(0)
    ax.set_yticks(yts)
    ax.set_yticklabels(ytkls,fontsize=12)
    ax.text(-0.11,0.4,sample,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='vertical',transform=ax.transAxes)
    #ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
    if type =='bottom':
        ax.set_xticks(xts)
        ax.set_xticklabels(xtkls,fontsize=12)
        ax.spines['bottom'].set_linewidth(0.5)
        ax.spines['bottom'].set_color('k')
        ax.text(-0.11,-0.7,chrom,fontsize=12,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
    else:
        ax.set_xticks([])
        ax.set_xticklabels('')
    # Drop the frame reference and nudge GC (many tracks per figure).
    markdf = pd.DataFrame()
    gc.collect()
def Express_Swith(Epipath,chrs,start,end):
    """Load a 4-column tab-separated signal file and keep rows that lie
    entirely inside [start, end] on chromosome `chrs`.

    NOTE(review): this is a duplicate of the Express_Swith defined earlier
    in this file (identical logic; only the chrom/chrs parameter and
    column name differ).  Being defined later, this version shadows the
    earlier one at import time.
    """
    Expressdf = pd.read_table(Epipath,header=None,index_col=False,sep='\t')
    Expressdf.columns = ['chrs','start','end','sign']
    Expressdf = Expressdf[Expressdf['chrs']==chrs]
    Expressdf = Expressdf[Expressdf['start']>=int(start)]
    Expressdf = Expressdf[Expressdf['end']<=int(end)]
    Expressdf = Expressdf.reset_index(drop=True)
    return Expressdf
def draw_diff_epigenetic(file1,file2,ax,chrs,start,end,color,MaxYlim,MinYlim,type):
    """Plot the per-bin log2 ratio of two signal files as bars.

    Rows from the two files are matched by their 'start' coordinate
    (inner join), so only bins present in both files are drawn.
    MaxYlim / MinYlim: y limits; the string 'None' means autoscale.
    type: 'bottom' draws the shared x axis; anything else hides it.
    """
    df1=Express_Swith(file1,chrs,start,end)
    df2=Express_Swith(file2,chrs,start,end)
    markdf = pd.merge(df1,df2,on='start',how='inner')
    # log2(a) - log2(b) == log2(a/b); non-positive signals produce NaN/inf.
    markdf['sign'] = np.log2(markdf['sign_x']) - np.log2(markdf['sign_y'])
    markdf = markdf[['chrs_x','start','end_x','sign']]
    markdf.columns = ['chrs','start','end','sign']
    markdf = markdf.reset_index(drop=True)
    ax.tick_params(left='on',right='off',top='off',bottom='on')
    markdf['width'] = markdf['end'] - markdf['start']
    recs = ax.bar(markdf['start'],markdf['sign'],bottom=0, width = markdf['width'],color=color,linewidth=0)
    if MaxYlim == 'None':
        ymaxlim = markdf['sign'].max()
        yminlim = markdf['sign'].min()
    else:
        ymaxlim = float(MaxYlim)
        yminlim = float(MinYlim)
    ax.set_xlim([start,end])
    ax.set_ylim([yminlim,ymaxlim])
    xts = np.linspace(start,end,5)
    yts = np.linspace(yminlim,ymaxlim,2)
    xtkls = ['{:,}'.format(int(i)) for i in xts]
    ytkls = ['{:,}'.format(int(j)) for j in yts]
    ax.tick_params(direction='out',pad=1)
    ax.set_yticks(yts)
    ax.set_yticklabels(ytkls,fontsize=5)
    #ax.text(-0.11,0.4,sample,fontsize=6,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
    #ax.set_title("{}_{}_{}_{}".format(sample,chrom,start,end),fontsize=10)
    # Bug fix: the original zeroed the bottom spine *after* the if/else,
    # silently undoing the 0.5-width black bottom axis the 'bottom' branch
    # had just configured.  Spine defaults are now set before the branch so
    # the branch's settings win.
    ax.spines['bottom'].set_linewidth(0)
    ax.spines['left'].set_linewidth(1)
    ax.spines['top'].set_linewidth(0)
    ax.spines['right'].set_linewidth(0)
    if type =='bottom':
        ax.set_xticks(xts)
        ax.set_xticklabels(xtkls,fontsize=8)
        ax.spines['bottom'].set_linewidth(0.5)
        ax.spines['bottom'].set_color('k')
        ax.text(-0.11,-0.7,chrs,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
    else:
        ax.set_xticks([])
        ax.set_xticklabels('')
    # Drop the frame reference and nudge GC (many tracks per figure).
    markdf = pd.DataFrame()
    gc.collect()
def draw_bar(ax,file,chrom,start,end,max,min):
    """Signed bar track: positive bins red (#E7605B), the rest blue
    (#3B679E).  `max`/`min` are the fixed y-axis limits."""
    track=pd.read_table(file,sep='\t',names=['chrs','start','end','sign'])
    track=track[track['chrs']==chrom]
    track=track[track['start']>start]
    track=track[track['end']<end]
    track=track.reset_index(drop=True)
    ax.tick_params(top='off',bottom='on',left='off',right='off')
    for side in ['left','right','top']:
        ax.spines[side].set_visible(False)
    track['width']=track['end']-track['start']
    #ax.axis([start, end, min,max])
    for row in track.itertuples(index=False):
        shade = '#E7605B' if row.sign > 0 else '#3B679E'
        ax.bar(row.start, row.sign, bottom=0, width=row.width,
               color=shade, linewidth=0)
    ax.set_ylim(min,max)
    ax.set_yticks([])
    ax.set_xticks([])
def get_4C_data(matrix,tstart,tend,binsize,start,end):
    """Return the matrix column for the viewpoint bin, i.e. the virtual-4C
    profile anchored at `tstart`.

    The viewpoint bin index is (tstart - start) // binsize; `tend` is
    accepted for interface compatibility but not used.
    """
    print (binsize)
    viewpoint_bin = int((tstart - start) / int(binsize))
    print ('t', viewpoint_bin, 'matrix', len(matrix))
    return matrix.loc[:, [viewpoint_bin]]
from statsmodels.nonparametric.smoothers_lowess import lowess
def draw_4C_module(ax,df_list,chrs,start,end,color_list,ymin,ymax,sample_list):
    """Plot lowess-smoothed virtual-4C curves, one per sample, on `ax`.

    df_list: one DataFrame per sample with a 'sign' column; color_list and
    sample_list are parallel to df_list.  `chrs` is unused here.
    """
    ax.tick_params(top='off',bottom='off',left='on',right='off')
    for idx, curve in enumerate(df_list):
        xs = np.linspace(start, end, len(curve))
        # Smooth against the bin index (not genomic position), 5% window.
        smooth = pd.DataFrame(
            lowess(curve['sign'], np.arange(len(curve['sign'])), frac=0.05)[:, 1],
            index=curve.index, columns=['sign'])
        ax.plot(xs, list(smooth['sign']), color=color_list[idx], linewidth=2,
                label=sample_list[idx], alpha=0.3)
    #ax.legend(handles2, labels2)
    ax.set_xlim(start,end)
    ax.set_ylim(ymin,ymax)
    ax.set_yticks([ymin,ymax])
    ax.legend(loc='right',bbox_to_anchor=(1.05,0.3),handlelength=1,handleheight=0.618,fontsize=6,frameon=False)
    for side in ['left']:
        ax.spines[side].set_linewidth(0.6)
        ax.spines[side].set_color('gray')
    #ax.tick_params(top=False,right=False,width=1,colors='black',direction='out')
    ax.spines['bottom'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(0)
    ax.spines['top'].set_linewidth(0)
    ax.tick_params(top=False,right=False,bottom=False,width=1,colors='black',direction='out')
def draw_4C(ax,chrs,start,end,matrix_list,samples,binsize,tstart,tend,colors,ymax):
    """Build virtual-4C profiles (one per comma-separated sample) from raw
    contact matrices and hand them to draw_4C_module for plotting.

    samples / colors: comma-separated, parallel lists.  tstart/tend locate
    the viewpoint; ymax is the shared y-axis maximum (ymin is 0).
    """
    sample_list=samples.split(',')
    bed_list=[]
    for sample in sample_list:
        matrix,min=extract_raw_matrix(matrix_list,sample,chrs,start,end,binsize,'raw')
        datalist=get_4C_data(matrix,int(tstart),int(tend),binsize,int(start),int(end))
        bed_list.append(datalist)
    #starts=[]
    #for i in range(start,end,int(binsize)):
    #starts.append(i)
    df_list=[]
    for i in bed_list:
        # NOTE(review): this fills the 'start' column with the literal
        # string 'start' repeated; the commented-out code above suggests
        # genomic starts were intended.  draw_4C_module only reads 'sign'
        # and the row count, so the column is effectively dead — confirm
        # before changing.
        df=pd.DataFrame({'start':['start']*len(i)})
        #df['chrs']=[chrs]*len(df)
        #df['end']=df['start']+int(binsize)
        df['sign']=i
        df_list.append(df)
    color_list=colors.split(',')
    draw_4C_module(ax,df_list,chrs,start,end,color_list,0,int(ymax),sample_list)
def draw_compartment(ax,sample,compmergedf,chrom,start,end,type='top'):
    """Draw one compartment track from `compmergedf` (indexed by 'start').

    For the special sample name 'Merge' the column is expected to hold the
    coded values 1/2/-1/-2 (A, A->B switch, B, B->A switch) drawn as four
    stacked color bands with a legend; for any other sample the column is
    a continuous eigenvector filled red above 0 and blue below.
    type: 'bottom' draws the shared x axis; anything else hides it.
    """
    ax.tick_params(top='off',bottom='on',left='off',right='off')
    for loc in ['left','right','top']:
        ax.spines[loc].set_visible(False)
    mat = compmergedf[sample]
    #print(mat)
    s = compmergedf['start']
    colors = ['red','blue','#458B00','#B9BBF9','black']
    ax.set_xlim(start,end)
    if sample == 'Merge':
        ax.fill_between(s, 0, 0.25,where=mat==1, facecolor=colors[0],linewidth=0,label='CompartmentA')
        ax.fill_between(s, 0.25, 0.5,where=mat==2, facecolor=colors[2],linewidth=0,label='A Switch B')
        ax.fill_between(s, 0, -0.25,where=mat==-1,facecolor=colors[1],linewidth=0,label='CompartmentB')
        ax.fill_between(s, -0.25,-0.5,where=mat==-2,facecolor=colors[3],linewidth=0,label='B Switch A')
        legend = ax.legend(bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0.,prop={'size':4},ncol=1)
        legend.get_frame().set_facecolor('white')
    else:
        ax.fill_between(s, 0, mat,where=mat>= 0, facecolor=colors[0],linewidth=0,label='CompartmentA')
        ax.fill_between(s, 0, mat,where=mat< 0, facecolor=colors[1],linewidth=0,label='CompartmentB')
        #ax.text(max(mat)/4,-5,'A');ax.text(max(mat)/2,-5,'B')
    ax.text(-0.11,0.4,sample,fontsize=6,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
    # Small padding keeps the filled areas away from the axis edges.
    ymax = mat.max()+0.005
    ymin = mat.min()-0.005
    xts = np.linspace(start,end,5)
    xtkls = ['{:,}'.format(int(i)) for i in xts]
    ax.set_ylim(ymin,ymax)
    ax.set_yticks([])
    #ax.set_ylabel(sample,rotation='vertical',fontsize='small')
    #compmergedf = pd.DataFrame()
    if type =='bottom':
        ax.set_xticks(xts)
        ax.set_xticklabels(xtkls,fontsize=8)
        ax.spines['bottom'].set_linewidth(1)
        ax.spines['bottom'].set_color('k')
        ax.text(-0.11,-0.7,chrom,fontsize=8,color='k',horizontalalignment='left',verticalalignment='bottom',rotation='horizontal',transform=ax.transAxes)
    else:
        ax.set_xticks([])
        ax.spines['bottom'].set_visible(False)
    gc.collect()
def colorbar(ax,im,vmin,vmax):
    """Attach a small vertical colorbar inset to `ax` for image `im`,
    ticked at ceil(vmin) and int(vmax); returns the Colorbar object."""
    inset = inset_axes(ax, width=0.1, height=0.6, loc=3,
                       bbox_to_anchor=(0, 0.2, 0.5, 1),
                       bbox_transform=ax.transAxes, borderpad=0)
    print (vmin,vmax)
    cbar = plt.colorbar(im, cax=inset, orientation='vertical',
                        ticks=[math.ceil(vmin), int(vmax)])
    inset.tick_params(left='on',right='off',top='off',bottom='off',labelsize=12)
    inset.yaxis.set_ticks_position('left')
    return cbar
import math
from matplotlib import pyplot as plt
plt.style.use('seaborn-colorblind')
pd.set_option('display.precision',2)
from scipy import sparse
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
#from annotation_GenePred import Gene_annotation
def cut_mat(mat,start,end,resolution,min):
    """Crop a contact matrix to the bin window covering [start, end].

    `start`/`end` are genomic coordinates, `resolution` the bin size and
    `min` the index of the matrix's first bin.  Returns the cropped
    matrix (NaNs replaced by 0) plus the bin-relative start/end indices.
    """
    first_bin = int(int(int(start) / resolution) - min)
    last_bin = int(math.ceil(int(end) / resolution) - min)
    window = mat.fillna(0).iloc[first_bin:last_bin + 1, first_bin:last_bin + 1]
    gc.collect()
    return window, first_bin, last_bin
def self_zscore(df):
    """Z-score an entire DataFrame against the global mean and (sample)
    standard deviation of all of its values."""
    stats = pd.DataFrame(np.ravel(df)).describe(include=[np.number])
    mu = stats.loc['mean', 0]
    sd = stats.loc['std', 0]
    return (df - mu) / sd
from scipy.ndimage import gaussian_filter
def get_matrix(mat_path,binsize,start,end):
    """Load a 3-column sparse contact list (bin1, bin2, contacts), build a
    dense symmetric matrix, and crop it to [start, end].

    Returns (matrix DataFrame, first-bin index) — the index is consumed by
    callers to translate genomic coordinates to matrix positions.
    """
    binsize=int(binsize)
    mat=pd.read_table(mat_path,names=['b1','b2','contacts'])
    # Keep a 3 Mb margin around the window so edge bins stay complete
    # before the final cut_mat() crop.
    mat=mat[(mat['b1']>=start-3000000) & (mat['b2']>=start-3000000)]
    mat=mat[(mat['b1']<=end+3000000) & (mat['b2']<=end+3000000)]
    #-----------xlim genome start genome end-------------------------------
    min=mat['b1'].min()
    max=mat['b1'].max()
    min=math.ceil(int(min)/binsize)*binsize
    max=int(int(max)/binsize)*binsize
    N=int(max/binsize)-math.ceil(min/binsize)+1
    # NOTE(review): (x-min-1)/binsize yields float bin ids which
    # sparse.coo_matrix truncates to ints; confirm the extra -1 offset is
    # intended (get_raw_matrix below uses (x-min)/binsize without it).
    mat['b1']=mat['b1'].apply(lambda x: (x-min-1)/binsize)
    mat['b2']=mat['b2'].apply(lambda x: (x-min-1)/binsize)
    #-----------coo matrix-----------------------------------------------
    counts=sparse.coo_matrix((mat['contacts'],(mat['b1'],mat['b2'])),shape=(N, N),dtype=float).toarray()
    diag_matrix=np.diag(np.diag(counts))
    # Mirror the upper triangle to make the map symmetric.
    counts=counts.T + counts
    #counts=counts-diag_matrix
    # NOTE(review): the diagonal is subtracted twice, driving it negative
    # before fill_diagonal() zeroes it below — confirm this is deliberate.
    counts=counts-diag_matrix-diag_matrix
    df=pd.DataFrame(counts)
    #----------zscore minus ---------------------------------
    #df=self_zscore(df)
    min=int(min/binsize)
    df,min,max=cut_mat(df,start,end,binsize,min)
    np.fill_diagonal(df.values, 0)
    return df,min
def get_matrix_df(lists,sample,chrs):
    """Look up the matrix file path for (sample, chromosome) in a
    3-column tab-separated catalog file (sample, chrs, matrix)."""
    catalog = pd.read_table(lists,sep='\t',names=['sample','chrs','matrix'])
    match = catalog[(catalog['sample'] == sample) & (catalog['chrs'] == chrs)]
    match = match.reset_index(drop=True)
    return match['matrix'][0]
def extract_matrix(lists,sample1,sample2,chrs,start,end,binsize):
    """Build the Gaussian-smoothed difference map (sample1 - sample2) for
    one region; returns (delta ndarray, first-bin index)."""
    path1 = get_matrix_df(lists,sample1,chrs)
    path2 = get_matrix_df(lists,sample2,chrs)
    map1,min = get_matrix(path1,binsize,start,end)
    map2,min = get_matrix(path2,binsize,start,end)
    # Light smoothing suppresses single-bin speckle in the difference map.
    delta = gaussian_filter(map1 - map2, sigma=0.5)
    #start=(start - min)/binsize
    #end= (end -min)/binsize
    #delta=delta.loc[start:end,start:end]
    return delta,min
class MidpointNormalize(mpl.colors.Normalize):
    """Colormap normalizer that maps `midpoint` to the 0.5 position of the
    colormap, interpolating linearly on either side; NaNs are masked."""
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        mpl.colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        anchors_x = [self.vmin, self.midpoint, self.vmax]
        anchors_y = [0, 0.5, 1]
        interpolated = np.interp(value, anchors_x, anchors_y)
        return np.ma.masked_array(interpolated, np.isnan(value))
def heat_map(ax,matrix,title,start,end):
    """Render a difference matrix as a 45°-rotated (triangular) heat map
    with a diverging colormap centred on 0, plus an inset colorbar."""
    for loc in ['left','right','top']:
        ax.spines[loc].set_visible(False)
    matrix = Triangular(matrix)
    triArr = np.array(matrix)
    triArr_ravel = pd.DataFrame(triArr.ravel())
    triArr_ravel = triArr_ravel[triArr_ravel!= -np.inf]
    triArr_ravel = triArr_ravel[triArr_ravel!=np.inf]
    #vmax=np.nanpercentile(triArr_ravel,97)
    #vmin=np.nanpercentile(triArr_ravel,0)
    # String assignment simply drops references to the intermediates.
    triMat = ''
    triArr_ravel = ''
    tmp_trimat = ''
    matrix= triArr
    my_colors=['#5A1216','#A61B29','#F0A1A8','#E3B4B8','#FFFEF8','#93B5CF','#2775B6','#144A74','#101F30']
    colormap = mpl.colors.LinearSegmentedColormap.from_list('cmap',my_colors[::-1],500)
    pp = matrix[matrix>0]
    pm = matrix[matrix<0]
    # NOTE(review): vmin/vmax come from percentiles of the *positive*
    # values only and feed just the colorbar ticks; the image itself is
    # normalised to the fixed range [-2, 2] below, and `pm` is unused.
    # Confirm this mismatch is intended.
    vmax = np.nanpercentile(pp, 99)
    vmin = np.nanpercentile(pp, 1)
    im=ax.imshow(matrix,cmap=colormap,interpolation="nearest",aspect='auto',norm=MidpointNormalize(midpoint=0,vmin=-2, vmax=2),origin='lower')
    # NaN cells (outside the triangle) render white.
    colormap.set_bad('white')
    ax.set_title(title,y=0.5,x=0,fontsize=12)
    ax.tick_params(direction='out',pad=5)
    ax.spines['bottom'].set_linewidth(0)
    ax.spines['left'].set_linewidth(0)
    ax.spines['right'].set_linewidth(0)
    ax.spines['top'].set_linewidth(0)
    ax.tick_params(top=False,right=False,left=False,bottom=False,width=0,colors='black',direction='out')
    ax.set_yticks([])
    n=len(matrix)
    #ax.set_xticks([0,n*2])
    ax.set_xticks([])
    ax.set_xticklabels([str(start),str(end)])
    cbar=colorbar(ax,im,vmin,vmax)
def get_raw_matrix(mat_path,binsize,start,end,ice):
    """Load a 3-column sparse contact list and return a dense symmetric
    matrix for [start, end], optionally ICE-normalised.

    ice: the string 'ice' applies ICE normalisation (iced package); any
    other value leaves raw counts.  Returns (matrix DataFrame, first bin
    coordinate rounded to the bin grid).
    """
    mat=pd.read_table(mat_path,names=['frag1','frag2','contacts'])
    mat=mat[(mat['frag1']>=start) & (mat['frag2']>=start)]
    mat=mat[(mat['frag1']<=end) & (mat['frag2']<=end)]
    #-----------xlim genome start genome end-------------------------------
    min=mat['frag1'].min()
    max=mat['frag1'].max()
    binsize=int(binsize)
    # Snap observed min/max coordinates onto the bin grid.
    min=math.ceil(int(min)/binsize)*binsize
    max=int(int(max)/binsize)*binsize
    N=int(max/binsize)-math.ceil(min/binsize)+1
    #------------------tranform matrix ----------------------------------
    # NOTE(review): these bin ids are floats; sparse.coo_matrix truncates
    # them to ints when building the matrix.
    mat['b1']=mat['frag1'].apply(lambda x: (x-min)/binsize)
    mat['b2']=mat['frag2'].apply(lambda x: (x-min)/binsize)
    #-----------coo matrix-----------------------------------------------
    counts=sparse.coo_matrix((mat['contacts'],(mat['b1'],mat['b2'])),shape=(N, N),dtype=float).toarray()
    diag_matrix=np.diag(np.diag(counts))
    # Mirror the upper triangle, then remove the doubled diagonal.
    counts=counts.T + counts
    counts=counts-diag_matrix
    if ice=='ice':
        counts = normalization.ICE_normalization(counts)
    else:
        counts = counts
    df=pd.DataFrame(counts)
    #----------zscore minus ---------------------------------
    np.fill_diagonal(df.values, 0)
    return df,min
def extract_raw_matrix(lists,sample,chrs,start,end,binsize,ice):
    """Resolve the matrix path for (sample, chrs) via the catalog file and
    load it as a dense contact map; returns (matrix, first-bin coord)."""
    path = get_matrix_df(lists,sample,chrs)
    dense,min = get_raw_matrix(path,binsize,start,end,ice)
    return dense,min
def Triangular(mat):
    """Rotate a square matrix 45° for triangular heat-map display.

    Row i of the result holds the i-th upper diagonal with each value
    doubled (pixel-pairing), centred in a row of width 2*len(mat);
    cells outside the triangle are NaN.
    """
    arr = np.asarray(mat)
    size = len(arr)
    rotated = np.full([size, size * 2], np.nan)
    for offset in range(size):
        doubled = np.diag(arr, offset).repeat(2)
        rotated[offset, offset:(size * 2 - offset)] = doubled
    gc.collect()
    return rotated
def draw_delta(ax,chrs,start,end,matrix_list,samples,binsize):
    """Plot the difference heat map between the first two comma-separated
    sample names in `samples`."""
    pair = samples.split(',')
    first, second = pair[0], pair[1]
    delta,min = extract_matrix(matrix_list,first,second,chrs,start,end,binsize)
    heat_map(ax,delta,'{}-\n{}'.format(first,second),start,end)
    #return datalist
def rawheat_map(ax,matrix,title,start,end):
    """Render a raw contact matrix as a 45°-rotated heat map, color range
    clipped to the 0th-97th percentile, with an inset colorbar."""
    #fig = plt.figure(figsize=(6,6))
    #ax = fig.add_axes([0.15,0.15,0.7,0.7])
    for loc in ['left','right','top']:
        ax.spines[loc].set_visible(False)
    matrix = Triangular(matrix)
    triArr = np.array(matrix)
    triArr_ravel = pd.DataFrame(triArr.ravel())
    triArr_ravel = triArr_ravel[triArr_ravel!= -np.inf]
    triArr_ravel = triArr_ravel[triArr_ravel!=np.inf]
    # Clip the color scale at the 97th percentile so a few very strong
    # contacts do not wash out the rest of the map.
    vmax=np.nanpercentile(triArr_ravel,97)
    vmin=np.nanpercentile(triArr_ravel,0)
    # String assignment simply drops references to the intermediates.
    triMat = ''
    triArr_ravel = ''
    tmp_trimat = ''
    matrix= triArr
    my_colors=['#5A1216','#C02C38','#EE3F4D','#F07C82','#F1FB00','#2AFFBD','#0000A4']
    colormap = mpl.colors.LinearSegmentedColormap.from_list('cmap',my_colors[::-1],500)
    # NaN cells (outside the triangle) render white.
    colormap.set_bad('white')
    im=ax.imshow(matrix,cmap=colormap,clim=(vmin,vmax),interpolation="nearest",aspect='auto',origin='lower')
    ax.set_title(title,y=0.5,x=0,fontsize=16)
    ax.tick_params(direction='out',pad=5)
    ax.spines['bottom'].set_linewidth(0)
    ax.spines['left'].set_linewidth(0)
    ax.spines['right'].set_linewidth(0)
    ax.spines['top'].set_linewidth(0)
    ax.tick_params(top=False,right=False,left=False,bottom=False,width=0,colors='black',direction='out')
    #ticks=np.linspace(0,length*2,3)
    n=len(matrix)
    #ax.set_xticks([0,n*2])
    ax.set_xticks([])
    #ax.set_xticklabels([str(start),str(end)])
    ax.set_yticks([])
    #ax.set_xticklabels(sampleLst)
    #vmin=matrix.stack().min()
    #vmax=matrix.stack().max()
    print (ax)
    cbar=colorbar(ax,im,vmin,vmax)
    #fig.savefig('{}.pdf'.format(outfile))
def draw_matrix(ax,chrs,start,end,matrix_list,sample,binsize,ice):
    """Load, lightly smooth, and render one sample's raw contact map."""
    dense,min = extract_raw_matrix(matrix_list,sample,chrs,start,end,binsize,ice)
    smoothed = gaussian_filter(dense, sigma=0.5)
    rawheat_map(ax,smoothed,sample,start,end)
    #return
def draw_compare_border(sample,ax,files,chrs,start,end,color):
    """Draw TAD borders as thin, full-height ticks across the region.

    Each border is drawn with a fixed width of 1/198 of the window so the
    ticks stay visible at any zoom level.  `sample` is currently unused
    for labelling.
    """
    borders=pd.read_table(files,sep='\t',names=['chrs','start','end'])
    borders=borders[borders['chrs']==chrs]
    borders=borders[borders['start']>start]
    borders=borders[borders['end']<end]
    borders['sign']=1
    borders['width']=(end-start)/198
    title = sample
    ax.axis([start, end, 0,1])
    ax.bar(x=list(borders['start']),height=list(borders['sign']),bottom=0,width=list(borders['width']),color=color,align='edge')
    #ax.bar(df['start'],df['sign'],bottom=0, width = df['width'],color=color,linewidth=0)
    for side in ['bottom','left','right','top']:
        ax.spines[side].set_linewidth(0)
    ax.set_xticks([])
    ax.set_yticks([])
def draw_CDB(sample,ax,files,chrs,start,end):
    """Draw contact-domain boundaries colour-coded by their 'level' column
    (level 0 in dark green, level 1 in light orange)."""
    cdb=pd.read_table(files,sep='\t')
    cdb=cdb[cdb['chrs']==chrs]
    cdb=cdb[cdb['start']>start]
    cdb=cdb[cdb['end']<end]
    cdb['sign']=[1]*len(cdb)
    # Fixed tick width: 1/198 of the window keeps ticks visible when zoomed.
    cdb['width']=(end-start)/198
    ax.axis([start, end, 0,1])
    for level, shade in ((0, '#5A685B'), (1, '#FEC79E')):
        sub = cdb[cdb['level']==level].reset_index(drop=True)
        sub = sub.loc[:,['chrs','start','end','sign','width','level']]
        ax.bar(sub['start'],sub['sign'],bottom=0, width = sub['width'],color=shade,linewidth=0)
    ax.set_xticks([])
    ax.set_yticks([])
def gene_match(geneinfo):
    """Pull the gene_name value out of a GTF attribute string.

    Example input: 'gene_id "ENSRNOG00000046319"; gene_name "Vom2r3"; ...'
    Returns '' when no gene_name attribute is present.
    """
    hit = re.search("gene_name \"(.+?)\";", geneinfo)
    return hit.group(1) if hit else ""
def transcript_match(geneinfo):
    """Pull the transcript_id value out of a GTF attribute string
    (e.g. 'transcript_id "ENSRNOT00000044187";'); '' when absent."""
    hit = re.search("transcript_id \"(.+?)\";", geneinfo)
    return hit.group(1) if hit else ''
def exon_match(geneinfo):
    """Pull exon_number from a GTF attribute string as an int;
    defaults to 1 when the attribute is absent."""
    hit = re.search("exon_number \"(.+?)\";", geneinfo)
    return int(hit.group(1)) if hit else 1
def gene_biotype_match(geneinfo):
    """Pull the gene_biotype value out of a GTF attribute string
    (e.g. 'gene_biotype "processed_transcript";'); '' when absent."""
    hit = re.search("gene_biotype \"(.+?)\";", geneinfo)
    return hit.group(1) if hit else ''
def filter_gtffile(gtffile,chr,start,end,Type='all'):
    """Load a GTF file and keep exon/3'UTR/5'UTR records fully inside
    chr:start-end, annotated with parsed attribute columns.

    Type: 'all' keeps every biotype; otherwise a space-separated list of
    gene_biotype values to keep.  Adds columns: mid, gene_desc,
    transcript_desc, exon_num, gene_type.
    """
    # Columns 0,2,3,4,6,8 of a GTF: seqname, feature, start, end, strand,
    # attributes ('feature' is stored as 'transcript_ID' here).
    gtf_bed = pd.read_table(gtffile,usecols=[0,2,3,4,6,8],names=['chr','transcript_ID','start','end','orient','desc'])
    gtf_bed = gtf_bed[(gtf_bed['chr']==chr) & ((gtf_bed['transcript_ID']=='exon')|(gtf_bed['transcript_ID']=='three_prime_utr') | (gtf_bed['transcript_ID']=='five_prime_utr'))]
    gtf_bed = gtf_bed.reset_index(drop=True)
    start =int(start)
    end = int(end)
    # Coerce coordinates: malformed rows become NaN and drop out below.
    gtf_bed['start'] = pd.to_numeric(gtf_bed.start, errors='coerce' )
    gtf_bed['end'] = pd.to_numeric(gtf_bed.end, errors='coerce' )
    gtf_bed = gtf_bed[(gtf_bed['start']>start) & (gtf_bed['end']<end)]
    gtf_bed = gtf_bed.reset_index(drop=True)
    gtf_bed['mid'] = (gtf_bed['start'] + gtf_bed['end'])/2
    gtf_bed['gene_desc'] = gtf_bed['desc'].apply(gene_match)
    gtf_bed['transcript_desc'] = gtf_bed['desc'].apply(transcript_match)
    gtf_bed['exon_num'] = gtf_bed['desc'].apply(exon_match)
    gtf_bed['gene_type'] = gtf_bed['desc'].apply(gene_biotype_match)
    if Type == 'all':
        gtf_bed = gtf_bed
    else:
        geneType= Type.split(' ')
        gtf_bed = gtf_bed[gtf_bed['gene_type'].isin(geneType)]
    #gtf_bed.loc[(gtf_bed['transcript_desc'=='three_prime_utr'),'transcript_ID']='3UTR'
    #gtf_bed.loc[(gtf_bed['transcript_desc'=='five_prime_utr'),'transcript_ID']='3UTR'
    return gtf_bed
def draw_genes(ax,gtf_bed,start,end):
    """Draw a gene-model track: one thin grey line per gene spanning its
    exons, with CDS exons as black boxes, 3'UTRs grey and 5'UTRs blue,
    plus the gene name above or below (alternating to reduce overlap).

    `gtf_bed` is the output of filter_gtffile(); `start`/`end` are the
    genomic x-axis limits (labels rendered in Mb).
    """
    ax.tick_params(left='off',top='off',bottom='on',right='off')
    ax.spines['bottom'].set_color('k')
    ax.spines['bottom'].set_linewidth(0.05)
    for loc in ['top','left','right']:
        ax.spines[loc].set_visible(False)
    gtf_bed = gtf_bed.sort_values(['mid'],ascending=[True])
    #print(gtf_bed['gene_desc'].tolist())
    exon_bed = gtf_bed[gtf_bed['transcript_ID']=='exon']
    UTR3_bed = gtf_bed[gtf_bed['transcript_ID']=='three_prime_utr']
    UTR5_bed = gtf_bed[gtf_bed['transcript_ID']=='five_prime_utr']
    k = 0
    # Collect unique gene names in midpoint order.
    genelist = []
    for i in gtf_bed.index:
        if (gtf_bed.loc[i,'gene_desc']!=None) and (gtf_bed.loc[i,'gene_desc'] not in genelist):
            genelist.append(gtf_bed.loc[i,'gene_desc'])
    cutbed = pd.DataFrame()
    # Debug output; raises KeyError if row 0 was filtered out upstream.
    print (gtf_bed['desc'][0])
    for name in genelist:
        #print(name)
        tmpdf = copy.deepcopy(gtf_bed)
        cutbed = tmpdf[tmpdf['gene_desc']==name]
        exons = np.array(cutbed[['start','end']])
        Sample_UTR3_bed = UTR3_bed[UTR3_bed['gene_desc']==name]
        Sample_UTR5_bed = UTR5_bed[UTR5_bed['gene_desc']==name]
        Sample_CDS_bed = exon_bed[exon_bed['gene_desc']==name]
        utr3 = np.array(Sample_UTR3_bed[['start','end']])
        utr5 = np.array(Sample_UTR5_bed[['start','end']])
        cds = np.array(Sample_CDS_bed[['start','end']])
        gene_start = np.min(exons.ravel())
        gene_end = np.max(exons.ravel())#143084
        # Lay genes out on `num` vertical lanes; `m` cycles through lanes,
        # `rand` alternates the label above/below the gene line.
        genenum = len(genelist)
        num = math.ceil(genenum/10)+2
        m = math.fmod(k,num)
        k = k + 1
        rand = math.ceil(k/num)%2
        # NOTE: shadows the builtin range() for the rest of this iteration;
        # it is the per-lane vertical spacing, not an iterator.
        range = 1/num
        rec=mpatches.Rectangle((gene_start,0.9475 - m*range),abs(gene_end-gene_start),0.005,color='grey',alpha=0.5)
        ax.add_patch(rec)
        if rand == 1:
            ax.text(((gene_start+gene_end)/2),(0.99 - m*range),"{}".format(name),fontsize=9,horizontalalignment='center',verticalalignment='center',rotation='horizontal',color="black")
        else:
            ax.text(((gene_start+gene_end)/2),(0.905 - m*range),"{}".format(name),fontsize=9,horizontalalignment='center',verticalalignment='center',rotation='horizontal',color="black")
        for exon in cds:
            rec0 = mpatches.Rectangle((exon[0], 0.9375 - m*range),(exon[1]-exon[0]),0.025,color="black")
            ax.add_patch(rec0)
        for i in utr3:
            rec1 = mpatches.Rectangle((i[0],0.94375 - m*range ),(i[1]-i[0]),0.0125 ,color="grey",alpha=0.8)
            ax.add_patch(rec1)
        for j in utr5:
            rec2 = mpatches.Rectangle((j[0],0.94375 - m*range ),(j[1]-j[0]),0.0125 ,color="blue",alpha=0.8)
            ax.add_patch(rec2)
    xmin = start
    xmax = end
    ax.set_xlim(xmin,xmax)
    xtick = np.linspace(start,end,4)
    # Format x labels in megabases with two decimals.
    xtlabs=[]
    for i in xtick:
        i=i/1000000
        i='%.2f' % i
        xtlabs.append(str(i))
    #xtlabs = ["{:,}".format(int(x)/1000000) for x in xtick]
    ax.set_ylim([0,1])
    ax.set_xticks(xtick)
    ax.set_xticklabels(xtlabs,fontsize=15)
    ax.set_yticks([])
def pairwise(file, ax, chrs, start, end, label, color):
    """Draw pairwise interactions as half-ellipse arcs on ``ax``.

    :param file:  tab-separated file with columns chr/loci1/loci2/observed/expected
    :param ax:    matplotlib axes to draw on
    :param chrs:  chromosome name used to select rows
    :param start: left coordinate of the plotted window
    :param end:   right coordinate of the plotted window
    :param label: y-axis label for the track
    :param color: arc line color
    """
    # Parameter values for the upper half of an ellipse (0..pi).
    t = np.linspace(0, 1 * pi, 100)
    df = pd.read_table(file, sep='\t',
                       names=['chr', 'loci1', 'loci2', 'observed', 'expected'])
    df_new = df[df['chr'] == chrs]
    df_new = df_new[df_new['loci1'] > int(start)]
    df_new = df_new[df_new['loci2'] < int(end)]
    # Tallest arc plus head-room determines the y range.  ``.max()`` on an
    # empty selection yields NaN, so guard BEFORE handing the limit to
    # matplotlib -- the original code checked only after ax.axis() had
    # already received NaN.  (Also avoids shadowing the builtin ``max``.)
    top = (df_new['loci2'] - df_new['loci1']).max() + 100000
    if math.isnan(top):
        top = 100
    ax.axis([start, end, 0, top])
    for i in range(len(df['chr'])):
        if str(df['chr'][i]) == chrs and df['loci1'][i] > int(start) and df['loci2'][i] < int(end):
            u = (df['loci1'][i] + df['loci2'][i]) / 2   # arc center (midpoint)
            v = 0
            a = (df['loci2'][i] - df['loci1'][i]) / 2   # semi-axis along x
            b = df['loci2'][i] - df['loci1'][i]         # arc height
            ax.plot(u + a * np.cos(t), v + b * np.sin(t), color=color, linewidth=1)
    ax.set_ylabel(label, fontsize=20)
    xticks = []
    print(start, end)
    # Guard against a zero step when the window is narrower than 5 bp,
    # which would make range() raise ValueError.
    step = max(1, int((end - start) / 5))
    for i in range(start, end, step):
        xticks.append(i)
    print(xticks)
    xticks.pop(0)
    yticks = [0, top]
    ax.set_yticks(yticks)
    ax.set_yticklabels(yticks, fontsize=3)
    ax.set_xticks(xticks)
    ax.set_xticklabels(xticks, fontsize=3)
    ax.spines['bottom'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(0)
    ax.spines['top'].set_linewidth(0)
def draw_line(file, ax, chrom, start, end, sample, color, MaxYlim, type, mins):
    """Draw a bedgraph-like signal track as outlined bars on ``ax``.

    :param file:    tab-separated file with columns chrs/start/end/sign
    :param ax:      matplotlib axes to draw on
    :param chrom:   chromosome name used to filter rows
    :param start:   left coordinate of the plotted window
    :param end:     right coordinate of the plotted window
    :param sample:  label written to the left of the track
    :param color:   bar edge color
    :param MaxYlim: y maximum as a string, or 'None' to use the data range
    :param type:    'bottom' additionally draws x tick labels and the
                    chromosome name below the track
    :param mins:    y minimum (used only when MaxYlim != 'None')
    """
    markdf = pd.read_table(file, sep='\t', names=['chrs', 'start', 'end', 'sign'])
    markdf = markdf[markdf['chrs'] == chrom]
    markdf = markdf[markdf['start'] > start]
    markdf = markdf[markdf['end'] < end]
    # Booleans instead of the deprecated 'on'/'off' strings, consistent with
    # the tick_params call further down in this function.
    ax.tick_params(left=True, right=False, top=False, bottom=True)
    markdf['width'] = markdf['end'] - markdf['start']
    # np.float / np.int were removed in NumPy >= 1.24; the former
    # np.vectorize(np.float) / np.vectorize(np.int) calls crash there.
    # Convert through pandas instead -- same values, supported API.
    starts = markdf['start'].to_numpy(dtype=int)
    hight = markdf['sign'].to_numpy(dtype=float)
    width = markdf['width'].to_numpy(dtype=int)
    ax.bar(x=starts, height=hight, bottom=0, width=width, color=color,
           linewidth=0.1, align='edge', fill=False, edgecolor=color)
    if MaxYlim == 'None':
        # Fall back to the observed signal range.
        ymaxlim = markdf['sign'].max()
        yminlim = markdf['sign'].min()
    else:
        ymaxlim = float(MaxYlim)
        yminlim = float(mins)
    ax.set_xlim([start, end])
    ax.set_ylim([yminlim, ymaxlim])
    xts = np.linspace(start, end, 5)
    yts = np.linspace(yminlim, ymaxlim, 2)
    xtkls = ['{:,}'.format(int(i)) for i in xts]
    ytkls = ['{:,}'.format(float(j)) for j in yts]
    ax.tick_params(direction='out', pad=1)
    ax.set_yticks(yts)
    ax.set_yticklabels(ytkls, fontsize=5)
    ax.text(-0.11, 0.4, sample, fontsize=6, color='k',
            horizontalalignment='left', verticalalignment='bottom',
            rotation='horizontal', transform=ax.transAxes)
    ax.spines['bottom'].set_linewidth(1)
    ax.spines['left'].set_linewidth(1)
    ax.spines['right'].set_linewidth(0)
    ax.spines['top'].set_linewidth(0)
    ax.tick_params(top=False, right=False, width=1, colors='black', direction='out')
    if type == 'bottom':
        ax.set_xticks(xts)
        ax.set_xticklabels(xtkls, fontsize=8)
        ax.spines['bottom'].set_linewidth(0.5)
        ax.spines['bottom'].set_color('k')
        ax.text(-0.11, -0.7, chrom, fontsize=8, color='k',
                horizontalalignment='left', verticalalignment='bottom',
                rotation='horizontal', transform=ax.transAxes)
    else:
        ax.set_xticks([])
        ax.set_xticklabels('')
    # Release the (potentially large) frame before forcing a collection.
    markdf = pd.DataFrame()
    gc.collect()
def main():
    # CLI entry point: parse a tab-separated track-list file and render every
    # track stacked top-to-bottom on a single matplotlib figure.
    parser=argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,epilog='author:\t{0}\nmail:\t{1}'.format(__author__,__mail__))
    parser.add_argument('-fl','--file_list',dest='file_list',type=str,required=True)
    parser.add_argument('-u','--uppercent',help='up_percent : 85',dest='uppercent',type=str,default='85')
    parser.add_argument('-d','--downpercent',help='down_percent :5',dest='downpercent',type=str,default='5')
    parser.add_argument('-o','--output',dest='output',type=str,required=True)
    parser.add_argument('-chr','--chrom',dest='chromosome',type=str,required=True)
    parser.add_argument('-st','--start',dest='start',type=str,required=True)
    parser.add_argument('-ed','--end',dest='end',type=str,required=True)
    parser.add_argument('-t','--genetype',help='gene type',dest='genetype',type=str,default='all')
    parser.add_argument('-b1','--bound1',help='bound1',dest='bound1',type=int,default='0')
    parser.add_argument('-b2','--bound2',help='bound2',dest='bound2',type=int,default='0')
    args = parser.parse_args()
    # NOTE(review): uppercent/downpercent are parsed but never used below.
    uppercent = int(args.uppercent)
    downpercent = int(args.downpercent)
    # Plotting window, optionally widened by bound1/bound2 on either side;
    # tstart/tend keep the original (unwidened) coordinates for draw_4C.
    start=int(args.start)-args.bound1
    end=int(args.end)+args.bound2
    tstart=int(args.start)
    tend=int(args.end)
    chrs=args.chromosome
    # Each line of the track list is: type<TAB>arg1<TAB>arg2... ; the first
    # column selects the drawer, the rest are positional per-type arguments.
    files=open(args.file_list)
    file_list=[]
    for i in files:
        i=i.strip().split('\t')
        file_list.append(i)
    # Relative height (weight) of each supported track type.
    dic={'matrix':3,'delta':3,'gtf':3,'bedgraph':0.8,'pairwise':1.2,'SV':0.5,'insu':0.8,'delta_epi':1.6,'DI':0.8,'AB':0.8,'boundary':0.8,'assembly':1.3,'CDB':0.8,'RNA':0.8,'4C':1.2,'line':0.8}
    # Total figure height: every track plus a one-third-height spacing gap.
    l=0
    for i in file_list:
        l+=dic[i[0]]+dic[i[0]]/3
    fig = plt.figure(figsize=(8,l))
    # x accumulates consumed height; each branch places one axes at
    # [0.1, 1-(x/l)-x/(4*l), 0.8, <type-specific height>] and dispatches
    # to the matching draw_* helper defined elsewhere in this module.
    x=0
    j=0
    while j < len(file_list):
        #print (j)
        if file_list[j][0]=='matrix':
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*3.0])
            draw_matrix(ax,chrs,start,end,file_list[j][3],file_list[j][1],file_list[j][2],file_list[j][4])
            j+=1
        elif file_list[j][0]=='delta':
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*3.0])
            draw_delta(ax,chrs,start,end,file_list[j][3],file_list[j][1],file_list[j][2])
            j+=1
        elif file_list[j][0]=='4C':
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*1.1])
            #matrix_list:4,samples:2,colors:3,binsize:1,ymax:5
            draw_4C(ax,chrs,start,end,file_list[j][4],file_list[j][2],file_list[j][1],tstart,tend,file_list[j][3],file_list[j][5])
            j+=1
        elif file_list[j][0]=='bedgraph':
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*0.65])
            draw_epigenetic(file_list[j][3],ax,chrs,start,end,file_list[j][1],file_list[j][2],file_list[j][4],'up',file_list[j][5])
            j+=1
        elif file_list[j][0]=='line':
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*0.65])
            draw_line(file_list[j][3],ax,chrs,start,end,file_list[j][1],file_list[j][2],file_list[j][4],'up',file_list[j][5])
            j+=1
        elif file_list[j][0]=='RNA':
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*0.65])
            draw_RNA(file_list[j][3],ax,chrs,start,end,file_list[j][1],file_list[j][2],file_list[j][4],'up',file_list[j][5])
            j+=1
        elif file_list[j][0]=='SV':
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*0.35])
            draw_SV(file_list[j][4],ax,chrs,start,end,file_list[j][1],file_list[j][2],file_list[j][3])
            j+=1
        elif file_list[j][0]=='pairwise':
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*1.1])
            pairwise(file_list[j][3],ax,chrs,start,end,file_list[j][1],file_list[j][2])
            j+=1
        elif file_list[j][0]=='insu':
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*0.65])
            draw_insulation(ax,file_list[j][2],chrs,start,end,file_list[j][1])
            j+=1
        elif file_list[j][0]=='delta_epi':
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*1.5])
            # Columns 1 and 2 are 1-based row numbers of OTHER tracks in the
            # list; their file paths (column 3) feed the differential drawer.
            y=int(file_list[j][1])-1
            z=int(file_list[j][2])-1
            draw_diff_epigenetic(file_list[y][3],file_list[z][3],ax,chrs,start,end,file_list[j][3],file_list[j][4],file_list[j][5],'none')
            j+=1
        elif file_list[j][0]=='gtf':
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*3.0])
            gtf_bed = filter_gtffile(file_list[j][1],chrs,start,end,args.genetype)
            draw_genes(ax,gtf_bed,start,end)
            #gtf_bed.to_csv(args.output,sep='\t',header=None,index=False)
            j+=1
        elif file_list[j][0]=='DI':
            x+=dic[file_list[j][0]]
            # FIXME: fig.add_axes(fig.add_axes([...])) adds the axes twice;
            # a single add_axes call is presumably intended -- verify.
            ax = fig.add_axes(fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*0.9]))
            draw_bar(ax,file_list[j][1],chrs,start,end,1000,-1000)
            j+=1
        elif file_list[j][0]=='AB':
            x+=dic[file_list[j][0]]
            # FIXME: same doubled add_axes call as the 'DI' branch above.
            ax = fig.add_axes(fig.add_axes([0.1,1-(x/l)-x/(4*l),0.8,(1/l)*0.7]))
            draw_AB(file_list[j][3],int(file_list[j][1]),chrs,start,end,file_list[j][2],ax)
            j+=1
        elif file_list[j][0]== "boundary":
            title = file_list[j][1]
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)+((1/l)*0.45)*1-x/(4*l),0.8,(1/l)*0.47])
            #ax2 = fig.add_axes([0.1,1-(x/l)+((1/l)*0.45)*3-x/(4*l),0.8,(1/l)*0.47])
            #draw_border(ax1,ax2,file_list[j][2],chrs,start,end)
            draw_compare_border(title,ax,file_list[j][3],chrs,start,end,file_list[j][2])
            j+=1
        elif file_list[j][0]== "CDB":
            title = file_list[j][1]
            x+=dic[file_list[j][0]]
            ax = fig.add_axes([0.1,1-(x/l)+((1/l)*0.45)*1-x/(4*l),0.8,(1/l)*0.47])
            draw_CDB(title,ax,file_list[j][2],chrs,start,end)
            j+=1
    fig.savefig(args.output)
# Run the CLI only when executed as a script, not on import.
if __name__=="__main__":
    main()
"""
elif file_list[j][0]=='compartment':
ax = fig.add_axes([0.1,1-j/n,0.8,(1/n)*0.9])
draw_compartment(ax,file_list[j][1],file_list[j][2],file_list[j][3],x)
j+=1
x+=diff_dic[zfile_list[zj][0]]
elif file_list[j][0]=='boundary':
ax = fig.add_axes([0.1,1-(j/n+(1/(2*n))),0.8,(1/n)/4])
draw_insu(ax,file_list[j][1],file_list[j][2],file_list[j][3],x)
j+=1
x+=diff_dic[file_list[j][0]]
"""
|
Annoroad-OMIC-vis
|
/Annoroad_OMIC_vis-1.0.3-py3-none-any.whl/Annoroad_vis/main.py
|
main.py
|
from coalib.bearlib.languages.LanguageDefinition import LanguageDefinition
from coalib.bears.LocalBear import LocalBear
from coalib.results.HiddenResult import HiddenResult
from coalib.results.Result import Result, RESULT_SEVERITY
from coalib.results.SourceRange import SourceRange
from coalib.results.AbsolutePosition import AbsolutePosition
from coala_utils.string_processing.Core import unescaped_search_for
class AnnotationBear(LocalBear):
    # Bear that locates every string and comment in a source file using the
    # delimiter definitions of the file's language (coalang specification).
    AUTHORS = {'The coala developers'}
    AUTHORS_EMAILS = {'[email protected]'}
    LICENSE = 'AGPL-3.0'

    def run(self, filename, file, language: str, coalang_dir: str = None):
        """
        Finds out all the positions of strings and comments in a file.
        The Bear searches for valid comments and strings and yields their
        ranges as SourceRange objects in HiddenResults.
        :param language:
            The programming language of the source code.
        :param coalang_dir:
            External directory for coalang file.
        :return:
            One HiddenResult containing a dictionary with keys being 'strings'
            or 'comments' and values being a tuple of SourceRanges pointing to
            the strings and a tuple of SourceRanges pointing to all comments
            respectively. The ranges do include string quotes or the comment
            starting separator but not anything before (e.g. when using
            ``u"string"``, the ``u`` will not be in the source range).
        """
        try:
            lang_dict = LanguageDefinition(language, coalang_dir=coalang_dir)
        except FileNotFoundError:
            # Unknown language: report it and stop without annotation data.
            content = ('coalang specification for ' + language +
                       ' not found.')
            yield HiddenResult(self, content)
            return
        string_delimiters = dict(lang_dict['string_delimiters'])
        multiline_string_delimiters = dict(
            lang_dict['multiline_string_delimiters'])
        multiline_comment_delimiters = dict(
            lang_dict['multiline_comment_delimiters'])
        comment_delimiter = dict(lang_dict['comment_delimiter'])
        string_ranges = comment_ranges = ()
        try:
            string_ranges, comment_ranges = self.find_annotation_ranges(
                file,
                filename,
                string_delimiters,
                multiline_string_delimiters,
                comment_delimiter,
                multiline_comment_delimiters)
        except NoCloseError as e:
            # Unclosed annotation: yield a MAJOR result, then fall through
            # and still yield the (empty) annotation dictionary below.
            yield Result(self, str(e), severity=RESULT_SEVERITY.MAJOR,
                         affected_code=(e.code,))
        content = {'strings': string_ranges, 'comments': comment_ranges}
        yield HiddenResult(self, content)

    def find_annotation_ranges(self,
                               file,
                               filename,
                               string_delimiters,
                               multiline_string_delimiters,
                               comment_delimiter,
                               multiline_comment_delimiters):
        """
        Finds ranges of all annotations.
        :param file:
            A tuple of strings, with each string being a line in the file.
        :param filename:
            The name of the file.
        :param string_delimiters:
            A dictionary containing the various ways to define single-line
            strings in a language.
        :param multiline_string_delimiters:
            A dictionary containing the various ways to define multi-line
            strings in a language.
        :param comment_delimiter:
            A dictionary containing the various ways to define single-line
            comments in a language.
        :param multiline_comment_delimiters:
            A dictionary containing the various ways to define multi-line
            comments in a language.
        :return:
            Two tuples first containing a tuple of strings, the second a tuple
            of comments.
        """
        text = ''.join(file)
        strings_range = []
        comments_range = []
        position = 0
        while position <= len(text):
            # The helper is a closure over the current ``position``; each
            # annotation kind is tried in priority order (multi-line strings,
            # single-line strings, multi-line comments, single-line comments)
            # and the first match advances past its end.  When nothing
            # matches, the scan advances by one character.
            def get_new_position():
                _range, end_position = self.get_range_end_position(
                    file,
                    filename,
                    text,
                    multiline_string_delimiters,
                    position,
                    self.get_multiline)
                if end_position and _range:
                    strings_range.append(_range)
                    return end_position + 1
                _range, end_position = self.get_range_end_position(
                    file,
                    filename,
                    text,
                    string_delimiters,
                    position,
                    self.get_singleline_strings)
                if end_position and _range:
                    strings_range.append(_range)
                    return end_position + 1
                _range, end_position = self.get_range_end_position(
                    file,
                    filename,
                    text,
                    multiline_comment_delimiters,
                    position,
                    self.get_multiline)
                if end_position and _range:
                    comments_range.append(_range)
                    return end_position + 1
                _range, end_position = self.get_range_end_position(
                    file,
                    filename,
                    text,
                    comment_delimiter,
                    position,
                    self.get_singleline_comment,
                    single_comment=True)
                if end_position and _range:
                    comments_range.append(_range)
                    return end_position + 1
                return position + 1
            position = get_new_position()
        return tuple(strings_range), tuple(comments_range)

    @staticmethod
    def get_range_end_position(file,
                               filename,
                               text,
                               annotations,
                               position,
                               func,
                               single_comment=False):
        # Try every delimiter in ``annotations``; on the first one that the
        # text at ``position`` starts with, delegate to ``func`` to compute
        # the annotation's SourceRange and end position.  Returns
        # (None, None) when no delimiter matches.  ``single_comment``
        # callbacks take no end-delimiter argument.
        _range = end_position = None
        for annotation in annotations.keys():
            if text[position:].startswith(annotation):
                if not single_comment:
                    ret_val = func(file,
                                   filename,
                                   text,
                                   annotation,
                                   annotations[annotation],
                                   position)
                else:
                    ret_val = func(file,
                                   filename,
                                   text,
                                   annotation,
                                   position)
                if ret_val:
                    _range, end_position = ret_val[0], ret_val[1]
        return _range, end_position

    @staticmethod
    def get_multiline(file,
                      filename,
                      text,
                      annotation_start,
                      annotation_end,
                      position):
        """
        Gets sourcerange and end position of an annotation that can span
        multiple lines.
        :param file:
            A tuple of strings, with each string being a line in the file.
        :param filename:
            The name of the file.
        :param annotation_start:
            The string specifying the start of the annotation.
        :param annotation_end:
            The string specifying the end of the annotation.
        :param position:
            An integer identifying the position where the annotation started.
        :return:
            A SourceRange object holding the range of the multi-line annotation
            and the end_position of the annotation as an integer.
        """
        end_end = get_end_position(annotation_end,
                                   text,
                                   position + len(annotation_start) - 1)
        if end_end == -1:
            # No closing delimiter anywhere in the rest of the text.
            _range = SourceRange.from_absolute_position(
                filename,
                AbsolutePosition(file, position))
            raise NoCloseError(annotation_start, _range)
        return (SourceRange.from_absolute_position(
                    filename,
                    AbsolutePosition(file, position),
                    AbsolutePosition(file, end_end)),
                end_end)

    @staticmethod
    def get_singleline_strings(file,
                               filename,
                               text,
                               string_start,
                               string_end,
                               position):
        """
        Gets sourcerange of a single-line string and its end position.
        :param file:
            A tuple of strings, with each string being a line in the file.
        :param filename:
            The name of the file.
        :param string_start:
            The string which specifies how a string starts.
        :param string_end:
            The string which specifies how a string ends.
        :position:
            An integer identifying the position where the string started.
        :return:
            A SourceRange object identifying the range of the single-line
            string and the end_position of the string as an integer.
        """
        end_position = get_end_position(string_end,
                                        text,
                                        position + len(string_start) - 1)
        newline = get_end_position('\n', text, position)
        if newline == -1:
            newline = len(text)
        if end_position == -1:
            raise NoCloseError(string_start, SourceRange.from_absolute_position(
                filename, AbsolutePosition(file, position))) if False else None
            _range = SourceRange.from_absolute_position(
                filename,
                AbsolutePosition(file, position))
            raise NoCloseError(string_start, _range)
        if newline > end_position:
            return (SourceRange.from_absolute_position(
                        filename,
                        AbsolutePosition(file, position),
                        AbsolutePosition(file, end_position)),
                    end_position)
        # NOTE: falls through to an implicit None when the closing delimiter
        # is only found on a later line (i.e. not a single-line string).

    @staticmethod
    def get_singleline_comment(file, filename, text, comment, position):
        """
        Gets Sourcerange of a single-line comment where the start is the
        start of comment and the end is the end of line.
        :param file:
            A tuple of strings, with each string being a line in the file.
        :param filename:
            The name of the file.
        :param comment:
            The string which specifies the comment.
        :position:
            An integer identifying the position where the string started.
        :return:
            A SourceRange object identifying the range of the single-line
            comment and the end_position of the comment as an integer.
        """
        end_position = get_end_position('\n',
                                        text,
                                        position + len(comment) - 1)
        if end_position == -1:
            # Comment runs to end-of-file without a trailing newline.
            end_position = len(text) - 1
        return (SourceRange.from_absolute_position(
                    filename,
                    AbsolutePosition(file, position),
                    AbsolutePosition(file, end_position)),
                end_position)
def get_end_position(end_marker, text, position):
    # Find the first *unescaped* occurrence of ``end_marker`` strictly after
    # ``position`` and return the absolute index of its last character, or -1
    # when no unescaped occurrence exists.
    try:
        # The search runs over text[position + 1:]; span()[1] is the
        # exclusive end in that slice, so position + span()[1] is the
        # absolute index of the marker's final character.
        end_match = next(unescaped_search_for(end_marker, text[position + 1:]))
        end_position = position + end_match.span()[1]
    except StopIteration:
        end_position = -1
    return end_position
class NoCloseError(Exception):
    """Raised when an annotation's opening delimiter is never closed.

    The offending opening delimiter is embedded in the message, and the
    location of the failure is kept on the ``code`` attribute.
    """

    def __init__(self, annotation, code):
        super().__init__(annotation + ' has no closure')
        # SourceRange (or similar) pointing at the unclosed annotation.
        self.code = code
|
AnnotationBear
|
/AnnotationBear-0.10.0.tar.gz/AnnotationBear-0.10.0/coalaAnnotationBear/AnnotationBear.py
|
AnnotationBear.py
|
from Bio import SeqIO
from joblib import Parallel, delayed
import multiprocessing
import subprocess as sp
import random
import time
class Blaster():
    """Namespace wrapping command-line BLAST searches over fasta records."""

    # NOTE(review): intentionally has no ``self`` parameter -- it is called
    # directly on the class (Blaster.blastFasta(...)); confirm with callers.
    def blastFasta(fasta_file, blast_type, n_threads, out_dir, database = 'nr', remote = '-remote'):
        """Blast all records in a fasta fileself.
        Blasting can be done parallelized, to reduce execution times (recommended is not to use to many threads).
        Keyword Arguments:
        fasta_file -- str, filepath
        blast_type -- str, blast type to use. E.g. blastn, blastp, blastx, etc.
        n_threads -- int, number of parallel blast threads to use.
        database -- str, blast database to use, may either be a standard (remote) database or a local one.
        remote -- str, argument to indicate if the blast database is remote. Argument should either be "-remote" or ""
        """
        # Parse the fast file
        records = list(SeqIO.parse(fasta_file, 'fasta'))
        # Parallel execution (currently disabled; n_threads is unused here)
        #results = Parallel(n_jobs = n_threads)(delayed(blast) (i, blast_type, database) for i in records)
        # BUG FIX: the previous code hard-coded 'blastn' below, silently
        # ignoring the caller's ``blast_type`` argument.
        results = [blast(record, blast_type, database, remote = remote) for record in records]
        print("Results: ")
        print(results)
        # Keep only records that produced NO significant alignment.
        no_hit_ids = [result[0] for result in results if result[1] == 0 ]
        print("Hit ids")
        print(no_hit_ids)
        records_keep = filter_records(no_hit_ids, records)
        # Write the remaining (unaligned records) back to the file
        SeqIO.write(records_keep, fasta_file, 'fasta')
def filter_records(no_hit_ids, records):
    """Yield only the records whose id appears in ``no_hit_ids``.

    Records that are dropped (those with a BLAST hit) are not yielded;
    their ids are printed instead.
    """
    for entry in records:
        if entry.id not in no_hit_ids:
            print(entry.id)
            continue
        yield entry
def blast(record, blast_type, database = 'nr', remote = "-remote"):
    """Run one BLAST search for ``record`` and report whether a hit was found.

    Keyword Arguments:
    record -- object with ``.id`` and ``.seq`` attributes (e.g. a SeqRecord)
    blast_type -- str, blast executable to run (blastn, blastp, ...)
    database -- str, blast database name
    remote -- str, "-remote" to query NCBI (adds a random delay), "" for local

    Returns a ``(record.id, flag)`` tuple: flag is 1 when the report contains
    significant alignments, 0 otherwise.
    """
    if remote == '-remote':
        wait_time = random.randint(1, 10)
        time.sleep(wait_time) # Make sure we don't spam the NCBI servers all at once
    blast_cmd = "{0} -db {1} {2} -evalue 10e-5 -query - ".format(blast_type, database, remote)
    print(blast_cmd)
    # The query sequence is streamed to blast on stdin.
    p = sp.Popen(blast_cmd, stdin = sp.PIPE, stdout = sp.PIPE, stderr = sp.PIPE, shell = True)
    blast_out, err = p.communicate(input=str(record.seq).encode())
    blast_out = blast_out.decode()
    if "Sequences producing significant alignments:" in blast_out:
        return (record.id, 1)
    # BUG FIX: the original wrote ``elif "***** No hits found *****":``
    # (missing ``in blast_out``) -- a non-empty literal is always truthy, so
    # the membership test never ran and the trailing ``return blast_out``
    # was unreachable.  Both of those branches returned (record.id, 0), so a
    # single return preserves the observable behavior.
    return (record.id, 0)
|
AnnotationPipeline
|
/AnnotationPipeline-0.18.tar.gz/AnnotationPipeline-0.18/rnaseqpipeline/Blast.py
|
Blast.py
|
class Install():
"""Installation class for pipeline dependencies.
This class does its best job to detect if dependencies are already installed
in the provided path, and if not, to install them.
"""
def verify_installation(options):
""" Veryify the installation without trying to install any missing packages
currently not supported.
Keyword Arguments:
options -- argparse opject containing the command line arguments
"""
print_bold('veryifying the installation is currently in development, don\'t expect good results')
import subprocess as sp
def All(options):
print("UNSUPPORTED -> use RepeatModeler")
def Verify_RepeatModeler(options):
"""Verify the installation of the RepeatModler program and its dependencies
While some programs are very easy to verify (RECON, RepeatScout, etc.),
other programs such as RepeatMasker & RepeatModler have a -sigh- suboptimal design, making the validation
important and less straightforward.
"""
out_file = open("{}/out.log".format(options.install_dir), 'w') # logging standard output
err_file = open("{}/err.log".format(options.install_dir), 'w') # Logging standeard error
recon = verify_installation('edgeredef', 'usage')
msg = "RECON installed: {}".format(recon)
if recon: print_pass(msg)
else: print_fail(msg)
repeatscout = False not in [verify_installation('build_lmer_table', "Usage"),
verify_installation("RepeatScout", "RepeatScout Version 1.0.5")
]
msg = "RepeatScout installed: {}".format(repeatscout)
if repeatscout: print_pass(msg)
else: print_fail(msg)
trf = verify_installation('trf409.linux64', 'Please use:')
msg = "TandemRepeatFinder installed: {}".format(trf)
if trf: print_pass(msg)
else: print_fail(msg)
rmblast = False not in [verify_installation('{0}/ncbi-blast-2.6.0+-src/bin/blastn'.format(options.install_dir), 'BLAST query/options error'),
verify_installation('{0}/ncbi-blast-2.6.0+-src/bin/rmblastn'.format(options.install_dir),"BLAST query/options error")
]
msg = "RMBlast installed: {}".format(rmblast)
if rmblast: print_pass(msg)
else: print_fail(msg)
repeatmasker_install = verify_installation('RepeatMasker', 'RepeatMasker version')
msg = "RepeatMasker installed: {}".format(repeatmasker_install)
if repeatmasker_install:
print_pass(msg)
else:
print_fail(msg)
repeatmasker_config_interpreter_cmd = 'head -n1 {}/RepeatMasker/RepeatMasker'.format(options.install_dir)
if verify_installation(repeatmasker_config_interpreter_cmd, "#!/u1/local/bin/perl"):
print_warn(" RepeatMasker is trying to use a wrong (non-existing) perl interpreter. I will now try to fix it..")
sp.call("cd {}/RepeatMasker/; for file in *;do sed -i \"s+\#\!/u1/local/bin/perl+\#\!$(which perl)+g\" $file; done".format(options.install_dir),
shell = True)#, stderr = err_file, stdout = out_file)
if verify_installation(repeatmasker_config_interpreter_cmd, "#!/u1/local/bin/perl"):
print_fail(" I wasn't able to fix it automatically, please manually run the configure script: {}/RepeatMasker/configure".format(options.install_dir))
else:
print_pass(" RepeatMasker is now using the right perl interpreter")
else:
print_pass(" RepeatMasker is using the right perl interpreter")
if not verify_installation('RepeatMasker', 'RepeatMasker version'):
print_fail(" RepeatMasker is still not working. I will work on a fix for this in a future release of the pipeline (perl libraries etc....)")
# check RepeatMasker databases
db_str = 'DfamConsensus.embl\nDfam.hmm\nREADME.meta\nRepeatAnnotationData.pm\nRepeatMasker.lib\nRepeatMaskerLib.embl\nRepeatMasker.lib.nhr\nRepeatMasker.lib.nin\nRepeatMasker.lib.nsq\nRepeatPeps.lib\nRepeatPeps.lib.phr\nRepeatPeps.lib.pin\nRepeatPeps.lib.psq\nRepeatPeps.readme\nRMRBMeta.embl\ntaxonomy.dat\n'
if verify_installation("ls -1 {}/RepeatMasker/Libraries".format(options.install_dir), db_str):
print_pass(" RepeatMasker Libraries are installed correctly")
else:
print_warn(" The RepeatMasker libraries are not configured. I will try to generate them..")
sp.call("{0}/ncbi-blast-2.6.0+-src/bin/makeblastdb -dbtype nucl -in {0}/RepeatMasker/Libraries/RepeatMasker.lib ".format(options.install_dir), shell = True)
sp.call("{0}/ncbi-blast-2.6.0+-src/bin/makeblastdb -dbtype prot -in {0}/RepeatMasker/Libraries/RpeatPeps.lib ".format(options.install_dir), shell = True)
if verify_installation("ls -1 {}/RepeatMasker/Libraries".format(options.install_dir), db_str):
print_pass(" RepeatMasker Libraries are now installed correctly")
else:
print_fail(" RepeatMasker Librareis are still not installed correctly. Please manually run the configure script: {}/RepeatMasker/configure".format(options.install_dir))
possibilities = {"all": All,
"RepeatModeler": Verify_RepeatModeler}
prog_choice = options.check_prog
possibilities[prog_choice](options)
return
def perform_installation(options):
""" Install the required tools for the pipeline to a certain directory
Any missing tools or dependencies will be installed automatically.
Keyword Arguments:
options -- argparse opject containing the command line arguments
"""
import subprocess as sp
print('Performing installation in {} '.format(options.install_dir))
out_file = open("{}/out.log".format(options.install_dir), 'w') # logging standard output
err_file = open("{}/err.log".format(options.install_dir), 'w') # Logging standeard error
def RepeatModeler():
print_pass("Installing RepeatModeler")
def RECON():
# Check first if already installed
if verify_installation('edgeredef', 'usage'):
print(' Skipping RECON (already installed)...')
return
print(" Installing RECON...")
recon_url = 'http://www.repeatmasker.org/RepeatModeler/RECON-1.08.tar.gz'
download_cmd = 'wget {0} -O {1}/recon.tar.gz; cd {1}; tar xf recon.tar.gz;'.format(
recon_url, options.install_dir
)
# Download and extract
sp.call(download_cmd, shell=True, stdout=out_file, stderr = err_file)
# Building
sp.call('cd {}/RECON-1.08/src; make; make install'.format(options.install_dir), shell = True,
stdout=out_file, stderr = err_file)
# Modify the REcon scrip to use the right paths
sed_cmd = "sed -i 's+$path = \"\";+$path = {0}/RECON-1.08/bin+g' {0}/RECON-1.08/scripts/recon.pl".format(
options.install_dir)
sp.call(sed_cmd,
shell=True, stdout=out_file, stderr = err_file)
# Cleanup
sp.call('rm {}/recon.tar.gz'.format(options.install_dir),
shell=True, stdout=out_file, stderr = err_file)
#Add files to path
sp.call("echo \'# RECON-1.08 installation dir\' >> ~/.bashrc; echo \'export PATH=$PATH:{0}/RECON-1.08/bin\' >> ~/.bashrc".format(options.install_dir),
shell = True, stdout=out_file, stderr = err_file)
def RepeatScout():
if verify_installation('build_lmer_table', "Usage"):
print(' Skipping RepeatScout (already installed)...')
return
print(" Installing RepeatScout...")
recon_url = 'http://www.repeatmasker.org/RepeatScout-1.0.5.tar.gz'
download_cmd = 'wget {0} -O {1}/RepeatScout.tar.gz; cd {1}; tar xf RepeatScout.tar.gz;'.format(
recon_url, options.install_dir )
# Download and extract
sp.call(download_cmd,
shell = True, stdout=out_file, stderr = err_file)
# Building
sp.call('cd {}/RepeatScout-1/ ; make'.format(options.install_dir),
shell = True, stdout=out_file, stderr = err_file)
# Cleanup
sp.call('rm {}/RepeatScout.tar.gz'.format(options.install_dir),
shell=True, stdout=out_file, stderr = err_file)
bashrc = "echo \'# RepeatScout 1.0.5 installation dir\'; echo \'export PATH=$PATH:{}/RepeatScout-1/ \' >> ~/.bashrc".format(options.install_dir)
sp.call(bashrc,
shell = True, stdout=out_file, stderr = err_file)
def TandenRepeatFinder():
if verify_installation('trf409.linux64', 'Please use:'):
print(' Skipping TandemRepeatFinder (already installed)')
return
print(" Installing TendemRepeatFinder...")
conda_channel = "conda config --add channels {}"
sp.call(conda_channel.format('bioconda'),
shell = True, stdout=out_file, stderr = err_file)
sp.call(conda_channel.format('conda-forge'),
shell = True, stdout=out_file, stderr = err_file)
sp.call(conda_channel.format('WURnematology'),
shell = True, stdout=out_file, stderr = err_file)
sp.call("conda install -y tandemrepeatfinder",
shell = True, stdout=out_file, stderr = err_file)
def RMBlast():
path_check = verify_installation('{0}/ncbi-blast-2.6.0+-src/bin/blastn'.format(options.install_dir), 'BLAST query/options error')
if path_check:
print(" Skipping RMBlast (already installed)")
return
print(" Installing RMBlast...")
sp.call("conda install -y gnutls",
shell = True, stdout = out_file, stderr = err_file)
#Download ncbiblast and RMBLAST
sp.call("cd {}; wget ftp://ftp.ncbi.nlm.nih.gov/blast/executables/blast+/2.2.28/ncbi-blast-2.2.28+-x64-linux.tar.gz; tar xf ncbi-blast-2.2.28+-x64-linux.tar.gz".format(
options.install_dir), shell = True, stdout = out_file, stderr = err_file)
sp.call("cd {}; wget ftp://ftp.ncbi.nlm.nih.gov/blast/executables/rmblast/2.2.28/ncbi-rmblastn-2.2.28-x64-linux.tar.gz; tar xf ncbi-rmblastn-2.2.28-x64-linux.tar.gz".format(
options.install_dir), shell = True, stdout = out_file, stderr = err_file)
sp.call("cd {}; cp -R ncbi-rmblastn-2.2.28/* ncbi-blast-2.2.28+/; rm -rf ncbi-rmblastn-2.2.28; mv ncbi-blast-2.2.28+ ncbi-blast-2.6.0+-src".format(options.install_dir),
shell = True, stdout = out_file, stderr = err_file)
path = "{0}/ncbi-blast-2.6.0+-src/bin".format(options.install_dir)
sp.call("conda install -y blast -c bioconda", shell = True,
stdout = out_file, stderr = err_file)
def RepeatMasker():
if verify_installation('RepeatMasker', 'RepeatMasker version'):
print(" Skipping RepeatMasker (Already installed)")
return
print(" Installing RepeatMasker")
sp.call('wget -c http://www.bioinformatics.nl/~steen176/RepeatMasker.tar.gz -O {}/RepeatMasker.tar.gz'.format(
options.install_dir),
shell = True, stdout=out_file, stderr = err_file)
sp.call('cd {}; tar xf RepeatMasker.tar.gz'.format(options.install_dir),
shell = True, stdout=out_file, stderr = err_file)
# DEPRECATED
# sp.call('cp {0}/RepeatMasker/RepeatMaskerConfig.tmpl {0}/RepeatMasker/RepeatMaskerConfig.pm'.format(options.install_dir),
# shell = True, stdout=out_file, stderr = err_file)
# Configure the program
# RMBLAST
sp.call("sed -i \'s,INSTALLDIR,{0},g\' {0}/RepeatMasker/RepeatMaskerConfig.pm ".format(options.install_dir),
shell = True, stdout=out_file, stderr = err_file)
sp.call('sed -i "s,CONDABIN,$(which conda),g" tools/RepeatMasker/RepeatMaskerConfig.pm',
shell = True, stdout=out_file, stderr = err_file)
# sp.call('cd {}/RepeatMasker ; for i in *; do sed -i "s,\#\!/u1/local/bin/perl,\#\!$(which perl),g" $i; done'.format(options.install_dir),
# shell = True, stdout = out_file, stderr = err_file)
# Configure rmbblast databases DEPRECATED
# sp.call('cd {}; ncbi-blast-2.6.0+-src/bin/makeblastdb -dbtype nucl -in RepeatMasker/Libraries/RepeatMasker.lib'.format(options.install_dir),
# shell = True, stdout = out_file, stderr = err_file)
# sp.call('cd {}; ncbi-blast-2.6.0+-src/bin/makeblastdb -dbtype prot -in RepeatMasker/Libraries/RepeatPeps.lib'.format(options.install_dir),
# shell = True, stdout = out_file, stderr = err_file)
#
# sp.call("echo \'# RepeatMasker install dir\' >> ~/.bashrc ; echo \'export PATH={}/RepeatMasker:$PATH\' >> ~/.bashrc".format(
# options.install_dir
# ),
# shell = True, stdout=out_file, stderr = err_file)
if options.global_install:
sp.call("cpanm Text::Soundex", shell = True)
sp.call("cpanm JSON", shell = True)
sp.call("cpanm Module::Util", shell = True)
else:
sp.call("wget -O- http://cpanmin.us | perl - -l ~/perl5 App::cpanminus local::lib",
shell = True, stdout=out_file, stderr = err_file)
sp.call("eval `perl -I ~/perl5/lib/perl5 -Mlocal::lib`",
shell = True, stdout=out_file, stderr = err_file)
sp.call("echo 'eval `perl -I ~/perl5/lib/perl5 -Mlocal::lib`' >> ~/.bashrc",
shell = True, stdout=out_file, stderr = err_file)
sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(Text::Soundex)'",
shell = True, stdout=out_file, stderr = err_file)
sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(JSON)'",
shell = True, stdout=out_file, stderr = err_file)
sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(Module::Util)'",
shell = True, stdout=out_file, stderr = err_file)
def NSEG():
"""Install NSEG (the low-complexity masker used by RepeatMasker/RepeatModeler)."""
if verify_installation('nseg', "Usage:"):
print(" Skipping NSEG (Already installed)")
return
# Fetch the NSEG sources from the NCBI FTP server into <install_dir>/nseg.
sp.call("mkdir {0}/nseg; cd {0}/nseg; wget ftp://ftp.ncbi.nih.gov/pub/seg/nseg/*".format(options.install_dir),
shell = True, stdout=out_file, stderr = err_file)
# Build from source.
sp.call("cd {}/nseg; make".format(options.install_dir),
shell = True, stdout=out_file, stderr = err_file)
# Expose the binary on PATH for future shells by appending to ~/.bashrc.
sp.call("echo \'# NSEG installation dir\' >> ~/.bashrc; echo \'export PATH={}/nseg:$PATH\' >> ~/.bashrc".format(
options.install_dir
),
shell = True, stdout=out_file, stderr = err_file)
RECON()
RepeatScout()
TandenRepeatFinder()
RMBlast()
RepeatMasker()
NSEG()
# Actual RepeatModeler installation
# Download the RELEASE
if verify_installation('RepeatModeler', " RepeatModeler - Model repetitive DNA"):
print(" RepeatModeler Already Installed")
return
sp.call("wget -c http://www.repeatmasker.org/RepeatModeler/RepeatModeler-open-1.0.11.tar.gz -O {}/RepeatModeler-open-1.0.11.tar.gz".format(options.install_dir),
shell = True, stdout=out_file, stderr = err_file)
sp.call('cd {}; tar xf RepeatModeler-open-1.0.11.tar.gz'.format(options.install_dir),
shell = True, stdout=out_file, stderr = err_file)
# By default, the configure script requires manual input for different configuration steps,
# This is annoying in a headless installation (such as this one) therefore I modified the original
# one so it doesn't require the manual input.
# Download that now:
sp.call(["wget", "http://www.bioinformatics.nl/~steen176/repeatmodeler_config", # Rreplace with actual URL
"-O", "{}/RepeatModeler_CONFIG".format(options.install_dir)
], stdout=out_file, stderr = err_file)
# Now we need to update all the paths required relative to the installation directory
repeat_mask_cmd = "sed -i 's+ACTUALINSTALLDIR+{0}+g' {0}/RepeatModeler_CONFIG; sed -i \"s+TRFBINLOCATION+$(which trf409.linux64)+g\" {0}/RepeatModeler_CONFIG".format(
options.install_dir
)
sp.call(repeat_mask_cmd, shell = True, stdout=out_file, stderr = err_file)
sp.call('cd {}/RepeatModeler-open-1.0.11;cp ../RepeatModeler_CONFIG RepModelConfig.pm'.format(options.install_dir),
shell = True, stdout=out_file, stderr = err_file)
sp.call('sed -i "s,\#\!/u1/local/bin/perl,\#\!$(which perl),g" {}/RepeatModeler-open-1.0.11/RepeatModeler'.format(options.install_dir),
shell = True, stdout=out_file, stderr = err_file) # replace the perl shebang line
sp.call('cd {}/RepeatModeler-open-1.0.11 ; for i in *; do sed -i "s,\#\!/u1/local/bin/perl,\#\!$(which perl),g" $i; done'.format(options.install_dir),
shell = True, stdout = out_file, stderr = err_file)
sp.call("echo \'# RepeatModeler installation dir\' >> ~/.bashrc; echo \'export PATH={}/RepeatModeler-open-1.0.11:$PATH\' >> ~/.bashrc".format(
options.install_dir
),
shell = True, stdout=out_file, stderr = err_file)
def RNAmmer():
"""Install RNAmmer 1.2.

NOTE: this only installs on one of the institution's bioinformatics
servers, because the software is only deployable within the institution.
Otherwise you have to download a copy manually.
"""
if verify_installation('rnammer -v', "This rnammer 1.2"):
print ("Skipping RNAmmer (Already installed)")
return
print_pass("Installing RNAmmer")
# Fetch the tarball: over HTTP for a global install, otherwise copy the
# archive kept locally on the server.
if options.global_install:
sp.call("cd {0}; wget http://www.bioinformatics.nl/~steen176/rna/rnammer.tar.gz; tar xf rnammer.tar.gz".format(options.install_dir),
shell = True)
else:
sp.call("cp /home/steen176/tools/dontmove/rnammer.tar.gz {0}; cd {0}; tar xf rnammer.tar.gz".format(options.install_dir),
shell = True, stdout = out_file, stderr = err_file)
#sed_cmd = "sed -i 's+$path = \"\";+$path = {0}/RECON-1.08/bin+g' {0}/RECON-1.08/scripts/recon.pl".format(
# options.install_dir)
# Patch the rnammer launcher scripts: substitute the install dir and the
# absolute hmmsearch/perl paths for the placeholder tokens.
sp.call("cd {0}/rnammer; sed -i \"s+INSTALLDIR+{0}/+g\" rnammer ".format(options.install_dir),
shell = True)
sp.call("cd {0}/rnammer; sed -i \"s+hmmsearchBIN+$(which hmmsearch)+g\" rnammer ".format(options.install_dir),
shell = True)
sp.call("cd {0}/rnammer; sed -i \"s+PERLBIN+$(which perl)+g\" rnammer ".format(options.install_dir),
shell = True)
sp.call("cd {0}/rnammer; sed -i \"s+PERLBIN+$(which perl)+g\" core-rnammer ".format(options.install_dir),
shell = True)
# Make rnammer available on PATH for future shells.
sp.call("echo \'# rnammer installation dir\' >> ~/.bashrc; echo \'export PATH={}/rnammer:$PATH\' >> ~/.bashrc".format(
options.install_dir
),
shell = True, stdout=out_file, stderr = err_file)
# Perl dependencies: cpanm for a global install, CPAN + local::lib otherwise.
if options.global_install:
sp.call("cpanm Getopt::Long", shell = True)
sp.call("cpanm XML::Simple", shell = True)
else:
sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(Getopt::Long)'", shell = True)
# NOTE(review): tandemrepeatfinder is unrelated to RNAmmer; confirm this
# conda install is intentionally placed here (and whether it belongs in
# the else-branch of the original, indentation was lost in this dump).
sp.call("conda install -y tandemrepeatfinder -c bioconda",
shell = True, stdout=out_file, stderr = err_file)
def tRNAscan():
"""Build and install tRNAscan-SE 2.0 under <install_dir>/tRNAscan."""
if verify_installation("tRNAscan-SE", "Usage: tRNAscan-SE [-options] <FASTA file(s)>"):
print("Skipping tRNAscan-se (Already installed)")
return
print ("Installing tRNAscan-se")
# Download and unpack the sources in /tmp, create the install prefix.
sp.call("cd /tmp; wget http://trna.ucsc.edu/software/trnascan-se-2.0.0.tar.gz; tar xf trnascan-se-2.0.0.tar.gz; mkdir {}/tRNAscan".format(options.install_dir),
shell = True, stdout = out_file, stderr = err_file)
sp.call("cd /tmp/tRNAscan-SE-2.0; ./configure --prefix={}/tRNAscan; make; make install".format(options.install_dir),
shell = True, stdout = out_file, stderr = err_file)
# tRNAscan-SE 2.0 needs the Infernal binaries (cmsearch/cmscan) on its bin path.
sp.call("ln -s $(which cmsearch) {}/tRNAscan/bin".format(options.install_dir), shell = True, stdout = out_file, stderr = err_file)
sp.call("ln -s $(which cmscan) {}/tRNAscan/bin".format(options.install_dir), shell = True, stdout = out_file, stderr = err_file)
# Expose the binaries on PATH for future shells.
sp.call("echo \'# tRNAscan-se installation dir\' >> ~/.bashrc; echo \'export PATH={}/tRNAscan/bin:$PATH\' >> ~/.bashrc".format(
options.install_dir
),
shell = True, stdout=out_file, stderr = err_file)
def Maker2():
    """Placeholder for a Maker2 installation step (currently unsupported)."""
    already_present = verify_installation('maker', 'ERROR: Control files not found')
    if already_present:
        print("Skipping Maker (Already installed)")
        return
    print_pass("Installing Maker2 is not supported at this time")
def Braker2():
    """Install BRAKER2 (plus GeneMark-ET) and the Perl modules it depends on.

    Fixes relative to the original:
    - return after the "already installed" check (previously the function fell
      through and reinstalled everything on every run);
    - corrected the misspelled Perl module name File::Spec::FUnctions.
    """
    print_pass("Install Braker2")
    def GeneMark():
        # Install GeneMark-ET and its license key (~/.gm_key).
        if verify_installation('get_sequence_from_GTF.pl', 'Usage: <gene coordinates in GTF> <sequence in FASTA>'):
            print(" Skipping GeneMark (Already Installed)")
            return
        print(" Installing GeneMark")
        if options.global_install:
            sp.call("wget http://www.bioinformatics.nl/~steen176/gm_et_linux_64.tar.gz -O {0}/gm_et_linux_64.tar.gz; cd {0}; tar xf gm_et_linux_64.tar.gz; mv gm_et_linux_64 genemark".format(options.install_dir),
                    shell = True)
            sp.call("wget http://www.bioinformatics.nl/~steen176/.gm_key -O ~/.gm_key", shell = True)
        else: # Remains Untested
            sp.call("cp /home/steen176/tools/dontmove/genemark/gm_et_linux_64 {}/genemark".format(options.install_dir),
                    shell = True)
        sp.call("echo \'# GeneMark ET installation dir\' >> ~/.bashrc; echo \'export PATH={}/genemark:$PATH\' >> ~/.bashrc".format(options.install_dir),
                shell = True)
    def Augustus():
        """Install AUGUSTUS (DEPRECATED -- handled in the Docker file instead)."""
        def bamtools():
            """Install bamtools (DEPRECATED)."""
            download_cmd = "cd {}; git clone git://github.com/pezmaster31/bamtools.git".format(options.install_dir)
            build_cmd = "cd {0}/bamtools; mkdir build; cd build; cmake --DCMAKE_INSTALL_PREFIX={0}/bamtools ..".format(options.install_dir)
            sp.call(download_cmd, shell = True)
            sp.call(build_cmd, shell = True)
        bamtools()
        download_cmd = "cd {}; git clone https://github.com/Gaius-Augustus/Augustus.git".format(options.install_dir)
    GeneMark()
    # AUGUSTUS installation is moved to the Docker file instead.
    # Augustus()
    if verify_installation("braker.pl", "Pipeline for predicting genes with GeneMark-ET and AUGUSTUS"):
        print(" Skipping Braker (Already installed)")
        return  # Fix: original fell through and reinstalled anyway.
    # Actual installation
    sp.call("wget https://github.com/Gaius-Augustus/BRAKER/archive/v2.1.2.tar.gz -O {0}/BRAKER2.tar.gz; cd {0}; tar xf BRAKER2.tar.gz".format(options.install_dir),
            shell = True)
    # Perl module dependencies of the BRAKER scripts.
    if options.global_install:
        sp.call("cpanm File::Spec::Functions", shell = True)
        sp.call("cpanm Hash::Merge", shell = True)
        sp.call("cpanm List::Util", shell = True)
        sp.call("cpanm Logger::Simple", shell = True)
        sp.call("cpanm Module::Load::Conditional", shell = True)
        sp.call("cpanm Parallel::ForkManager", shell = True)
        sp.call("cpanm POSIX", shell = True)
        sp.call("cpanm Scalar::Util::Numeric", shell = True)
        sp.call("cpanm YAML", shell = True)
        sp.call("cpanm File::Which", shell = True)
    else:
        sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(Scalar::Util::Numeric)'", shell = True)
        # Fix: module name was misspelled as File::Spec::FUnctions.
        sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(File::Spec::Functions)'", shell = True)
        sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(Hash::Merge)'", shell = True)
        sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(List::Util)'", shell = True)
        sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(Logger::Simple)'", shell = True)
        sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(Module::Load::Conditional)'", shell = True)
        sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(Parallel::ForkManager)'", shell = True)
        sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(POSIX)'", shell = True)
        sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(YAML)'", shell = True)
        sp.call("perl -MCPAN -Mlocal::lib -e 'CPAN::install(File::Which)'", shell = True)
    sp.call("echo \'# BRAKER2 Installation dir\' >> ~/.bashrc; echo \'export PATH=$PATH:{}/BRAKER-2.1.2/scripts\' >> ~/.bashrc".format(options.install_dir),
            shell = True)
RepeatModeler()
RNAmmer()
tRNAscan()
#Maker2()
Braker2()
def verify_installation(command, required_out):
    """Return True if running `command` prints `required_out` on stdout or stderr.

    Used to probe whether a tool is already installed before reinstalling it.
    """
    import subprocess as sp
    marker = required_out.encode()
    try:
        proc = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, shell=True)
        out, err = proc.communicate()
    except FileNotFoundError:
        return False
    return marker in out or marker in err
import sys
def print_fail(message, end = '\n'):
    """Write `message` to stderr in bold red (ANSI escapes), trailing whitespace stripped."""
    body = message.rstrip()
    sys.stderr.write('\x1b[1;31m' + body + '\x1b[0m' + end)
def print_pass(message, end = '\n'):
    """Write `message` to stdout in bold green (ANSI escapes), trailing whitespace stripped."""
    body = message.rstrip()
    sys.stdout.write('\x1b[1;32m' + body + '\x1b[0m' + end)
def print_warn(message, end = '\n'):
    """Write `message` to stderr in bold yellow (ANSI escapes), trailing whitespace stripped."""
    body = message.rstrip()
    sys.stderr.write('\x1b[1;33m' + body + '\x1b[0m' + end)
def print_bold(message, end = '\n'):
    """Write `message` to stdout in bold white (ANSI escapes), trailing whitespace stripped."""
    body = message.rstrip()
    sys.stdout.write('\x1b[1;37m' + body + '\x1b[0m' + end)
|
AnnotationPipeline
|
/AnnotationPipeline-0.18.tar.gz/AnnotationPipeline-0.18/rnaseqpipeline/Install.py
|
Install.py
|
repeatmodeler_dir = ""
progress_file_path = ""
class Run():
    def run_all(options):
        """Run every annotation pipeline stage in order, resuming from saved progress.

        `lookup_progress` returns the index of the first stage that has not yet
        completed; all earlier stages are skipped.
        """
        func_sequence = [RepeatModeler, blastPrep, blastNR, blastRFAM,
                         blastRetro, RepeatMasker, rnammer, infernalRfam,
                         tRNAscan]
        start = lookup_progress(options)
        for stage in func_sequence[start:]:
            stage(options)
import subprocess as sp
from rnaseqpipeline.Blast import Blaster
# out_file = open("{}/out.log".format(options.install_dir), 'w') # logging standard output
# err_file = open("{}/err.log".format(options.install_dir), 'w') # Logging standeard error
def lookup_progress(options):
"""Look up if a previous run was partially finished and continue where it left of.
This method looks for the `.progress_file` in the working directory. If absent,
it is created, otherwise the progress is returned by this function.
"""
return_table = {"RepeatModeler" : 1,
"blastPrep" : 2,
"BlastNR" : 3,
"BlastRFAM" : 4,
"BlastRetro" : 5,
"RepeatMasker" : 6,
"rnammer" : 7,
"infernalRfam" : 8,
"tRNAscan" : 9,
}
global progress_file_path
progress_file_path = "{}/.progress_file".format(options.workdir)
try:
with open(progress_file_path) as progress_file:
global repeatmodeler_dir
file_content = [line.rstrip("\n").split() for line in progress_file]
names = [line[0] for line in file_content]
if 'RepeatModeler' in names:
repeatmodeler_dir = file_content[0][1]
else: # RepeatModeler was not finished running
return 0
return return_table[file_content[-1][0]]
except FileNotFoundError:
# TODO: Create the file
open(progress_file_path, 'w')
return 0
def call_sp(command):
    """Execute a shell command; output goes to the console.

    Redirection to out_file/err_file is currently disabled.
    """
    sp.call(command, shell=True)  # , stdout = out_file, stderr = err_file)
def call_sp_retrieve(command):
    """Execute a shell command and return its decoded standard output."""
    process = sp.Popen(command, shell=True, stdout=sp.PIPE)
    raw_out, _ = process.communicate()
    return raw_out.decode()
def RepeatModeler(options):
"""Build a RepeatModeler database from the assembly and run RepeatModeler.

Side effects: copies the assembly to <workdir>/genome.fa, stores the
RepeatModeler working directory in the module-level `repeatmodeler_dir`
global, and appends a progress record to the progress file.
"""
global repeatmodeler_dir
# Prepare and Build Genome database
prepare_cmd = "cp {} {}/genome.fa".format(options.assembly, options.workdir)
build_cmd = "cd {}; BuildDatabase -engine ncbi -n \"genome_db\" genome.db" if False else "cd {}; BuildDatabase -engine ncbi -n \"genome_db\" genome.fa".format(options.workdir)
call_sp(prepare_cmd)
call_sp(build_cmd)
# Run RepeatModeler
repeatModeler_cmd = "cd {}; RepeatModeler -pa {} -database genome_db 2>&1 | tee RepeatModeler.stdout".format(
options.workdir, options.n_threads)
call_sp(repeatModeler_cmd)
# Retrieve the workdir from RepeatModeler
# (RepeatModeler prints "Working directory: <dir>" on stdout, captured in
# RepeatModeler.stdout by the tee above.)
repeatModeler_workdir_cmd = "cd {}; cat RepeatModeler.stdout | egrep \"Working directory: .+\"".format(
options.workdir)
repeatmodeler_dir = call_sp_retrieve(repeatModeler_workdir_cmd).split(" ")[1].strip("\n")
# write progress report
with open(progress_file_path, 'a') as progress_file:
progress_file.write("RepeatModeler\t{}\n".format(repeatmodeler_dir))
def blastPrep(options):
    """Create the blastResults folder tree and stage the RepeatModeler output.

    Copies consensi.fa.classified from the RepeatModeler working directory into
    blastResults so the subsequent blast stages can find it, then records
    progress.
    """
    # Create folder structure
    create_folders_cmd = "cd {}; mkdir -p blastResults; cd blastResults; mkdir -p NR; mkdir -p RFAM; mkdir -p Retrotransposon".format(options.workdir)
    cp_repeatmodel_file = "cd {}; cp {}/consensi.fa.classified blastResults".format(
        options.workdir, repeatmodeler_dir)
    call_sp(create_folders_cmd)
    # Fix: the copy command was built but never executed, leaving
    # blastResults without the classified consensus file.
    call_sp(cp_repeatmodel_file)
    # write progress report
    with open(progress_file_path, 'a') as progress_file:
        progress_file.write("blastPrep\t1\n")
def blastNR(options):
"""Blast the RepeatModeler consensus sequences against the NCBI nr database.

Results are written under <workdir>/blastResults/NR.
"""
print("Blast NR")
fasta_file = "{}/consensi.fa.classified".format(repeatmodeler_dir)
out_dir = "{}/blastResults/NR".format(options.workdir)
# Cap at 6 threads for this stage.
n_threads = 6 if options.n_threads > 6 else options.n_threads
# NOTE(review): unlike blastRFAM/blastRetro, no `remote` argument is passed
# here -- presumably Blaster defaults to a remote nr search; confirm.
Blaster.blastFasta(fasta_file = fasta_file,
blast_type = 'blastn',
n_threads = n_threads,
out_dir = out_dir,
database = "nr")
# write progress report
with open(progress_file_path, 'a') as progress_file:
progress_file.write("BlastNR\t1\n")
def blastRFAM(options):
"""Blast the RepeatModeler consensus sequences against a local Rfam database.

Downloads and builds the Rfam blast database on first use. Results are
written under <workdir>/blastResults/RFAM.
"""
print("Blast RFAM")
fasta_file = "{}/consensi.fa.classified".format(repeatmodeler_dir)
db = "{}/rfamDB/rfamDB.fa".format(options.workdir)
out_dir = "{}/blastResults/RFAM".format(options.workdir)
n_threads = 6 if options.n_threads > 6 else options.n_threads
# Existence probe: open() raising FileNotFoundError triggers the download.
# NOTE(review): the probe leaves the file handle unclosed.
try:
open("{}/rfamDB/rfamDB.fa".format(options.workdir))
except FileNotFoundError:
# we have to download the database....
call_sp('cd {}; mkdir rfamDB; cd rfamDB; wget -c ftp://ftp.ebi.ac.uk/pub/databases/Rfam/14.0/fasta_files/*'.format(
options.workdir))
call_sp("cd {}/rfamDB; gunzip *; cat *.fa > rfamDB.fa; makeblastdb -dbtype nucl -in rfamDB.fa".format(options.workdir))
# remote="" selects a local (non-remote) blast run against the Rfam db.
Blaster.blastFasta(fasta_file = fasta_file,
blast_type = 'blastn',
n_threads = n_threads,
out_dir = out_dir,
database = db,
remote = "")
print("RFAM done")
# write progress report
with open(progress_file_path, 'a') as progress_file:
progress_file.write("BlastRFAM\t1\n")
def blastRetro(options):
"""Blast the RepeatModeler consensus sequences against the TREP
retrotransposon database (tblastx).

Downloads and builds the TREP blast database each run. Results are written
under <workdir>/blastResults/retroDB.
"""
fasta_file = "{}/consensi.fa.classified".format(repeatmodeler_dir)
out_dir = "{}/blastResults/retroDB".format(options.workdir)
n_threads = 6 if options.n_threads > 6 else options.n_threads
# We have to download the database..
call_sp("cd {0}; mkdir retroDB; cd retroDB; wget -c http://botserv2.uzh.ch/kelldata/trep-db/downloads/trep-db_complete_Rel-16.fasta.gz -O retroDB.fa.gz; gunzip retroDB.fa.gz".format(
options.workdir))
call_sp("cd {}/retroDB; makeblastdb -in retroDB.fa -dbtype nucl".format(options.workdir))
db = "{}/retroDB/retroDB.fa".format(options.workdir)
# remote="" selects a local (non-remote) blast run.
Blaster.blastFasta(fasta_file = fasta_file,
blast_type = 'tblastx',
n_threads = n_threads,
out_dir = out_dir,
database = db,
remote = "")
# write progress report
with open(progress_file_path, 'a') as progress_file:
progress_file.write("BlastRetro\t1\n")
def RepeatMasker(options):
"""Mask repeat sequences without blast hits

Runs RepeatMasker on <workdir>/genome.fa with the RepeatModeler consensus
library (-xsmall = soft-mask to lower case; -gff also emits a GFF), then
records progress.
"""
mask_cmd = "cd {0}; RepeatMasker -lib {1}/consensi.fa.classified -pa {2} -gff -xsmall genome.fa".format(
options.workdir, repeatmodeler_dir, options.n_threads)
call_sp(mask_cmd)
# write progress report
with open(progress_file_path, 'a') as progress_file:
progress_file.write("RepeatMasker\t1\n")
def rnammer(options):
"""Run rnammer on the masked genome to locate rRNA genes.

Appends the predicted rRNA coordinates (GFF columns 3,4,5) to
<workdir>/maskingfile.txt for later soft-masking, then records progress.
"""
prep_cmd = "cd {}; mkdir rnammer; cp genome.fa.masked rnammer/genome.fa.masked".format(options.workdir)
# -S euk: eukaryote models; -m lsu,ssu,tsu: all three rRNA subunit types.
rnammer_cmd = "cd {}/rnammer; rnammer -S euk -m lsu,ssu,tsu -gff genome.masked.rnammer.gff -h genome.masked.rnammer.hmmreport -f genome.masked.rnammer.fa genome.fa.masked ".format(options.workdir)
call_sp(prep_cmd)
call_sp(rnammer_cmd)
# preparing softmasking
call_sp("cat {0}/rnammer/genome.masked.rnammer.gff | grep -v \"^#\" |cut -f 3,4,5 >> {0}/maskingfile.txt".format(options.workdir))
with open(progress_file_path, 'a') as progress_file:
progress_file.write("rnammer\t1\n")
def infernalRfam(options):
    """Annotate ncRNAs with Infernal cmscan against Rfam and append the hit
    coordinates to <workdir>/maskingfile.txt.

    Fix: both open() calls below were missing .format(options.workdir), so the
    literal strings "{0}/maskingfile.txt" and "{}/infernalRfam/genome.tblout"
    were used as file paths.
    """
    download_cmd = "mkdir {0}/infernalRfam; cd {0}/infernalRfam; wget ftp://ftp.ebi.ac.uk/pub/databases/Rfam/CURRENT/Rfam.cm.gz; gunzip Rfam.cm.gz; wget ftp://ftp.ebi.ac.uk/pub/databases/Rfam/CURRENT/Rfam.clanin".format(
        options.workdir)
    call_sp(download_cmd)
    create_db_cmd = "cd {}/infernalRfam; cmpress Rfam.cm; ln -s ../genome.fa.masked . ".format(options.workdir)
    call_sp(create_db_cmd)
    cmscan_cmd = "cd {}/infernalRfam; cmscan --rfam --cut_ga --nohmmonly --tblout genome.tblout --fmt 2 --cpu {} --clanin Rfam.clanin Rfam.cm genome.fa.masked 2>&1 |tee cmscan.output".format(options.workdir, options.n_threads)
    call_sp(cmscan_cmd)
    # Merge output with the masking coordinates collected by the other tools.
    out_file = open("{0}/maskingfile.txt".format(options.workdir), "a")
    with open("{}/infernalRfam/genome.tblout".format(options.workdir)) as in_file:
        # skip the two header lines of the tblout file
        in_file.readline()
        in_file.readline()
        for line in in_file:
            # Columns 3, 9, 10 of the --fmt 2 tblout output (assumed to be the
            # sequence name and hit start/end -- TODO confirm against cmscan docs).
            sub_cols = [3, 9, 10]
            line = line.split()
            cols = [line[i] for i in sub_cols]
            out_file.write("\t".join(cols) + '\n')
    out_file.close()
    # Write progress
    with open(progress_file_path, 'a') as progress_file:
        progress_file.write("infernalRfam\t1\n")
def tRNAscan(options):
    """Scan the masked genome for tRNA genes with tRNAscan-SE, then log progress."""
    scan_cmd = (
        "cd {}; mkdir tRNAscan; tRNAscan-SE -o tRNAscan/genome.masked.tRNAscan.out "
        "genome.fa.masked 2>&1 | tee tRNAscan/tRNAscan-SE.stdout"
    ).format(options.workdir)
    call_sp(scan_cmd)
    # Record completion so a resumed run can skip this stage.
    with open(progress_file_path, 'a') as progress_file:
        progress_file.write("tRNAscan\t1\n")
|
AnnotationPipeline
|
/AnnotationPipeline-0.18.tar.gz/AnnotationPipeline-0.18/rnaseqpipeline/Run.py
|
Run.py
|
# Annotations2Sub
下载和转换 Youtube 注释
Download and convert Youtube Annotation
[](https://pypi.org/project/Annotations2Sub/)
[](https://github.com/USED255/Annotations2Sub/actions/workflows/test.yml)
[](https://www.codacy.com/gh/USED255/Annotations2Sub/dashboard?utm_source=github.com&utm_medium=referral&utm_content=USED255/Annotations2Sub&utm_campaign=Badge_Grade)
[](https://codecov.io/gh/USED255/Annotations2Sub)
[](https://pypi.org/project/Annotations2Sub)
[](https://pypi.org/project/Annotations2Sub)
---
```bash
pip install Annotations2Sub
```
```help
usage: Annotations2Sub.py [-h] [-l] [-x 100] [-y 100] [-f Arial ] [-o Folder] [-d]
[-i invidious.domain] [-p] [-g] [-s] [-n] [-k] [-u] [-v]
[-V]
File or videoId [File or videoId ...]
Download and convert Youtube Annotation
positional arguments:
File or videoId File path or video ID of multiple files to be converted
optional arguments:
-h, --help Show this help message and exit
-l, --embrace-libass Embrace libass's quirks and features, and not specifying
this option will adapt to xy-vsfilter
-x 100, --transform-resolution-x 100
Transform resolution X
-y 100, --transform-resolution-y 100
Transform resolution Y
-f Arial, --font Arial
Specify font
-d, --download-for-archive
Try to download the Annotations file from Internet Archive
-D, --download-annotation-only
Download Annotation only
-p, --preview-video Preview video, requires mpv(https://mpv.io/)
-g, --generate-video Generate video, requires FFmpeg(https://ffmpeg.org/)
-i invidious-instances.domain, --invidious-instances invidious-instances.domain
Specify invidious instances (https://redirect.invidious.io/)
-s, --output-to-stdout
Output to stdout
-n, --no-overwrite-files
Do not overwrite files
-N, --no-keep-intermediate-files
Do not keep intermediate files
-O directory, --output-directory directory
Specify the output directory for the converted file
-o File, --output File
Save to this file
-v, --version Show version
-V, --verbose Show more messages
```
---
转换注释文件
```bash
Annotations2Sub 29-q7YnyUmY.xml
```
下载并转换注释文件
```bash
Annotations2Sub -d 29-q7YnyUmY
```
生成视频
```bash
Annotations2Sub -g 29-q7YnyUmY
```
|
Annotations2Sub
|
/Annotations2Sub-2.5.0.tar.gz/Annotations2Sub-2.5.0/README.md
|
README.md
|
import json
import numpy as np
import pandas as pd
from datetime import datetime
import time
import copy
import matplotlib.pyplot as plt
import ibm_boto3
from botocore.client import Config
import statsmodels.api as sm
from statsmodels.tsa.seasonal import STL
# IBM Cloud Object Storage client shared by all the read/write helpers below.
# SECURITY NOTE(review): the API key is hard-coded in source; it should be
# loaded from an environment variable or credentials store, and this key rotated.
icos_client = ibm_boto3.client(service_name='s3',
ibm_api_key_id='rYQmTc0U-gvfzOxV69jbUED3Fcy5QkuOL942TNeAWpT0',
ibm_auth_endpoint="https://iam.ng.bluemix.net/oidc/token",
config=Config(signature_version='oauth'),
endpoint_url='https://s3.us-east.cloud-object-storage.appdomain.cloud')
# Bucket with production/defect data, and the bucket holding model artifacts.
bucketName='magna-formet-bucket'
bucketName_model='magna-mig-models-data'
#-----------------------------
def Read_Problem_data(model_id,icos_client,bucketName,feature_data_original,feature_data_normal):
"""Load the DR defect report from COS and build validation sets for one weld.

Splits the reported parts (PSNs) by defect level (red/green/yellow), joins
them with the feature data, and removes red defects that also appear in the
"normal" training data. Returns a dict of the lists/DataFrames produced.

NOTE(review): assumes the DR CSV has columns PSN, WeldID, deviceId, Level,
Date, FailureType, ToolID and the feature data has LPSN, weld_id,
weld_record_index -- confirm against the upstream files.
"""
deviceId = model_id.split('_weld')[0] # Formet_FR4_STA60_LH_R1_weld32_toolB
weldId_with_toolID = model_id.split('_weld')[1]
weld_id = int(weldId_with_toolID.split('_')[0])
# Read Problem data (DR defect report CSV from COS)
DR_file_name = 'Production_Anomaly_data/Formet_FR4_STA60_MIG_Weld_DR_Defects.csv'
print(DR_file_name)
body = icos_client.get_object(Bucket=bucketName,Key=DR_file_name)['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
# NOTE(review): `types` and `__iter__` are not defined in this module; this
# branch would raise NameError if ever taken -- confirm it is dead code.
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
DR_file_df = pd.read_csv(body)
DR_file_df = DR_file_df.drop_duplicates(subset =['PSN','WeldID'],keep='first',inplace=False)
# PSNs reported for this weld/device, partitioned by defect severity level.
DR_PSN_red_list = DR_file_df['PSN'].loc[(DR_file_df.WeldID == weld_id) & (DR_file_df.deviceId == deviceId) & (DR_file_df.Level == 'red')].tolist()
DR_PSN_green_list = DR_file_df['PSN'].loc[(DR_file_df.WeldID == weld_id) & (DR_file_df.deviceId == deviceId) & (DR_file_df.Level == 'green')].tolist()
DR_PSN_yellow_list = DR_file_df['PSN'].loc[(DR_file_df.WeldID == weld_id) & (DR_file_df.deviceId == deviceId) & (DR_file_df.Level == 'yellow')].tolist()
DR_PSN_red_list = list(set(DR_PSN_red_list))
DR_PSN_red_list.sort()
DR_PSN_green_list = list(set(DR_PSN_green_list))
DR_PSN_green_list.sort()
DR_PSN_yellow_list = list(set(DR_PSN_yellow_list))
DR_PSN_yellow_list.sort()
#get RED validation data (label 1 = defect)
validate_df = feature_data_original.loc[lambda x: (x.LPSN.isin(DR_PSN_red_list)) & (x.weld_id == weld_id),:]
# NOTE(review): assignment into a .loc slice may emit SettingWithCopyWarning.
validate_df['label'] = 1
Data_PSN_for_red_list = validate_df['LPSN'].tolist()
Data_PSN_for_red_list = list(set(Data_PSN_for_red_list))
Data_PSN_for_red_list.sort()
Data_WRI_for_red_list = validate_df['weld_record_index'].apply(lambda y: int(y)).tolist()
Data_WRI_for_red_list = list(set(Data_WRI_for_red_list))
Data_WRI_for_red_list.sort()
# Green (label 0 = OK)
validate_green_df = feature_data_original.loc[lambda x: (x.LPSN.isin(DR_PSN_green_list)) & (x.weld_id == weld_id),:]
validate_green_df['label'] = 0
Data_PSN_for_green_list = validate_green_df['LPSN'].tolist()
Data_PSN_for_green_list = list(set(Data_PSN_for_green_list))
Data_PSN_for_green_list.sort()
Data_WRI_for_green_list = validate_green_df['weld_record_index'].apply(lambda y: int(y)).tolist()
Data_WRI_for_green_list = list(set(Data_WRI_for_green_list))
Data_WRI_for_green_list.sort()
# Yellow (also treated as label 0)
validate_yellow_df = feature_data_original.loc[lambda x: (x.LPSN.isin(DR_PSN_yellow_list)) & (x.weld_id == weld_id),:]
validate_yellow_df['label'] = 0 # .loc[row_indexer,col_indexer]
Data_PSN_for_yellow_list = validate_yellow_df['LPSN'].tolist()
Data_PSN_for_yellow_list = list(set(Data_PSN_for_yellow_list))
Data_PSN_for_yellow_list.sort()
Data_WRI_for_yellow_list = validate_yellow_df['weld_record_index'].apply(lambda y: int(y)).tolist()
Data_WRI_for_yellow_list = list(set(Data_WRI_for_yellow_list))
Data_WRI_for_yellow_list.sort()
# Concat validation df (red + green + yellow, de-duplicated by weld_record_index)
validate_green_df = pd.concat([validate_green_df, validate_yellow_df])
validate_df = pd.concat([validate_df, validate_green_df])
validate_df = validate_df.drop_duplicates(subset ="weld_record_index",keep='first',inplace=False)
Data_PSN_for_all_list = validate_df['LPSN'].tolist()
Data_PSN_for_all_list = list(set(Data_PSN_for_all_list))
Data_PSN_for_all_list.sort()
Data_WRI_for_all_list = validate_df['weld_record_index'].apply(lambda y: int(y)).tolist()
Data_WRI_for_all_list = list(set(Data_WRI_for_all_list))
Data_WRI_for_all_list.sort()
# Map weld_record_index -> PSN.
# NOTE(review): this pairs the i-th sorted WRI with the i-th sorted PSN,
# which is only correct if the two sorted orders correspond -- confirm.
PSN_WRI_dict = {}
for i in range(len(Data_PSN_for_all_list)):
PSN_WRI_dict[Data_WRI_for_all_list[i]] = Data_PSN_for_all_list[i]
#print(PSN_WRI_dict)
print('Data_PSN_for_all_list:',len(Data_PSN_for_all_list))
print('All WRI point (weld_record_index) list = ',len(Data_WRI_for_all_list))
print('Red WRI point list = ',len(Data_WRI_for_red_list))
print('Green WRI point list = ',len(Data_WRI_for_green_list))
print('Yellow WRI point list = ',len(Data_WRI_for_yellow_list))
validation_report_df= DR_file_df.loc[lambda x: (x.PSN.isin(Data_PSN_for_all_list)) & (x.WeldID == weld_id),:]
# NOTE(review): display() is only defined inside IPython/Jupyter.
display(validation_report_df[['Date','PSN','FailureType','Level','WeldID','ToolID']])
#
# remove the defect that belongs to normal data
#
# get defect from normal data
#get validation data
validate_normal_df = feature_data_normal.loc[lambda x: (x.LPSN.isin(DR_PSN_red_list)) & (x.weld_id == weld_id),:]
Data_PSN_for_normal_list = validate_normal_df['LPSN'].tolist()
Data_WRI_for_normal_list = validate_normal_df['weld_record_index'].apply(lambda y: int(y)).tolist()
Data_PSN_removeNormal_list = list(set(Data_PSN_for_all_list) - set(Data_PSN_for_normal_list))
Data_WRI_removeNormal_list = list(set(Data_WRI_for_all_list) - set(Data_WRI_for_normal_list))
print('Data_PSN_removeNormal_list=',len(Data_PSN_removeNormal_list))
print('Data_WRI_removeNormal_list=',len(Data_WRI_removeNormal_list))
validation_wo_normal_df = validate_df.loc[lambda x: (x.LPSN.isin(Data_PSN_removeNormal_list)),:]
#print(validation_wo_normal_df)
# Bundle everything into a single result dict.
Problem_object = {
'DR_file_df':DR_file_df,
'DR_PSN_red_list':DR_PSN_red_list,
'DR_PSN_green_list':DR_PSN_green_list,
'DR_PSN_yellow_list':DR_PSN_yellow_list,
'PSN_WRI_dict':PSN_WRI_dict,
'Data_WRI_for_red_list':Data_WRI_for_red_list,
'Data_WRI_for_green_list':Data_WRI_for_green_list,
'Data_WRI_for_yellow_list':Data_WRI_for_yellow_list,
'Data_PSN_for_red_list':Data_PSN_for_red_list,
'Data_PSN_for_green_list':Data_PSN_for_green_list,
'Data_PSN_for_yellow_list':Data_PSN_for_yellow_list,
'Data_WRI_for_all_list':Data_WRI_for_all_list,
'Data_PSN_for_all_list':Data_PSN_for_all_list,
'validate_df':validate_df,
'validation_wo_normal_df':validation_wo_normal_df,
'validation_report_df':validation_report_df
}
return Problem_object
def read_feature_data(model_id,feature_file_base_list,icos_client,bucketName,COS_feature_folder,COS_feature_normal_folder,read_normal_data='Y'):
"""Read and concatenate all feature CSVs (and optionally their "normal"
counterparts) for one model from COS.

Returns (feature_data_original, feature_data_normal); the latter is empty
when read_normal_data != 'Y'. Missing files are logged and skipped.
"""
#
# read all feature data set from the file list
#
feature_data_original = pd.DataFrame()
feature_data_normal = pd.DataFrame()
deviceId = model_id.split('_weld')[0] # Formet_FR4_STA60_LH_R1_weld32_toolB
toolID = 'tool'+model_id.split('_tool')[1]
for feature_file_base in feature_file_base_list:
# Read feature data
try:
# 'toolX' in the template file name is replaced with the concrete tool id.
feature_file_name = feature_file_base.replace('toolX',toolID)
inputFile_date = feature_file_base.split('_LincolnFANUC_')[1].split('_welding_')[0]
print(feature_file_name)
body = icos_client.get_object(Bucket=bucketName,Key=feature_file_name)['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
# NOTE(review): `types` and `__iter__` are undefined here; NameError if taken.
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
feature_data = pd.read_csv(body)
feature_data_original = pd.concat([feature_data_original, feature_data])
except Exception as e:
# Just print(e) is cleaner and more likely what you want,
# but if you insist on printing message specifically whenever possible...
print('###### Exception, feature_data file might not exist in COS:' + feature_file_name)
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
# Read normal data (same file, "_normal" suffix, in the normal-data folder)
if (read_normal_data == 'Y'):
try:
normal_file_name = feature_file_name[:-4] + '_normal.csv'
normal_file_name = normal_file_name.replace(COS_feature_folder,COS_feature_normal_folder)
print(normal_file_name)
body = icos_client.get_object(Bucket=bucketName,Key=normal_file_name)['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
normal_data = pd.read_csv(body)
feature_data_normal = pd.concat([feature_data_normal, normal_data])
except Exception as e:
# Just print(e) is cleaner and more likely what you want,
# but if you insist on printing message specifically whenever possible...
print('###### Exception, feature_data file might not exist in COS:' + normal_file_name)
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
feature_data_original = feature_data_original.reset_index(drop = True)
if (read_normal_data == 'Y'):
feature_data_normal = feature_data_normal.reset_index(drop = True)
print('feature_data_normal=',feature_data_normal.shape)
print('feature_data_original=',feature_data_original.shape)
return feature_data_original,feature_data_normal
def read_feature_normal_data(model_id,feature_normal_file_list,icos_client,bucketName):
"""Read and concatenate all "normal" feature CSVs for one model from COS.

Returns a single DataFrame; missing files are logged and skipped.
"""
#
# read all feature data set from the file list
#
feature_data_normal = pd.DataFrame()
deviceId = model_id.split('_weld')[0] # Formet_FR4_STA60_LH_R1_weld32_toolB
toolID = 'tool'+model_id.split('_tool')[1]
for feature_file_base in feature_normal_file_list:
# Read feature data
try:
# 'toolX' in the template file name is replaced with the concrete tool id.
normal_file_name = feature_file_base.replace('toolX',toolID)
print(normal_file_name)
body = icos_client.get_object(Bucket=bucketName,Key=normal_file_name)['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
# NOTE(review): `types` and `__iter__` are undefined here; NameError if taken.
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
normal_data = pd.read_csv(body)
feature_data_normal = pd.concat([feature_data_normal, normal_data])
except Exception as e:
# Just print(e) is cleaner and more likely what you want,
# but if you insist on printing message specifically whenever possible...
print('###### Exception, feature_data file might not exist in COS:' + normal_file_name)
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
feature_data_normal = feature_data_normal.reset_index(drop = True)
print('feature_data_normal=',feature_data_normal.shape)
return feature_data_normal
#--------------------------------------------------------------------
def export_training_validation_to_COS(model_id,icos_client,bucketName_model,feature_data_original,feature_data_normal,validate_df,validation_wo_normal_df,dateRange='',localPath='./Production_Anomaly_data/'):
    """Write the four training/validation DataFrames to local CSVs and upload
    each to COS.

    Files are keyed under "<model_id>/<prefix><model_id><dateRange>.csv" in
    bucketName_model; the local copies are kept under localPath.
    """
    # (file-name prefix, DataFrame) pairs, exported in this fixed order.
    exports = [
        ('Train_', feature_data_normal),
        ('Original_feature_data_', feature_data_original),
        ('validation_', validate_df),
        ('validation_wo_normal_', validation_wo_normal_df),
    ]
    for prefix, frame in exports:
        fileName = prefix + model_id + dateRange + '.csv'
        localfileName = localPath + fileName
        frame.to_csv(localfileName, index=False)
        # write to COS
        csv_fileName = model_id + '/' + fileName
        icos_client.upload_file(Filename=localfileName, Bucket=bucketName_model, Key=csv_fileName)
        print('write to COS:' + csv_fileName)
    return
def read_training_validation_from_COS(model_id,icos_client,bucketName,bucketName_model,testData_date_list,COS_folder_source,dateRange='',join_validation_data=False): #
    """Read the training, raw-feature, validation and per-day test data back from COS.

    The first four CSVs are read from bucketName_model under the '<model_id>/'
    prefix (as written by export_training_validation_to_COS). Test data is then
    assembled by concatenating one per-day feature file per entry of
    testData_date_list from bucketName; days whose file is missing are reported
    and skipped (best effort).

    Parameters
    ----------
    model_id : str
        '<deviceId>_weld<weldId>_<toolId>' identifier; parsed for file naming.
    icos_client : object
        COS client exposing boto3-style get_object(Bucket=..., Key=...).
    bucketName : str
        Bucket holding the per-day feature files.
    bucketName_model : str
        Bucket holding the model's train/validation artifacts.
    testData_date_list : list of str
        Dates ('YYYY-MM-DD') of the per-day feature files to load as test data.
    COS_folder_source : str
        COS folder prefix of the per-day feature files.
    dateRange : str, optional
        Suffix used in the train/validation file names.
    join_validation_data : bool, optional
        If True, append the validation rows (restricted to the test-data
        columns) to the test data before de-duplicating.

    Returns
    -------
    tuple of 5 pandas.DataFrame
        (feature_data_original, feature_data_normal, validate_df,
         validation_wo_normal_df, testData)
    """
    def _read_csv_from_cos(bucket, key):
        # Fetch one object from COS and parse it as CSV.
        body = icos_client.get_object(Bucket=bucket, Key=key)['Body']
        # add missing __iter__ method, so pandas accepts body as file-like object
        if not hasattr(body, "__iter__"):
            body.__iter__ = types.MethodType(__iter__, body)
        return pd.read_csv(body)

    # Training data
    feature_data_normal = _read_csv_from_cos(bucketName_model, model_id+'/Train_'+model_id+dateRange+'.csv')
    print('feature_data_normal = ', feature_data_normal.shape)
    # Raw (original) feature data
    feature_data_original = _read_csv_from_cos(bucketName_model, model_id+'/Original_feature_data_'+model_id+dateRange+'.csv')
    print('feature_data_original = ', feature_data_original.shape)
    # validation data
    validate_df = _read_csv_from_cos(bucketName_model, model_id+'/validation_'+model_id+dateRange+'.csv')
    print('Validation Data=', validate_df.shape)
    # validation_wo_normal data
    validation_wo_normal_df = _read_csv_from_cos(bucketName_model, model_id+'/validation_wo_normal_'+model_id+dateRange+'.csv')
    print('Validation_without_normal Data=', validation_wo_normal_df.shape)
    # === test data: one per-day feature file per requested date
    deviceId = model_id.split('_weld')[0]  # e.g. Formet_FR4_STA60_LH_R1 from ..._weld32_toolB
    weldId_with_toolID = model_id.split('_weld')[1]
    testData_join = pd.DataFrame()
    for weldDay in testData_date_list:
        # build the key outside the try so the except handler can always print it
        feature_file_name = COS_folder_source+deviceId+'_LincolnFANUC_'+weldDay+'_welding_stable_data_weldid_'+weldId_with_toolID+'_feature.csv'
        try:
            testData1 = _read_csv_from_cos(bucketName, feature_file_name)
            testData_join = pd.concat([testData_join, testData1])
            print('add testData:', feature_file_name)
        except Exception as e:
            # best effort: a missing daily file is reported and skipped
            print('###### Exception in read_training_validation_from_COS, COS name=', feature_file_name)
            print(getattr(e, 'message', e))
    if join_validation_data:
        # append validation rows (without the label column set) to the test data
        validate_noLabel_df = validate_df[testData_join.columns]
        testData_join = pd.concat([testData_join, validate_noLabel_df], sort=True).reset_index(drop=True)
    testData_join = testData_join.drop_duplicates(subset="weld_record_index", keep='first', inplace=False)
    testData = testData_join.sort_values(['weld_record_index'], inplace=False).reset_index(drop=True)
    print('test Data=', testData.shape)
    return feature_data_original,feature_data_normal,validate_df,validation_wo_normal_df,testData
#----------------------------------------------------------------
#
# add anomaly score as one column of feature csv file, and save it to COS
#----------------------------------------------------------------
def print_and_plot_anomaly_score(model_id,model_name,df_join_TF,Problem_object,anomaly_threshold,anomaly_score,percentile_point_95,plot_anomaly_score='YES'):
    """Summarize anomaly scores against the labeled problem report, plot and print.

    Prints counts of red (defect) / green (good) / yellow welds above and below
    the anomaly threshold and the 95th-percentile score, scatter-plots the raw
    anomaly scores and their STL trend with reference lines, and displays the
    problem-report rows matched to flagged anomalies and to normals.

    Parameters:
        model_id: '<deviceId>_weld<weldId>_<toolId>' identifier.
        model_name: human-readable name used in plot titles.
        df_join_TF: scored DataFrame; must contain 'weld_record_index',
            'anomaly_score' and 'LPSN' columns.
        Problem_object: dict bundle from the problem report (DR_file_df,
            PSN_WRI_dict and the red/green/yellow/all WRI lists).
        anomaly_threshold: score cut-off separating anomaly from normal.
        anomaly_score: single-column (column 0) DataFrame of scores.
        percentile_point_95: 95th-percentile score, drawn as a red dashed line.
        plot_anomaly_score: 'YES' to overlay the labeled validation welds.

    Returns:
        (num_red_points_above_95, num_anomaly_threshold): red welds scoring at
        or above the 95th percentile, and distinct defect PSNs above threshold.

    NOTE(review): depends on module-level names plt, STL, display and
    plot_problem_data; display() presumably requires an IPython/Jupyter
    environment — confirm before reusing outside a notebook.
    """
    deviceId = model_id.split('_weld')[0] # Formet_FR4_STA60_LH_R1_weld32_toolB
    weldId_with_toolID = model_id.split('_weld')[1]
    weld_id = int(weldId_with_toolID.split('_')[0])
    # get from problem report:
    DR_file_df = Problem_object['DR_file_df']
    DR_PSN_red_list = Problem_object['DR_PSN_red_list']
    DR_PSN_green_list = Problem_object['DR_PSN_green_list']
    PSN_WRI_dict = Problem_object['PSN_WRI_dict']
    Data_WRI_for_red_list = Problem_object['Data_WRI_for_red_list']
    Data_WRI_for_green_list = Problem_object['Data_WRI_for_green_list']
    Data_WRI_for_yellow_list = Problem_object['Data_WRI_for_yellow_list']
    Data_WRI_for_all_list = Problem_object['Data_WRI_for_all_list']
    print('percentile_point_95 : ',percentile_point_95)
    # distinct weld_record_index values at/above the 95th percentile and the threshold
    outlier95_list = list(set(df_join_TF.loc[lambda x: (x.anomaly_score >= percentile_point_95), 'weld_record_index']))
    anomaly_above_threshold_list = list(set(df_join_TF.loc[lambda x: (x.anomaly_score >= anomaly_threshold), 'weld_record_index']))
    #print('outlier95_list: ', outlier90_list)
    num_red_points_above_95 = len( set(outlier95_list) & set(Data_WRI_for_red_list) )
    num_anomaly_above_95 = len(outlier95_list)
    # known-defect (red) welds that the model flagged above the threshold
    anomaly_above_threshold_join_defects = list(set(anomaly_above_threshold_list) & set(Data_WRI_for_red_list))
    # normal
    normal_below_threshold_list = list(set(df_join_TF.loc[lambda x: (x.anomaly_score < anomaly_threshold), 'weld_record_index']))
    normal_green_list = list(set(normal_below_threshold_list) & set(Data_WRI_for_green_list))
    num_normal_below_threshold = len(normal_below_threshold_list)
    num_normal_green_list = len(normal_green_list)
    # yellow
    yellow_above_threshold = list(set(anomaly_above_threshold_list) & set(Data_WRI_for_yellow_list))
    num_yellow_above_threshold = len(yellow_above_threshold)
    # plot anomaly score
    plt.figure(figsize=(20,4))
    Y = anomaly_score.loc[: , 0]
    X = list(range(len(Y)))
    plt.scatter(X, Y)
    #plt.plot(anomaly_score)
    plt.title('Anomaly Score of each Weld based on Model ' + model_name)
    plt.xlabel('Observation')
    plt.ylabel('Anomaly Score')
    # reference lines: red dashed = 95th percentile, yellow dashed = threshold
    plt.axhline(y=percentile_point_95, ls="--", c="red")
    plt.axhline(y=anomaly_threshold, ls="--", c="yellow")
    if (plot_anomaly_score == 'YES'):
        # bad welds:
        plot_problem_data(Data_WRI_for_red_list, df_join_TF, 'anomaly_score', "red")
        # normal welds"
        plot_problem_data(Data_WRI_for_green_list, df_join_TF , 'anomaly_score', "green")
        plot_problem_data(Data_WRI_for_yellow_list, df_join_TF , 'anomaly_score', "orange")
    plt.show()
    # second figure: STL-decomposed trend of the score series
    period = 9
    stl_res = STL(anomaly_score[0],period=period,robust=True).fit()
    plt.figure(figsize=(20,4))
    plt.plot(stl_res.trend)
    plt.title('Anomaly Score Trend based on Model ' + model_name)
    plt.xlabel('Time Steps')
    plt.ylabel('Anomaly Trend')
    plt.axhline(y=percentile_point_95, ls="--", c="red")
    plt.axhline(y=anomaly_threshold, ls="--", c="yellow")
    if (plot_anomaly_score == 'YES'):
        # bad welds:
        plot_problem_data(Data_WRI_for_red_list, df_join_TF, 'anomaly_score', "red")
        # normal welds"
        plot_problem_data(Data_WRI_for_green_list, df_join_TF , 'anomaly_score', "green")
        plot_problem_data(Data_WRI_for_yellow_list, df_join_TF , 'anomaly_score', "orange")
    plt.show()
    # print
    df_join_PSN_TF = df_join_TF.rename(columns={"LPSN": "PSN"})
    # abnormal points
    num_total_test = len(anomaly_score)
    # map flagged defect welds back to problem-report PSNs for this weld id
    num_anomaly_list_PSN = [PSN_WRI_dict[x] for x in anomaly_above_threshold_join_defects]
    num_anomaly_df = DR_file_df.loc[lambda x: (x.PSN.isin(num_anomaly_list_PSN)) & (x.WeldID == weld_id),:]
    num_anomaly_df = num_anomaly_df.merge(df_join_PSN_TF[['PSN','anomaly_score']],on = ['PSN'], how = 'left')
    num_anomaly_df = num_anomaly_df.drop_duplicates(subset ="PSN",keep='first',inplace=False)
    num_anomaly_threshold = num_anomaly_df.shape[0]
    # normal points
    num_green_list_PSN = [PSN_WRI_dict[x] for x in normal_green_list]
    num_green_df = DR_file_df.loc[lambda x: (x.PSN.isin(num_green_list_PSN)) & (x.WeldID == weld_id),:]
    num_total_validation = len(Data_WRI_for_all_list)
    num_total_red = len(Data_WRI_for_red_list)
    num_total_green = len(Data_WRI_for_green_list)
    num_total_yellow = len(Data_WRI_for_yellow_list)
    num_green_df = num_green_df.merge(df_join_PSN_TF[['PSN','anomaly_score']],on = ['PSN'], how = 'left')
    num_green_df = num_green_df.drop_duplicates(subset ="PSN",keep='first',inplace=False)
    print('=========number of anomaly =============== \n total test #=',num_total_test,', total validation #=', num_total_validation,', total red=',num_total_red,', total green=',num_total_green,', total yellow= ',num_total_yellow)
    print('# red above percentile_95:',num_red_points_above_95,', bad num above threshold:',num_anomaly_threshold, ', # welds above percentile_95:',num_anomaly_above_95)
    print('---------number of normal ---------------- \n green below threshold:',num_normal_green_list,', total num welds below threshold:',num_normal_below_threshold,', # yellow above threshold:',num_yellow_above_threshold)
    display(num_anomaly_df[['Date','PSN','FailureType','anomaly_score','Level','ToolID']])
    print('--------------------------------\n')
    display(num_green_df[['Date','PSN','FailureType','anomaly_score','Level','ToolID']])
    return num_red_points_above_95,num_anomaly_threshold
#
#----------- anomaly score -----------------------
def get_anomaly_score(model_id,icos_client,bucketName,pipeline,selected_features,testData_date,COS_folder_source='Production_Lincoln_TSA_features_data/',localPath='./Production_Anomaly_data/'):
    """Score one day of feature data with the trained pipeline and persist the result.

    Reads the day's feature CSV from COS, scores the selected feature columns
    with pipeline.predict_proba, appends the scores as an 'anomaly_score'
    column, writes the result locally and uploads it back to COS.

    Parameters
    ----------
    model_id : str
        '<deviceId>_weld<weldId>_<toolId>' identifier; parsed for file naming.
    icos_client : object
        COS client exposing get_object(...) and upload_file(...).
    bucketName : str
        COS bucket for both the input feature file and the scored output.
    pipeline : object
        Trained model exposing predict_proba(DataFrame) -> scores.
    selected_features : list of str
        Feature columns fed to the pipeline.
    testData_date : str
        Date ('YYYY-MM-DD') of the feature file to score.
    COS_folder_source : str, optional
        Nominally the source folder — but see the FIXME below.
    localPath : str, optional
        Local staging directory for the output CSV; must already exist.

    Returns
    -------
    pandas.DataFrame
        The feature data with the added 'anomaly_score' column.
    """
    # FIXME: this hard-coded assignment overrides the COS_folder_source argument,
    # so the parameter (and its TSA default) has no effect — confirm intent.
    COS_folder_source= 'Production_Lincoln_features_data/' # original
    #COS_folder_source= 'Production_Lincoln_TSA_features_data/' # TSA
    deviceId = model_id.split('_weld')[0] # e.g. Formet_FR4_STA60_LH_R1 from ..._weld32_toolB
    weldId_with_toolID = model_id.split('_weld')[1]
    feature_file_name = COS_folder_source+deviceId+'_LincolnFANUC_'+testData_date+'_welding_stable_data_weldid_'+weldId_with_toolID+'_feature.csv'
    body = icos_client.get_object(Bucket=bucketName,Key=feature_file_name)['Body']
    # add missing __iter__ method, so pandas accepts body as file-like object
    if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
    feature_data = pd.read_csv(body)
    scoring_data = feature_data[selected_features]
    ###########################################################################
    anomaly_score_initial = pipeline.predict_proba(scoring_data)
    ###########################################################################
    #anomaly_threshold_initial = pipeline.get_best_thresholds()
    anomaly_score = pd.DataFrame(anomaly_score_initial)
    feature_data['anomaly_score'] = anomaly_score
    #
    # --- write the scoring result (features + score) to a csv file, then to COS
    #
    fileName = model_id+'_anomaly_score_'+testData_date+'.csv'
    localfileName=localPath+fileName
    feature_data.to_csv(localfileName,index=False)
    # write to COS
    csv_fileName='Production_Features_and_Anomaly_Score_data/'+ fileName
    icos_client.upload_file(Filename=localfileName,Bucket=bucketName,Key=csv_fileName)
    print('write to COS:'+csv_fileName)
    return feature_data
#-----------------------------------------
#------------------add TSA components
def add_TSA_trend_and_residual(feature_data,freqInput=125):
    """Augment feature_data with seasonal-decomposition components.

    For each column in a fixed feature list, runs sm.tsa.seasonal_decompose and
    appends the absolute residual as '<name>_resi' and the absolute trend as
    '<name>_trend'; finally drops the rows left with NaNs at the decomposition
    edges and resets the index.
    """
    # fixed set of signal features to decompose
    tsa_columns = ['current_rms_min','current_rms_max', 'current_rms_mean','current_rms_std','current_cd_max',
                   'voltage_rms_min','voltage_rms_max', 'voltage_rms_mean','voltage_rms_std','voltage_cd_max',
                   'motor_current_rms_min','motor_current_rms_max','motor_current_rms_mean','motor_current_rms_std',
                   'motor_current_rms_skew','wire_feed_speed_rms_std',
                   'power_rms_min','power_rms_max','power_rms_mean','power_rms_std', 'std_power','max_energy']
    for col in tsa_columns:
        # decompose the raw series and keep |residual| and |trend| as new columns
        decomposition = sm.tsa.seasonal_decompose(feature_data[col], freq=freqInput)
        feature_data[col + '_resi'] = pd.DataFrame(decomposition.resid).abs()
        feature_data[col + '_trend'] = pd.DataFrame(decomposition.trend).abs()
    # the decomposition leaves NaNs at the series edges; remove those rows
    feature_data = feature_data.dropna(how = 'any', axis = 0).reset_index(drop =True)
    return feature_data
from datetime import datetime, timedelta
import dateutil
def to_magna_datetime(collect_time):
    """Derive Magna-style timestamp strings from a collection datetime.

    Returns a 3-tuple:
      * the collection date as 'YYYY-MM-DD'
      * the start of the one-hour window ending at collect_time, 'YYYY-MM-DDTHH:MM'
      * collect_time itself, formatted 'YYYY-MM-DDTHH:MM'
    """
    window_start = collect_time - timedelta(hours=1)
    datetime_str = collect_time.strftime("%Y-%m-%d")
    time_start_str = window_start.strftime("%Y-%m-%dT%H:%M")
    time_end_str = collect_time.strftime("%Y-%m-%dT%H:%M")
    return datetime_str, time_start_str, time_end_str
def write_featureData_COS_scoring(model_id,feature_data,row_start,row_end,model_features,write_to_COS=1):
    """Slice rows [row_start, row_end) of feature_data and write them locally and to COS.

    Produces two CSVs: a dated 'feature' file archived under a per-day COS
    folder, and an undated 'scoring' file (restricted to model_features) that
    is overwritten in a fixed COS folder.

    Parameters:
        model_id: '<deviceId>_weld<weldId>_tool<t>' identifier; parsed for file names.
        feature_data: DataFrame containing an 'event_time' column with
            '%Y-%m-%dT%H:%M:%S.%fZ' timestamps.
        row_start, row_end: slice bounds of the rows to export.
        model_features: columns kept in the scoring file.
        write_to_COS: 1 to write and upload the files; any other value only
            computes and prints the target names.

    NOTE(review): the upload uses module-level globals icos_client and
    bucketName (not parameters), and assumes the local directory
    './Production_Anomaly_data/' already exists — confirm before reuse.
    """
    # deep copy so later mutation cannot touch the caller's DataFrame
    po_feature_data = copy.deepcopy(feature_data[row_start:row_end])
    po_scoring_data = po_feature_data[model_features]
    # timestamp of the last exported row
    # NOTE(review): .loc uses the index LABEL row_end-1 — assumes a default RangeIndex.
    welddate = po_feature_data.loc[row_end-1,'event_time']
    # parsed only to validate the expected timestamp format (value itself unused)
    date_time_obj = datetime.strptime(welddate, '%Y-%m-%dT%H:%M:%S.%fZ')
    print('welddate=',welddate)
    # convert to US/Eastern before deriving the Magna-style date/window strings
    dtobj = dateutil.parser.parse(welddate)
    est_welddate = dtobj.astimezone(dateutil.tz.gettz('US/Eastern'))
    print('est_welddate=',est_welddate) # 2020-09-30T00:01:15.762Z
    weld_date_now,weld_start,weld_end = to_magna_datetime(est_welddate)
    #print(weld_date_now,weld_end)
    # COS target folders: scoring file goes to a fixed folder, feature file to a per-day subfolder
    target_folder_scoring = 'Production_Lincoln_Fanuc_feature_data/'
    target_folder_feature = 'Production_Lincoln_Fanuc_feature_data/'+weld_date_now + '/'
    print('target_folder_feature=',target_folder_feature)
    deviceId = model_id.split('_weld')[0] # Formet_FR4_STA60_LH_R1_weld32_toolB
    weldId_with_toolID = model_id.split('_weld')[1]
    weld_id = int(weldId_with_toolID.split('_')[0])
    toolID = 'tool'+model_id.split('_tool')[1]
    # feature file carries the window-end timestamp; scoring file is undated (overwritten each run)
    target_feature_fileName = deviceId + '_welding_stable_data_weld'+str(weld_id)+'_'+toolID+'_'+weld_end+'_feature.csv'
    target_scoring_fileName = deviceId + '_welding_stable_data_weld'+str(weld_id)+'_'+toolID+'_feature.csv'
    print(target_feature_fileName,'\n',target_scoring_fileName)
    localPath='./Production_Anomaly_data/'
    local_feature_file_name = localPath + target_feature_fileName
    local_scoring_file_name = localPath + target_scoring_fileName
    print(local_feature_file_name,'\n',local_scoring_file_name)
    csv_feature_file_name = target_folder_feature + target_feature_fileName
    csv_scoring_file_name = target_folder_scoring + target_scoring_fileName
    print(csv_feature_file_name,'\n',csv_scoring_file_name)
    if (write_to_COS == 1):
        # save locally, then upload both files
        po_feature_data.to_csv(local_feature_file_name, index= False)
        po_scoring_data.to_csv(local_scoring_file_name, index= False)
        # to COS
        icos_client.upload_file(Filename=local_feature_file_name,Bucket=bucketName,Key=csv_feature_file_name)
        print('write to COS:'+csv_feature_file_name)
        icos_client.upload_file(Filename=local_scoring_file_name,Bucket=bucketName,Key=csv_scoring_file_name)
        print('write to COS:'+csv_scoring_file_name)
#
# ---------------------plot problem data
#
def plot_problem_data(problem_weld_list, feature_join_df , fea_name, color="red" ):
    """Overlay the given welds on the current matplotlib figure as colored points.

    For each weld_record_index in problem_weld_list, looks up its row(s) in
    feature_join_df and scatters the fea_name values (y) against the first
    matching positional index (x) in the given color.

    NOTE(review): X is only the FIRST matching index while Y may hold several
    rows, and a weld id absent from feature_join_df raises IndexError — assumes
    weld_record_index values are present and unique; confirm with callers.
    Uses the module-level plt.
    """
    for i in problem_weld_list:
        Y = feature_join_df.loc[feature_join_df.weld_record_index == i, fea_name ]
        X = feature_join_df.loc[feature_join_df.weld_record_index == i, : ].index.tolist()[0]
        plt.scatter(X, Y, s= 30, c = color ) # draw the scatter point(s)
|
Anomaly-Model
|
/Anomaly-Model-1.8.2.tar.gz/Anomaly-Model-1.8.2/detect_service/Anomaly_Model_Training_Service.py
|
Anomaly_Model_Training_Service.py
|
# Pypi-uploader
#### Upload your Python libraries to PyPi with a beautiful interface.
</br>
<a href="README_RU.md" ><img src="https://emojio.ru/images/twitter-64/1f1f7-1f1fa.png" width="30" height="30"></img>Читать на Русском</a>
</br></br>
This program is written using the <a href="https://pypi.org/project/PySimpleGUI/">PySimpleGUI</a> library.
Just run <a href="Pypi_uploader.py">Pypi_uploader.py</a> and follow the instructions on the screen.</br>
The program will automatically install the necessary libraries, compile your project into an archive and upload it to Pypi or Test Pypi.</br>
After uploading, the program can also clean up all of the generated files.
#### Important! Selected project folder should contain the ```__init__.py``` file! This is the main file of your library.
#### Folder hierarchy:
```
.../Any_folder_name
|__Pypi-uploader.py
|__Your_Project_Folder/
|__ __init__.py
|__Other_files...
```
<img src="Image.png"></img>
#### If you are using api key:
**Username:** ```__token__``` </br>
**Password:** *The token value, including the ```pypi-``` prefix*
### Possible mistakes:
<ul>
<li> Login or password is incorrect (or API token if you uploaded through it). </li>
<li> You signed up for PyPi, and you are trying to upload a project to Test Pypi (or vice versa). </li>
<li> A library with this name already exists, so if this is your library, change the version number. </li>
</ul>
|
AnonimTest
|
/AnonimTest-1.0.tar.gz/AnonimTest-1.0/README.md
|
README.md
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.