From 1be13d57dc8357576a8285c6dadc03db9e3ed7b0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Delafond?=
Date: Tue, 25 Aug 2015 12:27:35 +0200
Subject: Imported Upstream version 8.3.1

---
 lisp/ox-latex.el | 1924 +++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 1256 insertions(+), 668 deletions(-)

(limited to 'lisp/ox-latex.el')

diff --git a/lisp/ox-latex.el b/lisp/ox-latex.el
index 2c71f7d..c3eb1ea 100644
--- a/lisp/ox-latex.el
+++ b/lisp/ox-latex.el
@@ -1,6 +1,6 @@
 ;;; ox-latex.el --- LaTeX Back-End for Org Export Engine

-;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;; Copyright (C) 2011-2015 Free Software Foundation, Inc.

 ;; Author: Nicolas Goaziou
 ;; Keywords: outlines, hypermedia, calendar, wp
@@ -43,8 +43,6 @@
     (center-block . org-latex-center-block)
     (clock . org-latex-clock)
     (code . org-latex-code)
-    (comment . (lambda (&rest args) ""))
-    (comment-block . (lambda (&rest args) ""))
     (drawer . org-latex-drawer)
     (dynamic-block . org-latex-dynamic-block)
     (entity . org-latex-entity)
@@ -65,13 +63,13 @@
     (latex-fragment . org-latex-latex-fragment)
     (line-break . org-latex-line-break)
     (link . org-latex-link)
+    (node-property . org-latex-node-property)
     (paragraph . org-latex-paragraph)
     (plain-list . org-latex-plain-list)
     (plain-text . org-latex-plain-text)
     (planning . org-latex-planning)
-    (property-drawer . (lambda (&rest args) ""))
+    (property-drawer . org-latex-property-drawer)
     (quote-block . org-latex-quote-block)
-    (quote-section . org-latex-quote-section)
     (radio-target . org-latex-radio-target)
     (section . org-latex-section)
     (special-block . org-latex-special-block)
@@ -88,7 +86,10 @@
     (timestamp . org-latex-timestamp)
     (underline . org-latex-underline)
     (verbatim . org-latex-verbatim)
-    (verse-block . org-latex-verse-block))
+    (verse-block . org-latex-verse-block)
+    ;; Pseudo objects and elements.
+    (latex-math-block . org-latex-math-block)
+    (latex-matrices . org-latex-matrices))
   :export-block '("LATEX" "TEX")
   :menu-entry
   '(?l "Export to LaTeX"
@@ -99,13 +100,52 @@
        (lambda (a s v b)
         (if a (org-latex-export-to-pdf t s v b)
           (org-open-file (org-latex-export-to-pdf nil s v b)))))))
-  :options-alist '((:latex-class "LATEX_CLASS" nil org-latex-default-class t)
-                   (:latex-class-options "LATEX_CLASS_OPTIONS" nil nil t)
-                   (:latex-header "LATEX_HEADER" nil nil newline)
-                   (:latex-header-extra "LATEX_HEADER_EXTRA" nil nil newline)
-                   (:latex-hyperref-p nil "texht" org-latex-with-hyperref t)
-                   ;; Redefine regular options.
-                   (:date "DATE" nil "\\today" t)))
+  :filters-alist '((:filter-options . org-latex-math-block-options-filter)
+                   (:filter-parse-tree org-latex-math-block-tree-filter
+                                       org-latex-matrices-tree-filter))
+  :options-alist
+  '((:latex-class "LATEX_CLASS" nil org-latex-default-class t)
+    (:latex-class-options "LATEX_CLASS_OPTIONS" nil nil t)
+    (:latex-header "LATEX_HEADER" nil nil newline)
+    (:latex-header-extra "LATEX_HEADER_EXTRA" nil nil newline)
+    (:description "DESCRIPTION" nil nil parse)
+    (:keywords "KEYWORDS" nil nil parse)
+    (:subtitle "SUBTITLE" nil nil parse)
+    ;; Other variables.
+    (:latex-active-timestamp-format nil nil org-latex-active-timestamp-format)
+    (:latex-caption-above nil nil org-latex-caption-above)
+    (:latex-classes nil nil org-latex-classes)
+    (:latex-default-figure-position nil nil org-latex-default-figure-position)
+    (:latex-default-table-environment nil nil org-latex-default-table-environment)
+    (:latex-default-table-mode nil nil org-latex-default-table-mode)
+    (:latex-diary-timestamp-format nil nil org-latex-diary-timestamp-format)
+    (:latex-footnote-separator nil nil org-latex-footnote-separator)
+    (:latex-format-drawer-function nil nil org-latex-format-drawer-function)
+    (:latex-format-headline-function nil nil org-latex-format-headline-function)
+    (:latex-format-inlinetask-function nil nil org-latex-format-inlinetask-function)
+    (:latex-hyperref-template nil nil org-latex-hyperref-template t)
+    (:latex-image-default-height nil nil org-latex-image-default-height)
+    (:latex-image-default-option nil nil org-latex-image-default-option)
+    (:latex-image-default-width nil nil org-latex-image-default-width)
+    (:latex-inactive-timestamp-format nil nil org-latex-inactive-timestamp-format)
+    (:latex-inline-image-rules nil nil org-latex-inline-image-rules)
+    (:latex-link-with-unknown-path-format nil nil org-latex-link-with-unknown-path-format)
+    (:latex-listings nil nil org-latex-listings)
+    (:latex-listings-langs nil nil org-latex-listings-langs)
+    (:latex-listings-options nil nil org-latex-listings-options)
+    (:latex-minted-langs nil nil org-latex-minted-langs)
+    (:latex-minted-options nil nil org-latex-minted-options)
+    (:latex-prefer-user-labels nil nil org-latex-prefer-user-labels)
+    (:latex-subtitle-format nil nil org-latex-subtitle-format)
+    (:latex-subtitle-separate nil nil org-latex-subtitle-separate)
+    (:latex-table-scientific-notation nil nil org-latex-table-scientific-notation)
+    (:latex-tables-booktabs nil nil org-latex-tables-booktabs)
+    (:latex-tables-centered nil nil org-latex-tables-centered)
+    (:latex-text-markup-alist nil nil org-latex-text-markup-alist)
+    (:latex-title-command nil nil org-latex-title-command)
+    (:latex-toc-command nil nil org-latex-toc-command)
+    ;; Redefine regular options.
+    (:date "DATE" nil "\\today" parse)))
@@ -164,11 +204,112 @@
    ("uk" . 
"ukrainian")) "Alist between language code and corresponding Babel option.") +(defconst org-latex-polyglossia-language-alist + '(("am" "amharic") + ("ast" "asturian") + ("ar" "arabic") + ("bo" "tibetan") + ("bn" "bengali") + ("bg" "bulgarian") + ("br" "breton") + ("bt-br" "brazilian") + ("ca" "catalan") + ("cop" "coptic") + ("cs" "czech") + ("cy" "welsh") + ("da" "danish") + ("de" "german" "german") + ("de-at" "german" "austrian") + ("de-de" "german" "german") + ("dv" "divehi") + ("el" "greek") + ("en" "english" "usmax") + ("en-au" "english" "australian") + ("en-gb" "english" "uk") + ("en-nz" "english" "newzealand") + ("en-us" "english" "usmax") + ("eo" "esperanto") + ("es" "spanish") + ("et" "estonian") + ("eu" "basque") + ("fa" "farsi") + ("fi" "finnish") + ("fr" "french") + ("fu" "friulan") + ("ga" "irish") + ("gd" "scottish") + ("gl" "galician") + ("he" "hebrew") + ("hi" "hindi") + ("hr" "croatian") + ("hu" "magyar") + ("hy" "armenian") + ("id" "bahasai") + ("ia" "interlingua") + ("is" "icelandic") + ("it" "italian") + ("kn" "kannada") + ("la" "latin" "modern") + ("la-modern" "latin" "modern") + ("la-classic" "latin" "classic") + ("la-medieval" "latin" "medieval") + ("lo" "lao") + ("lt" "lithuanian") + ("lv" "latvian") + ("mr" "maranthi") + ("ml" "malayalam") + ("nl" "dutch") + ("nb" "norsk") + ("nn" "nynorsk") + ("nko" "nko") + ("no" "norsk") + ("oc" "occitan") + ("pl" "polish") + ("pms" "piedmontese") + ("pt" "portuges") + ("rm" "romansh") + ("ro" "romanian") + ("ru" "russian") + ("sa" "sanskrit") + ("hsb" "usorbian") + ("dsb" "lsorbian") + ("sk" "slovak") + ("sl" "slovenian") + ("se" "samin") + ("sq" "albanian") + ("sr" "serbian") + ("sv" "swedish") + ("syr" "syriac") + ("ta" "tamil") + ("te" "telugu") + ("th" "thai") + ("tk" "turkmen") + ("tr" "turkish") + ("uk" "ukrainian") + ("ur" "urdu") + ("vi" "vietnamese")) + "Alist between language code and corresponding Polyglossia option") + + + (defconst org-latex-table-matrix-macros '(("bordermatrix" . "\\cr") - ("qbordermatrix" . "\\cr") - ("kbordermatrix" . "\\\\")) + ("qbordermatrix" . "\\cr") + ("kbordermatrix" . "\\\\")) "Alist between matrix macros and their row ending.") +(defconst org-latex-math-environments-re + (format + "\\`[ \t]*\\\\begin{%s\\*?}" + (regexp-opt + '("equation" "eqnarray" "math" "displaymath" + "align" "gather" "multline" "flalign" "alignat" + "xalignat" "xxalignat" + "subequations" + ;; breqn + "dmath" "dseries" "dgroup" "darray" + ;; empheq + "empheq"))) + "Regexp of LaTeX math environments.") ;;; User Configurable Variables @@ -178,6 +319,79 @@ :tag "Org Export LaTeX" :group 'org-export) +;;;; Generic + +(defcustom org-latex-caption-above '(table) + "When non-nil, place caption string at the beginning of elements. +Otherwise, place it near the end. When value is a list of +symbols, put caption above selected elements only. Allowed +symbols are: `image', `table', `src-block' and `special-block'." + :group 'org-export-latex + :version "25.1" + :package-version '(Org . "8.3") + :type '(choice + (const :tag "For all elements" t) + (const :tag "For no element" nil) + (set :tag "For the following elements only" :greedy t + (const :tag "Images" image) + (const :tag "Tables" table) + (const :tag "Source code" src-block) + (const :tag "Special blocks" special-block)))) + +(defcustom org-latex-prefer-user-labels nil + "Use user-provided labels instead of internal ones when non-nil. 
+ +When this variable is non-nil, Org will use the value of +CUSTOM_ID property, NAME keyword or Org target as the key for the +\\label commands generated. + +By default, Org generates its own internal labels during LaTeX +export. This process ensures that the \\label keys are unique +and valid, but it means the keys are not available in advance of +the export process. + +Setting this variable gives you control over how Org generates +labels during LaTeX export, so that you may know their keys in +advance. One reason to do this is that it allows you to refer to +various elements using a single label both in Org's link syntax +and in embedded LaTeX code. + +For example, when this variable is non-nil, a headline like this: + + ** Some section + :PROPERTIES: + :CUSTOM_ID: sec:foo + :END: + This is section [[#sec:foo]]. + #+BEGIN_LATEX + And this is still section \\ref{sec:foo}. + #+END_LATEX + +will be exported to LaTeX as: + + \\subsection{Some section} + \\label{sec:foo} + This is section \\ref{sec:foo}. + And this is still section \\ref{sec:foo}. + +Note, however, that setting this variable introduces a limitation +on the possible values for CUSTOM_ID and NAME. When this +variable is non-nil, Org passes their value to \\label unchanged. +You are responsible for ensuring that the value is a valid LaTeX +\\label key, and that no other \\label commands with the same key +appear elsewhere in your document. (Keys may contain letters, +numbers, and the following punctuation: '_' '.' '-' ':'.) There +are no such limitations on CUSTOM_ID and NAME when this variable +is nil. + +For headlines that do not define the CUSTOM_ID property or +elements without a NAME, Org will continue to use its default +labeling scheme to generate labels and resolve links into proper +references." + :group 'org-export-latex + :type 'boolean + :version "25.1" + :package-version '(Org . "8.3")) ;;;; Preamble @@ -264,11 +478,15 @@ AUTO will automatically be replaced with a coding system derived from `buffer-file-coding-system'. See also the variable `org-latex-inputenc-alist' for a way to influence this mechanism. -Likewise, if your header contains \"\\usepackage[AUTO]{babel}\", -AUTO will be replaced with the language related to the language -code specified by `org-export-default-language', which see. Note -that constructions such as \"\\usepackage[french,AUTO,english]{babel}\" -are permitted. +Likewise, if your header contains \"\\usepackage[AUTO]{babel}\" +or \"\\usepackage[AUTO]{polyglossia}\", AUTO will be replaced +with the language related to the language code specified by +`org-export-default-language'. Note that constructions such as +\"\\usepackage[french,AUTO,english]{babel}\" are permitted. For +Polyglossia the language will be set via the macros +\"\\setmainlanguage\" and \"\\setotherlanguage\". See also +`org-latex-guess-babel-language' and +`org-latex-guess-polyglossia-language'. The sectioning structure ------------------------ @@ -328,11 +546,42 @@ are written as utf8 files." (defcustom org-latex-title-command "\\maketitle" "The command used to insert the title just after \\begin{document}. -If this string contains the formatting specification \"%s\" then -it will be used as a formatting string, passing the title as an -argument." 
+ +This format string may contain these elements: + + %a for AUTHOR keyword + %t for TITLE keyword + %s for SUBTITLE keyword + %k for KEYWORDS line + %d for DESCRIPTION line + %c for CREATOR line + %l for Language keyword + %L for capitalized language keyword + %D for DATE keyword + +If you need to use a \"%\" character, you need to escape it +like that: \"%%\". + +Setting :latex-title-command in publishing projects will take +precedence over this variable." :group 'org-export-latex - :type 'string) + :type '(string :tag "Format string")) + +(defcustom org-latex-subtitle-format "\\\\\\medskip\n\\large %s" + "Format string used for transcoded subtitle. +The format string should have at most one \"%s\"-expression, +which is replaced with the subtitle." + :group 'org-export-latex + :version "25.1" + :package-version '(Org . "8.3") + :type '(string :tag "Format string")) + +(defcustom org-latex-subtitle-separate nil + "Non-nil means the subtitle is not typeset as part of title." + :group 'org-export-latex + :version "25.1" + :package-version '(Org . "8.3") + :type 'boolean) (defcustom org-latex-toc-command "\\tableofcontents\n\n" "LaTeX command to set the table of contents, list of figures, etc. @@ -341,10 +590,32 @@ the toc:nil option, not to those generated with #+TOC keyword." :group 'org-export-latex :type 'string) -(defcustom org-latex-with-hyperref t - "Toggle insertion of \\hypersetup{...} in the preamble." +(defcustom org-latex-hyperref-template + "\\hypersetup{\n pdfauthor={%a},\n pdftitle={%t},\n pdfkeywords={%k}, + pdfsubject={%d},\n pdfcreator={%c}, \n pdflang={%L}}\n" + "Template for hyperref package options. + +This format string may contain these elements: + + %a for AUTHOR keyword + %t for TITLE keyword + %s for SUBTITLE keyword + %k for KEYWORDS line + %d for DESCRIPTION line + %c for CREATOR line + %l for Language keyword + %L for capitalized language keyword + %D for DATE keyword + +If you need to use a \"%\" character, you need to escape it +like that: \"%%\". + +Setting :latex-hyperref-template in publishing projects will take +precedence over this variable." :group 'org-export-latex - :type 'boolean) + :version "25.1" + :package-version '(Org . "8.3") + :type '(string :tag "Format string")) ;;;; Headline @@ -352,17 +623,15 @@ the toc:nil option, not to those generated with #+TOC keyword." 'org-latex-format-headline-default-function "Function for formatting the headline's text. -This function will be called with 5 arguments: -TODO the todo keyword (string or nil). +This function will be called with six arguments: +TODO the todo keyword (string or nil) TODO-TYPE the type of todo (symbol: `todo', `done', nil) PRIORITY the priority of the headline (integer or nil) -TEXT the main headline text (string). -TAGS the tags as a list of strings (list of strings or nil). +TEXT the main headline text (string) +TAGS the tags (list of strings or nil) +INFO the export options (plist) -The function result will be used in the section format string. - -Use `org-latex-format-headline-default-function' by default, -which format headlines like for Org version prior to 8.0." +The function result will be used in the section format string." :group 'org-export-latex :version "24.4" :package-version '(Org . 
"8.0") @@ -489,12 +758,14 @@ When modifying this variable, it may be useful to change :type '(choice (const :tag "Table" table) (const :tag "Matrix" math) (const :tag "Inline matrix" inline-math) - (const :tag "Verbatim" verbatim))) + (const :tag "Verbatim" verbatim)) + :safe (lambda (s) (memq s '(table math inline-math verbatim)))) (defcustom org-latex-tables-centered t "When non-nil, tables are exported in a center environment." :group 'org-export-latex - :type 'boolean) + :type 'boolean + :safe #'booleanp) (defcustom org-latex-tables-booktabs nil "When non-nil, display tables in a formal \"booktabs\" style. @@ -505,13 +776,8 @@ attributes." :group 'org-export-latex :version "24.4" :package-version '(Org . "8.0") - :type 'boolean) - -(defcustom org-latex-table-caption-above t - "When non-nil, place caption string at the beginning of the table. -Otherwise, place it near the end." - :group 'org-export-latex - :type 'boolean) + :type 'boolean + :safe #'booleanp) (defcustom org-latex-table-scientific-notation "%s\\,(%s)" "Format string to display numbers in scientific notation. @@ -526,11 +792,10 @@ When nil, no transformation is made." (string :tag "Format string") (const :tag "No formatting" nil))) - ;;;; Text markup (defcustom org-latex-text-markup-alist '((bold . "\\textbf{%s}") - (code . verb) + (code . protectedtexttt) (italic . "\\emph{%s}") (strike-through . "\\sout{%s}") (underline . "\\uline{%s}") @@ -550,6 +815,8 @@ to typeset and try to protect special characters. If no association can be found for a given markup, text will be returned as-is." :group 'org-export-latex + :version "25.1" + :package-version '(Org . "8.3") :type 'alist :options '(bold code italic strike-through underline verbatim)) @@ -575,44 +842,24 @@ The default function simply returns the value of CONTENTS." ;;;; Inlinetasks -(defcustom org-latex-format-inlinetask-function 'ignore +(defcustom org-latex-format-inlinetask-function + 'org-latex-format-inlinetask-default-function "Function called to format an inlinetask in LaTeX code. -The function must accept six parameters: - TODO the todo keyword, as a string - TODO-TYPE the todo type, a symbol among `todo', `done' and nil. - PRIORITY the inlinetask priority, as a string - NAME the inlinetask name, as a string. - TAGS the inlinetask tags, as a list of strings. - CONTENTS the contents of the inlinetask, as a string. - -The function should return the string to be exported. 
+The function must accept seven parameters: + TODO the todo keyword (string or nil) + TODO-TYPE the todo type (symbol: `todo', `done', nil) + PRIORITY the inlinetask priority (integer or nil) + NAME the inlinetask name (string) + TAGS the inlinetask tags (list of strings or nil) + CONTENTS the contents of the inlinetask (string or nil) + INFO the export options (plist) -For example, the variable could be set to the following function -in order to mimic default behaviour: - -\(defun org-latex-format-inlinetask \(todo type priority name tags contents\) -\"Format an inline task element for LaTeX export.\" - \(let ((full-title - \(concat - \(when todo - \(format \"\\\\textbf{\\\\textsf{\\\\textsc{%s}}} \" todo)) - \(when priority (format \"\\\\framebox{\\\\#%c} \" priority)) - title - \(when tags - \(format \"\\\\hfill{}\\\\textsc{:%s:}\" - \(mapconcat 'identity tags \":\"))))) - \(format (concat \"\\\\begin{center}\\n\" - \"\\\\fbox{\\n\" - \"\\\\begin{minipage}[c]{.6\\\\textwidth}\\n\" - \"%s\\n\\n\" - \"\\\\rule[.8em]{\\\\textwidth}{2pt}\\n\\n\" - \"%s\" - \"\\\\end{minipage}}\" - \"\\\\end{center}\") - full-title contents))" +The function should return the string to be exported." :group 'org-export-latex - :type 'function) + :type 'function + :version "25.1" + :package-version '(Org . "8.3")) ;; Src blocks @@ -640,7 +887,7 @@ the minted package to `org-latex-packages-alist', for example using customize, or with \(require 'ox-latex) - \(add-to-list 'org-latex-packages-alist '(\"\" \"minted\")) + \(add-to-list 'org-latex-packages-alist '(\"newfloat\" \"minted\")) In addition, it is necessary to install pygments \(http://pygments.org), and to configure the variable @@ -656,7 +903,8 @@ into previewing problems, please consult :type '(choice (const :tag "Use listings" t) (const :tag "Use minted" minted) - (const :tag "Export verbatim" nil))) + (const :tag "Export verbatim" nil)) + :safe (lambda (s) (memq s '(t nil minted)))) (defcustom org-latex-listings-langs '((emacs-lisp "Lisp") (lisp "Lisp") (clojure "Lisp") @@ -668,7 +916,8 @@ into previewing problems, please consult (shell-script "bash") (gnuplot "Gnuplot") (ocaml "Caml") (caml "Caml") - (sql "SQL") (sqlite "sql")) + (sql "SQL") (sqlite "sql") + (makefile "make")) "Alist mapping languages to their listing language counterpart. The key is a symbol, the major mode symbol without the \"-mode\". The value is the string that should be inserted as the language @@ -676,6 +925,8 @@ parameter for the listings package. If the mode name and the listings name are the same, the language does not need an entry in this list - but it does not hurt if it is present." :group 'org-export-latex + :version "24.4" + :package-version '(Org . "8.3") :type '(repeat (list (symbol :tag "Major mode ") @@ -697,7 +948,13 @@ will typeset the code in a small size font with underlined, bold black keywords. Note that the same options will be applied to blocks of all -languages." +languages. If you need block-specific options, you may use the +following syntax: + + #+ATTR_LATEX: :options key1=value1,key2=value2 + #+BEGIN_SRC + ... + #+END_SRC" :group 'org-export-latex :type '(repeat (list @@ -744,7 +1001,13 @@ will result in src blocks being exported with \\begin{minted}[bgcolor=bg,frame=lines]{} as the start of the minted environment. Note that the same -options will be applied to blocks of all languages." +options will be applied to blocks of all languages. 
If you need +block-specific options, you may use the following syntax: + + #+ATTR_LATEX: :options key1=value1,key2=value2 + #+BEGIN_SRC + ... + #+END_SRC" :group 'org-export-latex :type '(repeat (list @@ -840,11 +1103,14 @@ file name as its single argument." (function))) (defcustom org-latex-logfiles-extensions - '("aux" "idx" "log" "out" "toc" "nav" "snm" "vrb") + '("aux" "bcf" "blg" "fdb_latexmk" "fls" "figlist" "idx" "log" "nav" "out" + "ptc" "run.xml" "snm" "toc" "vrb" "xdv") "The list of file extensions to consider as LaTeX logfiles. -The logfiles will be remove if `org-latex-remove-logfiles' is +The logfiles will be removed if `org-latex-remove-logfiles' is non-nil." :group 'org-export-latex + :version "25.1" + :package-version '(Org . "8.3") :type '(repeat (string :tag "Extension"))) (defcustom org-latex-remove-logfiles t @@ -855,19 +1121,20 @@ logfiles to remove, set `org-latex-logfiles-extensions'." :group 'org-export-latex :type 'boolean) -(defcustom org-latex-known-errors - '(("Reference.*?undefined" . "[undefined reference]") - ("Citation.*?undefined" . "[undefined citation]") - ("Undefined control sequence" . "[undefined control sequence]") - ("^! LaTeX.*?Error" . "[LaTeX error]") - ("^! Package.*?Error" . "[package error]") - ("Runaway argument" . "Runaway argument")) +(defcustom org-latex-known-warnings + '(("Reference.*?undefined" . "[undefined reference]") + ("Runaway argument" . "[runaway argument]") + ("Underfull \\hbox" . "[underfull hbox]") + ("Overfull \\hbox" . "[overfull hbox]") + ("Citation.*?undefined" . "[undefined citation]") + ("Undefined control sequence" . "[undefined control sequence]")) "Alist of regular expressions and associated messages for the user. -The regular expressions are used to find possible errors in the -log of a latex-run." +The regular expressions are used to find possible warnings in the +log of a latex-run. These warnings will be reported after +calling `org-latex-compile'." :group 'org-export-latex - :version "24.4" - :package-version '(Org . "8.0") + :version "25.1" + :package-version '(Org . "8.3") :type '(repeat (cons (string :tag "Regexp") @@ -877,6 +1144,54 @@ log of a latex-run." ;;; Internal Functions +(defun org-latex--caption-above-p (element info) + "Non nil when caption is expected to be located above ELEMENT. +INFO is a plist holding contextual information." + (let ((above (plist-get info :latex-caption-above))) + (if (symbolp above) above + (let ((type (org-element-type element))) + (memq (if (eq type 'link) 'image type) above))))) + +(defun org-latex--label (datum info &optional force full) + "Return an appropriate label for DATUM. +DATUM is an element or a `target' type object. INFO is the +current export state, as a plist. + +Return nil if element DATUM has no NAME or VALUE affiliated +keyword or no CUSTOM_ID property, unless FORCE is non-nil. In +this case always return a unique label. + +Eventually, if FULL is non-nil, wrap label within \"\\label{}\"." 
+ (let* ((type (org-element-type datum)) + (user-label + (org-element-property + (case type + ((headline inlinetask) :CUSTOM_ID) + (target :value) + (otherwise :name)) + datum)) + (label + (and (or user-label force) + (if (and user-label (plist-get info :latex-prefer-user-labels)) + user-label + (concat (case type + (headline "sec:") + (table "tab:") + (latex-environment + (and (org-string-match-p + org-latex-math-environments-re + (org-element-property :value datum)) + "eq:")) + (paragraph + (and (org-element-property :caption datum) + "fig:"))) + (org-export-get-reference datum info)))))) + (cond ((not full) label) + (label (format "\\label{%s}%s" + label + (if (eq type 'target) "" "\n"))) + (t "")))) + (defun org-latex--caption/label-string (element info) "Return caption and label LaTeX string for ELEMENT. @@ -884,25 +1199,39 @@ INFO is a plist holding contextual information. If there's no caption nor label, return the empty string. For non-floats, see `org-latex--wrap-label'." - (let* ((label (org-element-property :name element)) - (label-str (if (not (org-string-nw-p label)) "" - (format "\\label{%s}" - (org-export-solidify-link-text label)))) + (let* ((label (org-latex--label element info nil t)) (main (org-export-get-caption element)) + (attr (org-export-read-attribute :attr_latex element)) + (type (org-element-type element)) + (nonfloat (or (and (plist-member attr :float) + (not (plist-get attr :float)) + main) + (and (eq type 'src-block) + (not (plist-get attr :float)) + (memq (plist-get info :latex-listings) + '(nil minted))))) (short (org-export-get-caption element t)) - (caption-from-attr-latex (org-export-read-attribute :attr_latex element :caption))) + (caption-from-attr-latex (plist-get attr :caption))) (cond ((org-string-nw-p caption-from-attr-latex) (concat caption-from-attr-latex "\n")) - ((and (not main) (equal label-str "")) "") - ((not main) (concat label-str "\n")) + ((and (not main) (equal label "")) "") + ((not main) (concat label "\n")) ;; Option caption format with short name. - (short (format "\\caption[%s]{%s%s}\n" - (org-export-data short info) - label-str - (org-export-data main info))) - ;; Standard caption format. - (t (format "\\caption{%s%s}\n" label-str (org-export-data main info)))))) + (t + (format (if nonfloat "\\captionof{%s}%s{%s%s}\n" + "\\caption%s%s{%s%s}\n") + (if nonfloat + (case type + (paragraph "figure") + (src-block (if (plist-get info :latex-listings) + "listing" + "figure")) + (t (symbol-name type))) + "") + (if short (format "[%s]" (org-export-data short info)) "") + label + (org-export-data main info)))))) (defun org-latex-guess-inputenc (header) "Set the coding system in inputenc to what the buffer is. @@ -958,6 +1287,59 @@ Return the new header." ", ") t nil header 1))))) +(defun org-latex-guess-polyglossia-language (header info) + "Set the Polyglossia language according to the LANGUAGE keyword. + +HEADER is the LaTeX header string. INFO is the plist used as +a communication channel. + +Insertion of guessed language only happens when the Polyglossia +package has been explicitly loaded. + +The argument to Polyglossia may be \"AUTO\" which is then +replaced with the language of the document or +`org-export-default-language'. Note, the language is really set +using \setdefaultlanguage and not as an option to the package. + +Return the new header." + (let ((language (plist-get info :language))) + ;; If no language is set or Polyglossia is not loaded, return + ;; HEADER as-is. 
+ (if (or (not (stringp language)) + (not (string-match + "\\\\usepackage\\(?:\\[\\([^]]+?\\)\\]\\){polyglossia}\n" + header))) + header + (let* ((options (org-string-nw-p (match-string 1 header))) + (languages (and options + ;; Reverse as the last loaded language is + ;; the main language. + (nreverse + (delete-dups + (save-match-data + (org-split-string + (replace-regexp-in-string + "AUTO" language options t) + ",[ \t]*")))))) + (main-language-set + (string-match-p "\\\\setmainlanguage{.*?}" header))) + (replace-match + (concat "\\usepackage{polyglossia}\n" + (mapconcat + (lambda (l) + (let ((l (or (assoc l org-latex-polyglossia-language-alist) + l))) + (format (if main-language-set "\\setotherlanguage%s{%s}\n" + (setq main-language-set t) + "\\setmainlanguage%s{%s}\n") + (if (and (consp l) (= (length l) 3)) + (format "[variant=%s]" (nth 2 l)) + "") + (nth 1 l)))) + languages + "")) + t t header 0))))) + (defun org-latex--find-verb-separator (s) "Return a character not used in string S. This is used to choose a separator for constructs like \\verb." @@ -978,52 +1360,50 @@ nil." options ",")) -(defun org-latex--wrap-label (element output) +(defun org-latex--wrap-label (element output info) "Wrap label associated to ELEMENT around OUTPUT, if appropriate. -This function shouldn't be used for floats. See +INFO is the current export state, as a plist. This function +should not be used for floats. See `org-latex--caption/label-string'." - (let ((label (org-element-property :name element))) - (if (not (and (org-string-nw-p output) (org-string-nw-p label))) output - (concat (format "\\label{%s}\n" (org-export-solidify-link-text label)) - output)))) - -(defun org-latex--text-markup (text markup) + (if (not (and (org-string-nw-p output) (org-element-property :name element))) + output + (concat (format "\\phantomsection\n\\label{%s}\n" + (org-latex--label element info)) + output))) + +(defun org-latex--protect-text (text) + "Protect special characters in string TEXT and return it." + (replace-regexp-in-string + "--\\|[\\{}$%&_#~^]" + (lambda (m) + (cond ((equal m "--") "-{}-") + ((equal m "\\") "\\textbackslash{}") + ((equal m "~") "\\textasciitilde{}") + ((equal m "^") "\\textasciicircum{}") + (t (concat "\\" m)))) + text nil t)) + +(defun org-latex--text-markup (text markup info) "Format TEXT depending on MARKUP text markup. -See `org-latex-text-markup-alist' for details." - (let ((fmt (cdr (assq markup org-latex-text-markup-alist)))) - (cond - ;; No format string: Return raw text. - ((not fmt) text) - ;; Handle the `verb' special case: Find and appropriate separator - ;; and use "\\verb" command. - ((eq 'verb fmt) - (let ((separator (org-latex--find-verb-separator text))) - (concat "\\verb" separator - (replace-regexp-in-string "\n" " " text) - separator))) - ;; Handle the `protectedtexttt' special case: Protect some - ;; special chars and use "\texttt{%s}" format string. - ((eq 'protectedtexttt fmt) - (let ((start 0) - (trans '(("\\" . "\\textbackslash{}") - ("~" . "\\textasciitilde{}") - ("^" . 
"\\textasciicircum{}"))) - (rtn "") - char) - (while (string-match "[\\{}$%&_#~^]" text) - (setq char (match-string 0 text)) - (if (> (match-beginning 0) 0) - (setq rtn (concat rtn (substring text 0 (match-beginning 0))))) - (setq text (substring text (1+ (match-beginning 0)))) - (setq char (or (cdr (assoc char trans)) (concat "\\" char)) - rtn (concat rtn char))) - (setq text (concat rtn text) - fmt "\\texttt{%s}") - (while (string-match "--" text) - (setq text (replace-match "-{}-" t t text))) - (format fmt text))) - ;; Else use format string. - (t (format fmt text))))) +INFO is a plist used as a communication channel. See +`org-latex-text-markup-alist' for details." + (let ((fmt (cdr (assq markup (plist-get info :latex-text-markup-alist))))) + (case fmt + ;; No format string: Return raw text. + ((nil) text) + ;; Handle the `verb' special case: Find an appropriate separator + ;; and use "\\verb" command. + (verb + (let ((separator (org-latex--find-verb-separator text))) + (concat "\\verb" separator + (replace-regexp-in-string "\n" " " text) + separator))) + ;; Handle the `protectedtexttt' special case: Protect some + ;; special chars and use "\texttt{%s}" format string. + (protectedtexttt + (format "\\texttt{%s}" (org-latex--protect-text text))) + ;; Else use format string. + (t (format fmt text))))) (defun org-latex--delayed-footnotes-definitions (element info) "Return footnotes definitions in ELEMENT as a string. @@ -1065,6 +1445,57 @@ just outside of it." (funcall search-refs element)) "")) +(defun org-latex--translate (s info) + "Translate string S according to specified language. +INFO is a plist used as a communication channel." + (org-export-translate s :latex info)) + +(defun org-latex--format-spec (info) + "Create a format-spec for document meta-data. +INFO is a plist used as a communication channel." + (let ((language (let ((lang (plist-get info :language))) + (or (cdr (assoc lang org-latex-babel-language-alist)) + lang)))) + `((?a . ,(org-export-data (plist-get info :author) info)) + (?t . ,(org-export-data (plist-get info :title) info)) + (?k . ,(org-export-data (org-latex--wrap-latex-math-block + (plist-get info :keywords) info) + info)) + (?d . ,(org-export-data (org-latex--wrap-latex-math-block + (plist-get info :description) info) + info)) + (?c . ,(plist-get info :creator)) + (?l . ,language) + (?L . ,(capitalize language)) + (?D . ,(org-export-get-date info))))) + +(defun org-latex--make-header (info) + "Return a formatted LaTeX header. +INFO is a plist used as a communication channel." + (let* ((class (plist-get info :latex-class)) + (class-options (plist-get info :latex-class-options)) + (header (nth 1 (assoc class (plist-get info :latex-classes)))) + (document-class-string + (and (stringp header) + (if (not class-options) header + (replace-regexp-in-string + "^[ \t]*\\\\documentclass\\(\\(\\[[^]]*\\]\\)?\\)" + class-options header t nil 1))))) + (if (not document-class-string) + (user-error "Unknown LaTeX class `%s'" class) + (org-latex-guess-polyglossia-language + (org-latex-guess-babel-language + (org-latex-guess-inputenc + (org-element-normalize-string + (org-splice-latex-header + document-class-string + org-latex-default-packages-alist + org-latex-packages-alist nil + (concat (org-element-normalize-string + (plist-get info :latex-header)) + (plist-get info :latex-header-extra))))) + info) + info)))) ;;; Template @@ -1073,34 +1504,14 @@ just outside of it." "Return complete document string after LaTeX conversion. CONTENTS is the transcoded contents string. 
INFO is a plist holding export options." - (let ((title (org-export-data (plist-get info :title) info))) + (let ((title (org-export-data (plist-get info :title) info)) + (spec (org-latex--format-spec info))) (concat ;; Time-stamp. (and (plist-get info :time-stamp-file) (format-time-string "%% Created %Y-%m-%d %a %H:%M\n")) ;; Document class and packages. - (let* ((class (plist-get info :latex-class)) - (class-options (plist-get info :latex-class-options)) - (header (nth 1 (assoc class org-latex-classes))) - (document-class-string - (and (stringp header) - (if (not class-options) header - (replace-regexp-in-string - "^[ \t]*\\\\documentclass\\(\\(\\[[^]]*\\]\\)?\\)" - class-options header t nil 1))))) - (if (not document-class-string) - (user-error "Unknown LaTeX class `%s'" class) - (org-latex-guess-babel-language - (org-latex-guess-inputenc - (org-element-normalize-string - (org-splice-latex-header - document-class-string - org-latex-default-packages-alist - org-latex-packages-alist nil - (concat (org-element-normalize-string - (plist-get info :latex-header)) - (plist-get info :latex-header-extra))))) - info))) + (org-latex--make-header info) ;; Possibly limit depth for headline numbering. (let ((sec-num (plist-get info :section-numbers))) (when (integerp sec-num) @@ -1117,40 +1528,46 @@ holding export options." ;; Date. (let ((date (and (plist-get info :with-date) (org-export-get-date info)))) (format "\\date{%s}\n" (org-export-data date info))) - ;; Title - (format "\\title{%s}\n" title) + ;; Title and subtitle. + (let* ((subtitle (plist-get info :subtitle)) + (formatted-subtitle + (when subtitle + (format (plist-get info :latex-subtitle-format) + (org-export-data subtitle info)))) + (separate (plist-get info :latex-subtitle-separate))) + (concat + (format "\\title{%s%s}\n" title + (if separate "" (or formatted-subtitle ""))) + (when (and separate subtitle) + (concat formatted-subtitle "\n")))) ;; Hyperref options. - (when (plist-get info :latex-hyperref-p) - (format "\\hypersetup{\n pdfkeywords={%s},\n pdfsubject={%s},\n pdfcreator={%s}}\n" - (or (plist-get info :keywords) "") - (or (plist-get info :description) "") - (if (not (plist-get info :with-creator)) "" - (plist-get info :creator)))) + (let ((template (plist-get info :latex-hyperref-template))) + (and (stringp template) + (format-spec template spec))) ;; Document start. "\\begin{document}\n\n" ;; Title command. - (org-element-normalize-string - (cond ((string= "" title) nil) - ((not (stringp org-latex-title-command)) nil) - ((string-match "\\(?:[^%]\\|^\\)%s" - org-latex-title-command) - (format org-latex-title-command title)) - (t org-latex-title-command))) + (let* ((title-command (plist-get info :latex-title-command)) + (command (and (stringp title-command) + (format-spec title-command spec)))) + (org-element-normalize-string + (cond ((not (plist-get info :with-title)) nil) + ((string= "" title) nil) + ((not (stringp command)) nil) + ((string-match "\\(?:[^%]\\|^\\)%s" command) + (format command title)) + (t command)))) ;; Table of contents. (let ((depth (plist-get info :with-toc))) (when depth (concat (when (wholenump depth) (format "\\setcounter{tocdepth}{%d}\n" depth)) - org-latex-toc-command))) + (plist-get info :latex-toc-command)))) ;; Document's body. contents ;; Creator. 
- (let ((creator-info (plist-get info :with-creator))) - (cond - ((not creator-info) "") - ((eq creator-info 'comment) - (format "%% %s\n" (plist-get info :creator))) - (t (concat (plist-get info :creator) "\n")))) + (and (plist-get info :with-creator) + (concat (plist-get info :creator) "\n")) ;; Document end. "\\end{document}"))) @@ -1164,7 +1581,7 @@ holding export options." "Transcode BOLD from Org to LaTeX. CONTENTS is the text with bold markup. INFO is a plist holding contextual information." - (org-latex--text-markup contents 'bold)) + (org-latex--text-markup contents 'bold info)) ;;;; Center Block @@ -1174,8 +1591,7 @@ contextual information." CONTENTS holds the contents of the center block. INFO is a plist holding contextual information." (org-latex--wrap-label - center-block - (format "\\begin{center}\n%s\\end{center}" contents))) + center-block (format "\\begin{center}\n%s\\end{center}" contents) info)) ;;;; Clock @@ -1187,10 +1603,8 @@ information." (concat "\\noindent" (format "\\textbf{%s} " org-clock-string) - (format org-latex-inactive-timestamp-format - (concat (org-translate-time - (org-element-property :raw-value - (org-element-property :value clock))) + (format (plist-get info :latex-inactive-timestamp-format) + (concat (org-timestamp-translate (org-element-property :value clock)) (let ((time (org-element-property :duration clock))) (and time (format " (%s)" time))))) "\\\\")) @@ -1202,7 +1616,7 @@ information." "Transcode a CODE object from Org to LaTeX. CONTENTS is nil. INFO is a plist used as a communication channel." - (org-latex--text-markup (org-element-property :value code) 'code)) + (org-latex--text-markup (org-element-property :value code) 'code info)) ;;;; Drawer @@ -1212,9 +1626,9 @@ channel." CONTENTS holds the contents of the block. INFO is a plist holding contextual information." (let* ((name (org-element-property :drawer-name drawer)) - (output (funcall org-latex-format-drawer-function + (output (funcall (plist-get info :latex-format-drawer-function) name contents))) - (org-latex--wrap-label drawer output))) + (org-latex--wrap-label drawer output info))) ;;;; Dynamic Block @@ -1223,7 +1637,7 @@ holding contextual information." "Transcode a DYNAMIC-BLOCK element from Org to LaTeX. CONTENTS holds the contents of the block. INFO is a plist holding contextual information. See `org-export-data'." - (org-latex--wrap-label dynamic-block contents)) + (org-latex--wrap-label dynamic-block contents info)) ;;;; Entity @@ -1232,8 +1646,7 @@ holding contextual information. See `org-export-data'." "Transcode an ENTITY object from Org to LaTeX. CONTENTS are the definition itself. INFO is a plist holding contextual information." - (let ((ent (org-element-property :latex entity))) - (if (org-element-property :latex-math-p entity) (format "$%s$" ent) ent))) + (org-element-property :latex entity)) ;;;; Example Block @@ -1243,10 +1656,16 @@ contextual information." CONTENTS is nil. INFO is a plist holding contextual information." (when (org-string-nw-p (org-element-property :value example-block)) - (org-latex--wrap-label - example-block - (format "\\begin{verbatim}\n%s\\end{verbatim}" - (org-export-format-code-default example-block info))))) + (let ((environment (or (org-export-read-attribute + :attr_latex example-block :environment) + "verbatim"))) + (org-latex--wrap-label + example-block + (format "\\begin{%s}\n%s\\end{%s}" + environment + (org-export-format-code-default example-block info) + environment) + info)))) ;;;; Export Block @@ -1276,7 +1695,8 @@ CONTENTS is nil. 
INFO is a plist holding contextual information." fixed-width (format "\\begin{verbatim}\n%s\\end{verbatim}" (org-remove-indentation - (org-element-property :value fixed-width))))) + (org-element-property :value fixed-width))) + info)) ;;;; Footnote Reference @@ -1288,7 +1708,7 @@ CONTENTS is nil. INFO is a plist holding contextual information." ;; Insert separator between two footnotes in a row. (let ((prev (org-export-get-previous-element footnote-reference info))) (when (eq (org-element-type prev) 'footnote-reference) - org-latex-footnote-separator)) + (plist-get info :latex-footnote-separator))) (cond ;; Use \footnotemark if the footnote has already been defined. ((not (org-export-footnote-first-reference-p footnote-reference info)) @@ -1296,9 +1716,8 @@ CONTENTS is nil. INFO is a plist holding contextual information." (org-export-get-footnote-number footnote-reference info))) ;; Use \footnotemark if reference is within another footnote ;; reference, footnote definition or table cell. - ((loop for parent in (org-export-get-genealogy footnote-reference) - thereis (memq (org-element-type parent) - '(footnote-reference footnote-definition table-cell))) + ((org-element-lineage footnote-reference + '(footnote-reference footnote-definition table-cell)) "\\footnotemark") ;; Otherwise, define it with \footnote command. (t @@ -1321,7 +1740,7 @@ holding contextual information." (let* ((class (plist-get info :latex-class)) (level (org-export-get-relative-level headline info)) (numberedp (org-export-numbered-headline-p headline info)) - (class-sectioning (assoc class org-latex-classes)) + (class-sectioning (assoc class (plist-get info :latex-classes))) ;; Section formatting will set two placeholders: one for ;; the title and the other for the contents. (section-fmt @@ -1365,16 +1784,12 @@ holding contextual information." (org-element-property :priority headline))) ;; Create the headline text along with a no-tag version. ;; The latter is required to remove tags from toc. - (full-text (funcall org-latex-format-headline-function - todo todo-type priority text tags)) + (full-text (funcall (plist-get info :latex-format-headline-function) + todo todo-type priority text tags info)) ;; Associate \label to the headline for internal links. - (headline-label - (format "\\label{sec-%s}\n" - (mapconcat 'number-to-string - (org-export-get-headline-number headline info) - "-"))) + (headline-label (org-latex--label headline info t t)) (pre-blanks - (make-string (org-element-property :pre-blank headline) 10))) + (make-string (org-element-property :pre-blank headline) ?\n))) (if (or (not section-fmt) (org-export-low-level-p headline info)) ;; This is a deep sub-tree: export it as a list item. Also ;; export as items headlines for which no section format has @@ -1404,15 +1819,32 @@ holding contextual information." ;; an alternative heading when possible, and when this is not ;; identical to the usual heading. (let ((opt-title - (funcall org-latex-format-headline-function + (funcall (plist-get info :latex-format-headline-function) todo todo-type priority (org-export-data-with-backend (org-export-get-alt-title headline info) section-back-end info) - (and (eq (plist-get info :with-tags) t) tags)))) - (if (and numberedp opt-title + (and (eq (plist-get info :with-tags) t) tags) + info)) + ;; Maybe end local TOC (see `org-latex-keyword'). 
+ (contents + (concat + contents + (let ((case-fold-search t) + (section + (let ((first (car (org-element-contents headline)))) + (and (eq (org-element-type first) 'section) first)))) + (org-element-map section 'keyword + (lambda (k) + (and (equal (org-element-property :key k) "TOC") + (let ((v (org-element-property :value k))) + (and (org-string-match-p "\\" v) + (org-string-match-p "\\" v) + (format "\\stopcontents[level-%d]" level))))) + info t))))) + (if (and opt-title (not (equal opt-title full-text)) - (string-match "\\`\\\\\\(.*?[^*]\\){" section-fmt)) + (string-match "\\`\\\\\\(.+?\\){" section-fmt)) (format (replace-match "\\1[%s]" nil nil section-fmt 1) ;; Replace square brackets with parenthesis ;; since square brackets are not supported in @@ -1427,7 +1859,7 @@ holding contextual information." (concat headline-label pre-blanks contents)))))))) (defun org-latex-format-headline-default-function - (todo todo-type priority text tags) + (todo todo-type priority text tags info) "Default format function for a headline. See `org-latex-format-headline-function' for details." (concat @@ -1435,7 +1867,9 @@ See `org-latex-format-headline-function' for details." (and priority (format "\\framebox{\\#%c} " priority)) text (and tags - (format "\\hfill{}\\textsc{%s}" (mapconcat 'identity tags ":"))))) + (format "\\hfill{}\\textsc{%s}" + (mapconcat (lambda (tag) (org-latex-plain-text tag info)) + tags ":"))))) ;;;; Horizontal Rule @@ -1456,7 +1890,8 @@ CONTENTS is nil. INFO is a plist holding contextual information." horizontal-rule (format "\\rule{%s}{%s}" (or (plist-get attr :width) "\\linewidth") - (or (plist-get attr :thickness) "0.5pt")))))) + (or (plist-get attr :thickness) "0.5pt")) + info)))) ;;;; Inline Src Block @@ -1467,34 +1902,33 @@ CONTENTS holds the contents of the item. INFO is a plist holding contextual information." (let* ((code (org-element-property :value inline-src-block)) (separator (org-latex--find-verb-separator code))) - (cond - ;; Do not use a special package: transcode it verbatim. - ((not org-latex-listings) - (concat "\\verb" separator code separator)) - ;; Use minted package. - ((eq org-latex-listings 'minted) - (let* ((org-lang (org-element-property :language inline-src-block)) - (mint-lang (or (cadr (assq (intern org-lang) - org-latex-minted-langs)) - (downcase org-lang))) - (options (org-latex--make-option-string - org-latex-minted-options))) - (concat (format "\\mint%s{%s}" - (if (string= options "") "" (format "[%s]" options)) - mint-lang) - separator code separator))) - ;; Use listings package. - (t - ;; Maybe translate language's name. - (let* ((org-lang (org-element-property :language inline-src-block)) - (lst-lang (or (cadr (assq (intern org-lang) - org-latex-listings-langs)) - org-lang)) - (options (org-latex--make-option-string - (append org-latex-listings-options - `(("language" ,lst-lang)))))) - (concat (format "\\lstinline[%s]" options) - separator code separator)))))) + (case (plist-get info :latex-listings) + ;; Do not use a special package: transcode it verbatim. + ((nil) (format "\\texttt{%s}" (org-latex--protect-text code))) + ;; Use minted package. 
+ (minted + (let* ((org-lang (org-element-property :language inline-src-block)) + (mint-lang (or (cadr (assq (intern org-lang) + (plist-get info :latex-minted-langs))) + (downcase org-lang))) + (options (org-latex--make-option-string + (plist-get info :latex-minted-options)))) + (concat (format "\\mint%s{%s}" + (if (string= options "") "" (format "[%s]" options)) + mint-lang) + separator code separator))) + ;; Use listings package. + (otherwise + ;; Maybe translate language's name. + (let* ((org-lang (org-element-property :language inline-src-block)) + (lst-lang (or (cadr (assq (intern org-lang) + (plist-get info :latex-listings-langs))) + org-lang)) + (options (org-latex--make-option-string + (append (plist-get info :latex-listings-options) + `(("language" ,lst-lang)))))) + (concat (format "\\lstinline[%s]" options) + separator code separator)))))) ;;;; Inlinetask @@ -1511,31 +1945,33 @@ holding contextual information." (tags (and (plist-get info :with-tags) (org-export-get-tags inlinetask info))) (priority (and (plist-get info :with-priority) - (org-element-property :priority inlinetask)))) - ;; If `org-latex-format-inlinetask-function' is provided, call it - ;; with appropriate arguments. - (if (not (eq org-latex-format-inlinetask-function 'ignore)) - (funcall org-latex-format-inlinetask-function - todo todo-type priority title tags contents) - ;; Otherwise, use a default template. - (org-latex--wrap-label - inlinetask - (let ((full-title - (concat - (when todo (format "\\textbf{\\textsf{\\textsc{%s}}} " todo)) - (when priority (format "\\framebox{\\#%c} " priority)) - title - (when tags (format "\\hfill{}\\textsc{:%s:}" - (mapconcat #'identity tags ":")))))) - (concat "\\begin{center}\n" - "\\fbox{\n" - "\\begin{minipage}[c]{.6\\textwidth}\n" - full-title "\n\n" - (and (org-string-nw-p contents) - (concat "\\rule[.8em]{\\textwidth}{2pt}\n\n" contents)) - "\\end{minipage}\n" - "}\n" - "\\end{center}")))))) + (org-element-property :priority inlinetask))) + (contents (concat (org-latex--label inlinetask info) contents))) + (funcall (plist-get info :latex-format-inlinetask-function) + todo todo-type priority title tags contents info))) + +(defun org-latex-format-inlinetask-default-function + (todo todo-type priority title tags contents info) + "Default format function for a inlinetasks. +See `org-latex-format-inlinetask-function' for details." + (let ((full-title + (concat (when todo (format "\\textbf{\\textsf{\\textsc{%s}}} " todo)) + (when priority (format "\\framebox{\\#%c} " priority)) + title + (when tags + (format "\\hfill{}\\textsc{:%s:}" + (mapconcat + (lambda (tag) (org-latex-plain-text tag info)) + tags ":")))))) + (concat "\\begin{center}\n" + "\\fbox{\n" + "\\begin{minipage}[c]{.6\\textwidth}\n" + full-title "\n\n" + (and (org-string-nw-p contents) + (concat "\\rule[.8em]{\\textwidth}{2pt}\n\n" contents)) + "\\end{minipage}\n" + "}\n" + "\\end{center}"))) ;;;; Italic @@ -1544,7 +1980,7 @@ holding contextual information." "Transcode ITALIC from Org to LaTeX. CONTENTS is the text with italic markup. INFO is a plist holding contextual information." - (org-latex--text-markup contents 'italic)) + (org-latex--text-markup contents 'italic info)) ;;;; Item @@ -1621,24 +2057,31 @@ CONTENTS is nil. INFO is a plist holding contextual information." 
((string= key "LATEX") value) ((string= key "INDEX") (format "\\index{%s}" value)) ((string= key "TOC") - (let ((value (downcase value))) + (let ((case-fold-search t)) (cond - ((string-match "\\" value) - (let ((depth (or (and (string-match "[0-9]+" value) - (string-to-number (match-string 0 value))) - (plist-get info :with-toc)))) - (concat - (when (wholenump depth) - (format "\\setcounter{tocdepth}{%s}\n" depth)) - "\\tableofcontents"))) - ((string= "tables" value) "\\listoftables") - ((string= "listings" value) - (cond - ((eq org-latex-listings 'minted) "\\listoflistings") - (org-latex-listings "\\lstlistoflistings") - ;; At the moment, src blocks with a caption are wrapped - ;; into a figure environment. - (t "\\listoffigures"))))))))) + ((org-string-match-p "\\" value) + (let* ((localp (org-string-match-p "\\" value)) + (parent (org-element-lineage keyword '(headline))) + (level (if (not (and localp parent)) 0 + (org-export-get-relative-level parent info))) + (depth + (and (string-match "\\<[0-9]+\\>" value) + (format + "\\setcounter{tocdepth}{%d}" + (+ (string-to-number (match-string 0 value)) level))))) + (if (and localp parent) + ;; Start local TOC, assuming package "titletoc" is + ;; required. + (format "\\startcontents[level-%d] +\\printcontents[level-%d]{}{0}{%s}" + level level (or depth "")) + (concat depth (and depth "\n") "\\tableofcontents")))) + ((org-string-match-p "\\" value) "\\listoftables") + ((org-string-match-p "\\" value) + (case (plist-get info :latex-listings) + ((nil) "\\listoffigures") + (minted "\\listoflistings") + (otherwise "\\lstlistoflistings"))))))))) ;;;; Latex Environment @@ -1647,10 +2090,9 @@ CONTENTS is nil. INFO is a plist holding contextual information." "Transcode a LATEX-ENVIRONMENT element from Org to LaTeX. CONTENTS is nil. INFO is a plist holding contextual information." (when (plist-get info :with-latex) - (let ((label (org-element-property :name latex-environment)) - (value (org-remove-indentation + (let ((value (org-remove-indentation (org-element-property :value latex-environment)))) - (if (not (org-string-nw-p label)) value + (if (not (org-element-property :name latex-environment)) value ;; Environment is labeled: label must be within the environment ;; (otherwise, a reference pointing to that element will count ;; the section instead). @@ -1658,8 +2100,7 @@ CONTENTS is nil. INFO is a plist holding contextual information." (insert value) (goto-char (point-min)) (forward-line) - (insert - (format "\\label{%s}\n" (org-export-solidify-link-text label))) + (insert (org-latex--label latex-environment info nil t)) (buffer-string)))))) @@ -1668,8 +2109,14 @@ CONTENTS is nil. INFO is a plist holding contextual information." (defun org-latex-latex-fragment (latex-fragment contents info) "Transcode a LATEX-FRAGMENT object from Org to LaTeX. CONTENTS is nil. INFO is a plist holding contextual information." - (when (plist-get info :with-latex) - (org-element-property :value latex-fragment))) + (let ((value (org-element-property :value latex-fragment))) + ;; Trim math markers since the fragment is enclosed within + ;; a latex-math-block object anyway. + (cond ((string-match "\\`\\(\\$\\{1,2\\}\\)\\([^\000]*\\)\\1\\'" value) + (match-string 2 value)) + ((string-match "\\`\\\\(\\([^\000]*\\)\\\\)\\'" value) + (match-string 1 value)) + (t value)))) ;;;; Line Break @@ -1692,36 +2139,41 @@ used as a communication channel." 
(expand-file-name raw-path)))) (filetype (file-name-extension path)) (caption (org-latex--caption/label-string parent info)) + (caption-above-p (org-latex--caption-above-p link info)) ;; Retrieve latex attributes from the element around. (attr (org-export-read-attribute :attr_latex parent)) (float (let ((float (plist-get attr :float))) - (cond ((and (not float) (plist-member attr :float)) nil) - ((string= float "wrap") 'wrap) + (cond ((string= float "wrap") 'wrap) + ((string= float "sideways") 'sideways) ((string= float "multicolumn") 'multicolumn) ((or float (org-element-property :caption parent) (org-string-nw-p (plist-get attr :caption))) - 'figure)))) + (if (and (plist-member attr :float) (not float)) + 'nonfloat + 'figure)) + ((and (not float) (plist-member attr :float)) nil)))) (placement (let ((place (plist-get attr :placement))) - (cond (place (format "%s" place)) - ((eq float 'wrap) "{l}{0.5\\textwidth}") - ((eq float 'figure) - (format "[%s]" org-latex-default-figure-position)) - (t "")))) + (cond + (place (format "%s" place)) + ((eq float 'wrap) "{l}{0.5\\textwidth}") + ((eq float 'figure) + (format "[%s]" (plist-get info :latex-default-figure-position))) + (t "")))) (comment-include (if (plist-get attr :comment-include) "%" "")) ;; It is possible to specify width and height in the ;; ATTR_LATEX line, and also via default variables. (width (cond ((plist-get attr :width)) ((plist-get attr :height) "") ((eq float 'wrap) "0.48\\textwidth") - (t org-latex-image-default-width))) + (t (plist-get info :latex-image-default-width)))) (height (cond ((plist-get attr :height)) ((or (plist-get attr :width) (memq float '(figure wrap))) "") - (t org-latex-image-default-height))) + (t (plist-get info :latex-image-default-height)))) (options (let ((opt (or (plist-get attr :options) - org-latex-image-default-option))) + (plist-get info :latex-image-default-option)))) (if (not (string-match "\\`\\[\\(.*\\)\\]\\'" opt)) opt (match-string 1 opt)))) image-code) @@ -1750,6 +2202,12 @@ used as a communication channel." (setq options (concat options ",width=" width))) (when (org-string-nw-p height) (setq options (concat options ",height=" height))) + (let ((search-option (org-element-property :search-option link))) + (when (and search-option + (equal filetype "pdf") + (org-string-match-p "\\`[0-9]+\\'" search-option) + (not (org-string-match-p "page=" options))) + (setq options (concat options ",page=" search-option)))) (setq image-code (format "\\includegraphics%s{%s}" (cond ((not (org-string-nw-p options)) "") @@ -1769,17 +2227,43 @@ used as a communication channel." ;; Return proper string, depending on FLOAT. 
(case float (wrap (format "\\begin{wrapfigure}%s -\\centering +%s\\centering +%s%s +%s\\end{wrapfigure}" + placement + (if caption-above-p caption "") + comment-include image-code + (if caption-above-p "" caption))) + (sideways (format "\\begin{sidewaysfigure} +%s\\centering %s%s -%s\\end{wrapfigure}" placement comment-include image-code caption)) +%s\\end{sidewaysfigure}" + (if caption-above-p caption "") + comment-include image-code + (if caption-above-p "" caption))) (multicolumn (format "\\begin{figure*}%s -\\centering +%s\\centering %s%s -%s\\end{figure*}" placement comment-include image-code caption)) +%s\\end{figure*}" + placement + (if caption-above-p caption "") + comment-include image-code + (if caption-above-p "" caption))) (figure (format "\\begin{figure}%s -\\centering +%s\\centering +%s%s +%s\\end{figure}" + placement + (if caption-above-p caption "") + comment-include image-code + (if caption-above-p "" caption))) + (nonfloat + (format "\\begin{center} %s%s -%s\\end{figure}" placement comment-include image-code caption)) +%s\\end{center}" + (if caption-above-p caption "") + image-code + (if caption-above-p "" caption))) (otherwise image-code)))) (defun org-latex-link (link desc info) @@ -1794,15 +2278,15 @@ INFO is a plist holding contextual information. See ;; Ensure DESC really exists, or set it to nil. (desc (and (not (string= desc "")) desc)) (imagep (org-export-inline-image-p - link org-latex-inline-image-rules)) + link (plist-get info :latex-inline-image-rules))) (path (cond - ((member type '("http" "https" "ftp" "mailto")) + ((member type '("http" "https" "ftp" "mailto" "doi")) (concat type ":" raw-path)) - ((and (string= type "file") (file-name-absolute-p raw-path)) - (concat "file:" raw-path)) - (t raw-path))) - protocol) + ((string= type "file") (org-export-file-uri raw-path)) + (t raw-path)))) (cond + ;; Link type is handled by a special function. + ((org-export-custom-protocol-maybe link desc 'latex)) ;; Image file. (imagep (org-latex--inline-image link info)) ;; Radio link: Transcode target's contents and use them as link's @@ -1811,8 +2295,7 @@ INFO is a plist holding contextual information. See (let ((destination (org-export-resolve-radio-link link info))) (if (not destination) desc (format "\\hyperref[%s]{%s}" - (org-export-solidify-link-text - (org-element-property :value destination)) + (org-export-get-reference destination info) desc)))) ;; Links pointing to a headline: Find destination and build ;; appropriate referencing command. @@ -1826,8 +2309,8 @@ INFO is a plist holding contextual information. See (if desc (format "\\href{%s}{%s}" destination desc) (format "\\url{%s}" destination))) ;; Fuzzy link points nowhere. - ('nil - (format org-latex-link-with-unknown-path-format + ((nil) + (format (plist-get info :latex-link-with-unknown-path-format) (or desc (org-export-data (org-element-property :raw-link link) info)))) @@ -1836,12 +2319,7 @@ INFO is a plist holding contextual information. See ;; number. Otherwise, display description or headline's ;; title. (headline - (let ((label - (format "sec-%s" - (mapconcat - 'number-to-string - (org-export-get-headline-number destination info) - "-")))) + (let ((label (org-latex--label destination info t))) (if (and (not desc) (org-export-numbered-headline-p destination info)) (format "\\ref{%s}" label) @@ -1851,23 +2329,32 @@ INFO is a plist holding contextual information. See (org-element-property :title destination) info)))))) ;; Fuzzy link points to a target. Do as above. 
(otherwise - (let ((path (org-export-solidify-link-text path))) - (if (not desc) (format "\\ref{%s}" path) - (format "\\hyperref[%s]{%s}" path desc))))))) + (let ((ref (org-latex--label destination info t))) + (if (not desc) (format "\\ref{%s}" ref) + (format "\\hyperref[%s]{%s}" ref desc))))))) ;; Coderef: replace link with the reference name or the ;; equivalent line number. ((string= type "coderef") (format (org-export-get-coderef-format path desc) (org-export-resolve-coderef path info))) - ;; Link type is handled by a special function. - ((functionp (setq protocol (nth 2 (assoc type org-link-protocols)))) - (funcall protocol (org-link-unescape path) desc 'latex)) ;; External link with a description part. ((and path desc) (format "\\href{%s}{%s}" path desc)) ;; External link without a description part. (path (format "\\url{%s}" path)) ;; No path, only description. Try to do something useful. - (t (format org-latex-link-with-unknown-path-format desc))))) + (t (format (plist-get info :latex-link-with-unknown-path-format) desc))))) + + +;;;; Node Property + +(defun org-latex-node-property (node-property contents info) + "Transcode a NODE-PROPERTY element from Org to LaTeX. +CONTENTS is nil. INFO is a plist holding contextual +information." + (format "%s:%s" + (org-element-property :key node-property) + (let ((value (org-element-property :value node-property))) + (if value (concat " " value) "")))) ;;;; Paragraph @@ -1898,7 +2385,8 @@ contextual information." latex-type (or (plist-get attr :options) "") contents - latex-type)))) + latex-type) + info))) ;;;; Plain Text @@ -1907,47 +2395,35 @@ contextual information." "Transcode a TEXT string from Org to LaTeX. TEXT is the string to transcode. INFO is a plist holding contextual information." - (let ((specialp (plist-get info :with-special-strings)) - (output text)) - ;; Protect %, #, &, $, _, { and }. - (while (string-match "\\([^\\]\\|^\\)\\([%$#&{}_]\\)" output) - (setq output - (replace-match - (format "\\%s" (match-string 2 output)) nil t output 2))) - ;; Protect ^. - (setq output - (replace-regexp-in-string - "\\([^\\]\\|^\\)\\(\\^\\)" "\\\\^{}" output nil nil 2)) - ;; Protect \. If special strings are used, be careful not to - ;; protect "\" in "\-" constructs. - (let ((symbols (if specialp "-%$#&{}^_\\" "%$#&{}^_\\"))) - (setq output + (let* ((specialp (plist-get info :with-special-strings)) + (output + ;; Turn LaTeX into \LaTeX{} and TeX into \TeX{}. + (let ((case-fold-search nil)) (replace-regexp-in-string - (format "\\(?:[^\\]\\|^\\)\\(\\\\\\)\\(?:[^%s]\\|$\\)" symbols) - "$\\backslash$" output nil t 1))) - ;; Protect ~. - (setq output - (replace-regexp-in-string - "\\([^\\]\\|^\\)\\(~\\)" "\\textasciitilde{}" output nil t 2)) + "\\<\\(?:La\\)?TeX\\>" "\\\\\\&{}" + ;; Protect ^, ~, %, #, &, $, _, { and }. Also protect \. + ;; However, if special strings are used, be careful not + ;; to protect "\" in "\-" constructs. + (replace-regexp-in-string + (concat "[%$#&{}_~^]\\|\\\\" (and specialp "\\([^-]\\|$\\)")) + (lambda (m) + (case (string-to-char m) + (?\\ "$\\\\backslash$\\1") + (?~ "\\\\textasciitilde{}") + (?^ "\\\\^{}") + (t "\\\\\\&"))) + text))))) ;; Activate smart quotes. Be sure to provide original TEXT string ;; since OUTPUT may have been modified. (when (plist-get info :with-smart-quotes) (setq output (org-export-activate-smart-quotes output :latex info text))) - ;; LaTeX into \LaTeX{} and TeX into \TeX{}. 
- (let ((case-fold-search nil) - (start 0)) - (while (string-match "\\<\\(\\(?:La\\)?TeX\\)\\>" output start) - (setq output (replace-match - (format "\\%s{}" (match-string 1 output)) nil t output) - start (match-end 0)))) ;; Convert special strings. (when specialp - (setq output - (replace-regexp-in-string "\\.\\.\\." "\\ldots{}" output nil t))) + (setq output (replace-regexp-in-string "\\.\\.\\." "\\\\ldots{}" output))) ;; Handle break preservation if required. (when (plist-get info :preserve-breaks) (setq output (replace-regexp-in-string - "\\(\\\\\\\\\\)?[ \t]*\n" " \\\\\\\\\n" output))) + "\\(?:[ \t]*\\\\\\\\\\)?[ \t]*\n" "\\\\\n" output nil t))) ;; Return value. output)) @@ -1968,27 +2444,169 @@ information." (when closed (concat (format "\\textbf{%s} " org-closed-string) - (format org-latex-inactive-timestamp-format - (org-translate-time - (org-element-property :raw-value closed)))))) + (format (plist-get info :latex-inactive-timestamp-format) + (org-timestamp-translate closed))))) (let ((deadline (org-element-property :deadline planning))) (when deadline (concat (format "\\textbf{%s} " org-deadline-string) - (format org-latex-active-timestamp-format - (org-translate-time - (org-element-property :raw-value deadline)))))) + (format (plist-get info :latex-active-timestamp-format) + (org-timestamp-translate deadline))))) (let ((scheduled (org-element-property :scheduled planning))) (when scheduled (concat (format "\\textbf{%s} " org-scheduled-string) - (format org-latex-active-timestamp-format - (org-translate-time - (org-element-property :raw-value scheduled)))))))) + (format (plist-get info :latex-active-timestamp-format) + (org-timestamp-translate scheduled))))))) " ") "\\\\")) +;;;; Property Drawer + +(defun org-latex-property-drawer (property-drawer contents info) + "Transcode a PROPERTY-DRAWER element from Org to LaTeX. +CONTENTS holds the contents of the drawer. INFO is a plist +holding contextual information." + (and (org-string-nw-p contents) + (format "\\begin{verbatim}\n%s\\end{verbatim}" contents))) + + +;;;; Pseudo Element: LaTeX Matrices + +;; `latex-matrices' elements have the following properties: +;; `:caption', `:post-blank' and `:markup' (`inline', `equation' or +;; `math'). + +(defun org-latex--wrap-latex-matrices (data info) + "Merge contiguous tables with the same mode within a pseudo-element. +DATA is a parse tree or a secondary string. INFO is a plist +containing export options. Modify DATA by side-effect and return +it." + (org-element-map data 'table + (lambda (table) + (when (eq (org-element-property :type table) 'org) + (let ((mode (or (org-export-read-attribute :attr_latex table :mode) + (plist-get info :latex-default-table-mode)))) + (when (and (member mode '("inline-math" "math")) + ;; Do not wrap twice the same table. + (not (eq (org-element-type + (org-element-property :parent table)) + 'latex-matrices))) + (let* ((caption (and (not (string= mode "inline-math")) + (org-element-property :caption table))) + (matrices + (list 'latex-matrices + (list :caption caption + :markup + (cond ((string= mode "inline-math") 'inline) + (caption 'equation) + (t 'math))))) + (previous table) + (next (org-export-get-next-element table info))) + (org-element-insert-before matrices table) + ;; Swallow all contiguous tables sharing the same mode. 
+ (while (and + (zerop (or (org-element-property :post-blank previous) 0)) + (setq next (org-export-get-next-element previous info)) + (eq (org-element-type next) 'table) + (eq (org-element-property :type next) 'org) + (string= (or (org-export-read-attribute + :attr_latex next :mode) + (plist-get info :latex-default-table-mode)) + mode)) + (org-element-extract-element previous) + (org-element-adopt-elements matrices previous) + (setq previous next)) + (org-element-put-property + matrices :post-blank (org-element-property :post-blank previous)) + (org-element-extract-element previous) + (org-element-adopt-elements matrices previous)))))) + info) + data) + +(defun org-latex-matrices (matrices contents info) + "Transcode a MATRICES element from Org to LaTeX. +CONTENTS is a string. INFO is a plist used as a communication +channel." + (format (case (org-element-property :markup matrices) + (inline "\\(%s\\)") + (equation "\\begin{equation}\n%s\\end{equation}") + (t "\\[\n%s\\]")) + contents)) + +(defun org-latex-matrices-tree-filter (tree backend info) + (org-latex--wrap-latex-matrices tree info)) + +;;;; Pseudo Object: LaTeX Math Block + +;; `latex-math-block' objects have the following property: +;; `:post-blank'. + +(defun org-latex--wrap-latex-math-block (data info) + "Merge contiguous math objects in a pseudo-object container. +DATA is a parse tree or a secondary string. INFO is a plist +containing export options. Modify DATA by side-effect and return it." + (let ((valid-object-p + (function + ;; Non-nil when OBJ can be added to the latex math block. + (lambda (obj) + (case (org-element-type obj) + (entity (org-element-property :latex-math-p obj)) + (latex-fragment + (let ((value (org-element-property :value obj))) + (or (org-string-match-p "\\`\\\\([^\000]*\\\\)\\'" value) + (org-string-match-p "\\`\\$[^\000]*\\$\\'" value)))) + ((subscript superscript) t)))))) + (org-element-map data '(entity latex-fragment subscript superscript) + (lambda (object) + ;; Skip objects already wrapped. + (when (and (not (eq (org-element-type + (org-element-property :parent object)) + 'latex-math-block)) + (funcall valid-object-p object)) + (let ((math-block (list 'latex-math-block nil)) + (next-elements (org-export-get-next-element object info t)) + (last object)) + ;; Wrap MATH-BLOCK around OBJECT in DATA. + (org-element-insert-before math-block object) + (org-element-extract-element object) + (org-element-adopt-elements math-block object) + (when (zerop (or (org-element-property :post-blank object) 0)) + ;; MATH-BLOCK swallows consecutive math objects. + (catch 'exit + (dolist (next next-elements) + (if (not (funcall valid-object-p next)) (throw 'exit nil) + (org-element-extract-element next) + (org-element-adopt-elements math-block next) + ;; Eschew the case: \beta$x$ -> \(\betax\). + (unless (memq (org-element-type next) + '(subscript superscript)) + (org-element-put-property last :post-blank 1)) + (setq last next) + (when (> (or (org-element-property :post-blank next) 0) 0) + (throw 'exit nil)))))) + (org-element-put-property + math-block :post-blank (org-element-property :post-blank last))))) + info nil '(subscript superscript latex-math-block) t) + ;; Return updated DATA. 
+ data)) + +(defun org-latex-math-block-tree-filter (tree backend info) + (org-latex--wrap-latex-math-block tree info)) + +(defun org-latex-math-block-options-filter (info backend) + (dolist (prop '(:author :date :title) info) + (plist-put info prop + (org-latex--wrap-latex-math-block (plist-get info prop) info)))) + +(defun org-latex-math-block (math-block contents info) + "Transcode a MATH-BLOCK object from Org to LaTeX. +CONTENTS is a string. INFO is a plist used as a communication +channel." + (when (org-string-nw-p contents) + (format "\\(%s\\)" (org-trim contents)))) + ;;;; Quote Block (defun org-latex-quote-block (quote-block contents info) @@ -1996,18 +2614,7 @@ information." CONTENTS holds the contents of the block. INFO is a plist holding contextual information." (org-latex--wrap-label - quote-block - (format "\\begin{quote}\n%s\\end{quote}" contents))) - - -;;;; Quote Section - -(defun org-latex-quote-section (quote-section contents info) - "Transcode a QUOTE-SECTION element from Org to LaTeX. -CONTENTS is nil. INFO is a plist holding contextual information." - (let ((value (org-remove-indentation - (org-element-property :value quote-section)))) - (when value (format "\\begin{verbatim}\n%s\\end{verbatim}" value)))) + quote-block (format "\\begin{quote}\n%s\\end{quote}" contents) info)) ;;;; Radio Target @@ -2016,10 +2623,7 @@ CONTENTS is nil. INFO is a plist holding contextual information." "Transcode a RADIO-TARGET object from Org to LaTeX. TEXT is the text of the target. INFO is a plist holding contextual information." - (format "\\label{%s}%s" - (org-export-solidify-link-text - (org-element-property :value radio-target)) - text)) + (format "\\label{%s}%s" (org-export-get-reference radio-target info) text)) ;;;; Section @@ -2037,14 +2641,14 @@ holding contextual information." "Transcode a SPECIAL-BLOCK element from Org to LaTeX. CONTENTS holds the contents of the block. INFO is a plist holding contextual information." - (let ((type (downcase (org-element-property :type special-block))) - (opt (org-export-read-attribute :attr_latex special-block :options))) + (let ((type (org-element-property :type special-block)) + (opt (org-export-read-attribute :attr_latex special-block :options)) + (caption (org-latex--caption/label-string special-block info)) + (caption-above-p (org-latex--caption-above-p special-block info))) (concat (format "\\begin{%s}%s\n" type (or opt "")) - ;; Insert any label or caption within the block - ;; (otherwise, a reference pointing to that element will - ;; count the section instead). - (org-latex--caption/label-string special-block info) + (and caption-above-p caption) contents + (and (not caption-above-p) caption) (format "\\end{%s}" type)))) @@ -2057,6 +2661,7 @@ contextual information." (when (org-string-nw-p (org-element-property :value src-block)) (let* ((lang (org-element-property :language src-block)) (caption (org-element-property :caption src-block)) + (caption-above-p (org-latex--caption-above-p src-block info)) (label (org-element-property :name src-block)) (custom-env (and lang (cadr (assq (intern lang) @@ -2066,56 +2671,68 @@ contextual information." (new 0))) (retain-labels (org-element-property :retain-labels src-block)) (attributes (org-export-read-attribute :attr_latex src-block)) - (float (plist-get attributes :float))) + (float (plist-get attributes :float)) + (listings (plist-get info :latex-listings))) (cond ;; Case 1. No source fontification. 
- ((not org-latex-listings) + ((not listings) (let* ((caption-str (org-latex--caption/label-string src-block info)) (float-env - (cond ((and (not float) (plist-member attributes :float)) "%s") - ((string= "multicolumn" float) - (format "\\begin{figure*}[%s]\n%%s%s\n\\end{figure*}" - org-latex-default-figure-position - caption-str)) - ((or caption float) - (format "\\begin{figure}[H]\n%%s%s\n\\end{figure}" - caption-str)) + (cond ((string= "multicolumn" float) + (format "\\begin{figure*}[%s]\n%s%%s\n%s\\end{figure*}" + (plist-get info :latex-default-figure-position) + (if caption-above-p caption-str "") + (if caption-above-p "" caption-str))) + (caption (concat + (if caption-above-p caption-str "") + "%s" + (if caption-above-p "" (concat "\n" caption-str)))) (t "%s")))) (format float-env (concat (format "\\begin{verbatim}\n%s\\end{verbatim}" (org-export-format-code-default src-block info)))))) ;; Case 2. Custom environment. - (custom-env (format "\\begin{%s}\n%s\\end{%s}\n" - custom-env - (org-export-format-code-default src-block info) - custom-env)) + (custom-env + (let ((caption-str (org-latex--caption/label-string src-block info))) + (format "\\begin{%s}\n%s\\end{%s}\n" + custom-env + (concat (and caption-above-p caption-str) + (org-export-format-code-default src-block info) + (and (not caption-above-p) caption-str)) + custom-env))) ;; Case 3. Use minted package. - ((eq org-latex-listings 'minted) + ((eq listings 'minted) (let* ((caption-str (org-latex--caption/label-string src-block info)) (float-env - (cond ((and (not float) (plist-member attributes :float)) "%s") - ((string= "multicolumn" float) - (format "\\begin{listing*}\n%%s\n%s\\end{listing*}" - caption-str)) - ((or caption float) - (format "\\begin{listing}[H]\n%%s\n%s\\end{listing}" - caption-str)) - (t "%s"))) + (cond + ((string= "multicolumn" float) + (format "\\begin{listing*}\n%s%%s\n%s\\end{listing*}" + (if caption-above-p caption-str "") + (if caption-above-p "" caption-str))) + (caption + (concat (if caption-above-p caption-str "") + "%s" + (if caption-above-p "" (concat "\n" caption-str)))) + (t "%s"))) + (options (plist-get info :latex-minted-options)) (body (format "\\begin{minted}[%s]{%s}\n%s\\end{minted}" ;; Options. - (org-latex--make-option-string - (if (or (not num-start) - (assoc "linenos" org-latex-minted-options)) - org-latex-minted-options - (append - `(("linenos") - ("firstnumber" ,(number-to-string (1+ num-start)))) - org-latex-minted-options))) + (concat + (org-latex--make-option-string + (if (or (not num-start) (assoc "linenos" options)) + options + (append + `(("linenos") + ("firstnumber" ,(number-to-string (1+ num-start)))) + options))) + (let ((local-options (plist-get attributes :options))) + (and local-options (concat "," local-options)))) ;; Language. - (or (cadr (assq (intern lang) org-latex-minted-langs)) + (or (cadr (assq (intern lang) + (plist-get info :latex-minted-langs))) (downcase lang)) ;; Source code. (let* ((code-info (org-export-unravel-code src-block)) @@ -2142,7 +2759,9 @@ contextual information." ;; Case 4. Use listings package. (t (let ((lst-lang - (or (cadr (assq (intern lang) org-latex-listings-langs)) lang)) + (or (cadr (assq (intern lang) + (plist-get info :latex-listings-langs))) + lang)) (caption-str (when caption (let ((main (org-export-get-caption src-block)) @@ -2151,28 +2770,32 @@ contextual information." 
(format "{%s}" (org-export-data main info)) (format "{[%s]%s}" (org-export-data secondary info) - (org-export-data main info))))))) + (org-export-data main info)))))) + (lst-opt (plist-get info :latex-listings-options))) (concat ;; Options. (format "\\lstset{%s}\n" - (org-latex--make-option-string - (append - org-latex-listings-options - (cond - ((and (not float) (plist-member attributes :float)) nil) - ((string= "multicolumn" float) '(("float" "*"))) - ((and float (not (assoc "float" org-latex-listings-options))) - `(("float" ,org-latex-default-figure-position)))) - `(("language" ,lst-lang)) - (if label `(("label" ,label)) '(("label" " "))) - (if caption-str `(("caption" ,caption-str)) '(("caption" " "))) - (cond ((assoc "numbers" org-latex-listings-options) nil) - ((not num-start) '(("numbers" "none"))) - ((zerop num-start) '(("numbers" "left"))) - (t `(("numbers" "left") - ("firstnumber" - ,(number-to-string (1+ num-start))))))))) + (concat + (org-latex--make-option-string + (append + lst-opt + (cond + ((and (not float) (plist-member attributes :float)) nil) + ((string= "multicolumn" float) '(("float" "*"))) + ((and float (not (assoc "float" lst-opt))) + `(("float" ,(plist-get info :latex-default-figure-position))))) + `(("language" ,lst-lang)) + (if label `(("label" ,label)) '(("label" " "))) + (if caption-str `(("caption" ,caption-str)) '(("caption" " "))) + `(("captionpos" ,(if caption-above-p "t" "b"))) + (cond ((assoc "numbers" lst-opt) nil) + ((not num-start) '(("numbers" "none"))) + ((zerop num-start) '(("numbers" "left"))) + (t `(("firstnumber" ,(number-to-string (1+ num-start))) + ("numbers" "left")))))) + (let ((local-options (plist-get attributes :options))) + (and local-options (concat "," local-options))))) ;; Source code. (format "\\begin{lstlisting}\n%s\\end{lstlisting}" @@ -2210,7 +2833,7 @@ CONTENTS is nil. INFO is a plist holding contextual information." "Transcode STRIKE-THROUGH from Org to LaTeX. CONTENTS is the text with strike-through markup. INFO is a plist holding contextual information." - (org-latex--text-markup contents 'strike-through)) + (org-latex--text-markup contents 'strike-through info)) ;;;; Subscript @@ -2219,17 +2842,7 @@ holding contextual information." "Transcode a subscript or superscript object. OBJECT is an Org object. INFO is a plist used as a communication channel." - (let ((in-script-p - ;; Non-nil if object is already in a sub/superscript. - (let ((parent object)) - (catch 'exit - (while (setq parent (org-export-get-parent parent)) - (let ((type (org-element-type parent))) - (cond ((memq type '(subscript superscript)) - (throw 'exit t)) - ((memq type org-element-all-elements) - (throw 'exit nil)))))))) - (type (org-element-type object)) + (let ((type (org-element-type object)) (output "")) (org-element-map (org-element-contents object) (cons 'plain-text org-element-all-objects) @@ -2255,31 +2868,12 @@ channel." (let ((blank (org-element-property :post-blank obj))) (and blank (> blank 0) "\\ "))))))) info nil org-element-recursive-objects) - ;; Result. Do not wrap into math mode if already in a subscript - ;; or superscript. Do not wrap into curly brackets if OUTPUT is - ;; a single character. Also merge consecutive subscript and - ;; superscript into the same math snippet. 
- (concat (and (not in-script-p) - (let ((prev (org-export-get-previous-element object info))) - (or (not prev) - (not (eq (org-element-type prev) - (if (eq type 'subscript) 'superscript - 'subscript))) - (let ((blank (org-element-property :post-blank prev))) - (and blank (> blank 0))))) - "$") - (if (eq (org-element-type object) 'subscript) "_" "^") + ;; Result. Do not wrap into curly brackets if OUTPUT is a single + ;; character. + (concat (if (eq (org-element-type object) 'subscript) "_" "^") (and (> (length output) 1) "{") output - (and (> (length output) 1) "}") - (and (not in-script-p) - (or (let ((blank (org-element-property :post-blank object))) - (and blank (> blank 0))) - (not (eq (org-element-type - (org-export-get-next-element object info)) - (if (eq type 'subscript) 'superscript - 'subscript)))) - "$")))) + (and (> (length output) 1) "}")))) (defun org-latex-subscript (subscript contents info) "Transcode a SUBSCRIPT object from Org to LaTeX. @@ -2316,7 +2910,7 @@ contextual information." ;; "table.el" table. Convert it using appropriate tools. (org-latex--table.el-table table info) (let ((type (or (org-export-read-attribute :attr_latex table :mode) - org-latex-default-table-mode))) + (plist-get info :latex-default-table-mode)))) (cond ;; Case 1: Verbatim table. ((string= type "verbatim") @@ -2376,14 +2970,15 @@ This function assumes TABLE has `org' as its `:type' property and (alignment (org-latex--align-string table info)) ;; Determine environment for the table: longtable, tabular... (table-env (or (plist-get attr :environment) - org-latex-default-table-environment)) + (plist-get info :latex-default-table-environment))) ;; If table is a float, determine environment: table, table* ;; or sidewaystable. (float-env (unless (member table-env '("longtable" "longtabu")) (let ((float (plist-get attr :float))) (cond ((and (not float) (plist-member attr :float)) nil) - ((string= float "sidewaystable") "sidewaystable") + ((or (string= float "sidewaystable") + (string= float "sideways")) "sidewaystable") ((string= float "multicolumn") "table*") ((or float (org-element-property :caption table) @@ -2392,23 +2987,26 @@ This function assumes TABLE has `org' as its `:type' property and ;; Extract others display options. (fontsize (let ((font (plist-get attr :font))) (and font (concat font "\n")))) - (width (plist-get attr :width)) + ;; "tabular" environment doesn't allow to define a width. + (width (and (not (equal table-env "tabular")) (plist-get attr :width))) (spreadp (plist-get attr :spread)) - (placement (or (plist-get attr :placement) - (format "[%s]" org-latex-default-figure-position))) + (placement + (or (plist-get attr :placement) + (format "[%s]" (plist-get info :latex-default-figure-position)))) (centerp (if (plist-member attr :center) (plist-get attr :center) - org-latex-tables-centered))) + (plist-get info :latex-tables-centered))) + (caption-above-p (org-latex--caption-above-p table info))) ;; Prepare the final format string for the table. (cond ;; Longtable. 
((equal "longtable" table-env) (concat (and fontsize (concat "{" fontsize)) (format "\\begin{longtable}{%s}\n" alignment) - (and org-latex-table-caption-above + (and caption-above-p (org-string-nw-p caption) (concat caption "\\\\\n")) contents - (and (not org-latex-table-caption-above) + (and (not caption-above-p) (org-string-nw-p caption) (concat caption "\\\\\n")) "\\end{longtable}\n" @@ -2421,11 +3019,11 @@ This function assumes TABLE has `org' as its `:type' property and (format " %s %s " (if spreadp "spread" "to") width) "") alignment) - (and org-latex-table-caption-above + (and caption-above-p (org-string-nw-p caption) (concat caption "\\\\\n")) contents - (and (not org-latex-table-caption-above) + (and (not caption-above-p) (org-string-nw-p caption) (concat caption "\\\\\n")) "\\end{longtabu}\n" @@ -2434,9 +3032,15 @@ This function assumes TABLE has `org' as its `:type' property and (t (concat (cond (float-env (concat (format "\\begin{%s}%s\n" float-env placement) - (if org-latex-table-caption-above caption "") + (if caption-above-p caption "") (when centerp "\\centering\n") fontsize)) + ((and (not float-env) caption) + (concat + (and centerp "\\begin{center}\n" ) + (if caption-above-p caption "") + (cond ((and fontsize centerp) fontsize) + (fontsize (concat "{" fontsize))))) (centerp (concat "\\begin{center}\n" fontsize)) (fontsize (concat "{" fontsize))) (cond ((equal "tabu" table-env) @@ -2454,8 +3058,13 @@ This function assumes TABLE has `org' as its `:type' property and table-env))) (cond (float-env - (concat (if org-latex-table-caption-above "" caption) + (concat (if caption-above-p "" (concat "\n" caption)) (format "\n\\end{%s}" float-env))) + ((and (not float-env) caption) + (concat + (if caption-above-p "" (concat "\n" caption)) + (and centerp "\n\\end{center}") + (and fontsize (not centerp) "}"))) (centerp "\n\\end{center}") (fontsize "}"))))))) @@ -2492,7 +3101,7 @@ property." (incf n) (unless (= n 2) (setq output (replace-match "" nil nil output)))))) (let ((centerp (if (plist-member attr :center) (plist-get attr :center) - org-latex-tables-centered))) + (plist-get info :latex-tables-centered)))) (if (not centerp) output (format "\\begin{center}\n%s\n\\end{center}" output)))))) @@ -2503,12 +3112,10 @@ TABLE is the table type element to transcode. INFO is a plist used as a communication channel. This function assumes TABLE has `org' as its `:type' property and -`inline-math' or `math' as its `:mode' attribute.." - (let* ((caption (org-latex--caption/label-string table info)) - (attr (org-export-read-attribute :attr_latex table)) - (inlinep (equal (plist-get attr :mode) "inline-math")) +`inline-math' or `math' as its `:mode' attribute." + (let* ((attr (org-export-read-attribute :attr_latex table)) (env (or (plist-get attr :environment) - org-latex-default-table-environment)) + (plist-get info :latex-default-table-environment))) (contents (mapconcat (lambda (row) @@ -2519,38 +3126,18 @@ This function assumes TABLE has `org' as its `:type' property and (mapconcat (lambda (cell) (substring (org-element-interpret-data cell) 0 -1)) - (org-element-map row 'table-cell 'identity info) "&") + (org-element-map row 'table-cell #'identity info) "&") (or (cdr (assoc env org-latex-table-matrix-macros)) "\\\\") "\n"))) - (org-element-map table 'table-row 'identity info) "")) - ;; Variables related to math clusters (contiguous math tables - ;; of the same type). 
- (mode (org-export-read-attribute :attr_latex table :mode)) - (prev (org-export-get-previous-element table info)) - (next (org-export-get-next-element table info)) - (same-mode-p - (lambda (table) - ;; Non-nil when TABLE has the same mode as current table. - (string= (or (org-export-read-attribute :attr_latex table :mode) - org-latex-default-table-mode) - mode)))) + (org-element-map table 'table-row #'identity info) ""))) (concat - ;; Opening string. If TABLE is in the middle of a table cluster, - ;; do not insert any. - (cond ((and prev - (eq (org-element-type prev) 'table) - (memq (org-element-property :post-blank prev) '(0 nil)) - (funcall same-mode-p prev)) - nil) - (inlinep "\\(") - ((org-string-nw-p caption) (concat "\\begin{equation}\n" caption)) - (t "\\[")) ;; Prefix. - (or (plist-get attr :math-prefix) "") + (plist-get attr :math-prefix) ;; Environment. Also treat special cases. - (cond ((equal env "array") - (let ((align (org-latex--align-string table info))) - (format "\\begin{array}{%s}\n%s\\end{array}" align contents))) + (cond ((member env '("array" "tabular")) + (let ((align (make-string + (cdr (org-export-table-dimensions table info)) ?c))) + (format "\\begin{%s}{%s}\n%s\\end{%s}" env align contents env))) ((assoc env org-latex-table-matrix-macros) (format "\\%s%s{\n%s}" env @@ -2558,28 +3145,7 @@ This function assumes TABLE has `org' as its `:type' property and contents)) (t (format "\\begin{%s}\n%s\\end{%s}" env contents env))) ;; Suffix. - (or (plist-get attr :math-suffix) "") - ;; Closing string. If TABLE is in the middle of a table cluster, - ;; do not insert any. If it closes such a cluster, be sure to - ;; close the cluster with a string matching the opening string. - (cond ((and next - (eq (org-element-type next) 'table) - (memq (org-element-property :post-blank table) '(0 nil)) - (funcall same-mode-p next)) - nil) - (inlinep "\\)") - ;; Find cluster beginning to know which environment to use. - ((let ((cluster-beg table) prev) - (while (and (setq prev (org-export-get-previous-element - cluster-beg info)) - (memq (org-element-property :post-blank prev) - '(0 nil)) - (funcall same-mode-p prev)) - (setq cluster-beg prev)) - (and (or (org-element-property :caption cluster-beg) - (org-element-property :name cluster-beg)) - "\n\\end{equation}"))) - (t "\\]"))))) + (plist-get attr :math-suffix)))) ;;;; Table Cell @@ -2588,16 +3154,18 @@ This function assumes TABLE has `org' as its `:type' property and "Transcode a TABLE-CELL element from Org to LaTeX. CONTENTS is the cell contents. INFO is a plist used as a communication channel." - (concat (if (and contents - org-latex-table-scientific-notation - (string-match orgtbl-exp-regexp contents)) - ;; Use appropriate format string for scientific - ;; notation. - (format org-latex-table-scientific-notation - (match-string 1 contents) - (match-string 2 contents)) - contents) - (when (org-export-get-next-element table-cell info) " & "))) + (concat + (let ((scientific-format (plist-get info :latex-table-scientific-notation))) + (if (and contents + scientific-format + (string-match orgtbl-exp-regexp contents)) + ;; Use appropriate format string for scientific + ;; notation. + (format scientific-format + (match-string 1 contents) + (match-string 2 contents)) + contents)) + (when (org-export-get-next-element table-cell info) " & "))) ;;;; Table Row @@ -2606,44 +3174,62 @@ a communication channel." "Transcode a TABLE-ROW element from Org to LaTeX. CONTENTS is the contents of the row. INFO is a plist used as a communication channel." 
- ;; Rules are ignored since table separators are deduced from - ;; borders of the current row. - (when (eq (org-element-property :type table-row) 'standard) - (let* ((attr (org-export-read-attribute :attr_latex - (org-export-get-parent table-row))) - (longtablep (member (or (plist-get attr :environment) - org-latex-default-table-environment) - '("longtable" "longtabu"))) - (booktabsp (if (plist-member attr :booktabs) - (plist-get attr :booktabs) - org-latex-tables-booktabs)) - ;; TABLE-ROW's borders are extracted from its first cell. - (borders (org-export-table-cell-borders - (car (org-element-contents table-row)) info))) + (let* ((attr (org-export-read-attribute :attr_latex + (org-export-get-parent table-row))) + (booktabsp (if (plist-member attr :booktabs) (plist-get attr :booktabs) + (plist-get info :latex-tables-booktabs))) + (longtablep + (member (or (plist-get attr :environment) + (plist-get info :latex-default-table-environment)) + '("longtable" "longtabu")))) + (if (eq (org-element-property :type table-row) 'rule) + (cond + ((not booktabsp) "\\hline") + ((not (org-export-get-previous-element table-row info)) "\\toprule") + ((not (org-export-get-next-element table-row info)) "\\bottomrule") + ((and longtablep + (org-export-table-row-ends-header-p + (org-export-get-previous-element table-row info) info)) + "") + (t "\\midrule")) (concat ;; When BOOKTABS are activated enforce top-rule even when no ;; hline was specifically marked. - (cond ((and booktabsp (memq 'top borders)) "\\toprule\n") - ((and (memq 'top borders) (memq 'above borders)) "\\hline\n")) + (and booktabsp (not (org-export-get-previous-element table-row info)) + "\\toprule\n") contents "\\\\\n" (cond - ;; Special case for long tables. Define header and footers. + ;; Special case for long tables. Define header and footers. ((and longtablep (org-export-table-row-ends-header-p table-row info)) - (format "%s + (let ((columns (cdr (org-export-table-dimensions + (org-export-get-parent-table table-row) info)))) + (format "%s +\\endfirsthead +\\multicolumn{%d}{l}{%s} \\\\ +%s +%s \\\\\n +%s \\endhead -%s\\multicolumn{%d}{r}{Continued on next page} \\\\ +%s\\multicolumn{%d}{r}{%s} \\\\ \\endfoot \\endlastfoot" - (if booktabsp "\\midrule" "\\hline") - (if booktabsp "\\midrule" "\\hline") - ;; Number of columns. - (cdr (org-export-table-dimensions - (org-export-get-parent-table table-row) info)))) + (if booktabsp "\\midrule" "\\hline") + columns + (org-latex--translate "Continued from previous page" info) + (cond + ((not (org-export-table-row-starts-header-p table-row info)) + "") + (booktabsp "\\toprule\n") + (t "\\hline\n")) + contents + (if booktabsp "\\midrule" "\\hline") + (if booktabsp "\\midrule" "\\hline") + columns + (org-latex--translate "Continued on next page" info)))) ;; When BOOKTABS are activated enforce bottom rule even when ;; no hline was specifically marked. - ((and booktabsp (memq 'bottom borders)) "\\bottomrule") - ((and (memq 'bottom borders) (memq 'below borders)) "\\hline") - ((memq 'below borders) (if booktabsp "\\midrule" "\\hline"))))))) + ((and booktabsp (not (org-export-get-next-element table-row info))) + "\\bottomrule")))))) ;;;; Target @@ -2652,8 +3238,7 @@ a communication channel." "Transcode a TARGET object from Org to LaTeX. CONTENTS is nil. INFO is a plist holding contextual information." - (format "\\label{%s}" - (org-export-solidify-link-text (org-element-property :value target)))) + (format "\\label{%s}" (org-latex--label target info))) ;;;; Timestamp @@ -2662,13 +3247,14 @@ information." 
"Transcode a TIMESTAMP object from Org to LaTeX. CONTENTS is nil. INFO is a plist holding contextual information." - (let ((value (org-latex-plain-text - (org-timestamp-translate timestamp) info))) - (case (org-element-property :type timestamp) - ((active active-range) (format org-latex-active-timestamp-format value)) - ((inactive inactive-range) - (format org-latex-inactive-timestamp-format value)) - (otherwise (format org-latex-diary-timestamp-format value))))) + (let ((value (org-latex-plain-text (org-timestamp-translate timestamp) info))) + (format + (plist-get info + (case (org-element-property :type timestamp) + ((active active-range) :latex-active-timestamp-format) + ((inactive inactive-range) :latex-inactive-timestamp-format) + (otherwise :latex-diary-timestamp-format))) + value))) ;;;; Underline @@ -2677,7 +3263,7 @@ information." "Transcode UNDERLINE from Org to LaTeX. CONTENTS is the text with underline markup. INFO is a plist holding contextual information." - (org-latex--text-markup contents 'underline)) + (org-latex--text-markup contents 'underline info)) ;;;; Verbatim @@ -2686,7 +3272,8 @@ holding contextual information." "Transcode a VERBATIM object from Org to LaTeX. CONTENTS is nil. INFO is a plist used as a communication channel." - (org-latex--text-markup (org-element-property :value verbatim) 'verbatim)) + (org-latex--text-markup + (org-element-property :value verbatim) 'verbatim info)) ;;;; Verse Block @@ -2701,16 +3288,15 @@ contextual information." ;; character and change each white space at beginning of a line ;; into a space of 1 em. Also change each blank line with ;; a vertical space of 1 em. - (progn - (setq contents (replace-regexp-in-string - "^ *\\\\\\\\$" "\\\\vspace*{1em}" - (replace-regexp-in-string - "\\(\\\\\\\\\\)?[ \t]*\n" " \\\\\\\\\n" contents))) - (while (string-match "^[ \t]+" contents) - (let ((new-str (format "\\hspace*{%dem}" - (length (match-string 0 contents))))) - (setq contents (replace-match new-str nil t contents)))) - (format "\\begin{verse}\n%s\\end{verse}" contents)))) + (format "\\begin{verse}\n%s\\end{verse}" + (replace-regexp-in-string + "^[ \t]+" (lambda (m) (format "\\hspace*{%dem}" (length m))) + (replace-regexp-in-string + "^[ \t]*\\\\\\\\$" "\\vspace*{1em}" + (replace-regexp-in-string + "\\([ \t]*\\\\\\\\\\)?[ \t]*\n" "\\\\\n" + contents nil t) nil t) nil t)) + info)) @@ -2845,7 +3431,8 @@ Return PDF file name or an error if it couldn't be produced." (default-directory (if (file-name-absolute-p texfile) (file-name-directory full-name) default-directory)) - errors) + (time (current-time)) + warnings) (unless snippet (message (format "Processing LaTeX file %s..." texfile))) (save-window-excursion (cond @@ -2858,59 +3445,60 @@ Return PDF file name or an error if it couldn't be produced." ((consp org-latex-pdf-process) (let ((outbuf (and (not snippet) (get-buffer-create "*Org PDF LaTeX Output*")))) - (mapc - (lambda (command) - (shell-command + (dolist (command org-latex-pdf-process) + (shell-command + (replace-regexp-in-string + "%b" (shell-quote-argument base-name) (replace-regexp-in-string - "%b" (shell-quote-argument base-name) + "%f" (shell-quote-argument full-name) (replace-regexp-in-string - "%f" (shell-quote-argument full-name) - (replace-regexp-in-string - "%o" (shell-quote-argument out-dir) command t t) t t) t t) - outbuf)) - org-latex-pdf-process) + "%o" (shell-quote-argument out-dir) command t t) t t) t t) + outbuf)) ;; Collect standard errors from output buffer. 
- (setq errors (and (not snippet) (org-latex--collect-errors outbuf))))) + (setq warnings (and (not snippet) + (org-latex--collect-warnings outbuf))))) (t (error "No valid command to process to PDF"))) (let ((pdffile (concat out-dir base-name ".pdf"))) ;; Check for process failure. Provide collected errors if ;; possible. - (if (not (file-exists-p pdffile)) - (error (concat (format "PDF file %s wasn't produced" pdffile) - (when errors (concat ": " errors)))) + (if (or (not (file-exists-p pdffile)) + (time-less-p (nth 5 (file-attributes pdffile)) time)) + (error (format "PDF file %s wasn't produced" pdffile)) ;; Else remove log files, when specified, and signal end of ;; process to user, along with any error encountered. - (when (and (not snippet) org-latex-remove-logfiles) - (dolist (file (directory-files - out-dir t - (concat (regexp-quote base-name) - "\\(?:\\.[0-9]+\\)?" - "\\." - (regexp-opt org-latex-logfiles-extensions)))) - (delete-file file))) - (message (concat "Process completed" - (if (not errors) "." - (concat " with errors: " errors))))) + (unless snippet + (when org-latex-remove-logfiles + (dolist (file (directory-files + out-dir t + (concat (regexp-quote base-name) + "\\(?:\\.[0-9]+\\)?" + "\\." + (regexp-opt org-latex-logfiles-extensions)))) + (delete-file file))) + (message (concat "PDF file produced" + (cond + ((eq warnings 'error) " with errors.") + (warnings (concat " with warnings: " warnings)) + (t ".")))))) ;; Return output file name. pdffile)))) -(defun org-latex--collect-errors (buffer) - "Collect some kind of errors from \"pdflatex\" command output. - -BUFFER is the buffer containing output. - -Return collected error types as a string, or nil if there was -none." +(defun org-latex--collect-warnings (buffer) + "Collect some warnings from \"pdflatex\" command output. +BUFFER is the buffer containing output. Return collected +warnings types as a string, `error' if a LaTeX error was +encountered or nil if there was none." (with-current-buffer buffer (save-excursion (goto-char (point-max)) (when (re-search-backward "^[ \t]*This is .*?TeX.*?Version" nil t) - (let ((case-fold-search t) - (errors "")) - (dolist (latex-error org-latex-known-errors) - (when (save-excursion (re-search-forward (car latex-error) nil t)) - (setq errors (concat errors " " (cdr latex-error))))) - (and (org-string-nw-p errors) (org-trim errors))))))) + (if (re-search-forward "^!" nil t) 'error + (let ((case-fold-search t) + (warnings "")) + (dolist (warning org-latex-known-warnings) + (when (save-excursion (re-search-forward (car warning) nil t)) + (setq warnings (concat warnings " " (cdr warning))))) + (org-string-nw-p (org-trim warnings)))))))) ;;;###autoload (defun org-latex-publish-to-latex (plist filename pub-dir) -- cgit v1.2.1
Simple question: I made a button in my plugin settings that triggers a Craft.postActionRequest, but what should the controller return?
It does not need to return anything other than true (if the action succeeded) or a string (an error message if it did not), but if I return true, false, or anything other than a template to render, I get an error...
A little example:
my template is just
{{ response }}
And my controller
// correct -> no error, the rendered template is just empty
public function actionDeleteAccessToken()
{
    return $this->renderTemplate('srgoogleanalytics/tools/empty', array('response' => true));
}
but if I make something like this
public function actionDeleteAccessToken()
{
    return true;
}
I receive a PHP "file not found" error. What am I doing wrong?
If you're just looking to return true or an error message from your controller's action, then it should probably be something like:
$this->returnJson(array('success' => true));
Or on error:
$this->returnErrorJson($e->getMessage());
returnErrorJson will set an 'error' key and pass whatever message you use as its value.
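For context, here is a rough sketch of how the whole action could look inside a Craft 2 plugin controller. Only renderTemplate, returnJson and returnErrorJson come from the snippets above; the requirePostRequest/requireAjaxRequest guards are standard BaseController helpers, and the craft()->srGoogleAnalytics->deleteAccessToken() service call is made up for illustration:

public function actionDeleteAccessToken()
{
    $this->requirePostRequest();
    $this->requireAjaxRequest();

    try
    {
        // Hypothetical service call: replace with whatever actually deletes the token.
        craft()->srGoogleAnalytics->deleteAccessToken();

        // Craft.postActionRequest() receives this as a JSON response.
        $this->returnJson(array('success' => true));
    }
    catch (\Exception $e)
    {
        $this->returnErrorJson($e->getMessage());
    }
}

The callback you pass to Craft.postActionRequest() can then check response.success or response.error instead of expecting a rendered template.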
Loops and Closing or Deleting Orders
WHRoeder 2014.09.23 17:04 #
ggekko:
Hi experts, what is your opinion about this solution?
while(OrdersTotal()>0)
It means it is incompatible with every other EA (including itself) and manual trading. (Trades on other charts)
ggekko 2014.09.23 18:46 #
WHRoeder:
ggekko:
Hi experts, what is your opinion about this solution?
It means it is incompatible with every other EA (including itself) and manual trading. (Trades on other charts)
Of course. This is a total close all.
Sometimes there are situations (for example you want to close too many open trades immediately) when the "original" version doesn't work. This version is good for this.
RaptorUK 2014.09.23 19:59 #
ggekko:
Of course. This is a total close all.
Sometimes there are situations (for example you want to close too many open trades immediately) when the "original" version doesn't work. This version is good for this.
Why aren't you attempting to react to errors? Shouldn't you analyse the error and react accordingly? For some errors you can retry, for others you have to stop trying.
14967057 2015.02.02 14:23 #
Hi,
I have a question about while loops. I want to run the current EA on a loop as frequent as possible. So, my thought is to put the current code inside a while loop with a sleep function included inside that causes the while to only execute every 1 millisecond.
However, when I did this, (I have some Print("...") function calls put in which keeps track of what's happening on each loop), I see this freezing, and no more prints to the screen.
I'm wondering what the problem might be. Is it ok to run the EA on a loop as frequent as 1ms? Would there be an issue with trying to obtain the latest bid/ask for example from the server every millisecond? Would this put too much load there, causing the freeze?
Possibly something else my algorithm is doing is causing this, I'm not sure.
I have run it on 200ms cycles and still get the same problem (though not as immediately). Could there be an issue with printing too many lines to the log file?
thanks for any advice on this.
regards,
C.
gooly 2015.02.02 14:58 #
Can be a code problem - but without knowing the code?
WHRoeder 2015.02.02 16:27 #
14967057:
I have a question about while loops. I want to run the current EA on a loop as frequent as possible.
So, my thought is to put the current code inside a while loop with a sleep function included inside that causes the while to only execute every 1 millisecond.
Would there be an issue with trying to obtain the latest bid/ask.
Could there be an issue with printing too many lines to the log file?
1. This thread was about Closing and Deleting orders. Don't hijack threads for off topic questions. Post a new one.
2. Why do you want to continuously loop? Nothing is changing. All you are doing is wasting CPU time.
3. Return from start and when Bid/Ask changes, your start will be called as fast as possible.
4. Yes, you've filled all available ram with the lines, faster than Windows can put them to disk. All programs get paged out. Everything hangs.
14967057 2015.02.03 11:08 #
Hi,
Turns out, the problem I had was not due to encapsulating my EA code inside a while loop with a 1ms sleep. This part seems fine (regardless of whether I print to screen or not), as I have had it running for the last half hour without fault.
The problem was due to another sneaky while I had in the code which eventually caused the EA to get trapped in an infinite loop.
Thanks for the input, gooly and WHRoeder.
regards,
kraliczech 2015.02.18 23:27 #
Hello everybody. I read this topic and I have a question. I am trying to write code which deletes a pending order (only STOP, not LIMIT), but only if another pending order has been activated (i.e. it became OP_SELL or OP_BUY). My code is below, but there are mistakes in it and I don't know where. Could you look at my code please?
bool result;
int i, j, cmd, cmd2, total;
total = OrdersTotal();
for(i = 0; i < total; i++)
  {
   if(OrderSelect(i, SELECT_BY_POS, MODE_TRADES))
     {
      cmd = OrderType();
      if(cmd != OP_BUYSTOP && cmd != OP_SELLSTOP)   // take an order which has been activated
        {
         for(j = 0; j < total; j++)                 // another loop variable j
           {
            if(OrderSelect(j, SELECT_BY_POS, MODE_TRADES))
              {
               cmd2 = OrderType();
               if(cmd2 != OP_BUY && cmd2 != OP_SELL)
                 {
                  OrderPrint();
                  // first pending order became OP_SELL or OP_BUY, the second pending order will be deleted
                  result = OrderDelete(OrderTicket());
                  if(result != TRUE) Print("LastError = ", GetLastError());
                  break;
                 }
              }
           }
        }
     }
  }
[Tips] PHP Array Operations
Arrays are an important variable type in every programming language, PHP included: a single array variable can hold many related keys and values. As an example, suppose we build a variable holding the names of the third-grade children of a school, data that would normally sit in a table:
nis nama
001 Banu
002 Budi
003 Bayu
004 Bono
The contents of that table can be put into a single array variable, where each array item holds a KEY and a VALUE and the items are separated from one another by commas, like this:
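A minimal sketch of what such an array looks like, reusing the student data from the table (the variable name $arr is just a choice):

$arr = array(
    "001" => "Banu",
    "002" => "Budi",
    "003" => "Bayu",
    "004" => "Bono"
);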
Another way to create an array is:
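For instance, the same array can also be built by assigning the items one at a time (shown here as one possible alternative, not the only one):

$arr = array();
$arr["001"] = "Banu";
$arr["002"] = "Budi";
$arr["003"] = "Bayu";
$arr["004"] = "Bono";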
An array may also have only VALUEs, with no explicit KEYs. For example:
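A small sketch of such a key-less array; PHP then assigns the numeric keys 0, 1, 2, ... automatically:

$arr = array("Banu", "Budi", "Bayu", "Bono");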
DISPLAYING AN ARRAY in PHP
An array can be displayed (dumped for inspection) in PHP with the print_r() function.
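For example (the exact output formatting depends on the PHP version and on whether it runs in a browser or on the command line):

print_r($arr);
// prints something like: Array ( [0] => Banu [1] => Budi [2] => Bayu [3] => Bono )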
ARRAY TYPES
Arrays cover three cases:
• Numeric array: the array is addressed by the position of each item, starting from 0 for the first item. For the key-less example above we can say that
$arr[0] = "Banu"
$arr[1] = "Budi"
and so on, so if we echo with PHP, e.g. echo $arr[0];
the result is "Banu".
• Associative array: the array is addressed by the name of its key. For the keyed example above we can say that
$arr["001"] = "Banu"
$arr["002"] = "Budi"
and so on, so if we echo with PHP, e.g. echo $arr["002"];
the result is "Budi".
• Multidimensional array: an array inside another array. For example:
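A sketch of such a nested array; the inner keys (nama, kelas) are made up for illustration:

$siswa = array(
    "001" => array("nama" => "Banu", "kelas" => "III"),
    "002" => array("nama" => "Budi", "kelas" => "III")
);
echo $siswa["001"]["nama"]; // prints Banu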
EXTRACTING ARRAY VALUES
Array values can be read out either with a loop, to walk through all of them, or with echo/print to address individual items directly when a loop is not what we need, by numeric position or by key; both styles are sketched below.
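A minimal sketch of both styles (the <br /> is only there to keep browser output readable):

// walk through every value with a loop
foreach ($arr as $value) {
    echo $value . "<br />";
}

// or address single items directly
echo $arr[0];     // by numeric position (for the key-less array above)
echo $arr["001"]; // by key (for the keyed array above)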
EXTRACTING ARRAY KEYS AND VALUES
Sometimes we need to read out the array values together with their keys. A loop like the following can be used:
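A sketch using foreach with the key => value syntax:

foreach ($arr as $key => $value) {
    echo $key . " : " . $value . "<br />";
}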
UNPACKING AN ARRAY DIRECTLY INTO NEW VARIABLES
An array can also be unpacked directly into new variables. For example, given an array such as:
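Judging from the variables listed below, the array presumably looks something like this:

$data = array(
    "nama"   => "bimosaurus",
    "alamat" => "wonosobo",
    "usia"   => "200 tahun"
);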
it can be used directly like this:
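Presumably this is PHP's extract() function, which turns each key into a variable of the same name:

extract($data);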
and the following new variables appear:
$nama = “bimosaurus”;
$alamat = “wonosobo”;
$usia = “200 tahun”;
BUILDING AN ARRAY WITH EXPLODE
An array can also be built from a word or sentence that is split apart with explode(). For example:
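A sketch; the sample sentence is made up and only needs to contain the word "sebenarnya" so that the result below matches:

$kalimat = "ini sebenarnya hanya sebuah contoh kalimat";
$arr = explode(" ", $kalimat); // split on spaces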
If the result is read out as a numeric array:
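For instance (index 1 matches the sample sentence assumed above):

echo $arr[1];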
the result is "sebenarnya". Also try running the print_r() function on it.
The opposite of explode() is implode(), which joins an array back into a single string.
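A small sketch of the reverse direction:

echo implode(" ", $arr); // glues the pieces back together with spaces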
READING ARRAYS FROM MySQL
Suppose the table above is stored as a table called siswa. A MySQL query would then look like the following:
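A sketch using the old mysql_* API that the fetch functions below belong to (assuming a database connection is already open):

$query = mysql_query("SELECT * FROM siswa");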
Once the query has run, MySQL hands the rows back as arrays, which we can unpack in several ways. The fetch function can be mysql_fetch_row, mysql_fetch_array, or mysql_fetch_assoc, and a while loop is by far the easiest way to iterate over the rows:
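A minimal sketch of that while loop, here with mysql_fetch_assoc so the columns can be addressed by name:

while ($row = mysql_fetch_assoc($query)) {
    echo $row["nis"] . " : " . $row["nama"] . "<br />";
}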
What is the difference between mysql_fetch_row, mysql_fetch_array, and mysql_fetch_assoc? mysql_fetch_row returns each row as a numeric array, mysql_fetch_assoc returns it as an associative array, and mysql_fetch_array can do both.
That's all, I hope it is useful.
One day, I decide to run to the park. On the way there, I run at a rate of \(x^2\) miles per hour for \(3\) hours. On the way back, I take the same path and jog at a slower rate of \(16 - 4x\) miles per hour so that it takes me \(4\) hours to get home. Given that \(x > 0\), what is \(x\)? Express your answer as a common fraction.
#1
3x^2 = 4(16 - 4x), solve for x
Solve for x:
3 x^2 = 4 (16 - 4 x)
Expand out terms of the right hand side:
3 x^2 = 64 - 16 x
Subtract 64 - 16 x from both sides:
3 x^2 + 16 x - 64 = 0
The left hand side factors into a product with two terms:
(x + 8) (3 x - 8) = 0
Split into two equations:
x + 8 = 0 or 3 x - 8 = 0
Subtract 8 from both sides:
x = -8 or 3 x - 8 = 0
Add 8 to both sides:
x = -8 or 3 x = 8
Divide both sides by 3:
x = 8/3 = 2 2/3
#2
Rate * Time = Distance
Since the distances are equal, we have that
x^2 * 3 = (16 - 4x) * 4
3x^2 = 64 - 16x rearrange as
3x^2 + 16x - 64 = 0 factor
(3x - 8) (x + 8) = 0
Set both factors = 0 and solve for x and we have
x = 8/3 or x = -8
Since x > 0, then x = 8/3 is correct
cool cool cool
68 inches to feet (Inches to Feet)
Convert 68 inches to feet with our guide. Understand how 68 inches translates to feet and other useful tips.
Let us understand the process of converting 68 inches to feet
68 inches is equal to 5.67 feet. If you're trying to convert inches to feet, it’s essential to know that 1 foot equals 12 inches. Dividing 68 inches by 12 gives us the result of 5.67 feet.
Knowing how to quickly and accurately convert inches to feet can be incredibly useful, whether you're working on home projects, buying furniture, or simply understanding measurements better. For example, a common situation where this conversion could be handy is when you need to know if a specific piece of furniture or a doorway fits through your space.
Why Convert 68 Inches to Feet?
Understanding Measurements: When you convert 68 inches to feet, you gain a better understanding of how this length measures up in a more commonly used unit. This can help both in planning and visualizing spaces or objects accurately.
Practical Applications: For instance, if you're buying a new bookshelf that is 68 inches tall, knowing it is 5.67 feet tall helps ensure it will fit in your room with an 8-foot ceiling without hitting any overhead fixtures.
Cost Efficiency: Accurate measurements can also save money on materials, because you're less likely to overbuy or to waste resources on items that do not fit or suit your needs.
The Math Behind Converting Inches to Feet
To convert inches to feet, divide the number of inches by 12 since there are 12 inches in a foot. So for the conversion from 68 inches to feet:
68 inches ÷ 12 = 5.67 feet
This conversion is invaluable in scenarios such as construction, architecture, interior design, and even when you're out shopping for items that have to fit within specific dimensions.
Additional Scenarios for Using Inches to Feet Conversion
DIY Projects: Imagine you are building a custom TV stand. Knowing the exact conversion of inches to feet helps ensure your stand fits your TV and complements your living space efficiently.
Real Estate: Real estate agents frequently need to convert measurements to describe room sizes vividly. Saying a room is 5.67 feet wide rather than 68 inches provides a clearer idea to most clients.
Travel Requirements: When packing for a trip, airlines often specify luggage size limits in inches. Converting your suitcase dimensions to feet might give a clearer picture and ensure you avoid extra charges.
FAQs
How do you convert 68 inches to feet?
To convert 68 inches to feet, divide 68 by 12, which equals 5.67 feet.
Why is it important to know how to convert inches to feet?
Knowing how to convert inches to feet is important for accurate measurement in various fields such as construction, interior design, and daily practical scenarios.
Are there tools available for inch to feet conversion?
Yes, many online calculators and mobile apps can easily convert inches to feet. For example, you may use an online measurement converter which makes conversions straightforward and quick.
Understanding these conversions helps not just in daily life, but also ensures precision in professional fields where measurements matter. Accurate conversions from inches to feet can save time, resources, and ultimately lead to better decision-making.
Conclusion
By knowing that 68 inches is equal to 5.67 feet, you are better prepared for a multitude of tasks. Whether you are engaged in DIY projects, planning home renovations, or working in a professional field, having this fundamental knowledge is incredibly advantageous.
This article is designed to be straightforward and informative, making it easy to understand the significance and method of converting inches to feet. The information is practical and includes real-world applications, underlining the importance of accurate measurements in various aspects of life and work.
I am having trouble understanding the following theorem given in my textbook:
Let $A$ be an $ m \times n $ coefficient matrix. Then the following statements are logically equivalent (That is, for a particular $A$, either they are all true or all false statements.)
a) For each $\mathbf{b}$ in $\mathbb{R}^{m}$, the equation $A\mathbf{x} = \mathbf{b}$ has a solution.
b) Each $\mathbf{b}$ in $\mathbb{R}^{m}$ is a linear combination of the columns of $A$.
c) The columns of $A$ span $\mathbb{R}^{m}$.
d) $A$ has a pivot position in every row.
I understand that a, b, c are restatements of each other. I would like some intuition on why statement d and the others (particularly c) are logically equivalent.
@GitGud Added explanation for logically equivalent. – dal102 Feb 2 '13 at 21:01
I think you should say equivalent, rather than logically equivalent. – 1015 Feb 2 '13 at 21:49
1 Answer
You will find the answer to this in most introductory linear algebra textbooks but a quick answer would be as follows:
• If $Ax=b$ has a solution for each $b$, the columns of $A$ must span $\mathbb{R}^m$. If they did not, they would only span a proper subspace of $\mathbb{R}^m$, and choosing a vector $b$ outside that subspace would leave the equation with no solution.
• Since $Ax=b$ can be written out as $\sum_i A_i x_i = b$, where $A_i$ denotes the $i^{th}$ column of $A$, saying that the columns span $\mathbb{R}^m$ is exactly saying that any vector $b$ can be written as a linear combination of the columns of $A$.
• All of the above statements imply that $A$ has full row rank, i.e. rank $m$; when $A$ is square this is the same as saying it is invertible.
• If one started a Gaussian elimination on $A$ and found a row without a pivot, the reduced form would contain a zero row, so the rank would be less than $m$ and some right-hand sides $b$ would make the system inconsistent, contradicting the statements above.
• Another way of thinking about it is that through Gaussian elimination (or Gauss-Jordan elimination) you reduce $A$ while carrying the right-hand side along. A row without a pivot constrains the right-hand sides that can be reached, so the columns cannot span the whole space.
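A quick numerical illustration (this is not part of the original answer; the matrix and right-hand side are arbitrary):

import numpy as np

A = np.array([[1., 2., 0.],
              [0., 1., 3.]])              # 2 x 3 matrix with a pivot in every row

print(np.linalg.matrix_rank(A))           # 2 == m, so the columns span R^2

b = np.random.rand(2)                     # any right-hand side
x, *_ = np.linalg.lstsq(A, b, rcond=None)
print(np.allclose(A @ x, b))              # True: a solution exists for every b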
Publishing Event in Google BigQuery
Google BigQuery lets you analyse your data. You can build a data warehouse and run BI tools on top of it for analysis. In this post we discuss a simple pipeline to publish your event data to Google BigQuery using Google Pub/Sub.
Step by step guide
1. Create a table in Google BigQuery. You can use the Google Cloud console or the CLI.
Below is a simple schema for a fact table.
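(The schema itself is not included in this extract; as a stand-in, a fact table could look roughly like the following, where the dataset and column names are purely illustrative.)

CREATE TABLE analytics.fact_events (
  event_time TIMESTAMP,
  user_id    STRING,
  event_type STRING,
  properties STRING
);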
2. Create a Google Cloud Function to insert the data into BigQuery. The generic cloud function we created works for JSON-serialized objects.
Below is the example
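(The original function is not reproduced in this extract. The sketch below, written in Python purely for illustration, follows the description given next; the field names datasetInfo and items, the entry point name handler and the error handling are all assumptions, not the actual code.)

import base64
import json

from google.cloud import bigquery

client = bigquery.Client()

def handler(event, context):
    # Pub/Sub background functions receive the message payload base64 encoded.
    message = json.loads(base64.b64decode(event["data"]).decode("utf-8"))

    dataset = message["datasetInfo"]["dataset"]   # assumed field names
    table = message["datasetInfo"]["table"]

    # The rows themselves are sent as a base64 encoded JSON string.
    rows = json.loads(base64.b64decode(message["items"]).decode("utf-8"))

    errors = client.insert_rows_json("{}.{}".format(dataset, table), rows)
    if errors:
        raise RuntimeError("BigQuery insert failed: {}".format(errors))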
This function is generic: it expects the message to contain a datasetInfo field with the dataset and table name where you want to insert the object.
The objects to be inserted are sent as a base64 encoded string as part of the JSON.
You can send an array of items to insert into BigQuery in one call.
3. Once you have the above code ready, go to the console, create the function and subscribe it to a Pub/Sub topic, something like below.
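(The console screenshot is not included in this extract; the roughly equivalent gcloud command, assuming the Python sketch above with an entry point named handler and a topic named events, would be:)

gcloud functions deploy insert-into-bigquery \
  --runtime python39 \
  --entry-point handler \
  --trigger-topic events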
4. Once you have a function listening to the topic and inserting into BigQuery, you need to write a publisher to publish this information.
Below is the code, written in Go, which publishes to a Google Cloud Pub/Sub topic.
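(The original Go snippet is not reproduced in this extract; a rough equivalent is sketched below, with made-up project, topic and field names that mirror the assumptions above.)

package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"log"

	"cloud.google.com/go/pubsub"
)

type datasetInfo struct {
	Dataset string `json:"dataset"`
	Table   string `json:"table"`
}

type event struct {
	DatasetInfo datasetInfo `json:"datasetInfo"`
	Items       string      `json:"items"` // base64 encoded JSON array of rows
}

func main() {
	ctx := context.Background()

	client, err := pubsub.NewClient(ctx, "my-project") // placeholder project id
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	rows, _ := json.Marshal([]map[string]interface{}{
		{"user_id": "42", "event_type": "signup"},
	})

	msg := event{
		DatasetInfo: datasetInfo{Dataset: "analytics", Table: "fact_events"},
		Items:       base64.StdEncoding.EncodeToString(rows),
	}
	data, _ := json.Marshal(msg)

	result := client.Topic("events").Publish(ctx, &pubsub.Message{Data: data})
	if _, err := result.Get(ctx); err != nil { // block until the server acknowledges
		log.Fatal(err)
	}
}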
You can also find the code in our Coral server repository
You have to make sure that the column names in the table match the fields of the struct that is being serialized.
With these simple steps you can quickly build a data warehouse for your real-time apps in a cost-effective manner.
Feel free to reach out to us at [email protected].
Follow us on twitter https://twitter.com/k8scaleio
Simplifying writing cloud connected Apps.
• my sql error 1442 - saranya jothiprakasam, November 23, 2011 at 5:18 PM
i got the error "can't update table dctblloaddata in stored function/trigger because it is already used by statement which invoked this stored function/trigger" when trying to insert a row. my trigger is as follows:
delimiter $$
create trigger s after insert on dctblloaddata
for each row
begin
declare v_value long;
select LoadProfileID from cfw.dctblloaddata order by LoadProfileID desc limit 1 into v_value;
update `dctblloaddata`
set AvgCurrent=-99999.99, AvgVolt=-99999.99, ActiveDemandFwd=-99999.99, ApperentDemandFwd=-99999.99
where ((LoadProfileID=v_value) AND (AvgCurrent>800 OR AvgCurrent=0 OR AvgCurrent=-99999.99 OR AvgVolt>300 OR AvgVolt=0 OR AvgVolt=-99999.99 OR ActiveDemandFwd=0 OR ActiveDemandFwd=-99999.99 OR ApperentDemandFwd=0 OR ApperentDemandFwd=-99999.99));
end $$
delimiter ;
Can you give me some suggestion as to why it is not working properly?
Exercise 7.6
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn import metrics
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import f_regression
%matplotlib inline
df = pd.read_csv('../data/Wage.csv', index_col=0)
df.head()
year age sex maritl race education region jobclass health health_ins logwage wage
231655 2006 18 1. Male 1. Never Married 1. White 1. < HS Grad 2. Middle Atlantic 1. Industrial 1. <=Good 2. No 4.318063 75.043154
86582 2004 24 1. Male 1. Never Married 1. White 4. College Grad 2. Middle Atlantic 2. Information 2. >=Very Good 2. No 4.255273 70.476020
161300 2003 45 1. Male 2. Married 1. White 3. Some College 2. Middle Atlantic 1. Industrial 1. <=Good 1. Yes 4.875061 130.982177
155159 2003 43 1. Male 2. Married 3. Asian 4. College Grad 2. Middle Atlantic 2. Information 2. >=Very Good 1. Yes 5.041393 154.685293
11443 2005 50 1. Male 4. Divorced 1. White 2. HS Grad 2. Middle Atlantic 2. Information 1. <=Good 1. Yes 4.318063 75.043154
(a)
# Model variables
y = df['wage'][:,np.newaxis]
X = df['age'][:,np.newaxis]
# Compute regression models with different degrees
# Variable 'scores' saves mean squared errors resulting from the different degrees models.
# Cross validation is used
scores = []
for i in range(0,11):
    model = Pipeline([('poly', PolynomialFeatures(degree=i)), ('linear', LinearRegression())])
    model.fit(X,y)
    score = cross_val_score(model, X, y, cv=5, scoring='neg_mean_squared_error')
    scores.append(np.mean(score))
scores = np.abs(scores) # Scikit computes negative mean square errors, so we need to turn the values positive.
# Plot errors
x_plot = np.arange(0,11)
plt.plot(x_plot, scores)
plt.ylabel('Mean squared error (CV)')
plt.xlabel('Degrees')
plt.xlim(0,10)
plt.show()
(figure: cross-validated mean squared error versus polynomial degree)
# Print array element correspoding to the minimum mean squared error
# Element number = polynomial degree for minimum mean squared error
print(np.where(scores == np.min(scores)))
(array([4], dtype=int64),)
Optimal degree d for the polynomial according to cross-validation: 4
We will use statsmodels to perform the hypothesis testing using ANOVA. Statsmodels has a built-in function that simplifies our job and we didn't find an equivalent way of solving the problem with scikit-learn.
# Fit polynomial models to use in statsmodels.
models = []
for i in range(0,11):
    poly = PolynomialFeatures(degree=i)
    X_pol = poly.fit_transform(X)
    model = sm.GLS(y, X_pol).fit()
    models.append(model)
# Hypothesis testing using ANOVA
sm.stats.anova_lm(models[0], models[1], models[2], models[3], models[4], models[5], models[6], typ=1)
df_resid ssr df_diff ss_diff F Pr(>F)
0 2999.0 5.222086e+06 0.0 NaN NaN NaN
1 2998.0 5.022216e+06 1.0 199869.664970 125.505882 1.444930e-28
2 2997.0 4.793430e+06 1.0 228786.010128 143.663571 2.285169e-32
3 2996.0 4.777674e+06 1.0 15755.693664 9.893609 1.674794e-03
4 2995.0 4.771604e+06 1.0 6070.152124 3.811683 5.098933e-02
5 2994.0 4.770322e+06 1.0 1282.563017 0.805371 3.695646e-01
6 2993.0 4.766389e+06 1.0 3932.257136 2.469216 1.162015e-01
The lower the value of F, the lower the significance of the coefficient. Degrees higher than 4 don't improve the polynomial regression model significantly. This result is in agreement with the cross-validation results.
# Save optimal degree
opt_degree = 4
# Plot polynomial regression
# Auxiliary variables X_line and y_line are created.
# These variables allow us to draw the polynomial regression.
# np.linspace() is used to create an ordered sequence of numbers. Then we can plot the polynomial regression.
model = Pipeline([('poly', PolynomialFeatures(degree = opt_degree)), ('linear', LinearRegression())])
model.fit(X,y)
X_lin = np.linspace(18,80)[:,np.newaxis]
y_lin = model.predict(X_lin)
plt.scatter(X,y)
plt.plot(X_lin, y_lin,'-r');
(figure: scatter plot of wage versus age with the degree-4 polynomial fit)
(b)
# Compute cross-validated errors of step function
'''
To define the step function, we need to cut the dataset into parts (pd.cut() does the job)
and associate each part with a dummy variable. For example, if we have two parts (age<50
and age>=50), we will have one dummy variable that gets value 1 if age<50 and value 0
if age>=50.
Once we have the dataset in these conditions, we need to fit a linear regression to it.
The governing model will be defined by: y = b0 + b1 C1 + b2 C2 + ... + bn Cn, where
b stands for the regression coefficient;
C stands for the value of a dummy variable.
Using the same example as above, we have y = b0 + b1 C1, so the fitted value is b0 + b1 for ages below 50 and b0 otherwise, i.e. a step function.
'''
scores = []
for i in range(1,10):
    age_groups = pd.cut(df['age'], i)
    df_dummies = pd.get_dummies(age_groups)
    X_cv = df_dummies
    y_cv = df['wage']
    model.fit(X_cv, y_cv)
    score = cross_val_score(model, X_cv, y_cv, cv=5, scoring='neg_mean_squared_error')
    scores.append(score)
scores = np.abs(scores) # Scikit computes negative mean square errors, so we need to turn the values positive.
# Number of cuts that minimize the error
min_scores = []
for i in range(0,9):
    min_score = np.mean(scores[i,:])
    min_scores.append(min_score)
    print('Number of cuts: %i, error %.3f' % (i+1, min_score))
Number of cuts: 1, error 1741.335
Number of cuts: 2, error 1733.925
Number of cuts: 3, error 1687.688
Number of cuts: 4, error 1635.756
Number of cuts: 5, error 1635.556
Number of cuts: 6, error 1627.397
Number of cuts: 7, error 1619.168
Number of cuts: 8, error 1607.926
Number of cuts: 9, error 1616.550
The number of cuts that minimizes the error is 8.
# Plot
# The following code shows, step by step, how to plot the step function.
# Convert ages to groups of age ranges
n_groups = 8
age_groups = pd.cut(df['age'], n_groups)
# Dummy variables
# Dummy variables is a way to deal with categorical variables in linear regressions.
# It associates the value 1 to the group to which the variable belongs, and the value 0 to the remaining groups.
# For example, if age == 20, the (18,25] will have the value 1 while the group (25, 32] will have the value 0.
age_dummies = pd.get_dummies(age_groups)
# Dataset for step function
# Add wage to the dummy dataset.
df_step = age_dummies.join(df['wage'])
df_step.head() # Just to visualize the dataset with the specified number of cuts
(17.938, 25.75] (25.75, 33.5] (33.5, 41.25] (41.25, 49] (49, 56.75] (56.75, 64.5] (64.5, 72.25] (72.25, 80] wage
231655 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 75.043154
86582 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 70.476020
161300 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 130.982177
155159 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 154.685293
11443 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 75.043154
# Variables to fit the step function
# X == dummy variables; y == wage.
X_step = df_step.iloc[:,:-1]
y_step = df_step.iloc[:,-1]
# Fit step function (statsmodels)
reg = sm.GLM(y_step[:,np.newaxis], X_step).fit()
# Auxiliary data to plot the step function
# We need to create a comprehensive set of ordered points to draw the figure.
# These points are based on 'age' values but include also the dummy variables identifiying the group that 'age' belongs.
X_aux = np.linspace(18,80)
groups_aux = pd.cut(X_aux, n_groups)
aux_dummies = pd.get_dummies(groups_aux)
# Plot step function
X_step_lin = np.linspace(18,80)
y_lin = reg.predict(aux_dummies)
plt.scatter(X,y)
plt.plot(X_step_lin, y_lin,'-r');
(figure: scatter plot of wage versus age with the fitted step function using 8 cuts)
References
Speeding up your AngularJS applications
In general AngularJS applications are quite fast, especially when compared to more traditional browser based applications that constantly post back to the server. However there are always a few things that will help performance and make an application even faster.
Disabling Debug Data
Normally AngularJS adds several things like CSS classes and some scope related properties to DOM elements. These are not needed to run the application and only exist to help development tools like Protractor and Batarang. When the application is in production you can save some overhead by disabling this using the $compileProvider.debugInfoEnabled() function.
1: demoApp.config(function($compileProvider) {
2: $compileProvider.debugInfoEnabled(false);
3: });
Explicit dependency injection annotations
Another option to speed up your application is by using explicit dependency injection annotations. If the DI annotations are not present AngularJS has to parse functions to see the parameter names, something that can be avoided by adding the explicit annotations. The annotations can be added manually, which can be tedious to do, or automatically using something like ng-annotate with either a Gulp or Grunt task.
Adding the ngStrictDi directive to the same element as the ngApp directive can help you find functions that still rely on implicit annotations: with strict DI enabled, AngularJS throws an error for them instead of silently parsing the parameter names.
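As a rough sketch, explicit annotations look like this (the module, controller and service names are made up):

demoApp.controller('DemoController', DemoController);

// Explicit annotation: no function parsing needed, and safe to minify.
DemoController.$inject = ['$http', 'demoService'];

function DemoController($http, demoService) {
  // ...
}

Strict DI itself is switched on in the markup, for example <body ng-app="demoApp" ng-strict-di>.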
Reducing the number of $apply() calls
Another helpful option is to reduce the number of $apply() calls that are the result of $http requests finishing. When you are doing multiple $http requests as a page loads, each will trigger an $apply() call, causing all watches and data bindings to be reevaluated. By combining these into a single $apply() call for requests that finish at almost the same time we can increase the load speed of your application, something that can be done using $httpProvider.useApplyAsync().
1: demoApp.config(function($httpProvider) {
2: $httpProvider.useApplyAsync(true);
3: });
Enjoy!
Testing an AngularJS directive with its template
Testing AngularJS directives usually isn’t very hard. Most of the time it is just a matter of instantiating the directive using the $compile() function and interacting with the scope or related controller to verify if the behavior is as expected. However that leaves a bit of a gap as most of the time the interaction between the directive's template and its scope isn’t tested. With really simple templates you can include them in the template property but using the templateUrl and loading them on demand is much more common, especially with more complex templates. Now when it comes to unit testing, the HTTP request to load the template is not going to work and as a result the interaction isn’t tested. Sure it is possible to use the $httpBackend service to fake the response but that still doesn’t use the actual template so doesn’t really test the interaction.
Testing the template
It turns out testing the template isn’t that hard after all, there are just a few pieces to the puzzle. First of all Karma can serve up other files beside the normal JavaScript files just fine, so we can tell it to serve our templates as well. With the pattern option for files we can tell Karma to watch and serve the templates without including them in the default HTML page loaded. See the files section from the karma.conf.js file below.
1: files: [
2: 'app/bower_components/angular/angular.js',
3: 'app/bower_components/angular-mocks/angular-mocks.js',
4: 'app/components/**/*.js',
5: 'app/*.js',
6: 'tests/*.js',
7: {
8: pattern: 'app/*.html',
9: watched: true,
10: included: false,
11: served: true
12: }
13: ],
With that the files are available on the server. There are two problems here though. First of all when running unit tests the mock $httpBackend is used and that never does an actual HTTP request. Secondly the file is hosted at a slightly different URL, Karma includes ‘/base’ as the root of our files. So just letting AngularJS load it is out of the question. However if we use a plain XMLHttpRequest object the mock $httpBackend is completely bypassed and we can load what we want. Using the plain XMLHttpRequest object has a second benefit in that we can do a synchronous request instead of the normal asynchronous request and use the response to pre-populate the $templateCache before the unit test runs. Using synchronous HTTP requests is not advisable for code on the Internet and should be avoided in any production code, but in a unit test like this it works perfectly fine.
So taking an AngularJS directive like this:
1: angular.module('myApp', [])
2: .directive('myDirective', function(){
3: return{
4: scope:{
5: clickMe:'&'
6: },
7: templateUrl:'/app/myDirective.html'
8: }
9: });
And a template like this:
1: <button ng-click="clickMe()">Click me</button>
Can be easily tested like this:
1: describe('The myDirective', function () {
2: var element, scope;
3:
4: beforeEach(module('myApp'));
5:
6: beforeEach(inject(function ($templateCache) {
7: var templateUrl = '/app/myDirective.html';
8: var asynchronous = false;
9: var req = new XMLHttpRequest();
10: req.onload = function () {
11: $templateCache.put(templateUrl, this.responseText);
12: };
13: req.open('get', '/base' + templateUrl, asynchronous);
14: req.send();
15: }));
16:
17: beforeEach(inject(function ($compile, $rootScope) {
18: scope = $rootScope.$new();
19: scope.doIt = angular.noop;
20:
21: var html = '<div my-directive="" click-me="doIt()"></div>'
22: element = $compile(html)(scope);
23: scope.$apply();
24: }));
25:
26: it('template should react to clicking', function () {
27: spyOn(scope, 'doIt');
28:
29: element.find('button')[0].click();
30:
31: expect(scope.doIt).toHaveBeenCalled();
32: });
33: });
Now making any breaking change to the template, like removing the ng-click, will immediately cause the unit test to fail in Karma.
Enjoy!
angular.module("module") is an anti pattern
There are two ways to use the angular.module() function. There is the call with one parameter, which returns an existing module, and there is the option of using two parameters, which creates a new module. The second way, where a new module is created, is perfectly fine and should be used. However the first option, where an existing module is loaded, should be considered an anti pattern in most cases and should not be used unless there is an exceptional and very good reason.
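In code the difference is just the second argument:

// Two arguments: creates a new module named 'mainApp'.
angular.module('mainApp', []);

// One argument: looks up the already created 'mainApp' module so it can be extended.
angular.module('mainApp');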
What is wrong with angular.module(“module”)?
Why should this usage be seen as an anti pattern? Well both creating and retrieving a module using angular.module() return the module so it can be extended. And that is exactly where the problem is. When you create a new module in a JavaScript file you can use that reference to add anything you want, no need to load it again. So the only place loading an existing module is needed is when you want to add something to it in another JavaScript file.
Splitting modules introduces a big risk. As soon as you split an AngularJS module into separate files you can run into the possibility of loading a partially configured module. Where AngularJS checks if all module dependencies can be satisfied at load time it has no way of seeing if these modules are complete or not. Missing a complete module produces a very clear error message right at startup time like this:
Uncaught Error: [$injector:modulerr] Failed to instantiate module mainApp due to:
Error: [$injector:modulerr] Failed to instantiate module mainApp.data due to:
Error: [$injector:nomod] Module ‘mainApp.data’ is not available! You either misspelled the module name or forgot to load it. If registering a module ensure that you
As the complete application fails to load this is very obvious and hard not to spot.
However if you fail to load just a part of a module the errors are a lot less obvious. In this case the error doesn’t appear until the missing component is actually needed; everything up to that point will run just fine. The kind of error message you will see is something like:
Error: [$injector:unpr] Unknown provider: productsProvider <- products
The error in itself is clear enough but discovering it might not be as easy. If the error occurs in a part of that application that is not used often it might go completely unnoticed.
My rule of the thumb: Always define a complete AngularJS module in one JavaScript file.
Want to split the functionality into multiple files? By all means go ahead, but make sure to do so in a new module and use module dependencies to make sure everything is loaded right at application start time, as shown below. And as angular.module(“module”) is only required to load a module defined in another file there really should almost never be a need to use it.
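A minimal sketch of that rule (the module and file names are illustrative):

// data.js - a complete module, defined in a single file
angular.module('mainApp.data', [])
  .factory('products', function () {
    return [];
  });

// app.js - the main module pulls it in through an explicit dependency
angular.module('mainApp', ['mainApp.data']);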
Enjoy!
Using browserify to manage JavaScript dependencies
Managing JavaScript dependencies in the browser is hard. Library scripts typically create global variables and functions. Other scripts now depend on those global objects to do their work. This works but in order to load all required scripts we have to add <script> elements to our HTML, making sure to add them in the right order, and basically know what each exposes.
The problem
Consider the following client side code:
1: // Print a message
2: utils.print("Hello");
This depends on another piece of script below:
1: // Expose the utility object with it's print function
2: var utils = {
3: print: function(msg){
4: console.log(msg);
5: }
6: };
And for all of that to work we have to load the scripts in the right order using some HTML as below:
1: <!DOCTYPE html>
2: <html>
3: <head lang="en">
4: <meta charset="UTF-8">
5: <title>Browserify demo</title>
6: </head>
7: <body>
8:
9: <script src="utils.js"></script>
10: <script src="demo.js"></script>
11:
12: </body>
13: </html>
Not really rocket science here, but if we want to update utils.print() to call a printIt() function loaded from yet another library we have to go back to our HTML and make sure we load printIt.js as well. Easy in a small app but this can become hard and error prone with larger applications.
Browserify to the rescue
Using browserify will make managing these dependencies a lot easier. To understand how it works we first must take a quick look at how NodeJS modules work.
With node each module can take a dependency on another module by requiring it using the require() function. And each module can define what it exports to other modules by using module.exports. The NodeJS runtime takes care of loading the files and adding dependencies inside a module will not require a change anywhere else in the program.
This system works really nicely but unfortunately the browser doesn’t provide this NodeJS runtime capability. One problem here is that a call to require() is a synchronous call that returns the loaded module while the browser does all of its IO asynchronously. In the browser you can use something like RequireJS to asynchronously load scripts but while this works fine it is not very efficient due to its asynchronous nature. As a result people usually use RequireJS during development and then create a bundle with all the code for production.
Browserify on the other hand will allow us to use the synchronous NodeJS approach with script loading in the browser. This is done by packaging up all files required based on the require() calls and creating one file to load at runtime. Converting the example above to use this style requires some small changes in the code.
The demo.js specifies it requires utils.js. The syntax “./utils” means that we should load the file from the same folder.
1: var utils = require("./utils");
2: // Print a message
3: utils.print("Hello");
Next the utils.js specifies what it exports:
1: // Expose the utility object with it's print function
2:
3: var utils = {
4: print: function(msg){
5: console.log(msg);
6: }
7: };
8:
9: module.exports = utils;
Next we need to run browserify to bundle the file for use in the browser. As browserify is a node application we need to install node and then, through the node package manager NPM, install browserify with
1: npm install -g browserify
With browserify installed we can bundle the files into one using:
1: browserify demo.js > bundle.js
This will create a bundle.js with the following content:
1: (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
2: var utils = require("./utils");
3: // Print a message
4: utils.print("Hello");
5:
6: },{"./utils":2}],2:[function(require,module,exports){
7: // Expose the utility object with it's print function
8:
9: var utils = {
10: print: function(msg){
11: console.log(msg);
12: }
13: };
14:
15: module.exports = utils;
16: },{}]},{},[1]);
Not the most readable, but then that was not what it was designed to do. Instead we can see all the code we need is included. Now by just including this generated file we are ready to start our browser application.
Adding the printIt() function
Doing the same change as above is simple and best of all doesn’t require any change to the HTML to load different files. Just update utils.js to require() printIt.js and explicitly export the function in printIt.js, rerun browserify and you are all set.
1: function printIt(msg){
2: console.info(msg);
3: }
4:
5: module.exports = printIt;
Note that it’s fine to just export a single function here.
1: // Expose the utility object with it's print function
2: var printIt = require("./printIt");
3:
4: var utils = {
5: print: function(msg){
6: printIt(msg);
7: }
8: };
9:
10: module.exports = utils;
And the result of running browserify is:
1: (function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o<r.length;o++)s(r[o]);return s})({1:[function(require,module,exports){
2: var utils = require("./utils");
3: // Print a message
4: utils.print("Hello");
5:
6: },{"./utils":3}],2:[function(require,module,exports){
7: function printIt(msg){
8: console.info(msg);
9: }
10:
11: module.exports = printIt;
12:
13: },{}],3:[function(require,module,exports){
14: // Expose the utility object with it's print function
15: var printIt = require("./printIt");
16:
17: var utils = {
18: print: function(msg){
19: printIt(msg);
20: }
21: };
22:
23: module.exports = utils;
24: },{"./printIt":2}]},{},[1]);
Again not the most readable code but the printIt() function is now included. Nice and no changes required to the HTML :-)
Proper scoping
As a side benefit browserify also wraps all our JavaScript files in a function, ensuring that proper scope for variables is used and we don’t accidentally leak variables to the global scope.
Using browserify works really nicely but this way we do have to run it after every change. In the next blog post I will show how to use Gulp or Grunt to automate this, making the workflow a lot smoother.
Enjoy!
X things every JavaScript developer should know: Automatic Semicolon Insertion
As with many other things in JavaScript Automatic Semicolon Insertion is usually not a problem but it can occasionally bite you if you are unaware of it. What Automatic Semicolon Insertion does is really simple. It basically boils down to semicolons being optional in JavaScript and the parser injecting them when it is appropriate. That might sound very nice, after all you can leave semicolons out and the right thing will happen. For example the following code, without a single semicolon, is completely valid and will print a sum of 3 as expected:
1: console.log(add(1, 2))
2:
3: function add(x, y) {
4: var sum
5: sum = x + y
6: return sum
7: }
What basically happens is that the JavaScript parser adds a semicolon at the end of each line if that doesn’t cause the syntax to become invalid. See section 7.9.1 of the ECMA-262 standard or read it online here.
Now that might sound great but it turns out that Automatic Semicolon Insertion can cause some interesting issues :-(
JavaScript style rules
One thing you might have noticed is that the normal style of writing JavaScript is different than that of C# or Java. Compare the JavaScript code above with the same C# code below:
1: public int Add(int x, int y)
2: {
3: int sum;
4: sum = x + y;
5: return sum;
6: }
Besides the obvious difference with the typing and the required semicolons the open curly brace for the add function is on the same line as the declaration in JavaScript and the next line in C#. While the JavaScript conventions would work fine in C# the reverse is not always the case. If we reformatted the JavaScript to the following the code, in this case, would still run fine.
1: function add(x, y)
2: {
3: var sum
4: sum = x + y
5: return sum
6: }
However if we would return an object literal and format our code the same way we would run into a problem. Consider the following code:
1: console.log(add(1, 2))
2:
3: function add(x, y) {
4: var sum
5: sum = x + y
6:
7: return
8: {
9: sum: sum
10: }
11: }
You might expect this to print an object with a property sum containing the value 3. However the code prints “undefined”. Compare that with the following code that is only formatted differently:
1: console.log(add(1, 2))
2:
3: function add(x, y) {
4: var sum
5: sum = x + y
6:
7: return {
8: sum: sum
9: }
10: }
This will print the expected object with a sum of 3.
Blame JavaScript Automatic Semicolon Insertion
This unexpected behavior is caused by semicolon insertion. Instead of the code you most likely think will execute, the following executes:
1: console.log(add(1, 2));
2:
3: function add(x, y) {
4: var sum;
5: sum = x + y;
6:
7: return;
8: {
9: sum: sum
10: };
11: }
Notice the semicolon after the return statement?
That actually means return nothing, i.e. undefined, and just have some unreachable code on the next few lines. That is completely valid so that is what happens :-(
Best practices
The general advice, even though it doesn’t protect you, is to always add semicolons and not leave it up to the JavaScript parser. It doesn’t really help a lot because the parser will still inject semicolons if it thinks it is appropriate. So the only real solution is to use the JavaScript formatting conventions and ensure that the open curly brace of the object literal is after the return statement. That way adding a semicolon there is invalid and you can be sure the right thing happens.
Unfortunately ‘use strict’ doesn’t help here either. It will prevent some errors but it doesn’t make semicolons required :-(
Enjoy!
X things every JavaScript developer should know: Comparisons
Another item of things every JavaScript developer should know is how comparisons work. Just like with some of the other JavaScript, or I should really say ECMAScript, features anything you know about C# or Java could actually be misleading here.
To == or to ===
One of the weird things is there are actually two comparison operators in JavaScript, the double and the triple equals. The == is called the equals operator, see section 11.9.1 of the ECMAScript standard, and was the original equality operator. Unfortunately the way this operator works is quite some cause for confusion and as a result the === or Strict Equals operator was introduced, see section 11.9.4 of the ECMAScript standard. It would have been nice if they had just fixed the original problem but if they had they would have broken existing JavaScript applications.
In general I would always advise you to use the Strict Equals Operator or === whenever you do a comparison unless you have a specific need for the behavior or the original operator.
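A couple of quick examples of the difference:

0 == "0"    // true  - the string is converted to a number first
0 === "0"   // false - different types, no conversion happens
1 == true   // true  - the boolean is converted to a number
1 === true  // false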
What is the problem with ==
I mentioned that == has problems and should be avoided but its still helpful to understand these problems. These problems basically boil down to the fact that the == operator does type conversions if the two types being compared are not the same. For example the following all evaluate to true:
1: 0 == "0" // true
2: 1 == "1" // true
3: 2 == "2" // true
Sounds reasonable enough right?
Unfortunately it isn’t quite that simple all of the following evaluate to false:
1: false == "false" // false
2: true == "true" // false
These might seem weird, especially since the following evaluates to true again:
1: true == !!"true" // true
So what is going on here?
The Abstract Equality Comparison Algorithm
Section 11.9.3 of the ECMAScript standard describes what is happening here. If one operand is a number and the other a string, as was the case in the first examples, the string is converted to a number and the comparison is done based on those. So basically these comparisons where:
1: 0 == 0 // true
2: 1 == 1 // true
3: 2 == 2 // true
So what was the case in the other two comparisons?
In these cases almost the same happens and the Boolean values are converted to a number. That leaves a number to string comparison where the string is also converted to a number. And the result of converting true and false to a number is 1 and 0 but the result of the string to number conversions is an invalid number or NaN. And NaN being not equal to any other number means those comparisons result in false.
So why did the last comparison true == !!”true” evaluate to true? Well simple the double bang operator !! is evaluated first and a non empty string is truthy. End result of that is the expression true == true and that is obviously true. Sounds reasonable but that also means that any non empty string will result in true, so even true == !!"false" evaluates to true :-(
Conclusion
The double equality operator is a confusing part of the JavaScript history. You are best off to avoid it and use the Strict Equals Operator === instead.
Enjoy!
Converting the RavenDB Northwind database to a more denormalized form
In a previous blog post I demonstrated how to denormalize the RavenDB sample database and use the DenormalizedReference<T> and INamedDocument types from the RavenDB documentation to make life really sweet. That leaves us with one small problem and that is that the original sample database doesn’t work with our improved document design. With the sample database, small as it is, loading all documents as a dynamic type, converting them and saving them would be easy enough, but in a real database that would not be practical. So let's look at a better solution: fixing the database on the server.
Updating the database on the server
Instead of downloading each document, updating the structure and saving it back to the server it is much better to do these sorts of actions on the server itself. Fortunately RavenDB has the capability to execute database commands on the server. These update commands can be PatchRequest objects that will let you do a large number of things using a nice C# API. And as the ultimate fallback there is the ScriptedPatchRequest which will let you execute a block of JavaScript code on the server. Why JavaScript? Well RavenDB stores things in JSON and the server is really not dependent on a .NET client.
Using the ScriptedPatchRequest we can either execute a patch on a single document or on a collection of documents. In this case I want to update all Order documents to reflect their new structure. It turns out this is quite simple
1: using (IDocumentStore documentStore = new DocumentStore
2: {
3: ConnectionStringName = "Northwind"
4: }.Initialize())
5: {
6: var javaScript = @"...";
7:
8: documentStore.DatabaseCommands.UpdateByIndex(
9: "Raven/DocumentsByEntityName",
10: new IndexQuery
11: {
12: Query = "Tag:Orders"
13: },
14: new ScriptedPatchRequest
15: {
16: Script = javaScript
17: });
18: }
This code will execute the JavaScript code to patch the document once for each document in the Orders collection.
The JavaScript code to execute is quite simple, just make the changes required to the document and you are set.
1: var company = LoadDocument(this.Company);
2: this.Company = {Id: this.Company, Name: company.Name};
3:
4: var employee = LoadDocument(this.Employee);
5: this.Employee = {Id: this.Employee, Name: employee.FirstName + ' ' + employee.LastName};
6:
7: var shipVia = LoadDocument(this.ShipVia);
8: this.ShipVia = {Id: this.ShipVia, Name: shipVia.Name};
9:
10: this.Lines.forEach(function(line){
11: var product = LoadDocument(line.Product);
12: line.Product = {Id: line.Product, Name: product.Name};
13: delete line.ProductName;
14: });
In this case I am converting the Company, Employee, ShipVia and Product properties to have the new structure. Additionally I am removing the ProductName from the OrderLine as that is no longer needed.
Sweet :-)
Denormalizing data in RavenDB
One of the things with RavenDB, or NoSQL document databases in general, is that you don’t do joins to combine data. Normally you try to model the documents you store in such a way that the data you need for most common actions is stored in the document itself. That often means denormalizing data. When you first get started with document databases that feels strange, after all with relational databases we are taught to normalize data as much as possible and not repeat the same values. Where normalizing data is great for updates and minimizing the size of databases it is less than ideal for querying. This is because when querying we need to join various tables to turn abstract foreign keys into something that is actually understandable by the end user. And while relational databases are pretty good at joining tables these operations are not free, instead we pay for the that with every query we do. Now it turns out that most applications are read heavy and not write heavy. And as a result optimizing writes actually hurts something like 99% of the database operations we do.
With a document database like RavenDB we can’t even do a join. When we normalize data the client actively has to fetch related data and turn those abstract identities of other documents into, for a user, meaningful values. Normally the documents in a RavenDB database are much more denormalized than similar data in a SQL Server database would be. The result is that for most operations a single IDocumentSession.Load() is enough to work with a document.
That data makes sense to denormalize?
Not everything makes sense to denormalize; normally only relatively static data that is frequently needed is denormalized. Why relatively static data? Simple: every time the master document for that piece of data is updated, all documents where it is denormalized also need to be updated. And while not especially difficult, it would become a bottleneck if it happened too often. Fortunately there is enough data that fits the criteria.
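For example, when a company is renamed the denormalized copies can be pushed out with the same UpdateByIndex/ScriptedPatchRequest approach shown earlier on this page. A rough sketch, in which the index name, query and new value are purely illustrative:

documentStore.DatabaseCommands.UpdateByIndex(
    "Orders/ByCompanyId",   // hypothetical index over the order's Company.Id
    new IndexQuery { Query = "Company_Id:companies/11" },
    new ScriptedPatchRequest
    {
        Script = "this.Company.Name = 'The New Company Name';"
    });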
The RavenDB example data
The de-facto sample data for SQL Server is the Northwind database. And by sheer coincidence it so happens that RavenDB also ships with this same database, except now in document form. With lots of .NET developers being familiar with SQL Server this Northwind database is often the first stop for seeing how a document database should be constructed.
(screenshot: the Northwind collections in the RavenDB studio)
As you can see in the screenshot from the RavenDB studio a relatively small number of collections replaces the tables from SQL Server. Nice :-)
(screenshot: an Order document as shown in the RavenDB studio)
The structure used to save an order is also nice and simple, just the Order and OrderLine classes saved in a single document.
1: public class Order
2: {
3: public string Id { get; set; }
4: public string Company { get; set; }
5: public string Employee { get; set; }
6: public DateTime OrderedAt { get; set; }
7: public DateTime RequireAt { get; set; }
8: public DateTime? ShippedAt { get; set; }
9: public Address ShipTo { get; set; }
10: public string ShipVia { get; set; }
11: public decimal Freight { get; set; }
12: public List<OrderLine> Lines { get; set; }
13: }
14:
15: public class OrderLine
16: {
17: public string Product { get; set; }
18: public string ProductName { get; set; }
19: public decimal PricePerUnit { get; set; }
20: public int Quantity { get; set; }
21: public decimal Discount { get; set; }
22: }
One missing thing
Nice as this may be there is one missing thing. Other than the name of the product being sold and its price there is no data denormalized. This means that if we want to display the order to the user, even for the most basic of uses, we will need to load additional documents. For example the Company property in an order just contains the identity of a customer. If we want to display the order the very least we would have to do is load the company and display the customer's name instead of its identity. And the same is true for the employee and shipper.
While this sample database is not denormalized it turns out it is quite easy to do so ourselves.
Denormalizing the RavenDB Northwind database
The first step is to store the related name along with each referred to identity as seen below.
(screenshot: the denormalized Order document, with an Id and Name stored for each reference)
The order is the same but this time we can do common user interaction operations with just the one document and not be required to load additional documents. It turns out this is quite easy to do. The RavenDB documentation has a nice description of how to do that using INamedDocument and DenormalizedReference<T>. Using this technique makes it really easy and consistent to work with denormalized data and create a document structure like the one above. The changes to the Order and OrderLine classes are minimal. All I had to do is replace the string typed Company property with one of type DenormalizedReference<Company>.
1: public class Order
2: {
3: public string Id { get; set; }
4: public DenormalizedReference<Company> Company { get; set; }
5: public DenormalizedReference<Employee> Employee { get; set; }
6: public DateTime OrderedAt { get; set; }
7: public DateTime RequireAt { get; set; }
8: public DateTime? ShippedAt { get; set; }
9: public Address ShipTo { get; set; }
10: public DenormalizedReference<Shipper> ShipVia { get; set; }
11: public decimal Freight { get; set; }
12: public List<OrderLine> Lines { get; set; }
13: }
14:
15: public class OrderLine
16: {
17: public DenormalizedReference<Product> Product { get; set; }
18: public string ProductName { get; set; }
19: public decimal PricePerUnit { get; set; }
20: public int Quantity { get; set; }
21: public decimal Discount { get; set; }
22: }
The DenormalizedReference<T> and INamedDocument are also really simple and straight from the RavenDB documentation.
1: public class DenormalizedReference<T> where T : INamedDocument
2: {
3: public string Id { get; set; }
4: public string Name { get; set; }
5:
6: public static implicit operator DenormalizedReference<T>(T doc)
7: {
8: return new DenormalizedReference<T>
9: {
10: Id = doc.Id,
11: Name = doc.Name
12: };
13: }
14: }
15:
16: public interface INamedDocument
17: {
18: string Id { get; }
19: string Name { get; }
20: }
The implicit cast operator in the DenormalizedReference<T> makes using this really simple. Just assign a property and it will take care of creating the proper reference.
1: var order = session.Load<Order>("orders/42");
2: order.Company = session.Load<Company>("companies/11");
One useful extension method
Loading the single document and doing common operations should be easy now but there are still operations where you will need more data from the related entity. Loading them is easy enough.
1: var customer = session.Load<Company>(order.Company.Id);
However using the DenormalizedReference<T> the structure and type is already captured in the Order class. Using this with a simple extension method makes the code even simpler which is always nice :-)
1: public static class IDocumentSessionExtensions
2: {
3: public static T Load<T>(this IDocumentSession session, DenormalizedReference<T> reference)
4: where T : INamedDocument
5: {
6: return session.Load<T>(reference.Id);
7: }
8: }
This simple extension method will let is load the customer as follows:
1: var customer = session.Load(order.Company);
Saves another few keystrokes and completely type safe. Sweet :-)
Enjoy!
Social Media & Self Awareness: Everyone Curates a Persona
In a world saturated with social media all people are curators. The ways we organize, filter, select and edit material on our feeds like Facebook, Instagram and Twitter reflect the ways we mediate our personal and cultural representations.
By making these choices we curate a social media persona and experience. While these decisions may be unconsciously and consciously influenced by social factors like demographics and location, overall we pretty much choose what we share and view. The popularity of hashtags like #ootd and #tbt, and the cultural phenomenon that is the "selfie" reflect the ways this self-branding is just a part of daily life.
Obviously a newsfeed does not have the same depth that is a person. We are a multiplicity of conflicting factors all at once- identity is never stable. Yet we build chains of posts and tweets which represent an evolving self-representation.
I personally enjoy this process and take pleasure in the string of visuals, quotes and links I choose. I use a #BabaChic hashtag for my personal style, I have a poetry and fashion blog, and I take tons of photos because I like to create and share.
When I reflect back on past posts, I can see patterns in the imagery that inspired me that I was unaware of at the time. The question is then, how conscious am I as I engage with social media?
Work by Erik Kessels, Taken at Pier 24
When styling with a client we bring consciousness to self-representation through fashion choices. If we can wake up in the morning and check in to see how we feel and translate that into an aesthetic, we are proactively adding awareness to the ways we interact in the world. Maybe we can also add pause and check in to notice the ways we create our persona through social media.
The images and text we share reflect interests, habits, attitudes and behaviors. If this goes unreflected upon we remain within our patterns unconsciously. Perhaps in gaining this self-awareness, we can think more critically about the things we reflect to the world and the information we choose to consume.
Work by Erik Kessels, Taken at Pier 24
The body and clothing have always been a space for curation and narrative. Clothing is an immediate visual marker where styles stereotypically suggest ways of being.
This is always changing and relative to context; for instance, the leather jacket used to stereotype a rebel and now it's part of a normcore Gap ad campaign. But clothing is complicated because we never really just see clothes- we see clothes on a body, and the body is always marked by other social stereotypes like race, gender and ability. Furthermore presenting the body and identity through an array of digital spaces further complicates the ways we perceive.
Work by Maurizio Anzeri, Taken at Pier 24
The questions I find myself asking lately around social media and identity are:
1. What is the relationship between who we think we are and the image we project?
2. How dependent are we on external feedback from "friends" and "followers" for self-validation?
3. How can social media authentically represent self?
4. How are we constructing personal notions of truth by curating the information we engage with?
I have no direct answers- mostly passing thoughts and questions- but hopefully social media use, just like daily dress, can become a practice to check in and become more aware of the identity we project into the world and the ways we curate truth.
Work by Joachim Schmid, Taken at Pier 24
This post was inspired by an afternoon checking out the 'Secondhand' exhibit at Pier 24, a free San Francisco-based photo gallery. I took the photos above at the exhibit.
Pier 24 doesn't let more than a few people view at a time, creating a unique opportunity to be alone with art. The artists in the exhibit used techniques like found imagery, historical photos, and various tactile treatments of digital imagery to talk about the ways the role of the curator has expanded from the museum to an aspect of everyday life.
|
__label__pos
| 0.710811 |
How do you debug dependency injection (using Unity DI) when the dependency does not instantiate?
eg Given a service class with dependencies:
public class FooService : IFooService
{
[Dependency]
public BarService BarService { get; set; }
[Dependency]
public AnotherService AnotherService { get; set; }
// other code fails because BarService and AnotherService are null
}
And in Global.asax.cs
private void ConfigureIoC()
{
container
.ConfigureAutoRegistration()
.LoadAssembliesFrom(assemblyPaths)
.ExcludeSystemAssemblies()
.Include(If.Any, Then.Register())
.ApplyAutoRegistration();
var serviceLocator = new UnityServiceLocator(container);
ServiceLocator.SetLocatorProvider(() => serviceLocator);
}
The IFooService is also instantiated by Unity, but that uses constructor injection instead (and it works):
public class FooController : Controller
{
private readonly IFooService _fooService;
public FooController(IFooService fooService)
{
_fooService = fooService;
}
}
How can I debug this to see why the dependencies are failing to instantiate? No exceptions are being thrown (or if they are, then Elmah is not catching and logging them).
Please avoid including things like _C#, Debugging, Unity, dependency injection_ in your question titles. That is what the tags are for. – M.Babcock Mar 9 '12 at 1:21
What I usually do in cases like this is linking my code unit with the source project of Unity, and then I step through the Unity code, to see how stuff is being resolved. – zespri Mar 9 '12 at 4:17
2 Answers
Accepted answer (2 votes):
The dependency is not injected because the DependencyAttribute is on the concrete class instead of the interface.
As DI attributes can be harmful, I would recommend you change the registration to
container.RegisterType<IFooService,FooService>(new InjectionProperty("BarService"), new InjectionProperty("AnotherService"));
Resolving IFooService will then return an instance of FooService with the injected dependencies.
Even better advice would be to use constructor injection instead of property injection. – Steven Mar 9 '12 at 8:06
@Steven Depending on whether these dependencies are "must have" (constructor injection) or "nice to have" (property injection) dependencies you are absolutely right. – Sebastian Weber Mar 9 '12 at 8:13
I must say I'm a bit dazzled that Unity doesn't fail when a property that is explicitly decorated with the [Dependency] attribute can't be injected. Failing silently makes the configuration much more fragile. – Steven Mar 9 '12 at 8:17
@Steven It is not failing, it is exactly doing what you ask for. "Give me an IFooService". And so it does. If you ask for a FooService it will fill the properties. But the interface does not tell Unity to inject anything. – Sebastian Weber Mar 9 '12 at 8:20
No, Unity will fill in the properties that are explicitly decorated, but only if those types are registered. Otherwise it will silently skip them, which is IMO not what you want most of the time, because this means that you can't easily verify the correctness of the DI configuration. – Steven Mar 9 '12 at 8:23
Call container.Resolve<IFooService>();
Where/how is resolution of IFooService happening?
I updated the question to show how FooService is instantiated, and that part works. – JK. Mar 9 '12 at 4:03
Someone who knows more about Unity and setter injection may have to answer (I haven't used it in a while), but I would try temporarily making the dependencies all constructor-injected, just to see if they resolve. If they don't, the error should tell you why. If they do, then it must be how you have the setter injection set up. – Phil Sandler Mar 9 '12 at 5:11
|
__label__pos
| 0.619034 |
Social media affecting relationships
By | November 14, 2018
social media affecting relationships photo - 1
Social media today are not just a place to chat. Today they are much more …
Are social media taking over the world?
Are you thinking about how social media affect your relationships?
– No.
– Why not?
Today, all areas of our lives are tightly interwoven with social media, be it family, friends, work or leisure. To stay in trend and keep up with these communications, you need to pay attention to how social media affect relationships.
What do social media do to relationships?
Today there is everything in social media:
First is communication. You can talk to your sister, who is sitting in the next room, or you can leave a tweet that thousands of people will read on another continent.
You can learn whatever you want, or you can train yourself using social media.
You can create a business on social media and make yourself and other people rich.
Politicians today use social media to conduct their policies.
And these are only the smallest of the opportunities social networks offer.
Paying attention to how social media affect relationships will help you in all of this.
How to deal with social media affecting relationships?
Feel free to use social media, but stay aware of how they affect your relationships. Otherwise, time will run far ahead and leave you behind.
|
__label__pos
| 0.645662 |
Redux Ducks: Restructure your Redux App with Ducks
This tutorial is part 2 of 2 in the series.
Part 1:
The Redux Ducks: Restructure your Redux App with Ducks tutorial will teach you how to bundle action creators, action types and reducers side by side in your Redux app.
Usually in the beginning of learning Redux you have a technical separation of concerns which gets reflected in the folder structure. Basically there is one folder for your actions and one folder for your reducers. Additionally you collect all action types in one place so that they can be reused by reducers and actions.
Since it is often the case that an action is followed by a reducer and both of them share the same action type, a new recommendation came up to collocate all of them in one place. They call it Ducks.
The tutorial itself will not strictly follow all proposed guidelines of the recommendation, but it gives you a good understanding of how your app would look after the refactoring and of the advantages of using the bundling.
Seed Project?
If you don’t have the outcome of The SoundCloud Client in React + Redux, checkout this repository. Clone the repository and use the Base folder named base-init/ as seed project. The seed project is the same which you would implement when you would step through the initial tutorial. Next to the Base folder you will find several Extension folders, which will show the outcome of each tutorial which builds up on The SoundCloud Client in React + Redux.
When you have your own The SoundCloud Client in React + Redux project, you can continue with the Table of Contents and ignore the following instructions.
When you want to use the seed project, make sure to clone the repository. Use the base-init/ folder as your project folder. Make sure that your project starts with npm install && npm start.
Table of Contents
Refactor Auth Redux Duck
Basically we have two ducks in the SoundCloud app: There is one place for the authentication and data fetching and another place where the tracks are saved and played.
Let’s begin with the auth duck: In the existent app you will find the auth actions in src/actions/auth.js and the reducer in src/reducers/auth.js. Moreover there is one action type in the src/constants/actionTypes.js file.
A new folder for the ducks instead of actions / reducers folder pairs will help us to collocate actions and reducers.
From src folder:
mkdir ducks
cd ducks
touch auth.js
At first we can move the sole auth action type.
src/ducks/auth.js
const ME_SET = 'ME_SET';
As you can see, we are not exporting anything at this time. We can further refactor the action type itself to represent the duck bundle. In a growing application this is an improved way to identify the actions and their places in the source code.
src/ducks/auth.js
const ME_SET = 'auth/ME_SET';
The next step is to move the action creators. I have highlighted the important pieces after the copy and paste from src/actions/auth.js.
src/ducks/auth.js
import SC from 'soundcloud';
import { setTracks as doSetTracks } from '../actions';
const ME_SET = 'auth/ME_SET';
function doSetMe(user) {
return {
type: ME_SET,
user
};
}
function doAuth() {
return function (dispatch) {
SC.connect().then((session) => {
dispatch(doFetchMe(session));
dispatch(doFetchStream(session));
});
};
}
function doFetchMe(session) {
return function (dispatch) {
fetch(`//api.soundcloud.com/me?oauth_token=${session.oauth_token}`)
.then((response) => response.json())
.then((data) => {
dispatch(doSetMe(data));
});
};
}
function doFetchStream(session) {
return function (dispatch) {
fetch(`//api.soundcloud.com/me/activities?limit=20&offset=0&oauth_token=${session.oauth_token}`)
.then((response) => response.json())
.then((data) => {
dispatch(doSetTracks(data.collection));
});
};
}
Again we are not exporting anything. Moreover, the action creators got prefixed. Since reducers and action creators will live side by side, this is a good way to keep the naming in your file tidy. Additionally, we need to import the action creator to set tracks like we did before, but with an alias to follow the new naming convention. We will refactor that later when we have a duck for the track bundle as well.
Last but not least let’s move our reducer.
src/ducks/auth.js
import { CLIENT_ID, REDIRECT_URI } from '../constants/auth';
import { setTracks as doSetTracks } from '../actions';
const ME_SET = 'auth/ME_SET';
function doSetMe(user) {
...
}
function doAuth() {
...
}
function doFetchMe(session) {
...
}
function doFetchStream(session) {
...
}
const initialState = {};
function reducer(state = initialState, action) {
switch (action.type) {
case ME_SET:
return applySetMe(state, action);
}
return state;
}
function applySetMe(state, action) {
const { user } = action;
return { ...state, user };
}
Note that the reducer is a named function and we prefixed its helper functions as well. As a last step, we have to export all the necessary stakeholders.
src/ducks/auth.js
...
const actionCreators = {
doAuth
};
const actionTypes = {
ME_SET
};
export {
actionCreators,
actionTypes
};
export default reducer;
Usually you don't need to export the action types, but there may be cases where you have to. They could be used by tests or by other side effect middleware like redux-saga. The example just gives a suggestion of how you would accomplish a clean export of all stakeholders. A small sketch of such a test follows below.
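For illustration only, here is a minimal sketch of how the exported action types and creators could be consumed from a mocha/chai test. The test file location and its relative import path are assumptions on top of the seed project, not part of the tutorial itself.
test/ducks/auth.spec.js (hypothetical)
import { expect } from 'chai';
import { actionCreators, actionTypes } from '../../src/ducks/auth';
describe('auth duck', () => {
  // the action type is namespaced with the duck name
  it('exposes a namespaced action type', () => {
    expect(actionTypes.ME_SET).to.equal('auth/ME_SET');
  });
  // doAuth is a thunk, so calling it returns a function that expects dispatch
  it('returns a thunk from doAuth', () => {
    expect(actionCreators.doAuth()).to.be.a('function');
  });
});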
Now it is time to clean up and remove the files which are unused.
From src folder:
rm actions/auth.js
rm reducers/auth.js
Remove the unused action type ME_SET as well. Keep the remaining action types for now.
src/constants/actionTypes.js
export const TRACKS_SET = 'TRACKS_SET';
export const TRACK_PLAY = 'TRACK_PLAY';
Moreover, we can remove the dependency in the entry point of our legacy actions. The file should look like the following without the auth bundle:
src/actions/index.js
import { setTracks, playTrack } from './track';
export {
setTracks,
playTrack
};
After the auth duck is finished and all action creators and reducers are side by side, we can use the new reducer location to export the combined reducers for the store and use the relocated action creators in the Stream container.
src/reducers/index.js
import { combineReducers } from 'redux';
import { routerReducer } from 'react-router-redux';
import auth from '../ducks/auth';
import track from './track';
export default combineReducers({
auth,
track,
routing: routerReducer
});
src/components/Stream/index.js
import React from 'react';
import { bindActionCreators } from 'redux';
import { connect } from 'react-redux';
import * as actions from '../../actions';
import { actionCreators as authActionCreators } from '../../ducks/auth';
import Stream from './presenter';
function mapStateToProps(state) {
const { user } = state.auth;
const { tracks, activeTrack } = state.track;
return {
user,
tracks,
activeTrack
}
}
function mapDispatchToProps(dispatch) {
return {
onPlay: bindActionCreators(actions.playTrack, dispatch),
onAuth: bindActionCreators(authActionCreators.doAuth, dispatch)
};
}
export default connect(mapStateToProps, mapDispatchToProps)(Stream);
The app should still be intact after all, but it comes with our first duck!
From root folder:
npm start
Refactor Redux Track Duck
Now it’s time to create the track duck.
From ducks folder:
touch track.js
Let's move the action types, action creators and reducer. Again I highlighted the changed pieces after copying and pasting the relevant lines of code.
src/ducks/track.js
const TRACKS_SET = 'track/TRACKS_SET';
const TRACK_PLAY = 'track/TRACK_PLAY';
function doSetTracks(tracks) {
return {
type: TRACKS_SET,
tracks
};
};
function doPlayTrack(track) {
return {
type: TRACK_PLAY,
track
};
}
const initialState = {
tracks: [],
activeTrack: null
};
function reducer(state = initialState, action) {
switch (action.type) {
case TRACKS_SET:
return applySetTracks(state, action);
case TRACK_PLAY:
return applySetPlay(state, action);
}
return state;
}
function applySetTracks(state, action) {
const { tracks } = action;
return { ...state, tracks };
}
function applySetPlay(state, action) {
const { track } = action;
return { ...state, activeTrack: track };
}
const actionCreators = {
doSetTracks,
doPlayTrack
};
const actionTypes = {
TRACKS_SET,
TRACK_PLAY
};
export {
actionCreators,
actionTypes
};
export default reducer;
Now we provide the store with the relocated reducer like we did with the auth duck.
src/reducers/index.js
import { combineReducers } from 'redux';
import { routerReducer } from 'react-router-redux';
import auth from '../ducks/auth';
import track from '../ducks/track';
export default combineReducers({
auth,
track,
routing: routerReducer
});
The same applies to the Stream container component. We can import the actionCreators from their new place.
src/components/Stream/index.js
import React from 'react';
import { bindActionCreators } from 'redux';
import { connect } from 'react-redux';
import { actionCreators as trackActionCreators } from '../../ducks/track';
import { actionCreators as authActionCreators } from '../../ducks/auth';
import Stream from './presenter';
function mapStateToProps(state) {
const { user } = state.auth;
const { tracks, activeTrack } = state.track;
return {
user,
tracks,
activeTrack
}
}
function mapDispatchToProps(dispatch) {
return {
onPlay: bindActionCreators(trackActionCreators.doPlayTrack, dispatch),
onAuth: bindActionCreators(authActionCreators.doAuth, dispatch)
};
}
export default connect(mapStateToProps, mapDispatchToProps)(Stream);
Remember when we had to import setTracks with the doSetTracks alias in the auth duck? Now that we have renamed it as part of the track duck refactoring, we can change that in the auth duck.
src/ducks/auth.js
import SC from 'soundcloud';
import { actionCreators as trackActionCreators } from './track';
const ME_SET = 'auth/ME_SET';
function doSetMe(user) {
return {
type: ME_SET,
user
};
}
function doAuth() {
...
}
function doFetchMe() {
...
}
function doFetchStream(session) {
return function (dispatch) {
fetch(`//api.soundcloud.com/me/activities?limit=20&offset=0&oauth_token=${session.oauth_token}`)
.then((response) => response.json())
.then((data) => {
dispatch(trackActionCreators.doSetTracks(data.collection));
});
};
}
const initialState = {};
...
As a last step, we can now remove all unused folders and files.
From src folder:
rm -rf actions
rm constants/actionTypes.js
rm reducers/track.js
After the refactoring the folder structure should look like the following:
Folder structure:
-src
--components
--constants
--ducks
---auth.js
---track.js
--reducers
--stores
--index.js
Finally we have a clean bundling of {action type, action creator, reducer} tuples with the ducks pattern. We still have a reducers folder to combine all of the reducers for the store, but one could move this next to the store to get rid of the reducers folder as well, as sketched below. After that the app would only have components and ducks as its main bundling folders.
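As a rough sketch of that optional step, the combined reducer could live right next to the store configuration. The file name and the middleware set below are assumptions based on the dependencies listed later; the seed project's actual store setup may differ (for example by also wiring up redux-logger).
src/stores/configureStore.js (sketch)
import { createStore, combineReducers, applyMiddleware } from 'redux';
import { routerReducer } from 'react-router-redux';
import thunk from 'redux-thunk';
import auth from '../ducks/auth';
import track from '../ducks/track';
// all duck reducers are combined directly where the store is created,
// so the separate reducers folder is no longer needed
const rootReducer = combineReducers({
  auth,
  track,
  routing: routerReducer
});
export default function configureStore(initialState) {
  return createStore(rootReducer, initialState, applyMiddleware(thunk));
}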
Troubleshoot
You may encounter issues in this tutorial. Here you will find some references on how to handle them.
Dependencies
In case you want to know which versions npm installed during this tutorial, here is a list of all npm packages in my package.json.
"devDependencies": {
"babel-core": "^6.9.1",
"babel-loader": "^6.2.4",
"babel-preset-es2015": "^6.9.0",
"babel-preset-react": "^6.5.0",
"babel-preset-stage-2": "^6.5.0",
"chai": "^3.5.0",
"enzyme": "^2.3.0",
"exports-loader": "^0.6.3",
"imports-loader": "^0.6.5",
"jsdom": "^9.2.1",
"mocha": "^2.5.3",
"react-addons-test-utils": "^15.1.0",
"react-hot-loader": "^1.3.0",
"webpack": "^1.13.1",
"webpack-dev-server": "^1.14.1"
},
"dependencies": {
"react": "^15.1.0",
"react-dom": "^15.1.0",
"react-redux": "^4.4.5",
"react-router": "^2.4.1",
"react-router-redux": "^4.0.5",
"redux": "^3.5.2",
"redux-logger": "^2.6.1",
"redux-thunk": "^2.1.0",
"soundcloud": "^3.1.2",
"whatwg-fetch": "^1.0.0"
}
Final Thoughts
Have a look again at The SoundCloud Client in React + Redux tutorial for more small tutorials which build on top of the SoundCloud project. All of these follow-up tutorials will get released in the repository as well.
|
__label__pos
| 0.951314 |
linux/drivers/hwmon/emc2103.c
1/*
2 * emc2103.c - Support for SMSC EMC2103
3 * Copyright (c) 2010 SMSC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#include <linux/module.h>
21#include <linux/init.h>
22#include <linux/slab.h>
23#include <linux/jiffies.h>
24#include <linux/i2c.h>
25#include <linux/hwmon.h>
26#include <linux/hwmon-sysfs.h>
27#include <linux/err.h>
28#include <linux/mutex.h>
29
30/* Addresses scanned */
31static const unsigned short normal_i2c[] = { 0x2E, I2C_CLIENT_END };
32
33static const u8 REG_TEMP[4] = { 0x00, 0x02, 0x04, 0x06 };
34static const u8 REG_TEMP_MIN[4] = { 0x3c, 0x38, 0x39, 0x3a };
35static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 };
36
37#define REG_CONF1 0x20
38#define REG_TEMP_MAX_ALARM 0x24
39#define REG_TEMP_MIN_ALARM 0x25
40#define REG_FAN_CONF1 0x42
41#define REG_FAN_TARGET_LO 0x4c
42#define REG_FAN_TARGET_HI 0x4d
43#define REG_FAN_TACH_HI 0x4e
44#define REG_FAN_TACH_LO 0x4f
45#define REG_PRODUCT_ID 0xfd
46#define REG_MFG_ID 0xfe
47
48/* equation 4 from datasheet: rpm = (3932160 * multipler) / count */
49#define FAN_RPM_FACTOR 3932160
50
51/*
52 * 2103-2 and 2103-4's 3rd temperature sensor can be connected to two diodes
53 * in anti-parallel mode, and in this configuration both can be read
54 * independently (so we have 4 temperature inputs). The device can't
55 * detect if it's connected in this mode, so we have to manually enable
56 * it. Default is to leave the device in the state it's already in (-1).
57 * This parameter allows APD mode to be optionally forced on or off
58 */
59static int apd = -1;
60module_param(apd, bint, 0);
61MODULE_PARM_DESC(init, "Set to zero to disable anti-parallel diode mode");
62
63struct temperature {
64 s8 degrees;
65 u8 fraction; /* 0-7 multiples of 0.125 */
66};
67
68struct emc2103_data {
69 struct device *hwmon_dev;
70 struct mutex update_lock;
71 bool valid; /* registers are valid */
72 bool fan_rpm_control;
73 int temp_count; /* num of temp sensors */
74 unsigned long last_updated; /* in jiffies */
75 struct temperature temp[4]; /* internal + 3 external */
76 s8 temp_min[4]; /* no fractional part */
77 s8 temp_max[4]; /* no fractional part */
78 u8 temp_min_alarm;
79 u8 temp_max_alarm;
80 u8 fan_multiplier;
81 u16 fan_tach;
82 u16 fan_target;
83};
84
85static int read_u8_from_i2c(struct i2c_client *client, u8 i2c_reg, u8 *output)
86{
87 int status = i2c_smbus_read_byte_data(client, i2c_reg);
88 if (status < 0) {
89 dev_warn(&client->dev, "reg 0x%02x, err %d\n",
90 i2c_reg, status);
91 } else {
92 *output = status;
93 }
94 return status;
95}
96
97static void read_temp_from_i2c(struct i2c_client *client, u8 i2c_reg,
98 struct temperature *temp)
99{
100 u8 degrees, fractional;
101
102 if (read_u8_from_i2c(client, i2c_reg, &degrees) < 0)
103 return;
104
105 if (read_u8_from_i2c(client, i2c_reg + 1, &fractional) < 0)
106 return;
107
108 temp->degrees = degrees;
109 temp->fraction = (fractional & 0xe0) >> 5;
110}
111
112static void read_fan_from_i2c(struct i2c_client *client, u16 *output,
113 u8 hi_addr, u8 lo_addr)
114{
115 u8 high_byte, lo_byte;
116
117 if (read_u8_from_i2c(client, hi_addr, &high_byte) < 0)
118 return;
119
120 if (read_u8_from_i2c(client, lo_addr, &lo_byte) < 0)
121 return;
122
123 *output = ((u16)high_byte << 5) | (lo_byte >> 3);
124}
125
126static void write_fan_target_to_i2c(struct i2c_client *client, u16 new_target)
127{
128 u8 high_byte = (new_target & 0x1fe0) >> 5;
129 u8 low_byte = (new_target & 0x001f) << 3;
130 i2c_smbus_write_byte_data(client, REG_FAN_TARGET_LO, low_byte);
131 i2c_smbus_write_byte_data(client, REG_FAN_TARGET_HI, high_byte);
132}
133
134static void read_fan_config_from_i2c(struct i2c_client *client)
135
136{
137 struct emc2103_data *data = i2c_get_clientdata(client);
138 u8 conf1;
139
140 if (read_u8_from_i2c(client, REG_FAN_CONF1, &conf1) < 0)
141 return;
142
143 data->fan_multiplier = 1 << ((conf1 & 0x60) >> 5);
144 data->fan_rpm_control = (conf1 & 0x80) != 0;
145}
146
147static struct emc2103_data *emc2103_update_device(struct device *dev)
148{
149 struct i2c_client *client = to_i2c_client(dev);
150 struct emc2103_data *data = i2c_get_clientdata(client);
151
152 mutex_lock(&data->update_lock);
153
154 if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
155 || !data->valid) {
156 int i;
157
158 for (i = 0; i < data->temp_count; i++) {
159 read_temp_from_i2c(client, REG_TEMP[i], &data->temp[i]);
160 read_u8_from_i2c(client, REG_TEMP_MIN[i],
161 &data->temp_min[i]);
162 read_u8_from_i2c(client, REG_TEMP_MAX[i],
163 &data->temp_max[i]);
164 }
165
166 read_u8_from_i2c(client, REG_TEMP_MIN_ALARM,
167 &data->temp_min_alarm);
168 read_u8_from_i2c(client, REG_TEMP_MAX_ALARM,
169 &data->temp_max_alarm);
170
171 read_fan_from_i2c(client, &data->fan_tach,
172 REG_FAN_TACH_HI, REG_FAN_TACH_LO);
173 read_fan_from_i2c(client, &data->fan_target,
174 REG_FAN_TARGET_HI, REG_FAN_TARGET_LO);
175 read_fan_config_from_i2c(client);
176
177 data->last_updated = jiffies;
178 data->valid = true;
179 }
180
181 mutex_unlock(&data->update_lock);
182
183 return data;
184}
185
186static ssize_t
187show_temp(struct device *dev, struct device_attribute *da, char *buf)
188{
189 int nr = to_sensor_dev_attr(da)->index;
190 struct emc2103_data *data = emc2103_update_device(dev);
191 int millidegrees = data->temp[nr].degrees * 1000
192 + data->temp[nr].fraction * 125;
193 return sprintf(buf, "%d\n", millidegrees);
194}
195
196static ssize_t
197show_temp_min(struct device *dev, struct device_attribute *da, char *buf)
198{
199 int nr = to_sensor_dev_attr(da)->index;
200 struct emc2103_data *data = emc2103_update_device(dev);
201 int millidegrees = data->temp_min[nr] * 1000;
202 return sprintf(buf, "%d\n", millidegrees);
203}
204
205static ssize_t
206show_temp_max(struct device *dev, struct device_attribute *da, char *buf)
207{
208 int nr = to_sensor_dev_attr(da)->index;
209 struct emc2103_data *data = emc2103_update_device(dev);
210 int millidegrees = data->temp_max[nr] * 1000;
211 return sprintf(buf, "%d\n", millidegrees);
212}
213
214static ssize_t
215show_temp_fault(struct device *dev, struct device_attribute *da, char *buf)
216{
217 int nr = to_sensor_dev_attr(da)->index;
218 struct emc2103_data *data = emc2103_update_device(dev);
219 bool fault = (data->temp[nr].degrees == -128);
220 return sprintf(buf, "%d\n", fault ? 1 : 0);
221}
222
223static ssize_t
224show_temp_min_alarm(struct device *dev, struct device_attribute *da, char *buf)
225{
226 int nr = to_sensor_dev_attr(da)->index;
227 struct emc2103_data *data = emc2103_update_device(dev);
228 bool alarm = data->temp_min_alarm & (1 << nr);
229 return sprintf(buf, "%d\n", alarm ? 1 : 0);
230}
231
232static ssize_t
233show_temp_max_alarm(struct device *dev, struct device_attribute *da, char *buf)
234{
235 int nr = to_sensor_dev_attr(da)->index;
236 struct emc2103_data *data = emc2103_update_device(dev);
237 bool alarm = data->temp_max_alarm & (1 << nr);
238 return sprintf(buf, "%d\n", alarm ? 1 : 0);
239}
240
241static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
242 const char *buf, size_t count)
243{
244 int nr = to_sensor_dev_attr(da)->index;
245 struct i2c_client *client = to_i2c_client(dev);
246 struct emc2103_data *data = i2c_get_clientdata(client);
247 long val;
248
249 int result = kstrtol(buf, 10, &val);
250 if (result < 0)
251 return -EINVAL;
252
253 val = DIV_ROUND_CLOSEST(val, 1000);
254 if ((val < -63) || (val > 127))
255 return -EINVAL;
256
257 mutex_lock(&data->update_lock);
258 data->temp_min[nr] = val;
259 i2c_smbus_write_byte_data(client, REG_TEMP_MIN[nr], val);
260 mutex_unlock(&data->update_lock);
261
262 return count;
263}
264
265static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
266 const char *buf, size_t count)
267{
268 int nr = to_sensor_dev_attr(da)->index;
269 struct i2c_client *client = to_i2c_client(dev);
270 struct emc2103_data *data = i2c_get_clientdata(client);
271 long val;
272
273 int result = kstrtol(buf, 10, &val);
274 if (result < 0)
275 return -EINVAL;
276
277 val = DIV_ROUND_CLOSEST(val, 1000);
278 if ((val < -63) || (val > 127))
279 return -EINVAL;
280
281 mutex_lock(&data->update_lock);
282 data->temp_max[nr] = val;
283 i2c_smbus_write_byte_data(client, REG_TEMP_MAX[nr], val);
284 mutex_unlock(&data->update_lock);
285
286 return count;
287}
288
289static ssize_t
290show_fan(struct device *dev, struct device_attribute *da, char *buf)
291{
292 struct emc2103_data *data = emc2103_update_device(dev);
293 int rpm = 0;
294 if (data->fan_tach != 0)
295 rpm = (FAN_RPM_FACTOR * data->fan_multiplier) / data->fan_tach;
296 return sprintf(buf, "%d\n", rpm);
297}
298
299static ssize_t
300show_fan_div(struct device *dev, struct device_attribute *da, char *buf)
301{
302 struct emc2103_data *data = emc2103_update_device(dev);
303 int fan_div = 8 / data->fan_multiplier;
304 return sprintf(buf, "%d\n", fan_div);
305}
306
307/*
308 * Note: we also update the fan target here, because its value is
309 * determined in part by the fan clock divider. This follows the principle
310 * of least surprise; the user doesn't expect the fan target to change just
311 * because the divider changed.
312 */
313static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
314 const char *buf, size_t count)
315{
316 struct emc2103_data *data = emc2103_update_device(dev);
317 struct i2c_client *client = to_i2c_client(dev);
318 int new_range_bits, old_div = 8 / data->fan_multiplier;
319 long new_div;
320
321 int status = kstrtol(buf, 10, &new_div);
322 if (status < 0)
323 return -EINVAL;
324
325 if (new_div == old_div) /* No change */
326 return count;
327
328 switch (new_div) {
329 case 1:
330 new_range_bits = 3;
331 break;
332 case 2:
333 new_range_bits = 2;
334 break;
335 case 4:
336 new_range_bits = 1;
337 break;
338 case 8:
339 new_range_bits = 0;
340 break;
341 default:
342 return -EINVAL;
343 }
344
345 mutex_lock(&data->update_lock);
346
347 status = i2c_smbus_read_byte_data(client, REG_FAN_CONF1);
348 if (status < 0) {
349 dev_dbg(&client->dev, "reg 0x%02x, err %d\n",
350 REG_FAN_CONF1, status);
351 mutex_unlock(&data->update_lock);
352 return -EIO;
353 }
354 status &= 0x9F;
355 status |= (new_range_bits << 5);
356 i2c_smbus_write_byte_data(client, REG_FAN_CONF1, status);
357
358 data->fan_multiplier = 8 / new_div;
359
360 /* update fan target if high byte is not disabled */
361 if ((data->fan_target & 0x1fe0) != 0x1fe0) {
362 u16 new_target = (data->fan_target * old_div) / new_div;
363 data->fan_target = min(new_target, (u16)0x1fff);
364 write_fan_target_to_i2c(client, data->fan_target);
365 }
366
367 /* invalidate data to force re-read from hardware */
368 data->valid = false;
369
370 mutex_unlock(&data->update_lock);
371 return count;
372}
373
374static ssize_t
375show_fan_target(struct device *dev, struct device_attribute *da, char *buf)
376{
377 struct emc2103_data *data = emc2103_update_device(dev);
378 int rpm = 0;
379
380 /* high byte of 0xff indicates disabled so return 0 */
381 if ((data->fan_target != 0) && ((data->fan_target & 0x1fe0) != 0x1fe0))
382 rpm = (FAN_RPM_FACTOR * data->fan_multiplier)
383 / data->fan_target;
384
385 return sprintf(buf, "%d\n", rpm);
386}
387
388static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
389 const char *buf, size_t count)
390{
391 struct emc2103_data *data = emc2103_update_device(dev);
392 struct i2c_client *client = to_i2c_client(dev);
393 long rpm_target;
394
395 int result = kstrtol(buf, 10, &rpm_target);
396 if (result < 0)
397 return -EINVAL;
398
399 /* Datasheet states 16384 as maximum RPM target (table 3.2) */
400 if ((rpm_target < 0) || (rpm_target > 16384))
401 return -EINVAL;
402
403 mutex_lock(&data->update_lock);
404
405 if (rpm_target == 0)
406 data->fan_target = 0x1fff;
407 else
408 data->fan_target = SENSORS_LIMIT(
409 (FAN_RPM_FACTOR * data->fan_multiplier) / rpm_target,
410 0, 0x1fff);
411
412 write_fan_target_to_i2c(client, data->fan_target);
413
414 mutex_unlock(&data->update_lock);
415 return count;
416}
417
418static ssize_t
419show_fan_fault(struct device *dev, struct device_attribute *da, char *buf)
420{
421 struct emc2103_data *data = emc2103_update_device(dev);
422 bool fault = ((data->fan_tach & 0x1fe0) == 0x1fe0);
423 return sprintf(buf, "%d\n", fault ? 1 : 0);
424}
425
426static ssize_t
427show_pwm_enable(struct device *dev, struct device_attribute *da, char *buf)
428{
429 struct emc2103_data *data = emc2103_update_device(dev);
430 return sprintf(buf, "%d\n", data->fan_rpm_control ? 3 : 0);
431}
432
433static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
434 const char *buf, size_t count)
435{
436 struct i2c_client *client = to_i2c_client(dev);
437 struct emc2103_data *data = i2c_get_clientdata(client);
438 long new_value;
439 u8 conf_reg;
440
441 int result = kstrtol(buf, 10, &new_value);
442 if (result < 0)
443 return -EINVAL;
444
445 mutex_lock(&data->update_lock);
446 switch (new_value) {
447 case 0:
448 data->fan_rpm_control = false;
449 break;
450 case 3:
451 data->fan_rpm_control = true;
452 break;
453 default:
454 count = -EINVAL;
455 goto err;
456 }
457
458 result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
459 if (result) {
460 count = result;
461 goto err;
462 }
463
464 if (data->fan_rpm_control)
465 conf_reg |= 0x80;
466 else
467 conf_reg &= ~0x80;
468
469 i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
470err:
471 mutex_unlock(&data->update_lock);
472 return count;
473}
474
475static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
476static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, show_temp_min,
477 set_temp_min, 0);
478static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max,
479 set_temp_max, 0);
480static SENSOR_DEVICE_ATTR(temp1_fault, S_IRUGO, show_temp_fault, NULL, 0);
481static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO, show_temp_min_alarm,
482 NULL, 0);
483static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_temp_max_alarm,
484 NULL, 0);
485
486static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
487static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, show_temp_min,
488 set_temp_min, 1);
489static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max,
490 set_temp_max, 1);
491static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_temp_fault, NULL, 1);
492static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO, show_temp_min_alarm,
493 NULL, 1);
494static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_temp_max_alarm,
495 NULL, 1);
496
497static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
498static SENSOR_DEVICE_ATTR(temp3_min, S_IRUGO | S_IWUSR, show_temp_min,
499 set_temp_min, 2);
500static SENSOR_DEVICE_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max,
501 set_temp_max, 2);
502static SENSOR_DEVICE_ATTR(temp3_fault, S_IRUGO, show_temp_fault, NULL, 2);
503static SENSOR_DEVICE_ATTR(temp3_min_alarm, S_IRUGO, show_temp_min_alarm,
504 NULL, 2);
505static SENSOR_DEVICE_ATTR(temp3_max_alarm, S_IRUGO, show_temp_max_alarm,
506 NULL, 2);
507
508static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3);
509static SENSOR_DEVICE_ATTR(temp4_min, S_IRUGO | S_IWUSR, show_temp_min,
510 set_temp_min, 3);
511static SENSOR_DEVICE_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max,
512 set_temp_max, 3);
513static SENSOR_DEVICE_ATTR(temp4_fault, S_IRUGO, show_temp_fault, NULL, 3);
514static SENSOR_DEVICE_ATTR(temp4_min_alarm, S_IRUGO, show_temp_min_alarm,
515 NULL, 3);
516static SENSOR_DEVICE_ATTR(temp4_max_alarm, S_IRUGO, show_temp_max_alarm,
517 NULL, 3);
518
519static DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL);
520static DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR, show_fan_div, set_fan_div);
521static DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, show_fan_target,
522 set_fan_target);
523static DEVICE_ATTR(fan1_fault, S_IRUGO, show_fan_fault, NULL);
524
525static DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, show_pwm_enable,
526 set_pwm_enable);
527
528/* sensors present on all models */
529static struct attribute *emc2103_attributes[] = {
530 &sensor_dev_attr_temp1_input.dev_attr.attr,
531 &sensor_dev_attr_temp1_min.dev_attr.attr,
532 &sensor_dev_attr_temp1_max.dev_attr.attr,
533 &sensor_dev_attr_temp1_fault.dev_attr.attr,
534 &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
535 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
536 &sensor_dev_attr_temp2_input.dev_attr.attr,
537 &sensor_dev_attr_temp2_min.dev_attr.attr,
538 &sensor_dev_attr_temp2_max.dev_attr.attr,
539 &sensor_dev_attr_temp2_fault.dev_attr.attr,
540 &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
541 &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
542 &dev_attr_fan1_input.attr,
543 &dev_attr_fan1_div.attr,
544 &dev_attr_fan1_target.attr,
545 &dev_attr_fan1_fault.attr,
546 &dev_attr_pwm1_enable.attr,
547 NULL
548};
549
550/* extra temperature sensors only present on 2103-2 and 2103-4 */
551static struct attribute *emc2103_attributes_temp3[] = {
552 &sensor_dev_attr_temp3_input.dev_attr.attr,
553 &sensor_dev_attr_temp3_min.dev_attr.attr,
554 &sensor_dev_attr_temp3_max.dev_attr.attr,
555 &sensor_dev_attr_temp3_fault.dev_attr.attr,
556 &sensor_dev_attr_temp3_min_alarm.dev_attr.attr,
557 &sensor_dev_attr_temp3_max_alarm.dev_attr.attr,
558 NULL
559};
560
561/* extra temperature sensors only present on 2103-2 and 2103-4 in APD mode */
562static struct attribute *emc2103_attributes_temp4[] = {
563 &sensor_dev_attr_temp4_input.dev_attr.attr,
564 &sensor_dev_attr_temp4_min.dev_attr.attr,
565 &sensor_dev_attr_temp4_max.dev_attr.attr,
566 &sensor_dev_attr_temp4_fault.dev_attr.attr,
567 &sensor_dev_attr_temp4_min_alarm.dev_attr.attr,
568 &sensor_dev_attr_temp4_max_alarm.dev_attr.attr,
569 NULL
570};
571
572static const struct attribute_group emc2103_group = {
573 .attrs = emc2103_attributes,
574};
575
576static const struct attribute_group emc2103_temp3_group = {
577 .attrs = emc2103_attributes_temp3,
578};
579
580static const struct attribute_group emc2103_temp4_group = {
581 .attrs = emc2103_attributes_temp4,
582};
583
584static int
585emc2103_probe(struct i2c_client *client, const struct i2c_device_id *id)
586{
587 struct emc2103_data *data;
588 int status;
589
590 if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
591 return -EIO;
592
593 data = devm_kzalloc(&client->dev, sizeof(struct emc2103_data),
594 GFP_KERNEL);
595 if (!data)
596 return -ENOMEM;
597
598 i2c_set_clientdata(client, data);
599 mutex_init(&data->update_lock);
600
601 /* 2103-2 and 2103-4 have 3 external diodes, 2103-1 has 1 */
602 status = i2c_smbus_read_byte_data(client, REG_PRODUCT_ID);
603 if (status == 0x24) {
604 /* 2103-1 only has 1 external diode */
605 data->temp_count = 2;
606 } else {
607 /* 2103-2 and 2103-4 have 3 or 4 external diodes */
608 status = i2c_smbus_read_byte_data(client, REG_CONF1);
609 if (status < 0) {
610 dev_dbg(&client->dev, "reg 0x%02x, err %d\n", REG_CONF1,
611 status);
612 return status;
613 }
614
615 /* detect current state of hardware */
616 data->temp_count = (status & 0x01) ? 4 : 3;
617
618 /* force APD state if module parameter is set */
619 if (apd == 0) {
620 /* force APD mode off */
621 data->temp_count = 3;
622 status &= ~(0x01);
623 i2c_smbus_write_byte_data(client, REG_CONF1, status);
624 } else if (apd == 1) {
625 /* force APD mode on */
626 data->temp_count = 4;
627 status |= 0x01;
628 i2c_smbus_write_byte_data(client, REG_CONF1, status);
629 }
630 }
631
632 /* Register sysfs hooks */
633 status = sysfs_create_group(&client->dev.kobj, &emc2103_group);
634 if (status)
635 return status;
636
637 if (data->temp_count >= 3) {
638 status = sysfs_create_group(&client->dev.kobj,
639 &emc2103_temp3_group);
640 if (status)
641 goto exit_remove;
642 }
643
644 if (data->temp_count == 4) {
645 status = sysfs_create_group(&client->dev.kobj,
646 &emc2103_temp4_group);
647 if (status)
648 goto exit_remove_temp3;
649 }
650
651 data->hwmon_dev = hwmon_device_register(&client->dev);
652 if (IS_ERR(data->hwmon_dev)) {
653 status = PTR_ERR(data->hwmon_dev);
654 goto exit_remove_temp4;
655 }
656
657 dev_info(&client->dev, "%s: sensor '%s'\n",
658 dev_name(data->hwmon_dev), client->name);
659
660 return 0;
661
662exit_remove_temp4:
663 if (data->temp_count == 4)
664 sysfs_remove_group(&client->dev.kobj, &emc2103_temp4_group);
665exit_remove_temp3:
666 if (data->temp_count >= 3)
667 sysfs_remove_group(&client->dev.kobj, &emc2103_temp3_group);
668exit_remove:
669 sysfs_remove_group(&client->dev.kobj, &emc2103_group);
670 return status;
671}
672
673static int emc2103_remove(struct i2c_client *client)
674{
675 struct emc2103_data *data = i2c_get_clientdata(client);
676
677 hwmon_device_unregister(data->hwmon_dev);
678
679 if (data->temp_count == 4)
680 sysfs_remove_group(&client->dev.kobj, &emc2103_temp4_group);
681
682 if (data->temp_count >= 3)
683 sysfs_remove_group(&client->dev.kobj, &emc2103_temp3_group);
684
685 sysfs_remove_group(&client->dev.kobj, &emc2103_group);
686
687 return 0;
688}
689
690static const struct i2c_device_id emc2103_ids[] = {
691 { "emc2103", 0, },
692 { /* LIST END */ }
693};
694MODULE_DEVICE_TABLE(i2c, emc2103_ids);
695
696/* Return 0 if detection is successful, -ENODEV otherwise */
697static int
698emc2103_detect(struct i2c_client *new_client, struct i2c_board_info *info)
699{
700 struct i2c_adapter *adapter = new_client->adapter;
701 int manufacturer, product;
702
703 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
704 return -ENODEV;
705
706 manufacturer = i2c_smbus_read_byte_data(new_client, REG_MFG_ID);
707 if (manufacturer != 0x5D)
708 return -ENODEV;
709
710 product = i2c_smbus_read_byte_data(new_client, REG_PRODUCT_ID);
711 if ((product != 0x24) && (product != 0x26))
712 return -ENODEV;
713
714 strlcpy(info->type, "emc2103", I2C_NAME_SIZE);
715
716 return 0;
717}
718
719static struct i2c_driver emc2103_driver = {
720 .class = I2C_CLASS_HWMON,
721 .driver = {
722 .name = "emc2103",
723 },
724 .probe = emc2103_probe,
725 .remove = emc2103_remove,
726 .id_table = emc2103_ids,
727 .detect = emc2103_detect,
728 .address_list = normal_i2c,
729};
730
731module_i2c_driver(emc2103_driver);
732
733MODULE_AUTHOR("Steve Glendinning <[email protected]>");
734MODULE_DESCRIPTION("SMSC EMC2103 hwmon driver");
735MODULE_LICENSE("GPL");
736
|
__label__pos
| 0.984782 |
So I'm using the MapBox SDK, which is a fork of RouteMe's map SDK for iOS. From what I can tell, the annotation stuff might be in here.
I've modified some CAAnimation code I found here to try to achieve this. My code is below. Since I'm reasonably new to CAAnimation, the issue could be in that, or with the RouteMe code itself. It's just animating the layer which the annotation wraps.
RMPointAnnotation *point = [[RMPointAnnotation alloc] initWithMapView:self.mapView coordinate:CLLocationCoordinate2DMake([location[@"lat"] floatValue], [location[@"lon"] floatValue]) andTitle:@"App open"];
point.image = [UIImage imageNamed:@"mapPin.png"];
[self.mapView addAnnotation:point];
CGRect endFrame = point.layer.frame;
// Move annotation out of view
point.layer.frame = CGRectMake(point.layer.frame.origin.x, point.layer.frame.origin.y - self.mapView.frame.size.height, point.layer.frame.size.width, point.layer.frame.size.height);
// Animate drop
[UIView animateWithDuration:2.0 delay:0.04 options:UIViewAnimationOptionCurveLinear animations:^{
point.layer.frame = endFrame;
// Animate squash
}completion:^(BOOL finished){
if (finished) {
[UIView animateWithDuration:0.05 animations:^{
point.layer.transform = CATransform3DMakeAffineTransform(CGAffineTransformMake(1.0, 0, 0, 0.8, 0, + point.layer.frame.size.height*0.1));
}completion:^(BOOL finished){
[UIView animateWithDuration:0.1 animations:^{
point.layer.transform = CATransform3DMakeAffineTransform(CGAffineTransformIdentity);
}];
}];
}
}];
Any ideas?
Accepted answer (3 votes):
Here is a reply from the developers about the feature: https://github.com/mapbox/mapbox-ios-sdk/issues/185 The feature is not on the roadmap.
I came up with a workaround using the Facebook pop library [UPDATED]:
@property (strong, nonatomic) RMAnnotation *annotation;
- (void)viewDidLoad
{
[super viewDidLoad];
self.annotation = [[RMAnnotation alloc] initWithMapView:self.mapView
coordinate:self.mapView.centerCoordinate
andTitle:@"Drop pin"];
self.annotation.layer.hidden = YES;
[self.mapView addAnnotation:self.annotation];
POPSpringAnimation *anim = [POPSpringAnimation animationWithPropertyNamed:kPOPLayerPositionY];
anim.fromValue = @(0);
anim.toValue = @(self.view.center.y);
anim.springSpeed = 8;
anim.springBounciness = 4;
anim.delegate = self;
[self.annotation.layer pop_addAnimation:anim forKey:@"positionY"];
}
-(void)pop_animationDidStart:(POPAnimation *)anim
{
self.annotation.layer.hidden = NO;
}
The idea is that you hide the annotation layers until the animation starts.
So it turns out that these Annotation objects have a layer property that is actually a CALayer, meaning you have to use CAAnimations on them, not UIView animations.
Could you provide a code example for the CAAnimations solution? – Nikolay Derkach May 26 '14 at 22:57
Sorry, I no longer have access to the codebase but I know Facebook just released a library for this code.facebook.com/projects/642915749111632/pop – Sam Jarman May 26 '14 at 23:26
Thanks for the tip, Sam, the library appeared to be very useful! I'll submit a reply with my solution using pop – Nikolay Derkach May 28 '14 at 20:15
Looks great Nikolay! :) – Sam Jarman May 29 '14 at 12:25
|
__label__pos
| 0.592946 |
How to open an Xbox One console
What screwdriver do you need to open an Xbox One controller?
You can open an Xbox One controller, an Xbox One Elite Wireless controller, or an Xbox 360 controller with a T6 or T8H screwdriver.
How do I clean my Xbox One without compressed air?
All you need to do is run the dust cloth over the outside of your system. This includes the top, bottom, sides, and ports. Depending on how long it’s been since you’ve wiped down your Xbox One you may need to clean off the cloth a few times, but for the most part this method will get the job done.
How do you clean the dust out of an Xbox One S?
You can easily clean your Xbox One by using a dry microfiber cloth to remove dust, fingerprints, and smudges from the outside of your console. You can also carefully use compressed air to remove dust build-up in the vents and ports of your Xbox console.
What can I use instead of a T8 screwdriver?
Sometimes an allen key will work, or even a flat blade screwdriver if it will jam between opposite points in the recess. Either way, be ready to replace the screw and/or the tool used because of damage. Your best bet is just to get a Torx key.
How often should you clean your Xbox One?
General cleaning recommendations
Clean every 3-6 months or whenever needed. Important: Power off and disconnect your Xbox console from the wall before cleaning it. Don't apply liquids directly to your console or controllers.
Does cleaning your Xbox make it run faster?
De-clutter and clean
The internal hard drive of your Xbox needs to work extra hard if it is constantly moving back and forth looking for data. The cleaner the drive, the better the performance. You can free up space by deleting any unplayed or unused apps.
Is it bad to leave Xbox on all night?
You can leave your Xbox One on overnight, but you risk damaging your console or shortening its lifespan. When your Xbox One is in use, it usually produces heat, and when you turn it off, it cools down. After using your Xbox One during the day, leaving it on throughout the night might cause it to overheat.
Why is my Xbox One overheating?
Top Reasons for Xbox One Overheating
The exhaust grilles or side panels are blocked. The environmental temperature is too high. The console sits in direct sunlight. The thermal paste needs to be replaced.
What are TX screws?
A Torx screw is a type of screw characterized by a six-lobed, star-patterned screw drive. Torx drive is a trademark commonly referred to as star drive or, simply, a six-lobe. It’s often abbreviated to TX or 6lobe. The ISO name is hexalobular.
What is a Torx key?
Torx keys are also commonly known as star keys. Like hexagon allen keys, most torx keys are small enough to be carried in a pocket and enable the user to drive the appropriate fasteners without needing a separate screwdriver or power tool.
What are Phillips screws?
A Phillips screw is a type of screw that has a slot in its top that looks like a cross.
Why is my Xbox taking forever to load?
According to users, Took too long to start error can occur due to problems with your cache. Xbox One stores all sorts of temporary files in your cache, and those files should help you start your apps faster, but sometimes certain files in your cache can get corrupted and cause this and many other errors to appear.
Why is my Xbox laggy?
If too many apps are running in the background, it can bog down your system and cause it to lag. Closing them will allow your Xbox to run more efficiently. On your controller, press the Xbox button to open the guide. … Press the Menu button on your controller, then select Quit.
Why is my Xbox One choppy?
You need to clear the Xbox cache or something has updated and misconfigured your Xbox causing it to slow down. … When your Xbox runs too slow and games are not playable, you need to clear the cache on your Xbox. Clearing cache on your Xbox makes it run faster and smoother and will stop lag.
What was the red ring of death?
The Red Ring of Death is when three flashing red lights appear around the Xbox 360 power button, instead of the one to four green lights that indicate normal operation, signalling an internal problem that requires service.
Does playing Xbox use a lot of electricity?
Interesting research from the NRDC in America finds that: The Xbox One (233 kWh/y) consumes 30% more energy on average vs. the PS4 (181 kWh/y) and more than 6 times more than the Wii U (37 kWh/y). New consoles use more energy playing video and being in standby mode than playing games.
|
__label__pos
| 0.995997 |
Hitachi Vantara Lumada and Pentaho Documentation
Utils
cdf.dashboard.Utils
Static
A collection of utility functions.
AMD Module
require(["cdf/dashboard/Utils"], function(Utils) { /* code goes here */ });
Source: dashboard/Utils.js, line 24
Methods
Name Description
Utils.addArgs(url)
Deprecated
Adds the URL parameters to a local object.
Utils.clone(obj) : object
Deprecated
Deep clones an object.
Utils.configLanguage(langCode, config)
Configure a language.
Utils.dateFormat(date, mask, langCode) : string
Formats a date.
Utils.dateParse(date, mask) : Date
Parses a date with a given mask.
Utils.doCsvQuoting(value, separator, alwaysEscape) : string
Quote CSV values in a way that is compatible with CSVTokenizer.
Utils.eachValuesArray(values, opts, f, x) : boolean
Traverses each value, label, and id triple of a values array.
Utils.equalValues(a, b) : boolean
Determines if two values are considered equal.
Utils.escapeHtml(input) : string
Escapes a string as an HTML safe string.
Utils.ev(o) : object
Evaluates the argument.
Utils.getArgValue(key) : object | undefined
Deprecated
Gets an argument value that was previously set by calling addArgs.
Utils.getLocationSearchString() : string
Returns the query string part of the URL.
Utils.getPathParameter(url) : string | undefined
Gets the path from the URL.
Utils.getQueryParameter(parameterName) : string
Returns the value of a query string parameter.
Utils.getURLParameters(sURL) : Array.<Array.<String>>
Gets the parameters from a URL.
Utils.hsvToRgb(h, s, v) : string
Converts an HSV to an RGB color value.
Utils.isArray(value) : boolean
Determines if a value is considered an array.
Utils.isFunction(value) : boolean
Determines if a value is considered a function.
Utils.isNumber(value) : boolean
Determines if a value is considered a number.
Utils.isString(value) : boolean
Determines if a value is considered a string.
Utils.normalizeValue(value) : object
Normalizes a value.
Utils.numberFormat(value, mask, langCode) : string
Formats a number.
Utils.objectToPropertiesArray(obj) : Array.<Array.<Object>> | undefined
Converts an object to an array.
Utils.parseMultipleValues(value) : array | object
Given a parameter value obtains an equivalent values array.
Utils.post(url, obj)
Performs a post to the server.
Utils.propertiesArrayToObject(pArray) : object | undefined
Converts an array to an object.
Utils.sanitizeHtml(html) : string
Sanitizes input HTML.
Methods Details
Utils.addArgs(url)
Deprecated
Adds the URL parameters to a local args object.
Source: dashboard/Utils.js, line 469
Parameters:
Name Default Value Summary
url : string
The URL from which to extract the parameters.
Utils.clone(obj) : object
Deprecated
Deep clones an object. This method is deprecated, use jQuery.extend.
Source: dashboard/Utils.js, line 431
Parameters:
Name Default Value Summary
obj : object
The object to clone.
Returns:
Name Description
object
The cloned object.
Utils.configLanguage(langCode, config)
Configure a new or existing language by specifying the language code and a configuration object.
Source: dashboard/Utils.js, line 179
Parameters:
Name Default Value Summary
langCode : string
Language code to be used.
config : object
Object with the language configuration.
Parameters:
Name Default Value Summary
number : object
Number format language configuration.
dateLocale : object
Date format language configuration.
Utils.dateFormat(date, mask, langCode) : string
Formats a date with a given mask using the dashboard language, the one that the user specified if it exists, or the default language 'en-US'.
Source: dashboard/Utils.js, line 201
Parameters:
Name Default Value Summary
date : Date
Date object to be formatted.
mask : string
Mask with format for the date.
langCode : string
Language to use in format.
Returns:
Name Description
string
The formatted date.
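A usage sketch for Utils.dateFormat, following the AMD require pattern shown above; the mask tokens and the printed output are illustrative assumptions, since the exact mask syntax depends on the underlying date library.
require(["cdf/dashboard/Utils"], function(Utils) {
  // Format the current date with an explicit mask and language code.
  // The mask tokens below are an assumed example, not a documented list.
  var formatted = Utils.dateFormat(new Date(), "yyyy-mm-dd", "en-US");
  console.log(formatted); // e.g. "2015-08-25"
});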
Utils.dateParse(date, mask) : Date
Parses a date with a given mask.
Source: dashboard/Utils.js, line 229
Parameters:
Name Default Value Summary
date : string
The date to be parsed.
mask : string
The mask with the format for the date.
Returns:
Name Description
Date
The parsed date as a Date object.
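A matching sketch for Utils.dateParse, again treating the mask tokens as an assumption.
require(["cdf/dashboard/Utils"], function(Utils) {
  // Parse a date string back into a Date object using the same assumed mask.
  var parsed = Utils.dateParse("2015-08-25", "yyyy-mm-dd");
  console.log(parsed instanceof Date); // true when the mask matches the input
});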
Utils.doCsvQuoting(value, separator, alwaysEscape) : string
Quote CSV values in a way that is compatible with CSVTokenizer.
Source: dashboard/Utils.js, line 363
Parameters:
Name Default Value Summary
value : string
Value quote.
separator : string
Separator to use when quoting.
alwaysEscape : boolean
Flag that indicates if the value should always be escaped or just when needed.
Returns:
Name Description
string | null
The escaped value or null.
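A usage sketch for Utils.doCsvQuoting; the exact quoting style of the output is up to the implementation, so the comment only describes the intent.
require(["cdf/dashboard/Utils"], function(Utils) {
  // Quote a value that contains the separator so it survives CSV tokenization.
  var quoted = Utils.doCsvQuoting("value;with;separators", ";", true);
  console.log(quoted); // an escaped/quoted string compatible with CSVTokenizer
});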
Utils.eachValuesArray(values, opts, f, x) : boolean
Traverses each value, label and id triple of a values array.
Source: dashboard/Utils.js, line 535
Parameters:
Name Default Value Summary
values : Array.<Array.<string>>
The values array - an array of arrays.
Each second-level array is a value specification and contains a value and, optionally, a label and an id. It may have the following forms:
• [valueAndLabel]: when having length one
• [value, label,...]: when having length two or more and opts.valueAsId is falsy
• [id, valueAndLabel,..]: when having length two or more and opts.valueAsId is truthy
opts : object
An object with options.
Parameters:
Name Default Value Summary
valueAsId : boolean
Optional
false
Indicates if the first element of the value specification array is the id, instead of the value.
f : function
The traversal function that is to be called with each value-label-id triple and with the JS context x. The function is called with arguments: value, label, id, and index.
When the function returns the value false, traversal is stopped, and false is returned.
x : object
The JS context object on which f is to be called.
Returns:
Name Description
boolean
true if the traversal was complete, false if explicitly stopped by the traversal function.
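A usage sketch for Utils.eachValuesArray, using a small hand-made values array (the city names are arbitrary sample data).
require(["cdf/dashboard/Utils"], function(Utils) {
  var values = [
    ["lisbon", "Lisbon"],
    ["porto", "Porto"]
  ];
  // With valueAsId set to false, each pair is read as [value, label];
  // no ids are provided here, so id may be undefined in the callback.
  Utils.eachValuesArray(values, {valueAsId: false}, function(value, label, id, index) {
    console.log(index, value, label, id);
    // returning false here would stop the traversal early
  }, this);
});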
Utils.equalValues(a, b) : boolean
Determines if two values are considered equal.
Source: dashboard/Utils.js, line 677
Parameters:
Name Default Value Summary
a : object
The first value.
b : object
The second value.
Returns:
Name Description
boolean
true if equal, false otherwise.
Utils.escapeHtml(input) : string
Escapes a string as an HTML safe string. It assumes that, if there is an escaped char in the input, then the input is fully escaped.
Source: dashboard/Utils.js, line 62
Parameters:
Name Default Value Summary
input : string
The string to be escaped.
Returns:
Name Description
string
The escaped string or an empty string if it receives anything other than a string.
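A usage sketch for Utils.escapeHtml.
require(["cdf/dashboard/Utils"], function(Utils) {
  // Escape user-provided text before injecting it into the DOM.
  var safe = Utils.escapeHtml('<b>user input</b> & "quotes"');
  console.log(safe); // an HTML-escaped string using entities such as &lt; and &amp;
});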
Utils.ev(o) : object
Evaluates the argument. If it is a function, calls the function, otherwise returns the argument.
Source: dashboard/Utils.js, line 391
Parameters:
Name Default Value Summary
o : object
The object to try and evaluate as a function.
Returns:
Name Description
object
The value of o if it isn't a function. Otherwise, the result of invoking o.
Utils.getArgValue(key) : object | undefined
Deprecated
Gets an argument value that was previously set by calling addArgs. Deprecated: use getQueryParameter or dashboard.context.params instead.
Source: dashboard/Utils.js, line 489
Parameters:
Name Default Value Summary
key : string
The argument name.
Returns:
Name Description
object | undefined
The argument value or undefined.
Utils.getLocationSearchString() : string
Returns the query string part of the URL.
Source: dashboard/Utils.js, line 121
Returns:
Name Description
string
The query string.
Utils.getPathParameter(url) : string | undefined
Given a URL containing an encoded Pentaho path, e.g. :home:admin:Test.wcdf, returns the encoded path.
Source: dashboard/Utils.js, line 102
Parameters:
Name Default Value Summary
url : string
The URL from which to extract the encoded path.
Returns:
Name Description
string | undefined
The encoded path or undefined if not available.
Utils.getQueryParameter(parameterName) : string
Returns the value of a query string parameter.
Source: dashboard/Utils.js, line 133
Parameters:
Name Default Value Summary
parameterName : string
The name of the parameter.
Returns:
Name Description
string
The value of the query parameter or an empty string.
Utils.getURLParameters(sURL) : Array.<Array.<String>>
Gets the parameters from a URL. CDF URL parameters are defined as those that are present in the query string with names starting with the string param. So, for a query string like ?paramfoo=bar, you would get a parameter foo with value bar.
Source: dashboard/Utils.js, line 310
Parameters:
Name Default Value Summary
sURL : string
URL with the query string to be parsed.
Returns:
Name Description
Array.<Array.<String>>
Array with the parsed parameters. Each element is an array with two positions, the first being the parameter name and the second the value. For example, the query string ?paramfoo=bar yields [["foo", "bar"]].
Utils.hsvToRgb(h, s, v) : string
Converts an HSV to an RGB color value. Based on the algorithm described at http://en.wikipedia.org/wiki/HSL_and_HSV.
Source: dashboard/Utils.js, line 706
Parameters:
Name Default Value Summary
h : number
Hue as a value between 0 - 360 (degrees).
s : number
Saturation as a value between 0 - 100 (%).
v : number
Value as a value between 0 - 100 (%).
Returns:
Name Description
string
An RGB color string (e.g. "rgb(1,2,3)").
Utils.isArray(value) : boolean
Determines if a value is considered an array.
Returns true if value is an array, or an array-like object (object containing the methods join and length).
Source: dashboard/Utils.js, line 619
Parameters:
Name Default Value Summary
value : object
The value.
Returns:
Name Description
boolean
true if it is an array or an array-like object, false otherwise.
Utils.isFunction(value) : boolean
Determines if a value is considered a function.
Source: dashboard/Utils.js, line 635
Parameters:
Name Default Value Summary
value : any
The value to be tested.
Returns:
Name Description
boolean
true if it is a function; false otherwise.
Utils.isNumber(value) : boolean
Determines if a value is considered a number.
Source: dashboard/Utils.js, line 663
Parameters:
Name Default Value Summary
value : any
The value to be tested.
Returns:
Name Description
boolean
true if it is a number; false otherwise.
Utils.isString(value) : boolean
Determines if a value is considered a string.
Source: dashboard/Utils.js, line 649
Parameters:
Name Default Value Summary
value : any
The value to be tested.
Returns:
Name Description
boolean
true if it is a string; false otherwise.
Utils.normalizeValue(value) : object
Normalizes a value so that undefined, empty string, and empty array are all translated to null.
Source: dashboard/Utils.js, line 601
Parameters:
Name Default Value Summary
value : object
The value to normalize.
Returns:
Name Description
object | null
The normalized value or null.
Utils.numberFormat(value, mask, langCode) : string
Formats a number with a given mask using the dashboard language, the one that the user specified if it exists, or the default language 'en-US'.
Source: dashboard/Utils.js, line 153
Parameters:
Name Default Value Summary
value : number
Number value to be formatted.
mask : string
Mask with format for the value.
langCode : string
Language to use in format.
Returns:
Name Description
string
The formatted number.
Utils.objectToPropertiesArray(obj) : Array.<Array.<Object>> | undefined
Converts an object to an array.
Source: dashboard/Utils.js, line 293
Parameters:
Name Default Value Summary
obj : object
The object to be converted into an array.
Returns:
Name Description
Array.<Array.<Object>> | undefined
An array of key-value pairs (each pair itself an array), or undefined if the argument is not an object.
Utils.parseMultipleValues(value) : array | object
Given a parameter value obtains an equivalent values array.
The parameter value may encode multiple values in a string format.
A nully (i.e. null or undefined) input value or an empty string results in null, so the result of this method is normalized.
A string value may contain multiple values separated by the character |.
An array or array-like object is returned without modification.
Any other value type returns null.
Source: dashboard/Utils.js, line 581
Parameters:
Name Default Value Summary
value : object
A parameter value, as returned by getParameterValue.
Returns:
Name Description
array | object
An array, an array-like object, or null.
Utils.post(url, obj)
Performs a post to the server.
Source: dashboard/Utils.js, line 403
Parameters:
Name Default Value Summary
url : string
The URL where to post.
obj : object
Parameter object.
Utils.propertiesArrayToObject(pArray) : object | undefined
Converts an array to an object.
Source: dashboard/Utils.js, line 278
Parameters:
Name Default Value Summary
pArray : Array.<Array.<Object>>
An array of key-value pairs (each pair itself an array) to be converted.
Returns:
Name Description
object | undefined
The converted object, or undefined if the argument is not an array.
Utils.sanitizeHtml(html) : string
Strips unsafe tags and attributes from HTML, using the Google Caja sanitizer (https://code.google.com/archive/p/google-caja/source).
Source: dashboard/Utils.js, line 86
Parameters:
Name Default Value Summary
html : string
The HTML to be sanitized.
Returns:
Name Description
string
Safe HTML based on the input.
mgl_data.cpp
/***************************************************************************
* mgl_data.cpp is part of Math Graphic Library
* Copyright (C) 2007 Alexey Balakin <[email protected]> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU Library General Public License as *
* published by the Free Software Foundation; either version 3 of the *
* License, or (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU Library General Public *
* License along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
***************************************************************************/
#include <stdlib.h>
#include <string.h>
#include "mgl/mgl_data.h"
#include "mgl/mgl_eval.h"
#ifndef NO_GSL
#include <gsl/gsl_fft_complex.h>
#endif
void mglFillP(int x,int y, const mreal *a,int nx,int ny,mreal _p[4][4]);
void mglFillP(int x, const mreal *a,int nx,mreal _p[4]);
void mglFillP5(int x,int y, const mreal *a,int nx,int ny,mreal _p[6][6]);
void mglFillP5(int x, const mreal *a,int nx,mreal _p[6]);
//-----------------------------------------------------------------------------
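// Fast integer power by recursive squaring: O(log n) multiplications.
// For example, mgl_ipow(2.0,10) evaluates to 1024; a negative exponent
// returns the reciprocal 1/x^|n|, and n==0 returns 1.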
double mgl_ipow(double x,int n)
{
double t;
if(n==2) return x*x;
if(n==1) return x;
if(n<0) return 1/mgl_ipow(x,-n);
if(n==0) return 1;
t = mgl_ipow(x,n/2); t = t*t;
if(n%2==1) t *= x;
return t;
}
//-----------------------------------------------------------------------------
void mglData::Smooth(const char *dirs)
{
int type = SMOOTH_QUAD_5;
if(strchr(dirs,'0') || strchr(dirs,'1')) return;
if(strchr(dirs,'3')) type = SMOOTH_LINE_3;
if(strchr(dirs,'5')) type = SMOOTH_LINE_5;
Smooth(type, dirs, 0);
}
//-----------------------------------------------------------------------------
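// Smoothing along the requested direction(s).  For each point the code uses
// a 3-point average (SMOOTH_LINE_3), a 5-point average (SMOOTH_LINE_5), or
// the 5-point quadratic least-squares (Savitzky-Golay) filter with weights
// (-3,12,17,12,-3)/35 -- that is what (17*y5-5*x2y)/35 below expands to.
// The offsets d3,d5 keep the stencil inside the array near the boundaries
// (the first/last points are copied unchanged), and a positive delta clamps
// the smoothed value to [a-delta, a+delta].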
void mglData::Smooth(int Type,const char *dirs,mreal delta)
{
if(Type == SMOOTH_NONE) return;
long i,j,k,i0,nn=nx*ny,d3,d5;
mreal y5,y3,x2y;
mreal *b = new mreal[nx*ny*nz];
// smoothing along x
memset(b,0,nx*ny*nz*sizeof(mreal));
if(nx>4 && strchr(dirs,'x'))
{
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
{
i0 = i+nx*(j+ny*k);
d3 = d5 = 0;
if(i==0) { d3 = 1; d5 = 2; }
if(i==1) { d5 = 1; }
if(i==nx-1) { d3 = -1; d5 = -2;}
if(i==nx-2) { d5 = -1;}
y3 = (a[i0+d3-1] + a[i0+d3] + a[i0+d3+1]);
// xy3= (a[i0+d3+1] - a[i0+d3-1]);
y5 = (a[i0+d5-2] + a[i0+d5-1] + a[i0+d5] +
a[i0+d5+1] + a[i0+d5+2]);
// xy = (a[i0+d5+1] + 2*a[i0+d5+2] - 2*a[i0+d5-2] - a[i0+d5-1]);
x2y= (a[i0+d5+1] + 4*a[i0+d5+2] + 4*a[i0+d5-2] + a[i0+d5-1]);
if(d3) b[i0] = a[i0];
else if(Type==SMOOTH_LINE_3 || d5) b[i0] = y3/3.;
else if(Type==SMOOTH_LINE_5) b[i0] = y5/5.;
else if(Type==SMOOTH_QUAD_5) b[i0] = (17*y5-5*x2y)/35.;
if(delta>0) b[i0] = mgl_max(a[i0]-delta,mgl_min(a[i0]+delta,b[i0]));
}
memcpy(a,b,nx*ny*nz*sizeof(mreal)); memset(b,0,nx*ny*nz*sizeof(mreal));
}
if(ny>4 && strchr(dirs,'y'))
{
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
{
i0 = i+nx*(j+ny*k);
d3 = d5 = 0;
if(j==0) { d3 = 1; d5 = 2; }
if(j==1) { d5 = 1; }
if(j==ny-1) { d3 = -1; d5 = -2;}
if(j==ny-2) { d5 = -1;}
y3 = (a[i0+nx*(d3-1)] + a[i0+nx*d3] + a[i0+nx*(d3+1)]);
// xy3= (a[i0+nx*(d3+1)] - a[i0+nx*(d3-1)]);
y5 = (a[i0+nx*(d5-2)] + a[i0+nx*(d5-1)] + a[i0+nx*d5] +
a[i0+nx*(d5+1)] + a[i0+nx*(d5+2)]);
// xy = (a[i0+nx*(d5+1)] + 2*a[i0+nx*(d5+2)] -
// 2*a[i0+nx*(d5-2)] - a[i0+nx*(d5-1)]);
x2y= (a[i0+nx*(d5+1)] + 4*a[i0+nx*(d5+2)] +
4*a[i0+nx*(d5-2)] + a[i0+nx*(d5-1)]);
if(d3) b[i0] = a[i0];
else if(Type==SMOOTH_LINE_3 || d5) b[i0] = y3/3.;
else if(Type==SMOOTH_LINE_5) b[i0] = y5/5.;
else if(Type==SMOOTH_QUAD_5) b[i0] = (17*y5-5*x2y)/35.;
if(delta>0) b[i0] = mgl_max(a[i0]-delta,mgl_min(a[i0]+delta,b[i0]));
}
memcpy(a,b,nx*ny*nz*sizeof(mreal)); memset(b,0,nx*ny*nz*sizeof(mreal));
}
if(nz>4 && strchr(dirs,'z'))
{
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
{
i0 = i+nx*(j+ny*k);
d3 = d5 = 0;
if(k==0) { d3 = 1; d5 = 2; }
if(k==1) { d5 = 1; }
if(k==nz-1) { d3 = -1; d5 = -2;}
if(k==nz-2) { d5 = -1;}
y3 = (a[i0+nn*(d3-1)] + a[i0+nn*d3] + a[i0+nn*(d3+1)]);
// xy3= (a[i0+nn*(d3+1)] - a[i0+nn*(d3-1)]);
y5 = (a[i0+nn*(d5-2)] + a[i0+nn*(d5-1)] + a[i0+nn*d5] +
a[i0+nn*(d5+1)] + a[i0+nn*(d5+2)]);
// xy = (a[i0+nn*(d5+1)] + 2*a[i0+nn*(d5+2)] -
// 2*a[i0+nn*(d5-2)] - a[i0+nn*(d5-1)]);
x2y= (a[i0+nn*(d5+1)] + 4*a[i0+nn*(d5+2)] +
4*a[i0+nn*(d5-2)] + a[i0+nn*(d5-1)]);
if(d3) b[i0] = a[i0];
else if(Type==SMOOTH_LINE_3 || d5) b[i0] = y3/3.;
else if(Type==SMOOTH_LINE_5) b[i0] = y5/5.;
else if(Type==SMOOTH_QUAD_5) b[i0] = (17*y5-5*x2y)/35.;
if(delta>0) b[i0] = mgl_max(a[i0]-delta,mgl_min(a[i0]+delta,b[i0]));
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
delete []b;
}
//-----------------------------------------------------------------------------
void mglData::CumSum(const char *dir)
{
register long i,j,k,i0;
//mglData d;
mreal *b = new mreal[nx*ny*nz];
memcpy(b,a,nx*ny*nz*sizeof(mreal));
if(strchr(dir,'z') && nz>1)
{
for(i=0;i<nx*ny;i++)
{
b[i] = a[i];
for(j=1;j<nz;j++) b[i+j*nx*ny] = b[i+(j-1)*nx*ny] + a[i+nx*ny*j];
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
if(strchr(dir,'y') && ny>1)
{
for(i=0;i<nx;i++) for(k=0;k<nz;k++)
{
i0 = i+nx*ny*k; b[i0] = a[i0];
for(j=1;j<ny;j++) b[i0+j*nx] = b[i0+j*nx-nx] + a[i0+nx*j];
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
if(strchr(dir,'x') && nx>1)
{
for(j=0;j<ny*nz;j++)
{
i0 = j*nx; b[i0] = a[i0];
for(i=1;i<nx;i++) b[i+i0] = b[i+i0-1] + a[i+i0];
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
delete []b;
}
//-----------------------------------------------------------------------------
void mglData::Integral(const char *dir)
{
register long i,j,k,i0;
//mglData d;
mreal *b = new mreal[nx*ny*nz];
memcpy(b,a,nx*ny*nz*sizeof(mreal));
if(strchr(dir,'z') && nz>1)
{
for(i=0;i<nx*ny;i++)
{
b[i] = 0;
for(j=1;j<nz;j++)
b[i+j*nx*ny] = b[i+(j-1)*nx*ny] + (a[i+nx*ny*j]+a[i+nx*ny*(j-1)])/2/nz;
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
if(strchr(dir,'y') && ny>1)
{
for(i=0;i<nx;i++) for(k=0;k<nz;k++)
{
i0 = i+nx*ny*k;
b[i0] = 0;
for(j=1;j<ny;j++)
b[i0+j*nx] = b[i0+j*nx-nx] + (a[i0+nx*j]+a[i0+j*nx-nx])/2/ny;
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
if(strchr(dir,'x') && nx>1)
{
for(j=0;j<ny*nz;j++)
{
i0 = j*nx;
b[i0] = 0;
for(i=1;i<nx;i++)
b[i+i0] = b[i+i0-1] + (a[i+i0]+a[i+i0-1])/2/nx;
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
delete []b;
}
//-----------------------------------------------------------------------------
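// First derivative along the given direction(s) with respect to the index
// normalized by the grid size: central differences (a[i+1]-a[i-1])*n/2 in
// the interior and one-sided 3-point formulas at the two boundaries.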
void mglData::Diff(const char *dir)
{
register long i,j,k,i0;
mreal *b = new mreal[nx*ny*nz];
if(strchr(dir,'z') && nz>1)
{
for(i=0;i<nx*ny;i++)
{
b[i] = -(3*a[i]-4*a[i+nx*ny]+a[i+2*nx*ny])*nz/2;
b[i+(nz-1)*nx*ny] = (3*a[i+(nz-1)*nx*ny]-4*a[i+(nz-2)*nx*ny]+a[i+(nz-3)*nx*ny])*nz/2;
for(j=1;j<nz-1;j++)
b[i+j*nx*ny] = (a[i+nx*ny*(j+1)]-a[i+nx*ny*(j-1)])*nz/2;
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
if(strchr(dir,'y') && ny>1)
{
for(i=0;i<nx;i++) for(k=0;k<nz;k++)
{
i0 = i+nx*ny*k;
b[i0] = -(3*a[i0]-4*a[i0+nx]+a[i0+2*nx])*ny/2;
b[i0+(ny-1)*nx] = (3*a[i0+(ny-1)*nx]-4*a[i0+(ny-2)*nx]+a[i0+(ny-3)*nx])*ny/2;
for(j=1;j<ny-1;j++)
b[i0+j*nx] = (a[i0+nx*j+nx]-a[i0+j*nx-nx])*ny/2;
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
if(strchr(dir,'x') && nx>1)
{
for(j=0;j<ny*nz;j++)
{
i0 = j*nx;
b[i0] = -(3*a[i0]-4*a[i0+1]+a[i0+2])*nx/2;
b[i0+nx-1] = (3*a[i0+nx-1]-4*a[i0+nx-2]+a[i0+nx-3])*nx/2;
for(i=1;i<nx-1;i++)
b[i+i0] = (a[i+i0+1]-a[i+i0-1])*nx/2;
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
delete []b;
}
//-----------------------------------------------------------------------------
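// Second difference along the given direction(s): the centered stencil
// (a[i+1]-2*a[i]+a[i-1]) scaled by n*n/2, with the two boundary points
// set to zero.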
void mglData::Diff2(const char *dir)
{
register long i,j,k,i0;
mreal *b = new mreal[nx*ny*nz];
if(strchr(dir,'z') && nz>1)
{
for(i=0;i<nx*ny;i++)
{
b[i] = b[i+(nz-1)*nx*ny] = 0;
for(j=1;j<nz-1;j++)
b[i+j*nx*ny] = (a[i+nx*ny*(j+1)]-2*a[i+nx*ny*j]+a[i+nx*ny*(j-1)])*nz*nz/2;
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
if(strchr(dir,'y') && ny>1)
{
for(i=0;i<nx;i++) for(k=0;k<nz;k++)
{
i0 = i+nx*ny*k;
b[i0] = b[i0+(ny-1)*nx] = 0;
for(j=1;j<ny-1;j++)
b[i0+j*nx] = (a[i0+nx*j+nx]-2*a[i0+j*nx]+a[i0+j*nx-nx])*ny*ny/2;
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
if(strchr(dir,'x') && nx>1)
{
for(j=0;j<ny*nz;j++)
{
i0 = j*nx;
b[i0] = b[i0+nx-1] = 0;
for(i=1;i<nx-1;i++)
b[i+i0] = (a[i+i0+1]-2*a[i+i0]+a[i+i0-1])*nx*nx/2;
}
memcpy(a,b,nx*ny*nz*sizeof(mreal));
}
delete []b;
}
//-----------------------------------------------------------------------------
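// Linear interpolation at a fractional index (x,y,z): tri-, bi- or 1d-linear
// depending on which coordinates are fractional; points outside the data
// range return 0.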
mreal mglData::Linear(mreal x,mreal y,mreal z) const
{
register long i0;
long kx,ky,kz;
mreal b=0,dx,dy,dz,b1,b0;
if(x<0 || y<0 || z<0 || x>nx-1 || y>ny-1 || z>nz-1)
return 0;
if(nz>1 && z!=long(z)) // 3d interpolation
{
kx=long(x); ky=long(y); kz=long(z);
dx = x-kx; dy = y-ky; dz = z-kz;
i0 = kx+nx*(ky+ny*kz);
b0 = a[i0]*(1-dx-dy+dx*dy) + dx*(1-dy)*a[i0+1] +
dy*(1-dx)*a[i0+nx] + dx*dy*a[i0+nx+1];
i0 = kx+nx*(ky+ny*(kz+1));
b1 = a[i0]*(1-dx-dy+dx*dy) + dx*(1-dy)*a[i0+1] +
dy*(1-dx)*a[i0+nx] + dx*dy*a[i0+nx+1];
b = b0 + dz*(b1-b0);
}
else if(ny>1 && y!=long(y)) // 2d interpolation
{
kx=long(x); ky=long(y);
dx = x-kx; dy=y-ky;
i0 = kx+nx*ky;
b = a[i0]*(1-dx-dy+dx*dy) + dx*(1-dy)*a[i0+1] +
dy*(1-dx)*a[i0+nx] + dx*dy*a[i0+nx+1];
}
else if(nx>1 && x!=long(x)) // 1d interpolation
{
kx = long(x);
b = a[kx] + (x-kx)*(a[kx+1]-a[kx]);
}
else // no interpolation
{
b = a[long(x+nx*(y+ny*z))];
}
return b;
}
//-----------------------------------------------------------------------------
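// Local cubic-spline interpolation at a fractional index.  Spline() is a
// thin wrapper around Spline3(), which additionally returns the partial
// derivatives dx,dy,dz (currently filled only by the 1d and 2d branches;
// see the TODO below for the 3d case).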
mreal mglData::Spline(mreal x,mreal y,mreal z) const
{
mreal dx,dy,dz;
return Spline3(x,y,z,dx,dy,dz);
}
//-----------------------------------------------------------------------------
mreal mglData::Spline3(mreal x,mreal y,mreal z,mreal &dx,mreal &dy,mreal &dz) const
{
mreal _p[4][4];
register long i,j;
register mreal fx=1, fy=1;
long kx=long(x),ky=long(y),kz=long(z);
mreal b=0;
if(x<0 || y<0 || z<0 || x>nx-1 || y>ny-1 || z>nz-1)
return 0;
dx=dy=dz=0;
if(nz>1 && z!=kz) // 3d interpolation
{ // TODO: add dx,dy,dz evaluation
mreal b1[4]={0,0,0,0};
if(kx>nx-2) kx = nx-2;
if(ky>ny-2) ky = ny-2;
if(kz>nz-2) kz = nz-2;
long kk=1;
if(kz==0) { kk=0; }
else if(nz>3 && kz==nz-2) { kk=2; }
for(long k=0;k<4;k++)
{
mglFillP(kx, ky, a+(kz+k-kk)*nx*ny, nx, ny, _p);
for(i=0,fx=1;i<4;i++)
{
for(j=0,fy=1;j<4;j++)
{ b1[k] += fy*fx*_p[i][j]; fy *= y-ky; }
fx *= x-kx;
}
}
mglFillP(kk, b1, nz>3 ? 4:3, _p[0]);
for(i=0,fx=1,b=0;i<4;i++)
{ b += fx*_p[0][i]; fx *= z-kz; }
}
else if(ny>1 && y!=ky) // 2d interpolation
{
if(kx>nx-2) kx = nx-2;
if(ky>ny-2) ky = ny-2;
mglFillP(kx, ky, a+kz*nx*ny, nx, ny, _p);
fx = 1; b = 0;
for(i=0;i<4;i++)
{
fy = 1;
for(j=0;j<4;j++)
{
b += fy*fx*_p[i][j];
dx+= i*fy*fx*_p[i][j];
dy+= j*fy*fx*_p[i][j];
fy *= y-ky;
}
fx *= x-kx;
}
dx /= x-kx; dy /= y-ky;
}
else if(nx>1 && x!=kx) // 1d interpolation
{
if(kx>nx-2) kx = nx-2;
mglFillP(kx, a+(ky+ny*kz)*nx, nx, _p[0]);
for(i=0,fx=1,b=0;i<4;i++)
{ b += fx*_p[0][i]; dx+= i*fx*_p[0][i]; fx *= x-kx; }
dx /= x-kx;
}
else // no interpolation
b = a[kx+nx*(ky+ny*kz)];
return b;
}
//-----------------------------------------------------------------------------
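// Fill the 4x4 coefficient matrix _p of a bicubic patch,
//   f(kx+u, ky+v) = sum_{i,j} _p[i][j] * u^i * v^j,   0 <= u,v <= 1,
// from the cell's corner values f[], finite-difference estimates of the
// first derivatives sx[],sy[] and of the cross derivatives d[].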
void mglFillP(int x,int y, const mreal *a,int nx,int ny,mreal _p[4][4])
{
mreal sx[4]={0,0,0,0},sy[4]={0,0,0,0},f[4]={0,0,0,0},d[4]={0,0,0,0};
if(x<0 || y<0 || x>nx-2 || y>ny-2)
{
memset(_p[0],0,4*sizeof(mreal));
memset(_p[1],0,4*sizeof(mreal));
memset(_p[2],0,4*sizeof(mreal));
memset(_p[3],0,4*sizeof(mreal));
return;
}
// function values
f[0]=a[x+nx*y]; f[1]=a[x+nx*(y+1)];
if(nx>1) { f[2]=a[x+1+nx*y]; f[3]=a[x+1+nx*(y+1)]; }
else { f[2] = f[0]; f[3] = f[1]; }
// derivatives along x
if(nx>1)
{
if(x==0)
{
sx[0]=a[x+1+y*nx]-a[x+nx*y];
sx[1]=a[x+1+nx*(y+1)]-a[x+nx*(y+1)];
}
else
{
sx[0]=(a[x+1+nx*y]-a[x-1+nx*y])/2;
sx[1]=(a[x+1+nx*(y+1)]-a[x-1+nx*(y+1)])/2;
}
}
if(x==nx-2)
{
sx[2]=a[x+1+nx*y]-a[x+nx*y];
sx[3]=a[x+1+nx*(y+1)]-a[x+nx*(y+1)];
}
else
{
sx[2]=(a[x+2+nx*y]-a[x+nx*y])/2;
sx[3]=(a[x+2+nx*(y+1)]-a[x+nx*(y+1)])/2;
}
// derivatives along y
if(y==0)
{
sy[0]=a[x+nx*(y+1)]-a[x+nx*y];
sy[2]=a[x+1+nx*(y+1)]-a[x+1+nx*y];
}
else
{
sy[0]=(a[x+nx*(y+1)]-a[x+nx*(y-1)])/2;
sy[2]=(a[x+1+nx*(y+1)]-a[x+1+nx*(y-1)])/2;
}
if(y==ny-2)
{
sy[1]=a[x+nx*(y+1)]-a[x+nx*y];
sy[3]=a[x+1+nx*(y+1)]-a[x+1+nx*y];
}
else
{
sy[1]=(a[x+nx*(y+2)]-a[x+nx*y])/2;
sy[3]=(a[x+1+nx*(y+2)]-a[x+1+nx*y])/2;
}
// cross derivatives
if(nx>1)
{
// estimate d[0]
if(y==0 && x==0)
d[0]=(a[x+1+nx*(y+1)]-a[x+nx*(y+1)]-a[x+1+nx*y]+a[x+nx*y]);
else if(y==0)
d[0]=(a[x+1+nx*(y+1)]-a[x-1+nx*(y+1)]-a[x+1+nx*y]+a[x-1+nx*y])/2;
else if(x==0)
d[0]=(a[x+1+nx*(y+1)]-a[x+nx*(y+1)]-a[x+1+nx*(y-1)]+a[x+nx*(y-1)])/2;
else
d[0]=(a[x+1+nx*(y+1)]-a[x-1+nx*(y+1)]-a[x+1+nx*(y-1)]+a[x-1+nx*(y-1)])/4;
// estimate d[1]
if(y==ny-2 && x==0)
d[1]=(a[x+1+nx*(y+1)]-a[x+nx*(y+1)]-a[x+1+nx*y]+a[x+nx*y]);
else if(y==ny-2)
d[1]=(a[x+1+nx*(y+1)]-a[x-1+nx*(y+1)]-a[x+1+nx*y]+a[x-1+nx*y])/2;
else if(x==0)
d[1]=(a[x+1+nx*(y+2)]-a[x+nx*(y+2)]-a[x+1+nx*y]+a[x+nx*y])/2;
else
d[1]=(a[x+1+nx*(y+2)]-a[x-1+nx*(y+2)]-a[x+1+nx*y]+a[x-1+nx*y])/4;
// estimate d[2]
if(y==0 && x==nx-2)
d[2]=(a[x+1+nx*(y+1)]-a[x+nx*(y+1)]-a[x+1+nx*y]+a[x+nx*y]);
else if(y==0)
d[2]=(a[x+2+nx*(y+1)]-a[x+nx*(y+1)]-a[x+2+nx*y]+a[x+nx*y])/2;
else if(x==nx-2)
d[2]=(a[x+1+nx*(y+1)]-a[x+nx*(y+1)]-a[x+1+nx*(y-1)]+a[x+nx*(y-1)])/2;
else
d[2]=(a[x+2+nx*(y+1)]-a[x+nx*(y+1)]-a[x+2+nx*(y-1)]+a[x+nx*(y-1)])/4;
// estimate d[3]
if(y==ny-2 && x==nx-2)
d[3]=(a[x+1+nx*(y+1)]-a[x+nx*(y+1)]-a[x+1+nx*y]+a[x+nx*y]);
else if(y==ny-2)
d[3]=(a[x+2+nx*(y+1)]-a[x+nx*(y+1)]-a[x+2+nx*y]+a[x+nx*y])/2;
else if(x==nx-2)
d[3]=(a[x+1+nx*(y+2)]-a[x+nx*(y+2)]-a[x+1+nx*y]+a[x+nx*y])/2;
else
d[3]=(a[x+2+nx*(y+2)]-a[x+nx*(y+2)]-a[x+2+nx*y]+a[x+nx*y])/4;
}
// compute the polynomial coefficients
_p[0][0]=f[0]; _p[1][0]=sx[0];
_p[2][0]=3*(f[2]-f[0])-2*sx[0]-sx[2];
_p[3][0]=sx[0]+sx[2]+2*(f[0]-f[2]);
_p[0][1]=sy[0]; _p[1][1]=d[0];
_p[2][1]=3*(sy[2]-sy[0])-2*d[0]-d[2];
_p[3][1]=d[0]+d[2]+2*(sy[0]-sy[2]);
_p[0][2]=3*(f[1]-f[0])-2*sy[0]-sy[1];
_p[1][2]=3*(sx[1]-sx[0])-2*d[0]-d[1];
_p[2][2]=9*(f[0]-f[1]-f[2]+f[3])+6*(sy[0]-sy[2]+sx[0]-sx[1])+
3*(sx[2]-sx[3]+sy[1]-sy[3])+2*(d[1]+d[2])+4*d[0]+d[3];
_p[3][2]=6*(f[1]+f[2]-f[0]-f[3])+3*(sx[1]-sx[0]+sx[3]-sx[2])+
4*(sy[2]-sy[0])+2*(sy[3]-sy[1]-d[0]-d[2])-d[1]-d[3];
_p[0][3]=2*(f[0]-f[1])+sy[0]+sy[1];
_p[1][3]=2*(sx[0]-sx[1])+d[0]+d[1];
_p[2][3]=6*(f[1]+f[2]-f[0]-f[3])+3*(sy[2]-sy[1]+sy[3]-sy[0])+
4*(sx[1]-sx[0])+2*(sx[3]-sx[2]-d[0]-d[1])-d[2]-d[3];
_p[3][3]=d[0]+d[1]+d[2]+d[3]+4*(f[0]-f[1]-f[2]+f[3])+
2*(sx[0]-sx[1]+sx[2]-sx[3]+sy[0]-sy[2]+sy[1]-sy[3]);
}
//-----------------------------------------------------------------------------
void mglFillP(int x, const mreal *a,int nx,mreal _p[4])
{
if(x<0 || x>nx-2)
{
memset(_p,0,4*sizeof(mreal));
return;
}
mreal s[2],f[2];
// function values
f[0]=a[x]; f[1]=a[x+1];
// derivative along x
if(x==0) s[0]=a[x+1]-a[x];
else s[0]=(a[x+1]-a[x-1])/2;
if(x==nx-2) s[1]=a[x+1]-a[x];
else s[1]=(a[x+2]-a[x])/2;
// compute the polynomial coefficients
_p[0]=f[0]; _p[1]=s[0];
_p[2]=3*(f[1]-f[0])-2*s[0]-s[1];
_p[3]=s[0]+s[1]+2*(f[0]-f[1]);
}
//-----------------------------------------------------------------------------
void mglData::Crop(int n1,int n2,char dir)
{
long nn;
register long i,k;
mreal *b;
if(n1<0) n1=0;
switch(dir)
{
case 'x':
n2 = n2>0 ? n2 : nx+n2;
if(n2<0 || n2>=nx || n2<n1) n2 = nx;
nn = n2-n1; b = new mreal[nn*ny*nz];
for(i=0;i<ny*nz;i++)
memcpy(b+nn*i,a+nx*i+n1,nn*sizeof(mreal));
nx = nn; delete []a; a = b; NewId();
break;
case 'y':
n2 = n2>0 ? n2 : ny+n2;
if(n2<0 || n2>=ny || n2<n1) n2 = ny;
nn = n2-n1; b = new mreal[nn*nx*nz];
for(i=0;i<nx;i++) for(long j=0;j<nz;j++) for(k=0;k<nn;k++)
b[i+nx*(k+nn*j)] = a[i+nx*(n1+k+ny*j)];
ny = nn; delete []a; a = b;
break;
case 'z':
n2 = n2>0 ? n2 : nz+n2;
if(n2<0 || n2>=nz || n2<n1) n2 = nz;
nn = n2-n1; b = new mreal[nn*nx*ny];
for(i=0;i<nx*ny;i++) for(k=0;k<nn;k++)
b[i+nx*ny*k] = a[i+nx*ny*(n1+k)];
nz = nn; delete []a; a = b;
break;
}
}
//-----------------------------------------------------------------------------
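// Histogram of the data over n bins spanning [v1,v2): each value falls into
// bin k = floor(n*(value-v1)/(v2-v1)).  With nsub!=0 the data cube is
// resampled on a grid refined by a factor abs(nsub)+1 (via spline or linear
// interpolation) and each sample contributes with weight d^3.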
mglData mglData::Hist(int n,mreal v1,mreal v2,int nsub) const
{
mglData b;
register long i,k;
b.Create(n);
if(v1==v2) return b;
if(nsub==0) for(i=0;i<nx*ny*nz;i++)
{
k = long(n*(a[i]-v1)/(v2-v1));
if(k>=0 && k<n) b.a[k] +=1;
}
else
{
register mreal x,y,z,d=1./(abs(nsub)+1),f;
bool sp = n>0;
for(x=0;x<nx;x+=d) for(y=0;y<ny;y+=d) for(z=0;z<nz;z+=d)
{
f = sp ? Spline(x,y,z) : Linear(x,y,z);
k = long(n*(f-v1)/(v2-v1));
if(k>=0 && k<n) b.a[k] += d*d*d;
}
}
return b;
}
//-----------------------------------------------------------------------------
mglData mglData::Hist(const mglData &w, int n,mreal v1,mreal v2,int nsub) const
{
mglData b;
register long i,k;
b.Create(n);
if(v1==v2 || nx*ny*nz!=w.nx*w.ny*w.nz) return b;
if(nsub==0) for(i=0;i<nx*ny*nz;i++)
{
k = long(n*(a[i]-v1)/(v2-v1));
if(k>=0 && k<n) b.a[k] += w.a[i];
}
else
{
register mreal x,y,z,d=1./(abs(nsub)+1),f,g;
bool sp = n>0;
for(x=0;x<nx;x+=d) for(y=0;y<ny;y+=d) for(z=0;z<nz;z+=d)
{
if(sp)
{ f = Spline(x,y,z); g = w.Spline(x,y,z); }
else
{ f = Linear(x,y,z); g = w.Linear(x,y,z); }
k = long(n*(f-v1)/(v2-v1));
if(k>=0 && k<n) b.a[k] += g*d*d*d;
}
}
return b;
}
//-----------------------------------------------------------------------------
mglData mglData::Sum(const char *dir) const
{
register long i,j,k,i0;
long kx=nx,ky=ny,kz=nz;
mreal *b = new mreal[nx*ny*nz];
mreal *c = new mreal[nx*ny*nz];
mglData d;
memset(b,0,nx*ny*nz*sizeof(mreal));
memcpy(c,a,nx*ny*nz*sizeof(mreal));
if(strchr(dir,'z') && kz>1)
{
for(i=0;i<kx*ky;i++)
{
for(j=0;j<kz;j++) b[i] += c[i+kx*ky*j];
b[i] /= kz;
}
memcpy(c,b,nx*ny*nz*sizeof(mreal)); kz = 1;
}
if(strchr(dir,'y') && ky>1)
{
for(i=0;i<kx;i++) for(k=0;k<kz;k++)
{
i0 = i+kx*ky*k;
for(j=0;j<ky;j++) b[i+kx*k] += c[i0+kx*j];
b[i+kx*k] /= ky;
}
memcpy(c,b,nx*ny*nz*sizeof(mreal)); ky = kz; kz = 1;
}
if(strchr(dir,'x') && kx>1)
{
for(j=0;j<ky*kz;j++)
{
for(i=0;i<kx;i++) b[j] += c[i+kx*j];
b[j] /= kx;
}
kx = ky; ky = kz; kz = 1;
}
d.Set(b,kx,ky,kz);
delete []b; delete []c; return d;
}
//-----------------------------------------------------------------------------
mglData mglData::Max(const char *dir) const
{
register long i,j,k,i0;
long kx=nx,ky=ny,kz=nz;
mreal *b = new mreal[nx*ny*nz];
mreal *c = new mreal[nx*ny*nz];
mglData d;
memcpy(c,a,nx*ny*nz*sizeof(mreal));
if(strchr(dir,'z') && kz>1)
{
for(i=0;i<kx*ky;i++)
{
b[i] = c[i];
for(j=1;j<kz;j++)
b[i] = b[i] > c[i+kx*ky*j] ? b[i] : c[i+kx*ky*j];
}
memcpy(c,b,nx*ny*nz*sizeof(mreal)); kz = 1;
}
if(strchr(dir,'y') && ky>1)
{
for(i=0;i<kx;i++) for(k=0;k<kz;k++)
{
i0 = i+kx*ky*k;
b[i+kx*k] = c[i0];
for(j=1;j<ky;j++)
b[i+kx*k] = b[i+kx*k] > c[i0+kx*j] ? b[i+kx*k] : c[i0+kx*j];
}
memcpy(c,b,nx*ny*nz*sizeof(mreal)); ky = kz; kz = 1;
}
if(strchr(dir,'x') && kx>1)
{
for(j=0;j<ky*kz;j++)
{
b[j] = c[kx*j];
for(i=1;i<kx;i++)
b[j] = b[j] > c[i+kx*j] ? b[j] : c[i+kx*j];
}
kx = ky; ky = kz; kz = 1;
}
d.Set(b,kx,ky,kz);
delete []b; delete []c; return d;
}
//-----------------------------------------------------------------------------
mglData mglData::Min(const char *dir) const
{
register long i,j,k,i0;
long kx=nx,ky=ny,kz=nz;
mreal *b = new mreal[nx*ny*nz];
mreal *c = new mreal[nx*ny*nz];
mglData d;
memcpy(c,a,nx*ny*nz*sizeof(mreal));
if(strchr(dir,'z') && kz>1)
{
for(i=0;i<kx*ky;i++)
{
b[i] = c[i];
for(j=1;j<kz;j++)
b[i] = b[i] < c[i+kx*ky*j] ? b[i] : c[i+kx*ky*j];
}
memcpy(c,b,nx*ny*nz*sizeof(mreal)); kz = 1;
}
if(strchr(dir,'y') && ky>1)
{
for(i=0;i<kx;i++) for(k=0;k<kz;k++)
{
i0 = i+kx*ky*k;
b[i+kx*k] = c[i0];
for(j=1;j<ky;j++)
b[i+kx*k] = b[i+kx*k] < c[i0+kx*j] ? b[i+kx*k] : c[i0+kx*j];
}
memcpy(c,b,nx*ny*nz*sizeof(mreal)); ky = kz; kz = 1;
}
if(strchr(dir,'x') && kx>1)
{
for(j=0;j<ky*kz;j++)
{
b[j] = c[kx*j];
for(i=1;i<kx;i++)
b[j] = b[j] < c[i+kx*j] ? b[j] : c[i+kx*j];
}
kx = ky; ky = kz; kz = 1;
}
d.Set(b,kx,ky,kz);
delete []b; delete []c; return d;
}
//-----------------------------------------------------------------------------
mreal mglData::Last(const char *cond, int &i, int &j, int &k) const
{
if(!cond) cond = "u";
mglFormula eq(cond);
if(i<0 || i>=nx) i=nx;
if(j<0 || j>=ny) j=ny-1;
if(k<0 || k>=nz) k=nz-1;
long i0 = i+nx*(j+ny*k)-1;
mreal x,y,z,dx=nx>1?1/(nx-1.):0,dy=ny>1?1/(ny-1.):0,dz=nz>1?1/(nz-1.):0;
for(;i0>=0;i0--)
{
x = dx*(i0%nx); y = dy*((i0/nx)%ny); z = dz*(i0/(nx*ny));
if(eq.Calc(x,y,z,a[i0])) break;
}
i = i0%nx; j = (i0/nx)%ny; k = i0/(nx*ny);
return i0>=0 ? a[i0] : 0;
}
//-----------------------------------------------------------------------------
mreal mglData::Find(const char *cond, int &i, int &j, int &k) const
{
if(!cond) cond = "u";
mglFormula eq(cond);
if(i<0 || i>=nx) i=-1;
if(j<0 || j>=ny) j=0;
if(k<0 || k>=nz) k=0;
long i0 = i+nx*(j+ny*k)+1;
mreal x,y,z,dx=nx>1?1/(nx-1.):0,dy=ny>1?1/(ny-1.):0,dz=nz>1?1/(nz-1.):0;
for(;i0<nx*ny*nz;i0++)
{
x = dx*(i0%nx); y = dy*((i0/nx)%ny); z = dz*(i0/(nx*ny));
if(eq.Calc(x,y,z,a[i0])) break;
}
i = i0%nx; j = (i0/nx)%ny; k = i0/(nx*ny);
return i0<nx*ny*nz ? a[i0] : 0;
}
//-----------------------------------------------------------------------------
int mglData::Find(const char *cond, char dir, int i, int j, int k) const
{
register int m=-1;
if(!cond) cond = "u";
mglFormula eq(cond);
mreal x=i/(nx-1.),y=j/(ny-1.),z=k/(nz-1.);
if(dir=='x' && nx>1)
{
for(m=i;m<nx;m++)
{
x = m/(nx-1.);
if(eq.Calc(x,y,z,a[m+nx*(j+ny*k)])) break;
}
}
if(dir=='y' && ny>1)
{
for(m=j;m<ny;m++)
{
y = m/(ny-1.);
if(eq.Calc(x,y,z,a[i+nx*(m+ny*k)])) break;
}
}
if(dir=='z' && nz>1)
{
for(m=k;m<nz;m++)
{
z = m/(nz-1.);
if(eq.Calc(x,y,z,a[i+nx*(j+ny*m)])) break;
}
}
return m;
}
//-----------------------------------------------------------------------------
bool mglData::FindAny(const char *cond) const
{
register long i,j,k;
register mreal x,y,z;
bool cc = false;
if(!cond || *cond==0) cond = "u";
mglFormula eq(cond);
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
{
x=i/(nx-1.); y=j/(ny-1.); z=k/(nz-1.);
if(eq.Calc(x,y,z,a[i+nx*(j+ny*k)])) { cc = true; break; }
}
return cc;
}
//-----------------------------------------------------------------------------
mglData mglTransformA(const mglData &am, const mglData &ph, const char *tr)
{
int nx = am.nx, ny = am.ny, nz = am.nz;
if(nx*ny*nz != ph.nx*ph.ny*ph.nz || !tr || tr[0]==0)
return mglData();
mglData re(am), im(am);
for(long i=0;i<nx*ny*nz;i++)
{ re.a[i] = am.a[i]*cos(ph.a[i]); im.a[i] = am.a[i]*sin(ph.a[i]); }
return mglTransform(re, im, tr);
}
//-----------------------------------------------------------------------------
mglData mglTransform(const mglData &re, const mglData &im, const char *tr)
{
mglData d,rr(re),ii(im);
int nx = re.nx, ny = re.ny, nz = re.nz;
if(nx*ny*nz != im.nx*im.ny*im.nz || !tr || tr[0]==0)
{ d.Create(1,1,1); return d; }
if(strchr(tr,'i') && strchr(tr,'f')) // general case
{
if(tr[0]=='f') mglFourier(rr,ii,"x");
if(tr[0]=='i') mglFourier(rr,ii,"xi");
if(tr[1]=='f') mglFourier(rr,ii,"y");
if(tr[1]=='i') mglFourier(rr,ii,"yi");
if(tr[2]=='f') mglFourier(rr,ii,"z");
if(tr[2]=='i') mglFourier(rr,ii,"zi");
}
else if(strchr(tr,'f')) // do Fourier only once for speeding up
{
char str[4] = " ";
if(tr[0]=='f') str[0]='x';
if(tr[1]=='f') str[1]='y';
if(tr[2]=='f') str[2]='z';
mglFourier(rr,ii,str);
}
else if(strchr(tr,'i')) // do Fourier only once for speeding up
{
char str[5] = " i";
if(tr[0]=='f') str[0]='x';
if(tr[1]=='f') str[1]='y';
if(tr[2]=='f') str[2]='z';
mglFourier(rr,ii,str);
}
d.Create(nx, ny, nz);
register long i;
for(i=0;i<nx*ny*nz;i++) d.a[i] = hypot(rr.a[i],ii.a[i]);
return d;
}
//-----------------------------------------------------------------------------
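// In-place FFT of the complex data (re,im) along the directions listed in
// 'dir', using GSL; an 'i' in 'dir' selects the inverse (backward) transform.
// Compiled out when GSL support is disabled (NO_GSL).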
void mglFourier(mglData &re, mglData &im, const char *dir)
{
#ifndef NO_GSL
int nx = re.nx, ny = re.ny, nz = re.nz;
if(nx*ny*nz != im.nx*im.ny*im.nz || !dir || dir[0]==0) return;
double *a = new double[2*nx*ny*nz];
register long i,j;
gsl_fft_direction how = strchr(dir,'i')?backward:forward;
for(i=0;i<nx*ny*nz;i++)
{ a[2*i] = re.a[i]; a[2*i+1] = im.a[i]; }
if(strchr(dir,'x') && nx>1)
{
gsl_fft_complex_wavetable *wt = gsl_fft_complex_wavetable_alloc(nx);
gsl_fft_complex_workspace *ws = gsl_fft_complex_workspace_alloc(nx);
for(i=0;i<ny*nz;i++)
gsl_fft_complex_transform(a+2*i*nx, 1, nx, wt, ws, how);
gsl_fft_complex_workspace_free(ws);
gsl_fft_complex_wavetable_free(wt);
}
if(strchr(dir,'y') && ny>1)
{
gsl_fft_complex_wavetable *wt = gsl_fft_complex_wavetable_alloc(ny);
gsl_fft_complex_workspace *ws = gsl_fft_complex_workspace_alloc(ny);
for(i=0;i<nx;i++) for(j=0;j<nz;j++)
gsl_fft_complex_transform(a+2*i+2*j*nx*ny, nx, ny, wt, ws, how);
gsl_fft_complex_workspace_free(ws);
gsl_fft_complex_wavetable_free(wt);
}
if(strchr(dir,'z') && nz>1)
{
gsl_fft_complex_wavetable *wt = gsl_fft_complex_wavetable_alloc(nz);
gsl_fft_complex_workspace *ws = gsl_fft_complex_workspace_alloc(nz);
for(i=0;i<ny*nx;i++)
gsl_fft_complex_transform(a+2*i, nx*ny, nz, wt, ws, how);
gsl_fft_complex_workspace_free(ws);
gsl_fft_complex_wavetable_free(wt);
}
for(i=0;i<nx*ny*nz;i++)
{ re.a[i] = a[2*i]; im.a[i] = a[2*i+1]; }
delete []a;
#endif
}
//-----------------------------------------------------------------------------
mglData mglSTFA(const mglData &re, const mglData &im, int dn, char dir)
{
mglData d;
#ifndef NO_GSL
if(dn<2) return d;
dn = 2*(dn/2);
long nx = re.nx, ny = re.ny;
if(nx*ny!=im.nx*im.ny) return d;
register long i,j,k,i0,dd=dn/2;
double *a = new double[4*dn],ff;
// for(i=0;i<nx*ny;i++) { a[2*i] = re.a[i]; a[2*i+1] = im.a[i]; }
gsl_fft_complex_wavetable *wt = gsl_fft_complex_wavetable_alloc(2*dn);
gsl_fft_complex_workspace *ws = gsl_fft_complex_workspace_alloc(2*dn);
long mx,my,mz;
if(dir=='y')
{
mx = nx; my = dn; mz = ny/dn;
d.Create(mx, mz, my);
for(i=0;i<mx;i++) for(j=0;j<mz;j++)
{
for(k=0;k<2*dn;k++)
{
i0 = k-dd+j*dn;
if(i0<0) i0=0; else if(i0>=ny) i0=ny-1;
i0 = i+nx*i0; ff = 1;
if(k<dd)
{ ff = 0.5*(k-dd/2.)/dd; ff=0.5+ff*(3-ff*ff); }
else if(k>=dn+dd)
{ ff = 0.5*(k-3.5*dd)/dd; ff=0.5-ff*(3-ff*ff); }
a[2*k] = re.a[i0]*ff; a[2*k+1] = im.a[i0]*ff;
}
gsl_fft_complex_forward(a, 1, 2*dn, wt, ws);
for(k=0;k<dd;k++)
{
i0 = i+mx*(j+mz*k);
d.a[i0+mx*mz*dd] = hypot(a[4*k],a[4*k+1])/dn;
d.a[i0] = hypot(a[4*k+2*dn],a[4*k+2*dn+1])/dn;
}
}
}
else
{
mx = dn; my = nx/dn; mz = ny;
d.Create(my, mx, mz);
for(i=0;i<my;i++) for(j=0;j<mz;j++)
{
for(k=0;k<2*dn;k++)
{
i0 = k-dd+i*dn;
if(i0<0) i0=0; else if(i0>=nx) i0=nx-1;
i0 += nx*j; ff = 1;
if(k<dd)
{ ff = 0.5*(k-dd/2.)/dd; ff=0.5+ff*(3-ff*ff); }
else if(k>=3*dd)
{ ff = 0.5*(k-3.5*dd)/dd; ff=0.5-ff*(3-ff*ff); }
a[2*k] = re.a[i0]*ff; a[2*k+1] = im.a[i0]*ff;
}
gsl_fft_complex_forward(a, 1, 2*dn, wt, ws);
for(k=0;k<dd;k++)
{
i0 = i+my*(k+mx*j);
d.a[i0+dd*my] = hypot(a[4*k],a[4*k+1])/dn;
d.a[i0] = hypot(a[4*k+2*dn],a[4*k+2*dn+1])/dn;
}
}
}
delete []a;
gsl_fft_complex_workspace_free(ws);
gsl_fft_complex_wavetable_free(wt);
#endif
return d;
}
//-----------------------------------------------------------------------------
void mglData::Swap(const char *dir)
{
register long i,j,k,i0,nn,j0;
mreal b;
if(strchr(dir,'z') && nz>1)
{
for(i=0;i<nx*ny;i++)
{
nn = (nz/2)*nx*ny;
for(j=0;j<nz/2;j++)
{ i0 = i+j*nx*ny; b = a[i0]; a[i0] = a[i0+nn]; a[i0+nn] = b; }
}
}
if(strchr(dir,'y') && ny>1)
{
nn = (ny/2)*nx;
for(i=0;i<nx;i++) for(k=0;k<nz;k++)
{
j0 = i+nx*ny*k;
for(j=0;j<ny/2;j++)
{ i0 = j0+j*nx; b = a[i0]; a[i0] = a[i0+nn]; a[i0+nn] = b; }
}
}
if(strchr(dir,'x') && nx>1)
{
nn = nx/2;
for(j=0;j<ny*nz;j++)
{
j0 = j*nx;
for(i=0;i<nx/2;i++)
{ i0 = i+j0; b = a[i0]; a[i0] = a[i0+nn]; a[i0+nn] = b; }
}
}
}
//-----------------------------------------------------------------------------
void mglData::Mirror(const char *dir)
{
register long i,j,k,i0,j0;
mreal b;
if(strchr(dir,'z') && nz>1)
{
for(i=0;i<nx*ny;i++) for(j=0;j<nz/2;j++)
{
i0 = i+j*nx*ny; j0 = i+(nz-1-j)*nx*ny;
b = a[i0]; a[i0] = a[j0]; a[j0] = b;
}
}
if(strchr(dir,'y') && ny>1)
{
for(i=0;i<nx;i++) for(k=0;k<nz;k++)
{
j0 = i+nx*ny*k;
for(j=0;j<ny/2;j++)
{
i0 = j0+(ny-1-j)*nx; b = a[j0+j*nx];
a[j0+j*nx] = a[i0]; a[i0] = b;
}
}
}
if(strchr(dir,'x') && nx>1)
{
for(j=0;j<ny*nz;j++)
{
j0 = j*nx;
for(i=0;i<nx/2;i++)
{ i0 = nx-1-i+j0; b = a[i+j0]; a[i+j0] = a[i0]; a[i0] = b; }
}
}
}
//-----------------------------------------------------------------------------
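// Moments of the data along direction 'dir': returns i0 = sum(a) and fills
// x = <d> (weighted mean index) and w = sqrt(<d^2> - <d>^2) (weighted width).
// Any other 'dir' value computes the mean and dispersion of the values
// themselves ("self-dispersion").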
mreal mglData::Momentum(char dir,mreal &x,mreal &w) const
{
mreal i0=0,i1=0,i2=0,d;
register long i;
switch(dir)
{
case 'x':
for(i=0;i<nx*ny*nz;i++)
{
d = i%nx; i0+= a[i];
i1+= a[i]*d; i2+= a[i]*d*d;
}
break;
case 'y':
for(i=0;i<nx*ny*nz;i++)
{
d = (i/nx)%ny; i0+= a[i];
i1+= a[i]*d; i2+= a[i]*d*d;
}
break;
case 'z':
for(i=0;i<nx*ny*nz;i++)
{
d = i/(nx*ny); i0+= a[i];
i1+= a[i]*d; i2+= a[i]*d*d;
}
break;
default: // "self-dispersion"
i0 = nx*ny*nz;
for(i=0;i<nx*ny*nz;i++)
{
i1+= a[i]; i2+= a[i]*a[i];
}
}
if(i0==0) return 0;
x = i1/i0; w = i2>x*x*i0 ? sqrt(i2/i0 - x*x) : 0;
return i0;
}
//-----------------------------------------------------------------------------
mreal mglData::Momentum(char dir,mreal &x,mreal &w,mreal &s,mreal &k) const
{
mreal i0=0,i1=0,i2=0,d,t;
register long i;
i0 = Momentum(dir,x,w);
if(i0==0 || w==0) return 0;
switch(dir)
{
case 'x':
for(i=0;i<nx*ny*nz;i++)
{
d = (i%nx - x)/w; t = d*d;
i1+= a[i]*t*d; i2+= a[i]*t*t;
}
break;
case 'y':
for(i=0;i<nx*ny*nz;i++)
{
d = ((i/nx)%ny - x)/w; t = d*d;
i1+= a[i]*t*d; i2+= a[i]*t*t;
}
break;
case 'z':
for(i=0;i<nx*ny*nz;i++)
{
d = (i/(nx*ny) - x)/w; t = d*d;
i1+= a[i]*t*d; i2+= a[i]*t*t;
}
break;
default: // "self-dispersion"
for(i=0;i<nx*ny*nz;i++)
{
d = (a[i] - x)/w; t = d*d;
i1+= t*d; i2+= t*t;
}
}
s = i1/i0; k = i2/(i0*3);
return i0;
}
//-----------------------------------------------------------------------------
void mglData::NormSl(mreal v1, mreal v2, char dir, bool keep_en, bool sym)
{
mglData b(*this);
register long i,k;
register mreal e0=1, e=1, m1, m2, aa;
if(sym) { v2 = -v1>v2 ? -v1:v2; v1 = -v2; }
if(dir=='z' && nz>1)
{
for(k=0;k<nz;k++)
{
m1 = 1e20; m2 = -1e20;
for(i=0;i<nx*ny;i++)
{
aa = a[i+nx*ny*k];
m1 = m1<aa ? m1 : aa;
m2 = m2>aa ? m2 : aa;
e += aa*aa;
}
if(m1==m2) m2+=1;
if(sym) { m2 = -m1>m2 ? -m1:m2; m1 = -m2; }
if(keep_en && k) e = sqrt(e/e0);
else { e0 = e; e=1; }
for(i=0;i<nx*ny;i++)
b.a[i+nx*ny*k] = (v1 + (v2-v1)*(a[i+nx*ny*k]-m1)/(m2-m1))*e;
}
}
else if(dir=='y' && ny>1)
{
register long j;
for(j=0;j<ny;j++)
{
m1 = 1e20; m2 = -1e20;
for(i=0;i<nx;i++) for(k=0;k<nz;k++)
{
aa = a[i+nx*(j+ny*k)];
m1 = m1<aa ? m1 : aa;
m2 = m2>aa ? m2 : aa;
e += aa*aa;
}
if(m1==m2) m2+=1;
if(sym) { m2 = -m1>m2 ? -m1:m2; m1 = -m2; }
if(keep_en && j) e = sqrt(e/e0);
else { e0 = e; e=1; }
for(i=0;i<nx;i++) for(k=0;k<nz;k++)
b.a[i+nx*(j+ny*k)] = (v1 + (v2-v1)*(a[i+nx*(j+ny*k)]-m1)/(m2-m1))*e;
}
}
else if(dir=='x' && nx>1)
{
for(i=0;i<nx;i++)
{
m1 = 1e20; m2 = -1e20;
for(k=0;k<ny*nz;k++)
{
aa = a[i+nx*k];
m1 = m1<aa ? m1 : aa;
m2 = m2>aa ? m2 : aa;
e += aa*aa;
}
if(m1==m2) m2+=1;
if(sym) { m2 = -m1>m2 ? -m1:m2; m1 = -m2; }
if(keep_en && i) e = sqrt(e/e0);
else { e0 = e; e=1; }
for(k=0;k<ny*nz;k++)
b.a[i+nx*k] = (v1 + (v2-v1)*(a[i+nx*k]-m1)/(m2-m1))*e;
}
}
memcpy(a, b.a, nx*ny*nz*sizeof(mreal));
}
//-----------------------------------------------------------------------------
mglData mglData::Momentum(char dir, const char *how) const
{
mglData b;
mglFormula eq(how);
register long i,j,k,ii;
mreal i0,i1,x,y,z;
switch(dir)
{
case 'x':
b.Create(nx);
for(i=0;i<nx;i++)
{
i0 = i1 = 0;
for(j=0;j<ny;j++) for(k=0;k<nz;k++)
{
ii = i+nx*(j+ny*k);
x = i/(nx-1.); y = j/(ny-1.); z = k/(nz-1.);
i0+= a[ii];
i1+= a[ii]*eq.Calc(x,y,z,a[ii]);
}
b.a[i] = i0>0 ? i1/i0 : 0;
}
break;
case 'y':
b.Create(ny);
for(j=0;j<ny;j++)
{
i0 = i1 = 0;
for(i=0;i<nx;i++) for(k=0;k<nz;k++)
{
ii = i+nx*(j+ny*k);
x = i/(nx-1.); y = j/(ny-1.); z = k/(nz-1.);
i0+= a[ii];
i1+= a[ii]*eq.Calc(x,y,z,a[ii]);
}
b.a[j] = i0>0 ? i1/i0 : 0;
}
break;
case 'z':
b.Create(nz);
for(k=0;k<nz;k++)
{
i0 = i1 = 0;
for(i=0;i<nx;i++) for(j=0;j<ny;j++)
{
ii = i+nx*(j+ny*k);
x = i/(nx-1.); y = j/(ny-1.); z = k/(nz-1.);
i0+= a[ii];
i1+= a[ii]*eq.Calc(x,y,z,a[ii]);
}
b.a[k] = i0>0 ? i1/i0 : 0;
}
break;
}
return b;
}
//-----------------------------------------------------------------------------
void mglData::PrintInfo(FILE *fp) const
{
if(fp==0) return;
char *buf = new char[512];
PrintInfo(buf);
fprintf(fp,"%s",buf); fflush(fp);
delete []buf;
}
//-----------------------------------------------------------------------------
void mglData::PrintInfo(char *buf, bool all) const
{
if(buf==0) return;
char s[128];
buf[0]=0;
sprintf(s,"nx = %ld\tny = %ld\tnz = %ld\n",nx,ny,nz); strcat(buf,s);
int i=0,j=0,k=0;
mreal b = Maximal(i,j,k);
sprintf(s,"Maximum is %g\t at coordinates x = %d\ty = %d\tz = %d\n", b,i,j,k); strcat(buf,s);
b = Minimal(i,j,k);
sprintf(s,"Minimum is %g\t at coordinates x = %d\ty = %d\tz = %d\n", b,i,j,k); strcat(buf,s);
if(all)
{
mreal A=0,Wa=0,X=0,Y=0,Z=0,Wx=0,Wy=0,Wz=0;
Momentum('x',X,Wx); Momentum('y',Y,Wy);
Momentum('z',Z,Wz); Momentum(0,A,Wa);
sprintf(s,"Averages are:\n<a> = %g\t<x> = %g\t<y> = %g\t<z> = %g\n", A,X,Y,Z); strcat(buf,s);
sprintf(s,"Widths (dispersions) are:\nWa = %g\tWx = %g\tWy = %g\tWz = %g\n",
Wa,Wx,Wy,Wz); strcat(buf,s);
}
}
//-----------------------------------------------------------------------------
void mglData::Rearrange(int mx, int my, int mz)
{
if(mx<1) return; // wrong mx
if(my<1) { my = nx*ny*nz/mx; mz = 1; }
else if(mz<1) mz = (nx*ny*nz)/(mx*my);
long m = mx*my*mz;
if(m==0 || m>nx*ny*nz) return; // too high desired dimensions
nx = mx; ny = my; nz = mz; NewId();
}
//-----------------------------------------------------------------------------
void mglData::InsertColumns(int at, int num, const char *eq)
{
if(num<1) return;
mglData b(nx+num,ny,nz);
if(at<1) at=1; if(at>nx) at=nx;
register long i,j,k;
for(i=0;i<at;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
b.a[i+(nx+num)*(j+ny*k)] = a[i+nx*(j+ny*k)];
for(i=at;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
b.a[i+num+(nx+num)*(j+ny*k)] = a[i+nx*(j+ny*k)];
if(eq)
{
mglFormula e(eq);
mreal dx,dy,dz;
dx = num==1?0:1./(num-1);
dy = ny==1?0:1./(ny-1);
dz = nz==1?0:1./(nz-1);
for(i=0;i<num;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
b.a[i+at+(nx+num)*(j+ny*k)] = e.Calc(i*dx,j*dy, k*dz);
}
Set(b);
}
//-----------------------------------------------------------------------------
void mglData::InsertRows(int at, int num, const char *eq)
{
if(num<1) return;
mglData b(nx,ny+num,nz);
if(at<1) at=1; if(at>ny) at=ny;
register long i,j,k;
for(i=0;i<nx;i++) for(j=0;j<at;j++) for(k=0;k<nz;k++)
b.a[i+nx*(j+(ny+num)*k)] = a[i+nx*(j+ny*k)];
for(i=0;i<nx;i++) for(j=at;j<ny;j++) for(k=0;k<nz;k++)
b.a[i+nx*(j+num+(ny+num)*k)] = a[i+nx*(j+ny*k)];
if(eq)
{
mglFormula e(eq);
mreal dx,dy,dz;
dy = num==1?0:1./(num-1);
dx = nx==1?0:1./(nx-1);
dz = nz==1?0:1./(nz-1);
for(i=0;i<nx;i++) for(j=0;j<num;j++) for(k=0;k<nz;k++)
b.a[i+nx*(j+at+(ny+num)*k)] = e.Calc(i*dx,j*dy, k*dz);
}
Set(b);
}
//-----------------------------------------------------------------------------
void mglData::InsertSlices(int at, int num, const char *eq)
{
if(num<1) return;
mglData b(nx,ny,nz+num);
if(at<1) at=1; if(at>nz) at=nz;
register long i,j,k;
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<at;k++)
b.a[i+nx*(j+ny*k)] = a[i+nx*(j+ny*k)];
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=at;k<nz;k++)
b.a[i+nx*(j+ny*(k+num))] = a[i+nx*(j+ny*k)];
if(eq)
{
mglFormula e(eq);
mreal dx,dy,dz;
dz = num==1?0:1./(num-1);
dx = nx==1?0:1./(nx-1);
dy = ny==1?0:1./(ny-1);
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<num;k++)
b.a[i+nx*(j+(at+k)*ny)] = e.Calc(i*dx,j*dy, k*dz);
}
Set(b);
}
//-----------------------------------------------------------------------------
void mglData::DeleteColumns(int at, int num)
{
if(num<1 || at<0 || at+num>=nx) return;
mglData b(nx-num,ny,nz);
register long i,j,k;
for(i=0;i<at;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
b.a[i+(nx-num)*(j+ny*k)] = a[i+nx*(j+ny*k)];
for(i=at+num;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
b.a[i-num+(nx-num)*(j+ny*k)] = a[i+nx*(j+ny*k)];
Set(b);
}
//-----------------------------------------------------------------------------
void mglData::DeleteRows(int at, int num)
{
if(num<1 || at<0 || at+num>=ny) return;
mglData b(nx,ny-num,nz);
register long i,j,k;
for(i=0;i<nx;i++) for(j=0;j<at;j++) for(k=0;k<nz;k++)
b.a[i+nx*(j+(ny-num)*k)] = a[i+nx*(j+ny*k)];
for(i=0;i<nx;i++) for(j=at+num;j<ny;j++) for(k=0;k<nz;k++)
b.a[i+nx*(j-num+(ny-num)*k)] = a[i+nx*(j+ny*k)];
Set(b);
}
//-----------------------------------------------------------------------------
void mglData::DeleteSlices(int at, int num)
{
if(num<1 || at<0 || at+num>=nz) return;
mglData b(nx,ny,nz-num);
register long i,j,k;
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<at;k++)
b.a[i+nx*(j+ny*k)] = a[i+nx*(j+ny*k)];
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=at+num;k<nz;k++)
b.a[i+nx*(j+(k-num)*ny)] = a[i+nx*(j+ny*k)];
Set(b);
}
//-----------------------------------------------------------------------------
void mglData::Insert(char dir, int at, int num)
{
if(dir=='x') InsertColumns(at,num);
if(dir=='y') InsertRows(at,num);
if(dir=='z') InsertSlices(at,num);
}
//-----------------------------------------------------------------------------
void mglData::Delete(char dir, int at, int num)
{
if(dir=='x') DeleteColumns(at,num);
if(dir=='y') DeleteRows(at,num);
if(dir=='z') DeleteSlices(at,num);
}
//-----------------------------------------------------------------------------
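// Quintic (5th-degree) interpolation between two samples: a1[],a2[] are the
// coefficients of cubic polynomials approximating the first derivative on
// each 5-point stencil, b[0..5] are the coefficients of the interpolating
// polynomial in the fractional offset d, and dy receives its derivative.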
mreal mgl_spline5(mreal y1[5], mreal y2[5], int n1, int n2, mreal d, mreal &dy)
{
mreal a1[4], a2[4], f0,d0,t0,f1,d1,t1, b[6];
a1[0] = -(3*y1[4]-16*y1[3]+36*y1[2]-48*y1[1]+25*y1[0])/12;
a1[1] = (11*y1[4]-56*y1[3]+114*y1[2]-104*y1[1]+35*y1[0])/12;
a1[2] = -(3*y1[4]-14*y1[3]+24*y1[2]-18*y1[1]+5*y1[0])/4;
a1[3] = (y1[4]-4*y1[3]+6*y1[2]-4*y1[1]+y1[0])/6;
a2[0] = -(3*y2[4]-16*y2[3]+36*y2[2]-48*y2[1]+25*y2[0])/12;
a2[1] = (11*y2[4]-56*y2[3]+114*y2[2]-104*y2[1]+35*y2[0])/12;
a2[2] = -(3*y2[4]-14*y2[3]+24*y2[2]-18*y2[1]+5*y2[0])/4;
a2[3] = (y2[4]-4*y2[3]+6*y2[2]-4*y2[1]+y2[0])/6;
n2++;
f0 = y1[n1]; d0 = a1[0]+n1*(a1[1]+n1*(a1[2]+n1*a1[3])); t0 = a1[1]/2+a1[2]*n1+1.5*n1*n1*a1[3];
f1 = y2[n2]; d1 = a2[0]+n2*(a2[1]+n2*(a2[2]+n2*a2[3])); t1 = a2[1]/2+a2[2]*n2+1.5*n2*n2*a2[3];
b[0] = f0; b[1] = d0; b[2] = t0;
b[3] = 10*(f1-f0)+t1-3*t0-4*d1-6*d0;
b[4] = 15*(f0-f1)-2*t1+3*t0+7*d1+8*d0;
b[5] = 6*(f1-f0)+t1-t0-3*d1-3*d0;
dy = b[1] + d*(2*b[2]+d*(3*b[3]+d*(4*b[4]+d*5*b[5])));
return b[0] + d*(b[1]+d*(b[2]+d*(b[3]+d*(b[4]+d*b[5]))));
}
//-----------------------------------------------------------------------------
mreal mgl_spline3(mreal y1[3], mreal y2[3], int n1, int n2, mreal d, mreal &dy)
{
mreal a1[2], a2[2], f0,d0,d1,f1, b[4];
a1[0] = -(y1[2]-4*y1[1]+3*y1[0])/2;
a1[1] = y1[2]-2*y1[1]+y1[0];
a2[0] = -(y2[2]-4*y2[1]+3*y2[0])/2;
a2[1] = y2[2]-2*y2[1]+y2[0];
n2++;
f0 = y1[n1]; d0 = a1[0]+a1[1]*n1;
f1 = y2[n2]; d1 = a2[0]+a2[1]*n2;
b[0] = f0; b[1] = d0;
b[2] = 3*(f1-f0)-d1-2*d0;
b[3] = 2*(f0-f1)+d1+d0;
dy = b[1] + d*(2*b[2]+d*3*b[3]);
return b[0] + d*(b[1]+d*(b[2]+d*b[3]));
}
//-----------------------------------------------------------------------------
mreal mglData::Spline5(mreal x,mreal y,mreal z,mreal &dx,mreal &dy,mreal &dz) const
{
mreal res=0;
if(nx<5) return 0; // not interpolation for points < 5 !!!
dx = dy = dz = 0; x*=nx-1; y*=ny-1; z*=nz-1;
if(ny==1 && nz==1) // 1D case
{
int n = int(x), n1 = n>1 ? 2:n, n2 = n<nx-3 ? 1:5+n-nx;
res = mgl_spline5(a+n+n1-2, a+n-n2, n1, n2, x-n, dx);
}
/* else if(nz==1) // 2D case
{
if(ny<6) return 0; // not interpolation for points < 5 !!!
int n = int(x), n1 = n>1 ? 2:n, n2 = n<nx-3 ? 1:5+n-nx;
int m = int(y), m1 = m>1 ? 2:m, m2 = m<ny-3 ? 1:5+m-ny;
mreal b[6],d[6],dd;
/* m += m1-2 < -m2 ? m1-2 : -m2;
for(int i=0;i<6;i++)
b[i] = mgl_spline5(a+n+n1-2+nx*(m+i), a+n-n2+nx*(m+i), n1, n2, x-n, d[i]);
res = mgl_spline5(b,b+1
}*/
return res;
}
//-----------------------------------------------------------------------------
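// Envelope of an oscillating signal along direction 'dir' via the analytic
// signal: forward FFT, suppression of the negative-frequency half of the
// spectrum, backward FFT, then the point-wise magnitude hypot(re,im).
// Requires GSL (no-op when NO_GSL is defined).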
void mglData::Envelop(char dir)
{
#ifndef NO_GSL
register int i,j,k,i0;
double *b = new double[2*nx*ny*nz];
for(i=0;i<nx*ny*nz;i++) { b[2*i] = a[i]; b[2*i+1] = 0; }
if(dir=='x' && nx>1)
{
gsl_fft_complex_wavetable *wt = gsl_fft_complex_wavetable_alloc(nx);
gsl_fft_complex_workspace *ws = gsl_fft_complex_workspace_alloc(nx);
for(i=0;i<ny*nz;i++)
{
gsl_fft_complex_transform(b+2*i*nx, 1, nx, wt, ws, forward);
for(j=0;j<nx;j++)
{ b[j+2*i*nx] /= nx/2.; b[j+nx+2*i*nx] = 0; }
gsl_fft_complex_transform(b+2*i*nx, 1, nx, wt, ws, backward);
}
gsl_fft_complex_workspace_free(ws);
gsl_fft_complex_wavetable_free(wt);
}
if(dir=='y' && ny>1)
{
gsl_fft_complex_wavetable *wt = gsl_fft_complex_wavetable_alloc(ny);
gsl_fft_complex_workspace *ws = gsl_fft_complex_workspace_alloc(ny);
for(i=0;i<nx;i++) for(j=0;j<nz;j++)
{
i0 = 2*i+2*j*nx*ny;
gsl_fft_complex_transform(b+i0, nx, ny, wt, ws, forward);
for(k=0;k<ny;k++)
{ b[i0+k*2*nx] /= ny/2.; b[i0+2*nx*k+2*nx*ny] = 0; }
gsl_fft_complex_transform(b+i0, nx, ny, wt, ws, backward);
}
gsl_fft_complex_workspace_free(ws);
gsl_fft_complex_wavetable_free(wt);
}
if(dir=='z' && nz>1)
{
gsl_fft_complex_wavetable *wt = gsl_fft_complex_wavetable_alloc(nz);
gsl_fft_complex_workspace *ws = gsl_fft_complex_workspace_alloc(nz);
for(i=0;i<ny*nx;i++)
{
i0 = 2*nx*ny;
gsl_fft_complex_transform(b+2*i, nx*ny, nz, wt, ws, forward);
for(j=0;j<nz;j++)
{ b[i+j*i0] /= nz/2.; b[i+j*i0+nz*i0] = 0; }
gsl_fft_complex_transform(b+2*i, nx*ny, nz, wt, ws, backward);
}
gsl_fft_complex_workspace_free(ws);
gsl_fft_complex_wavetable_free(wt);
}
for(i=0;i<nx*ny*nz;i++) a[i] = hypot(b[2*i], b[2*i+1]);
delete []b;
#endif
}
//-----------------------------------------------------------------------------
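// Remove jumps by a multiple of 'da' along a strided 1d slice (phase
// "sewing"/unwrapping): each point is shifted by the multiple of da that
// brings it closest to the value extrapolated from its predecessors;
// NAN entries restart the chain.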
#define omod(x,y) (y)*((x)>0?int((x)/(y)+0.5):int((x)/(y)-0.5))
void mgl_omod(mreal *a, mreal da, int nx, int n)
{
register long i,ii;
bool qq=true;
register mreal q;
for(i=1;i<nx;i++)
{
ii = i*n;
if(isnan(a[ii-n])) { qq=true; continue; }
if(qq)
{
a[ii] += omod(a[ii-n]-a[ii], da);
qq=false;
}
else
{
q = 2*a[ii-n]-a[ii-2*n];
a[ii] += omod(q-a[ii], da);
}
}
}
//-----------------------------------------------------------------------------
void mglData::Sew(const char *dirs, mreal da)
{
long j,k;
if(strchr(dirs,'x') && nx>1) for(j=0;j<nz*ny;j++)
mgl_omod(a+j*nx, da, nx, 1);
if(strchr(dirs,'y') && ny>1) for(j=0;j<nx;j++) for(k=0;k<nz;k++)
mgl_omod(a+j+k*nx*ny, da, ny, nx);
if(strchr(dirs,'z') && nz>1) for(j=0;j<nx*ny;j++)
mgl_omod(a+j, da, nz, nx*ny);
}
//-----------------------------------------------------------------------------
mglData mglData::Evaluate(const mglData &idat, const mglData &jdat, const mglData &kdat, bool norm) const
{
mglData b;
register int i,n=idat.nx*idat.ny*idat.nz;
if(jdat.nx*jdat.ny*jdat.nz!=n || kdat.nx*kdat.ny*kdat.nz!=n) return b;
b.Create(idat.nx, idat.ny, idat.nz);
if(norm) for(i=0;i<n;i++) b.a[i] = Spline1(idat.a[i], jdat.a[i], kdat.a[i]);
else for(i=0;i<n;i++) b.a[i] = Spline(idat.a[i], jdat.a[i], kdat.a[i]);
return b;
}
//-----------------------------------------------------------------------------
mglData mglData::Evaluate(const mglData &idat, const mglData &jdat, bool norm) const
{
mglData b;
register int i,n=idat.nx*idat.ny*idat.nz;
if(jdat.nx*jdat.ny*jdat.nz!=n) return b;
b.Create(idat.nx, idat.ny, idat.nz);
if(norm) for(i=0;i<n;i++) b.a[i] = Spline1(idat.a[i], jdat.a[i]);
else for(i=0;i<n;i++) b.a[i] = Spline(idat.a[i], jdat.a[i]);
return b;
}
//-----------------------------------------------------------------------------
mglData mglData::Evaluate(const mglData &idat, bool norm) const
{
mglData b;
register int i,n=idat.nx*idat.ny*idat.nz;
b.Create(idat.nx, idat.ny, idat.nz);
if(norm) for(i=0;i<n;i++) b.a[i] = Spline1(idat.a[i]);
else for(i=0;i<n;i++) b.a[i] = Spline(idat.a[i]);
return b;
}
//-----------------------------------------------------------------------------
void mglData::Put(mreal val, int xx, int yy, int zz)
{
if(xx>=nx || yy>=ny || zz>=nz) return;
register long i,j;
if(xx<0 && yy<0 && zz<0) // all data
for(i=0;i<nx*ny*nz;i++) a[i] = val;
else if(xx<0 && yy<0) // 2d
for(i=0;i<nx*ny;i++) a[i+zz*nx*ny] = val;
else if(yy<0 && zz<0) // 2d
for(i=0;i<nz*ny;i++) a[xx+i*nx] = val;
else if(xx<0 && zz<0) // 2d
for(i=0;i<nx;i++) for(j=0;j<nz;j++)
a[i+nx*(yy+j*ny)] = val;
else if(xx<0)
for(i=0;i<nx;i++) a[i+nx*(yy+zz*ny)] = val;
else if(yy<0)
for(i=0;i<ny;i++) a[xx+nx*(i+zz*ny)] = val;
else if(zz<0)
for(i=0;i<nz;i++) a[xx+nx*(yy+i*ny)] = val;
else a[xx+nx*(yy+zz*ny)] = val;
}
//-----------------------------------------------------------------------------
void mglData::Put(const mglData &val, int xx, int yy, int zz)
{
if(xx>=nx || yy>=ny || zz>=nz) return;
register long i,j,k;
if(xx<0 && yy<0 && zz<0) // all data
{
if(val.nx>=nx && val.ny>=ny && val.nz>=nz)
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
a[i+nx*(j+k*ny)] = val.a[i+val.nx*(j+val.ny*k)];
else if(val.nx>=nx && val.ny>=ny)
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
a[i+nx*(j+k*ny)] = val.a[i+val.nx*j];
else if(val.nx>=nx)
for(i=0;i<nx;i++) for(j=0;j<ny;j++) for(k=0;k<nz;k++)
a[i+nx*(j+k*ny)] = val.a[i];
}
else if(xx<0 && yy<0) // 2d
{
if(val.nx>=nx && val.ny>=ny)
for(i=0;i<nx;i++) for(j=0;j<ny;j++)
a[i+nx*(j+zz*ny)] = val.a[i+val.nx*j];
else if(val.nx>=nx)
for(i=0;i<nx;i++) for(j=0;j<ny;j++)
a[i+nx*(j+zz*ny)] = val.a[i];
}
else if(yy<0 && zz<0) // 2d
{
if(val.nx>=ny && val.ny>=nz)
for(i=0;i<ny;i++) for(j=0;j<nz;j++)
a[xx+nx*(i+j*ny)] = val.a[i+val.nx*j];
else if(val.nx>=ny)
for(i=0;i<ny;i++) for(j=0;j<nz;j++)
a[xx+nx*(i+j*ny)] = val.a[i];
}
else if(xx<0 && zz<0) // 2d
{
if(val.nx>=nx && val.ny>=nz)
for(i=0;i<nx;i++) for(j=0;j<nz;j++)
a[i+nx*(yy+j*ny)] = val.a[i+val.nx*j];
else if(val.nx>=nx)
for(i=0;i<nx;i++) for(j=0;j<nz;j++)
a[i+nx*(yy+j*ny)] = val.a[i];
}
else if(xx<0)
{
if(val.nx>=nx) for(i=0;i<nx;i++)
a[i+nx*(yy+zz*ny)] = val.a[i];
}
else if(yy<0)
{
if(val.nx>=ny) for(i=0;i<ny;i++)
a[xx+nx*(i+zz*ny)] = val.a[i];
}
else if(zz<0)
{
if(val.nx>=nz) for(i=0;i<nz;i++)
a[xx+nx*(yy+i*ny)] = val.a[i];
}
}
//-----------------------------------------------------------------------------
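// Derivative of the data with respect to the curvilinear coordinates given
// by the grids x,y: the index-direction differences are combined through
// the inverse Jacobian,  da/dx = (a_v*y_u - a_u*y_v)/(x_v*y_u - x_u*y_v).
// When x,y do not have a matching number of slices, their first slice is
// reused for every k (offset kk).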
void mglData::Diff(const mglData &x, const mglData &y)
{
if(nx<2 || ny<2) return;
if(x.nx*x.ny!=nx*ny || y.nx*y.ny!=nx*ny) return;
bool same = (x.nz==nz && y.nz==nz);
mreal *b = new mreal[nx*ny*nz],au,av,xu,xv,yu,yv;
register long i,j,k,i0,i1;
long kk;
for(k=0;k<nz;k++)
{
kk = same ? 0:-nx*ny*k;
for(i=0;i<nx;i++) for(j=0;j<ny;j++)
{
i0 = i+nx*(j+ny*k); i1 = i0 + kk;
if(i==0)
{
au = 3*a[i0]-4*a[i0+1]+a[i0+2];
xu = 3*x.a[i1]-4*x.a[i1+1]+x.a[i1+2];
yu = 3*y.a[i1]-4*y.a[i1+1]+y.a[i1+2];
}
else if(i==nx-1)
{
au = 3*a[i0]-4*a[i0-1]+a[i0-2];
xu = 3*x.a[i1]-4*x.a[i1-1]+x.a[i1-2];
yu = 3*y.a[i1]-4*y.a[i1-1]+y.a[i1-2];
}
else
{
au = a[i0+1]-a[i0-1];
xu = x.a[i1+1]-x.a[i1-1];
yu = y.a[i1+1]-y.a[i1-1];
}
if(j==0)
{
av = 3*a[i0]-4*a[i0+nx]+a[i0+2*nx];
xv = 3*x.a[i1]-4*x.a[i1+nx]+x.a[i1+2*nx];
yv = 3*y.a[i1]-4*y.a[i1+nx]+y.a[i1+2*nx];
}
else if(j==ny-1)
{
av = 3*a[i0]-4*a[i0-nx]+a[i0-2*nx];
xv = 3*x.a[i1]-4*x.a[i1-nx]+x.a[i1-2*nx];
yv = 3*y.a[i1]-4*y.a[i1-nx]+y.a[i1-2*nx];
}
else
{
av = a[i0+nx]-a[i0-nx];
xv = x.a[i1+nx]-x.a[i1-nx];
yv = y.a[i1+nx]-y.a[i1-nx];
}
b[i0] = (av*yu-au*yv)/(xv*yu-xu*yv);
}
}
delete []a; a=b;
}
//-----------------------------------------------------------------------------
void mglData::Diff(const mglData &x, const mglData &y, const mglData &z)
{
if(nx<2 || ny<2) return;
if(x.nx*x.ny*x.nz!=nx*ny*nz || y.nx*y.ny*y.nz!=nx*ny*nz || z.nx*z.ny*z.nz!=nx*ny*nz) return;
mreal *b = new mreal[nx*ny*nz],au,av,aw,xu,xv,xw,yu,yv,yw,zu,zv,zw;
register long i,j,k,i0,nn=nx*ny;
for(k=0;k<nz;k++) for(i=0;i<nx;i++) for(j=0;j<ny;j++)
{
i0 = i+nx*(j+ny*k);
if(i==0)
{
au = 3*a[i0]-4*a[i0+1]+a[i0+2];
xu = 3*x.a[i0]-4*x.a[i0+1]+x.a[i0+2];
yu = 3*y.a[i0]-4*y.a[i0+1]+y.a[i0+2];
zu = 3*z.a[i0]-4*z.a[i0+1]+z.a[i0+2];
}
else if(i==nx-1)
{
au = 3*a[i0]-4*a[i0-1]+a[i0-2];
xu = 3*x.a[i0]-4*x.a[i0-1]+x.a[i0-2];
yu = 3*y.a[i0]-4*y.a[i0-1]+y.a[i0-2];
zu = 3*z.a[i0]-4*z.a[i0-1]+z.a[i0-2];
}
else
{
au = a[i0+1]-a[i0-1];
xu = x.a[i0+1]-x.a[i0-1];
yu = y.a[i0+1]-y.a[i0-1];
zu = z.a[i0+1]-z.a[i0-1];
}
if(j==0)
{
av = 3*a[i0]-4*a[i0+nx]+a[i0+2*nx];
xv = 3*x.a[i0]-4*x.a[i0+nx]+x.a[i0+2*nx];
yv = 3*y.a[i0]-4*y.a[i0+nx]+y.a[i0+2*nx];
zv = 3*z.a[i0]-4*z.a[i0+nx]+z.a[i0+2*nx];
}
else if(j==ny-1)
{
av = 3*a[i0]-4*a[i0-nx]+a[i0-2*nx];
xv = 3*x.a[i0]-4*x.a[i0-nx]+x.a[i0-2*nx];
yv = 3*y.a[i0]-4*y.a[i0-nx]+y.a[i0-2*nx];
zv = 3*z.a[i0]-4*z.a[i0-nx]+z.a[i0-2*nx];
}
else
{
av = a[i0+nx]-a[i0-nx];
xv = x.a[i0+nx]-x.a[i0-nx];
yv = y.a[i0+nx]-y.a[i0-nx];
zv = z.a[i0+nx]-z.a[i0-nx];
}
if(k==0)
{
aw = 3*a[i0]-4*a[i0+nn]+a[i0+2*nn];
xw = 3*x.a[i0]-4*x.a[i0+nn]+x.a[i0+2*nn];
yw = 3*y.a[i0]-4*y.a[i0+nn]+y.a[i0+2*nn];
zw = 3*z.a[i0]-4*z.a[i0+nn]+z.a[i0+2*nn];
}
else if(k==nz-1)
{
aw = 3*a[i0]-4*a[i0-nn]+a[i0-2*nn];
xw = 3*x.a[i0]-4*x.a[i0-nn]+x.a[i0-2*nn];
yw = 3*y.a[i0]-4*y.a[i0-nn]+y.a[i0-2*nn];
zw = 3*z.a[i0]-4*z.a[i0-nn]+z.a[i0-2*nn];
}
else
{
aw = a[i0+nn]-a[i0-nn];
xw = x.a[i0+nn]-x.a[i0-nn];
yw = y.a[i0+nn]-y.a[i0-nn];
zw = z.a[i0+nn]-z.a[i0-nn];
}
b[i0] = (au*yv*zw-av*yu*zw-au*yw*zv+aw*yu*zv+av*yw*zu-aw*yv*zu) / (xu*yv*zw-xv*yu*zw-xu*yw*zv+xw*yu*zv+xv*yw*zu-xw*yv*zu);
}
delete []a; a=b;
}
//-----------------------------------------------------------------------------
How 3D Mapping Builds Engagement
Urbanization, climate change, and global supply chain disruptions pose significant challenges, and building resilience to these factors has become a critical priority for communities and businesses alike. One of the tools proving indispensable in this endeavor is 3D mapping technology. By combining spatial data with advanced visualization techniques, 3D mapping empowers organizations to better understand their surroundings, plan for potential risks, and engage stakeholders effectively.
Understanding the Power of 3D Mapping
Enhanced Situational Awareness: One of the key aspects of building resilience is having a clear understanding of the environment and potential threats. 3D mapping enables organizations to create highly detailed, three-dimensional models of their surroundings, providing a comprehensive view of the terrain, infrastructure, and natural features. This enhanced situational awareness is invaluable for disaster preparedness, response, and recovery efforts.
Resilience Planning: According to Esri, 3D mapping plays a pivotal role in resilience planning for urban and community development. By incorporating real-world data into 3D models, planners and policymakers can simulate various scenarios, such as floods, earthquakes, or climate change impacts. This allows them to identify vulnerabilities, develop mitigation strategies, and make informed decisions to enhance resilience.
Supply Chain Resilience: Supply chain resilience is crucial for the modern economy, and 3D mapping can be instrumental in optimizing and securing supply chains. By visualizing the entire supply chain in three dimensions, businesses can identify potential bottlenecks, vulnerabilities, and alternative routes, helping them adapt quickly to disruptions and ensure the continuous flow of goods.
Building Engagement through 3D Mapping
Stakeholder Involvement: Effective engagement with stakeholders is a cornerstone of building resilience. 3D mapping provides a powerful tool for involving communities, government agencies, and businesses in the planning and decision-making processes. By presenting data in a visually compelling way, it becomes easier for diverse stakeholders to grasp complex information and contribute their insights.
Public Awareness: 3D maps can be shared with the public through various channels, fostering awareness about resilience measures. Citizens are more likely to support initiatives that they understand, and interactive 3D models can make complex resilience concepts accessible to a wider audience. As a result, communities become more actively engaged in building their own resilience.
Collaborative Planning: Collaborative planning, as advocated by Esri, is greatly facilitated by 3D mapping. Different stakeholders can work together in a virtual environment, making it easier to share ideas, identify trade-offs, and develop consensus on resilience strategies. This collaborative approach strengthens the collective commitment to building resilience.
In an era marked by rapid changes and unforeseen challenges, the importance of building resilience cannot be overstated. 3D mapping technology, as demonstrated by GeoTel, Esri, and Brookings, is a versatile tool that empowers organizations and communities to enhance their resilience. By providing a clear understanding of the environment, supporting resilience planning, and fostering engagement among stakeholders, 3D mapping contributes to a more secure and adaptable future. As technology continues to advance, the role of 3D mapping in building resilience and engagement will only become more significant, helping us navigate the complexities of our ever-changing world.
Written By: Valerie Stephen
Data Intelligence
Chapter 76 – Generate the Object Feature Importance Using Scikit learn and Random Forest
The random forest algorithm has been applied across a number of industries, allowing them to make better business decisions. Some use cases include high credit risk analysis and product recommendation for cross-sell purposes.
In this piece, I will briefly walk you through several methods of generating feature importance using the classic red wine quality dataset. By the end of this chapter, you will have a basic idea of how to apply random forest to your own projects and how to compare the results across the different methods.
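The chapter's own code is not reproduced on this index page; as a rough sketch of the two usual approaches - impurity-based and permutation importance - the snippet below assumes the UCI red-wine-quality CSV and uses illustrative parameter choices of my own, not the author's:

# Sketch only: feature importance from a random forest, two ways.
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.inspection import permutation_importance

df = pd.read_csv("winequality-red.csv", sep=";")   # assumed file name and layout
X, y = df.drop(columns="quality"), df["quality"]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

model = RandomForestClassifier(n_estimators=300, random_state=42).fit(X_train, y_train)

# Method 1: impurity-based importance, built into the fitted forest
impurity = pd.Series(model.feature_importances_, index=X.columns).sort_values(ascending=False)

# Method 2: permutation importance, measured on held-out data
perm = permutation_importance(model, X_test, y_test, n_repeats=10, random_state=42)
permutation = pd.Series(perm.importances_mean, index=X.columns).sort_values(ascending=False)

print(impurity.head())
print(permutation.head())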
Python Robotic Process Automation – Def Functions, Import Custom Modules to Create a Multi-functional Bot
Robotic process automation, or RPA, is not only a technology but also a mindset: thinking about how to leverage bots that can understand what's on a screen, complete the right keystrokes, navigate systems, identify and extract data, and perform a wide range of defined actions. In Python, the def function is one of the key building blocks for this, and importing your own custom modules lets you assemble a multi-functional bot that completes a whole process end to end. That is what this article is about.
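As a rough illustration of that structure (all function and file names below are placeholders of mine, not the article's), each step a bot performs can live in its own def, collected in a custom module that the main bot script imports:

# Illustrative sketch only: in the article's setup these defs would live in a
# custom module (e.g. bot_steps.py) imported by the main bot script.
def extract_rows(path):
    """Read the raw rows the bot should process."""
    with open(path, encoding="utf-8") as f:
        return [line.strip().split(",") for line in f if line.strip()]

def transform(rows):
    """Normalize every cell before the bot acts on it."""
    return [[cell.strip().lower() for cell in row] for row in rows]

def run_bot(path):
    """Compose the steps into one multi-functional bot run."""
    for row in transform(extract_rows(path)):
        print("processed:", row)  # stand-in for the real keystroke/navigation step

if __name__ == "__main__":
    run_bot("input.csv")  # placeholder input file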
Chapter 75 – CRUD Notion Page Content Using Notion API & Python
ChatGPT has been getting a lot of buzz recently because it turns our life and work style upside down. Likewise, I am inclined to use the OpenAI GPT-3 and GPT-3.5 APIs, as they can automate much of my life and offload repetitive work that wastes time but is critical. Using them is like eating a piece of buttery, richly marbled Wagyu beef.
In fact, we never have only one option. Between an API and an AI chatbot, Notion AI, which uses Anthropic's Claude generative AI model, can give you a semi-automated AI experience with its AI writing and API capabilities. Although it's not as punchy as GPT-3, it impresses users with a clean, lean and straightforward experience.
In this piece, I will walk you through how to retrieve Notion AI content from a private Notion page, and how to update it with new content using the Notion API.
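The article's implementation is not shown here; as a sketch of the general shape, reading and appending page content with the public Notion REST API could look roughly like the following - the token, page ID and Notion-Version value are placeholders, and the payload format should be checked against Notion's current documentation:

# Sketch only: retrieve a Notion page's blocks, then append a new paragraph.
import requests

NOTION_TOKEN = "secret_xxx"          # placeholder
PAGE_ID = "your-page-id"             # placeholder
HEADERS = {
    "Authorization": f"Bearer {NOTION_TOKEN}",
    "Notion-Version": "2022-06-28",  # assumed API version
    "Content-Type": "application/json",
}

# Read: a page's content is exposed as its child blocks.
blocks = requests.get(
    f"https://api.notion.com/v1/blocks/{PAGE_ID}/children", headers=HEADERS
).json()
for block in blocks.get("results", []):
    print(block.get("type"))

# Update: append a paragraph block to the same page.
payload = {
    "children": [{
        "object": "block",
        "type": "paragraph",
        "paragraph": {"rich_text": [{"type": "text", "text": {"content": "Hello from the API"}}]},
    }]
}
requests.patch(
    f"https://api.notion.com/v1/blocks/{PAGE_ID}/children", headers=HEADERS, json=payload
)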
Google Sheets ImportXML – Automatically Scrape Web and Collect Product Price Info
I am always on the lookout for a unique angle to use freely available or potentially scrapable data sources. It is also genuinely frustrating to spend hours upon hours learning Python to write simple web scraper applications and automate the scraping, only to discover at the end that the data isn't accessible, interesting, or differentiated enough from what's already out there.
If you just want to automate updating the profit calculator of your eCommerce business, thankfully there is an easier way to collect data from the web without spending that many hours: the Google Sheets IMPORTXML function.
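For reference, IMPORTXML takes a URL and an XPath query, for example =IMPORTXML("https://example.com/product/123", "//span[@class='price']") with a placeholder URL and XPath. For comparison, a rough Python equivalent of that single call - which you would then have to host and schedule yourself - is only a few lines:

# Rough Python equivalent of =IMPORTXML(url, xpath) for a one-off price check.
import requests
from lxml import html

url = "https://example.com/product/123"          # placeholder
xpath = '//span[@class="price"]/text()'          # placeholder

tree = html.fromstring(requests.get(url, timeout=10).content)
prices = tree.xpath(xpath)
print(prices[0].strip() if prices else "price not found")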
JSON vs YAML, Which Data Serialization Is Better?
No developer or programmer - or even marketer - will find the JSON data type unfamiliar. It's one of the most popular and useful data serialization languages. In fact, there is an alternative called YAML, and anyone familiar with the Google Ads API will recognize it. In this Python knowledge hub, I will explain their respective pros and cons, and how you can better leverage them as a developer and marketer.
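A quick way to see the two formats side by side is to round-trip the same structure through Python's built-in json module and PyYAML (an extra install); the sample data below is made up:

import json
import yaml  # pip install pyyaml

campaign = {"name": "spring_sale", "daily_budget": 50.0, "keywords": ["shoes", "boots"]}

as_json = json.dumps(campaign, indent=2)
as_yaml = yaml.safe_dump(campaign, sort_keys=False)

print(as_json)   # braces, quotes, commas - strict and machine-friendly
print(as_yaml)   # indentation and dashes - lighter to read and hand-edit

# Both parse back to the same Python dict.
assert json.loads(as_json) == yaml.safe_load(as_yaml) == campaign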
Chapter 74 – Flask App Dynamic Sitemap XML Using MongoDB
It is a chore to build a sitemap and keep updating it manually, time after time. And paying a recurring monthly subscription for what is really a one-off, few-seconds job is not a smart decision either.
If you are looking for a better way - something like upgrading to a better-marbled cut of beef - this piece is for you. This article talks about developing a dynamic sitemap.xml for your Flask app. Let's go!
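The article's code is not included above; a minimal sketch of the idea - building sitemap.xml on the fly from URLs stored in MongoDB - might look like this, where the database, collection and field names are assumptions rather than the article's actual schema:

# Sketch: serve a dynamic sitemap.xml from a MongoDB collection of posts.
from flask import Flask, Response
from pymongo import MongoClient

app = Flask(__name__)
posts = MongoClient("mongodb://localhost:27017")["blog"]["posts"]  # assumed names

@app.route("/sitemap.xml")
def sitemap():
    urls = []
    for doc in posts.find({}, {"slug": 1, "updated_at": 1}):
        urls.append(
            "<url><loc>https://example.com/{slug}</loc><lastmod>{d}</lastmod></url>".format(
                slug=doc["slug"], d=doc["updated_at"].date().isoformat()
            )
        )
    xml = (
        '<?xml version="1.0" encoding="UTF-8"?>'
        '<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'
        + "".join(urls)
        + "</urlset>"
    )
    return Response(xml, mimetype="application/xml")

if __name__ == "__main__":
    app.run(debug=True)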
Chapter 72 – Build a Blog Content Generator Using OpenAI GPT3 and Easy2Digital API
ChatGPT has been in the spotlight recently, although OpenAI GPT-3 has been available since 2020. In a way, I feel this is related to external factors - the rising cost of capital and cost of debt tightening businesses' operational budgets now, and possibly for a while going forward. It's a good moment for AI: capital can always smell the project opportunities at hand.
In this article, I will walk through how to build a blog content generator using OpenAI GPT-3 and Easy2Digital APIs to automatically generate blog content in Google Sheets.
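The full walkthrough is not reproduced here; a heavily simplified sketch of the GPT-3 part, using the GPT-3-era Completions endpoint of the pre-1.0 openai Python package with an illustrative model name and prompt, might look like this (the Easy2Digital API calls and the Google Sheets wiring are not shown):

# Sketch only: generate a blog draft with the GPT-3-era Completions endpoint.
import openai

openai.api_key = "sk-..."  # placeholder

def draft_post(topic: str) -> str:
    resp = openai.Completion.create(
        model="text-davinci-003",          # illustrative GPT-3 model name
        prompt=f"Write a short blog introduction about {topic}.",
        max_tokens=300,
        temperature=0.7,
    )
    return resp["choices"][0]["text"].strip()

print(draft_post("dynamic sitemaps in Flask"))
# Writing the result into Google Sheets would sit on top of this and is not shown.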
You can remove USB devices from a virtual machine if the devices are no longer needed. When you disconnect a USB device from a virtual machine, the device is released from the virtual machine and is given back to the client computer, which starts using it.
To minimize the risk of data loss, follow the instructions to safely unmount or eject hardware for your operating system. Safely removing hardware allows any buffered data to be written out before the device is released. Windows operating systems typically include a "Remove Hardware" icon located in the system tray. Linux operating systems use the umount command.
Note
You might need to use the sync command instead of or in addition to the umount command, for example after you run a dd command on Linux or other UNIX operating systems.
1. Unmount or eject the USB device from the guest operating system.
2. Select the virtual machine in the vSphere Client inventory.
3. Click USB Connections on the virtual machine toolbar.
4. Select the device to remove from the drop-down menu.
For example, select USB Device 1 > Disconnect from device name.
The menu shows the device status as Disconnecting.
The device reconnects to the client computer and is available to add to another virtual machine. In some cases, Windows Explorer detects the device and opens a dialog box on the client computer. You can close this dialog box.
001/*
002 * (C) Copyright 2015 Nuxeo SA (http://nuxeo.com/) and others.
003 *
004 * Licensed under the Apache License, Version 2.0 (the "License");
005 * you may not use this file except in compliance with the License.
006 * You may obtain a copy of the License at
007 *
008 * http://www.apache.org/licenses/LICENSE-2.0
009 *
010 * Unless required by applicable law or agreed to in writing, software
011 * distributed under the License is distributed on an "AS IS" BASIS,
012 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
013 * See the License for the specific language governing permissions and
014 * limitations under the License.
015 *
016 * Contributors:
017 * Nicolas Chapurlat <[email protected]>
018 */
019
020package org.nuxeo.ecm.platform.usermanager.io;
021
022import static org.nuxeo.ecm.core.io.registry.reflect.Instantiations.SINGLETON;
023import static org.nuxeo.ecm.core.io.registry.reflect.Priorities.REFERENCE;
024import static org.nuxeo.ecm.platform.usermanager.io.NuxeoGroupJsonWriter.ENTITY_TYPE;
025
026import java.io.IOException;
027import java.util.ArrayList;
028import java.util.Iterator;
029import java.util.List;
030
031import javax.inject.Inject;
032
033import org.codehaus.jackson.JsonNode;
034import org.nuxeo.ecm.core.api.NuxeoGroup;
035import org.nuxeo.ecm.core.api.impl.NuxeoGroupImpl;
036import org.nuxeo.ecm.core.io.marshallers.json.EntityJsonReader;
037import org.nuxeo.ecm.core.io.registry.reflect.Setup;
038import org.nuxeo.ecm.platform.usermanager.UserManager;
039
040/**
041 * Convert Json as {@link NuxeoGroup}.
042 * <p>
043 * Format is (any additional json property is ignored):
044 *
045 * <pre>
046 * {
047 * "entity-type":"group",
048 * "groupname": "GROUP_NAME",
049 * "grouplabel": "GROUP_DISPLAY_NAME",
050 * "memberUsers": [
051 * "USERNAME1",
052 * "USERNAME2",
053 * ...
054 * ],
055 * "memberGroups": [
056 * "GROUPNAME1",
057 * "GROUPNAME2",
058 * ...
059 * ]
060 * }
061 * </pre>
062 *
063 * </p>
064 *
065 * @since 7.2
066 */
067@Setup(mode = SINGLETON, priority = REFERENCE)
068public class NuxeoGroupJsonReader extends EntityJsonReader<NuxeoGroup> {
069
070 @Inject
071 private UserManager userManager;
072
073 public NuxeoGroupJsonReader() {
074 super(ENTITY_TYPE);
075 }
076
077 @Override
078 protected NuxeoGroup readEntity(JsonNode jn) throws IOException {
079 NuxeoGroup group = null;
080 String id = getStringField(jn, "groupname");
081 if (id != null) {
082 group = userManager.getGroup(id);
083 }
084 if (group == null) {
085 group = new NuxeoGroupImpl(id);
086 }
087 String label = getStringField(jn, "grouplabel");
088 group.setLabel(label);
089 List<String> users = getArrayStringValues(jn.get("memberUsers"));
090 group.setMemberUsers(users);
091 List<String> groups = getArrayStringValues(jn.get("memberGroups"));
092 group.setMemberGroups(groups);
093 return group;
094 }
095
096 private List<String> getArrayStringValues(JsonNode node) {
097 List<String> values = new ArrayList<>();
098 if (node != null && !node.isNull() && node.isArray()) {
099 JsonNode elNode = null;
100 Iterator<JsonNode> it = node.getElements();
101 while (it.hasNext()) {
102 elNode = it.next();
103 if (elNode != null && !elNode.isNull() && elNode.isTextual()) {
104 values.add(elNode.getTextValue());
105 }
106 }
107 }
108 return values;
109 }
110
111}
Which NVMe technology features matter for data centers?
NVM Express Markets
NVMe technology has become the go-to storage protocol for today's data centers, which previously lacked full support for it. The addition of NVMe-oF technology makes NVMe solutions easier to implement than ever across the data center ecosystem. The NVMe-oF protocol allows optimal performance for both applications and the network when accessing NVMe storage over a network. By allowing the NVMe protocol to run over a switched fabric, NVMe-oF reduces the bottlenecks and latency created by older storage fabric protocols. The NVM Express organization recently supported the addition of the NVMe/TCP transport to the NVMe-oF family, making NVMe-oF even more flexible than before. This was in response to data center hyperscaler requests, driven by their scale-out architecture choices.
Upcoming specification updates in NVMe 1.4 and NVMe-oF 1.1 also take data center hyperscaler requirements into account. New features in NVMe 1.4 specification such as I/O determinism break up each drive into multiple different devices, allowing multiple I/O workloads to independently access the drive, reducing long-tail latency and improving Quality of Service (QoS). NVMe technology data center benefits:
• Delivery of faster overall access to data
• Lowering of power consumption
• Reduced latency
• Higher Input/Output Operations (IOPS)
pve-docs-mediawiki-import: import new pve-disk-health-monitoring page
[pve-docs.git] / debian / tree / pve-docs-mediawiki / pve-docs-mediawiki-import
1 #!/usr/bin/perl
2
3 use strict;
4 use warnings;
5 use Data::Dumper;
6
7 use IO::File;
8 use File::Basename;
9 use MediaWiki::API;
10
11 my $config_fn = "/root/.pve-docs"; # format 'username:pw'
12
13 my $fh = IO::File->new("$config_fn") ||
14 die "Please configure the mediawiki user/passswd in '$config_fn'\n";
15
16 my $api_url = "http://localhost/mediawiki/api.php";
17
18 my $config = <$fh>;
19 chomp $config;
20
21 my ($username, $passwd) = split(':', $config, 2);
22
23 my $mw = MediaWiki::API->new();
24 $mw->{config}->{api_url} = $api_url;
25
26 # log in to the wiki
27 $mw->login({ lgname => $username, lgpassword => $passwd })
28 || die $mw->{error}->{code} . ': ' . $mw->{error}->{details};
29
30 sub update_page {
31 my ($pagename, $include, $category) = @_;
32
33 print "update mediawiki page: $pagename\n";
34
35 my $ref = $mw->get_page( { title => $pagename } );
36 my $page = $ref->{'*'} || '';
37
38 if ($page !~ m/^\{\{#pvedocs:.*\}\}\s*$/m) {
39 $page = "{{#pvedocs:$include}}\n$page";
40 } else {
41 $page =~ s/^\{\{#pvedocs:.*\}\}\s*$/\{\{#pvedocs:$include\}\}\n/m;
42 }
43
44 if ($category) {
45 my $catstr = "Category:$category";
46
47 if ($page !~ m/^\[\[$catstr\]\]\s*$/m) {
48 $page .= "\n[[$catstr]]\n";
49 }
50 }
51
52 my $timestamp = $ref->{timestamp};
53 my $wcmd = {
54 action => 'edit',
55 title => $pagename,
56 basetimestamp => $timestamp, # to avoid edit conflicts
57 text => $page,
58 };
59
60 $mw->edit($wcmd) ||
61 die $mw->{error}->{code} . ': ' . $mw->{error}->{details};
62 }
63
64 my $cat_refdoc = "Reference Documentation";
65
66 my $docs = {
67 'chapter-ha-manager-plain.html' => {
68 title => "High Availability",
69 category => $cat_refdoc,
70 },
71 'chapter-pve-installation-plain.html' => {
72 title => "Installation",
73 category => $cat_refdoc,
74 },
75 'section-pve-usbstick-plain.html' => {
76 title => "Install from USB Stick",
77 category => $cat_refdoc,
78 },
79 'section-pve-system-requirements-plain.html' => {
80 title => "System Requirements",
81 category => $cat_refdoc,
82 },
83 'section-getting-help-plain.html' => {
84 title => "Getting Help",
85 category => $cat_refdoc,
86 },
87 'sysadmin-pve-network-plain.html' => {
88 title => "Network Configuration",
89 category => $cat_refdoc,
90 },
91 'sysadmin-pve-disk-health-monitoring-plain.html' => {
92 title => "Disk Health Monitoring",
93 category => $cat_refdoc,
94 },
95 'sysadmin-local-lvm-plain.html' => {
96 title => "Logical Volume Manager (LVM)",
97 category => $cat_refdoc,
98 },
99 'sysadmin-pve-package-repos-plain.html' => {
100 title => "Package Repositories",
101 category => $cat_refdoc,
102 },
103 'sysadmin-system-software-updates-plain.html' => {
104 title => "System Software Updates",
105 category => $cat_refdoc,
106 },
107 'chapter-sysadmin-plain.html' => {
108 title => "Host System Administration",
109 category => $cat_refdoc,
110 },
111 'chapter-pct-plain.html' => {
112 title => "Linux Container",
113 category => $cat_refdoc,
114 },
115 'chapter-pmxcfs-plain.html' => {
116 title => "Proxmox Cluster File System (pmxcfs)",
117 category => $cat_refdoc,
118 },
119 'chapter-pve-bibliography-plain.html' => {
120 title => "Bibliography",
121 category => $cat_refdoc,
122 },
123 'chapter-pvecm-plain.html' => {
124 title => "Cluster Manager",
125 category => $cat_refdoc,
126 },
127 'chapter-pve-faq-plain.html' => {
128 title => "FAQ",
129 category => $cat_refdoc,
130 },
131 'chapter-pve-firewall-plain.html' => {
132 title => "Firewall",
133 category => $cat_refdoc,
134 },
135 'chapter-pvesm-plain.html' => {
136 title => "Storage",
137 category => $cat_refdoc,
138 },
139 'chapter-pveum-plain.html' => {
140 title => "User Management",
141 category => $cat_refdoc,
142 },
143 'chapter-qm-plain.html' => {
144 title => "Qemu/KVM Virtual Machines",
145 category => $cat_refdoc,
146 },
147 'chapter-vzdump-plain.html' => {
148 title => "Backup and Restore",
149 category => $cat_refdoc,
150 },
151 'qm.conf.5-plain.html' => {
152 title => "Manual: vm.conf",
153 category => $cat_refdoc,
154 },
155 'pct.conf.5-plain.html' => {
156 title => "Manual: pct.conf",
157 category => $cat_refdoc,
158 },
159 'datacenter.cfg.5-plain.html' => {
160 title => "Manual: datacenter.cfg",
161 category => $cat_refdoc,
162 },
163 # Storage Plugins
164 'pve-storage-dir-plain.html' => {
165 title => "Storage: Directory",
166 category => $cat_refdoc,
167 },
168 'pve-storage-glusterfs-plain.html' => {
169 title => "Storage: GlusterFS",
170 category => $cat_refdoc,
171 },
172 'pve-storage-iscsidirect-plain.html' => {
173 title => "Storage: User Mode iSCSI",
174 category => $cat_refdoc,
175 },
176 'pve-storage-iscsi-plain.html' => {
177 title => "Storage: iSCSI",
178 category => $cat_refdoc,
179 },
180 'pve-storage-lvm-plain.html' => {
181 title => "Storage: LVM",
182 category => $cat_refdoc,
183 },
184 'pve-storage-lvmthin-plain.html' => {
185 title => "Storage: LVM Thin",
186 category => $cat_refdoc,
187 },
188 'pve-storage-nfs-plain.html' => {
189 title => "Storage: NFS",
190 category => $cat_refdoc,
191 },
192 'pve-storage-rbd-plain.html' => {
193 title => "Storage: RBD",
194 category => $cat_refdoc,
195 },
196 'pve-storage-zfspool-plain.html' => {
197 title => "Storage: ZFS",
198 category => $cat_refdoc,
199 },
200 };
201
202 #update_page("testpage1", $filename, $d->{category});
203
204 foreach my $filename (keys %$docs) {
205 my $path = "/usr/share/pve-docs/$filename";
206 die "no such file '$path'" if ! -f $path;
207 my $d = $docs->{$filename};
208 update_page($d->{title}, $filename, $d->{category});
209 }
210
211 # also update 'Get support' page, because this is used since a long
212 # time and is referenced from outside
213 update_page("Get support", 'sysadmin-getting-help-plain.html', 'HOWTO');
How to import an Excel document into the application - Web version
Define new categories and locations using your own Excel tables that you can import into the application.
1. Click on the three-point icon that appears when you hover your mouse over your project.
2. Click Settings.
3. It is only possible to import categories or premises. Click the Import icon.
Categories
To import a list of categories from an Excel file into APROPLAN, please organise your categories as explained below:
• Column A: Category code
• Column B: Name of category
• Column C: Subcategory code
• Column D: Subcategory name
• Column E: Subject
• Column F: Description
Premises
To import a list of rooms from an Excel file into APROPLAN, please organise your rooms as explained below:
• Column A: Level 1 code
• Column B: Level 1 description
• Column C: Level 2 code
• Column D: Level 2 description
1. Click Browse and select your document.
2. Click Import.
⚠️️⚠️ The file name cannot contain any of the following characters: @ + % ° ( ) { } § & # \ / : * ? " < > |
Make sure all lines are completed. The premises or categories in the Excel file will be added to the existing premises in APROPLAN.
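If you prefer to generate the import file programmatically instead of by hand, a small pandas script can lay the columns out exactly as described above; the sample values are invented, and whether the first row should contain headers is not stated above, so adjust the header argument accordingly:

# Build an APROPLAN-style categories import sheet with columns A-F as described.
import pandas as pd

categories = pd.DataFrame(
    [
        ["ARC", "Architecture", "ARC-01", "Walls", "Crack in plaster", "Hairline crack to repair"],
        ["ELE", "Electrical", "ELE-02", "Lighting", "Missing fixture", "Fixture not installed"],
    ],
    columns=["Category code", "Category name", "Subcategory code",
             "Subcategory name", "Subject", "Description"],
)

# header=False keeps the values starting in row 1, column A.
categories.to_excel("categories_import.xlsx", index=False, header=False)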
wiki:MemoryCache
Overview
The MemoryCache module is part of a bigger mechanism responsible for page loading, called the loader. You can learn more about the loader from this article (https://www.webkit.org/blog/427/webkit-page-cache-i-the-basics/). This article focuses only on MemoryCache and its internal components, without the interaction with the loader.
A simple description of what MemoryCache is and what it is for can be found in the MemoryCache.h header:
This cache holds subresources used by Web pages: images, scripts, stylesheets, etc. The cache keeps a flexible but bounded window of dead resources that grows/shrinks depending on the live resource load. Here's an example of cache growth over time, with a min dead resource capacity of 25% and a max dead resource capacity of 50%:
|-----| Dead: -
|----------| Live: +
--|----------| Cache boundary: | (objects outside this mark have been evicted)
--|----------++++++++++|
-------|-----+++++++++++++++|
-------|-----+++++++++++++++|+++++
The behavior of the cache changes in the following way if shouldMakeResourcePurgeableOnEviction returns true.
1. Dead resources in the cache are kept in non-purgeable memory.
2. When we prune dead resources, instead of freeing them, we mark their memory as purgeable and keep the resources until the kernel reclaims the purgeable memory.
By leaving the in-cache dead resources in dirty resident memory, we decrease the likelihood of the kernel claiming that memory and forcing us to refetch the resource (for example when a user presses back).
And by having an unbounded number of resource objects using purgeable memory, we can use as much memory as it is available on the machine. The trade-off here is that the CachedResource object (and its member variables) are allocated in non-purgeable TC-malloc'd memory so we would see slightly more memory use due to this.
What exactly does it mean for memory to be purgeable or non-purgeable? Well, some operating systems allow memory to be marked as purgeable. It means that while it is marked, its content may disappear (the system reclaims it) at any point in time (until it gets unmarked) without informing the application. The application only finds out whether the memory was purged when it asks the system for the resource again (this may be implemented differently on different systems). This has serious consequences for object lifecycle management:
• this kind of memory must not hold objects that rely on their destructor (or on destructors of related objects, by inheritance or ownership) being run
• ownership of, or references to, this memory should never be passed to modules that would not treat it as a "special" object
• well-known and widely used techniques for managing object lifetime (like smart pointers) do not apply to objects stored in purgeable memory
Now, after you know about consequences of storing objects in purgeable memory let's take a look on how the cache module is constructed.
MemoryCache class
This class is designed to store objects (web resources) and manage their lifecycle. Stored resources may be live or dead. A resource is dead when there are no references to it from web pages, and conversely a resource is live when there is at least one reference to it from a web page.
A MemoryCache client can influence RAM usage through three values:
• min dead bytes - minimum size of dead resources when purging
• max dead bytes - maximum size of dead resources when purging
• total capacity bytes - sum of live and dead resources capacity sizes
Don't panic. Total capacity does not set an upper bound on the size of the resources that WebKit will keep in memory, so there will never be a situation where only half of your favourite page gets loaded. It is only a threshold used to decide when to prune resources. But what exactly does it mean to prune? First, let's explain the meaning of live and dead resources. When a resource is downloaded, it is stored in memory as raw data (it can be an image, CSS, JavaScript etc.). Such data needs to be decoded (parsed) to make it usable, so when the resource is decoded (which implicitly means that some CachedResourceClient needs it) it becomes a live resource. It becomes dead after the last CachedResourceClient stops using it. Either way, live or dead resources may hold decoded data that (depending on external factors) unnecessarily occupies memory. Therefore the prune() method is provided. In the first step MemoryCache prunes dead resources (to meet the minimum and maximum dead-resource size boundaries) and then live resources one by one until the total size of all resources no longer exceeds the total capacity.
The prune procedure provides one more mechanism to reduce memory consumption. At the beginning of this article I mentioned a quaint technique - purgeable memory. CachedResource has the ability to move its stored raw data to a special buffer called PurgeableBuffer. Once the data lives in such a buffer, the operating system may take over (purge) the memory allocated for it. So if a resource's buffer was purged, the prune procedure will also remove all CachedResource data related to that buffer - this is called eviction. Eviction also removes the resource from MemoryCache's internal structures, and after that the only way to restore the lost data is to fetch it again based on the related ResourceRequest.
LRU lists
MemoryCache keeps references to resources in three different structures:
• m_resources - a URL-based map of all resources kept in cache
• m_allResources - a vector of LRU lists with a fixed size (32). Based on the CachedResource access counter, a resource is placed in the appropriate list. The prune procedure starts evicting from the least recently used resources.
• m_liveDecodedResources - live resources with decoded data. If the stored resources significantly exceed the total capacity, MemoryCache removes decoded data resource by resource until the capacity constraints are met or there is no redundant data left to release. (A toy sketch of this dead-first pruning idea follows below.)
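WebKit's implementation is C++, but the shape of the structure is easy to sketch. The toy Python below keeps a URL-keyed map in LRU order with dead/live bookkeeping and prunes dead entries first; it is purely illustrative and simplifies the policies, so nothing here mirrors the real code:

# Toy illustration of the MemoryCache idea: evict dead (unreferenced) resources first.
from collections import OrderedDict

class ToyMemoryCache:
    def __init__(self, total_capacity, max_dead):
        self.total_capacity = total_capacity
        self.max_dead = max_dead
        self.resources = OrderedDict()   # url -> [size, live], least recently used first

    def add(self, url, size, live=True):
        self.resources[url] = [size, live]
        self.resources.move_to_end(url)  # most recently used goes to the end
        self.prune()

    def mark_dead(self, url):
        self.resources[url][1] = False   # no page references this resource any more

    def _dead_size(self):
        return sum(size for size, live in self.resources.values() if not live)

    def _total_size(self):
        return sum(size for size, _ in self.resources.values())

    def prune(self):
        # Step 1: evict least recently used dead resources above the dead budget.
        for url in list(self.resources):
            if self._dead_size() <= self.max_dead:
                break
            if not self.resources[url][1]:
                del self.resources[url]
        # Step 2: if still over total capacity, evict from the LRU end regardless.
        for url in list(self.resources):
            if self._total_size() <= self.total_capacity:
                break
            del self.resources[url]

# e.g. cache = ToyMemoryCache(total_capacity=100, max_dead=25); cache.add("a.css", 30)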
tridkb.dll
Process name: Trident Video Driver
Application using this process: Trident Video Driver
What is tridkb.dll doing on my computer?
Trident Display Driver This process is still being reviewed. If you have some information about it feel free to send us an email at pl[at]uniblue[dot]net
Non-system processes like tridkb.dll originate from software you installed on your system. Since most applications store data in your system's registry, it is likely that your registry has suffered fragmentation and accumulated invalid entries which can affect your PC's performance. It is recommended that you check your registry to identify slowdown issues.
Is tridkb.dll harmful?
tridkb.dll has not been assigned a security rating yet. Check your computer for viruses or other malware infected files.
tridkb.dll is unrated
Can I stop or remove tridkb.dll?
Most non-system processes that are running can be stopped because they are not involved in running your operating system. Scan your system now to identify unused processes that are using up valuable resources. tridkb.dll is used by 'Trident Video Driver'. This is an application created by 'Unknown'. To stop tridkb.dll permanently, uninstall 'Trident Video Driver' from your system. Uninstalling applications can leave invalid registry entries that accumulate over time. Run a free scan to find out how to optimize software and system performance.
Is tridkb.dll CPU intensive?
This process is not considered CPU intensive. However, running too many processes on your system may affect your PC’s performance. To reduce system overload, you can use the Microsoft System Configuration Utility to manually find and disable processes that launch upon start-up. Alternatively, download SpeedUpMyPC to automatically scan and identify any unused processes.
Why is tridkb.dll giving me errors?
Process-related issues are usually caused by problems encountered by the application that runs the process. The safest way to stop these errors is to uninstall the application and run a system scan to identify any unused processes and services, as well as any invalid registry entries that have accumulated over time.
Xamarin Android with MVVMLight: Only the original thread that created a view hierarchy can touch its views
Tags: Android, C#
Published: 2017/3/4 6:24:52
I get this error every time I update a ViewModel property that is bound in the activity. I know the error is about updating the UI from a thread other than the UI thread, but I can't control how MVVMLight performs the binding...
Activity:
private readonly List<Binding> _bindings = new List<Binding>();
public PersonViewModel ViewModel
{
get
{
return App.Locator.Detail;
}
}
private TextView _txtName;
public TextView TxtName
{
get
{
return _txtName ??
(_txtName = FindViewById<TextView>(Resource.Id.txtName));
}
}
protected async override void OnCreate(Bundle savedInstanceState)
{
base.OnCreate(savedInstanceState);
SetContentView(Resource.Layout.Detail);
_bindings.Add(this.SetBinding(
() => ViewModel.Person.Name,
() => TxtName.Text));
await Task.Factory.StartNew(() => { ViewModel.LoadDetailCommand.Execute(null); });
}
ViewModel:
private IDataService _dataService;
private PersonObj _person;
public PersonObj Person
{
get
{
return _person;
}
set
{
Set(ref _person, value);
}
}
private RelayCommand _loadDetailCommand;
public RelayCommand LoadDetailCommand
{
get
{
return _loadDetailCommand
?? (_loadDetailCommand = new RelayCommand(
async () =>
{
await LoadDataAsync();
}));
}
}
public PersonViewModel(IDataService dataService)
{
_dataService = dataService;
}
private async Task LoadDataAsync()
{
try
{
Person= await _dataService.GetPerson(0);
}
catch (Exception ex)
{
// Exception here!!
}
}
Solution 1:
OK, I handled it by creating the Task in the ViewModel where needed and awaiting it, so I can catch all the exceptions in one place:
Person = await Task.Run(async () =>
{
var p = await _dataService.GetPerson(0);
return p;
});
Thanks to @xleon for the help.
Conditional Statements
Within PL/SQL, conditional statements can be used to determine what action to take based on other actions or on data retrieved. The main statements for this are the IF and CASE statements.
Syntax
IF <condition>
THEN
  <statements>
ELSE
  <statements>
END IF;
CASE [ expression ]
WHEN <value or condition> THEN <statements>
WHEN <value or condition> THEN <statements>
ELSE <statements>
END CASE;
Example
IF v_employee_surname = 'Smith'
THEN
  v_smith_count := v_smith_count + 1;
ELSE
  v_other_count := v_other_count + 1;
END IF;
CASE
WHEN v_employee_surname = 'Smith'
THEN v_smith_count := v_smith_count + 1;
ELSE v_other_count := v_other_count + 1;
END CASE;
Difference between Method Overloading and Overriding in Java?
Method Overloading vs Method Overriding
Though the name of the method remains the same in both method overloading and overriding, the main difference is that overloading is resolved at compile time, while overriding is resolved at runtime. The rules for overloading and overriding a method are also different in Java. For example, a private, static or final method cannot be overridden in Java, but you can still overload it. For overriding, both the name and the signature of the method must remain the same, while for overloading the signature must be different. Last but not least, calls to overloaded methods are resolved using static binding, while calls to an overridden method are resolved using dynamic binding in Java.
By the way, method overloading and method overriding are two important concepts in Java that allow a Java programmer to declare methods with the same name but different behavior. Both are based on polymorphism in Java.
In the case of method overloading, methods with the same name co-exist in the same class but must have different method signatures, while in the case of method overriding, a method with the same name is declared in a derived class or subclass. Method overloading is resolved using static binding at compile time, while method overriding is resolved using dynamic binding at runtime.
In short, when you overload a method in Java its method signature changes, while in the case of overriding the signature remains the same and the method can only be overridden in a subclass. Since Java supports polymorphism and resolves objects at runtime, it is able to call the overridden method.
By the way, the difference between method overloading and overriding is also a popular Java design question that appears at almost all levels of Java interviews.
What is method overloading and overriding in Java?
In this Java tutorial we will see how Java allows you to create two methods with the same name by using method overloading and method overriding. We will also touch on how methods are bound or called by the compiler and the Java Virtual Machine, and finally we will answer the popular interview question about the difference between method overloading and method overriding in Java.
This article is part of my series of Java interview articles, e.g. Difference between Synchronized Collection and Concurrent Collection or How to Stop Thread in Java. Please let me know if you have other interview questions for which you are looking for an answer or explanation; here on Javarevisited we will try to find and discuss them.
How to Overload a Method in Java
If you have two methods with the same name but different method signatures in one Java class, they are called overloaded methods. Generally an overloaded method in Java has a different set of arguments in order to perform something based on a different number of inputs.
You can also overload a constructor in Java, as we will see in the following example. Binding of an overloaded method occurs at compile time, and overloaded calls are resolved using static binding. To overload a Java method, just change its signature.
Just remember that in order to change the signature you need to change either the number of arguments, the type of arguments, or the order of arguments (if they are of different types). Since the return type is not part of the method signature, simply changing the return type results in a duplicate method and you will get a compile-time error in Java.
In our example of the Loan and PersonalLoan classes, the createLoan method is overloaded: there are two createLoan() methods, one taking a single argument (lender) while the other takes two arguments (lender and interestRate). Remember that you can overload static methods in Java, and you can also overload private and final methods, but you cannot override them.
How to Override a Method in Java
In order to override a Java method, you need to create a child class which extends the parent. An overriding method shares the same name as the original method but can only be declared in a subclass. The original method has to be defined in an interface or base class, which can be abstract as well. When you override a method in Java its signature remains exactly the same, including the return type. The JVM resolves the correct overridden method based on the object at runtime by using dynamic binding. For example, in our case, when we call the personalLoan.toString() method, even though the personalLoan variable is of type Loan, the method actually called is the one from the PersonalLoan class, because the object referenced by the personalLoan variable is of type PersonalLoan. This is a very useful technique for modifying the behavior of a method based on different implementations. The equals(), hashCode() and compareTo() methods are classic examples of overridden methods in Java.
Another important point is that you cannot override a static method in Java, because static methods are associated with the class rather than an object and are resolved and bound at compile time; that is also the reason you cannot override the main method. Similarly, private and final methods cannot be overridden in Java. By the way, as an overriding best practice, always use the @Override annotation when overriding a method from an abstract class or interface.
Rules of Method Overriding in Java
The following are the rules of method overriding in Java, which must be followed when overriding any method. As stated earlier, private, static and final methods cannot be overridden.
1. The method signature must be the same, including the return type (or, from Java 5 onwards, a covariant subtype of it, as discussed below), the number of parameters, the type of the parameters and their order.
2. The overriding method cannot throw a broader checked exception than the original (overridden) method. This means that if the original method throws IOException, the overriding method cannot throw a superclass of IOException such as Exception, but it can throw any subclass of IOException or simply not throw any exception. This rule only applies to checked exceptions; the overriding method is free to throw any unchecked exception.
3. The overriding method cannot reduce the accessibility of the overridden method, which means that if the original (overridden) method is public, the overriding method cannot make it protected.
Difference between Method Overloading vs Overriding in Java
Overloading vs overriding in Java is a popular interview question at many companies, asked of programmers at different levels. Here are some important differences between overloading and overriding. Though it is more important to understand how to use both of them, these differences are useful from an interview perspective and give some basic ideas as well:
1) The first and most important difference is that in method overloading the signature of the method changes, while in method overriding it remains the same.
2) You can overload a method within one class, but overriding can only be done in a subclass.
3) You cannot override static, final or private methods in Java, but you can overload them.
4) Overloaded methods are bound using static binding, while overridden methods are subject to dynamic binding.
5) Private and final methods also cannot be overridden in Java.
By the way, you might have heard that "a picture is worth a thousand words", and the following image from the original post (captioned "Difference between Overloading and Overriding in Java") summarizes the difference between method overloading and overriding at a glance.
Handling Exception while overloading and overriding method in Java
An overriding method can only throw the checked exceptions declared by the overridden method, or any subclass of them. This means that if the overridden method throws IOException, the overriding method can throw subclasses of IOException such as FileNotFoundException, but not a wider exception such as Exception or Throwable. This restriction applies only to checked exceptions; for runtime exceptions you can throw any RuntimeException. Overloaded methods in Java don't have such restrictions, and you are free to modify the throws clause as needed.
Method Overloading and Overriding Example in Java
Here is an example of both method overloading and method overriding in Java. In order to explain the concepts, we have created two classes, Loan and PersonalLoan. The createLoan() method is overloaded, as it has different versions with different signatures, while the toString() method, originally declared in the Object class, is overridden in both the Loan and PersonalLoan classes.
public class OverloadingOverridingTest {
public static void main(String[] args) {
// Example of method overloading in Java
Loan cheapLoan = Loan.createLoan("HSBC");
Loan veryCheapLoan = Loan.createLoan("Citibank", 8.5);
// Example of method overriding in Java
Loan personalLoan = new PersonalLoan();
personalLoan.toString();
}
}
public class Loan {
private double interestRate;
private String customer;
private String lender;
public static Loan createLoan(String lender) {
Loan loan = new Loan();
loan.lender = lender;
return loan;
}
public static Loan createLoan(String lender, double interestRate) {
Loan loan = new Loan();
loan.lender = lender;
loan.interestRate = interestRate;
return loan;
}
@Override
public String toString() {
return "This is Loan by Citibank";
}
}
public class PersonalLoan extends Loan {
@Override
public String toString() {
return "This is Personal Loan by Citibank";
}
}
Things to Remember
1) In method overloading the method signature changes, while in overriding the signature remains the same.
2) Return type is not part of the method signature in Java.
3) An overloaded method is subject to compile-time binding, but an overridden method can only be bound at runtime.
4) Both overloaded and overridden methods have the same name in Java.
5) Static methods cannot be overridden in Java.
6) Since a private method is not visible outside of its class, it cannot be overridden and its binding happens at compile time.
7) From Java 5 onwards you can use annotations to mark an overriding method, as we did with @Override. The @Override annotation allows the compiler and IDEs like NetBeans and Eclipse to verify whether the method really overrides a superclass method or not.
Covariant Method Overriding in Java
One of my readers, Rajeev, made an interesting comment about a change related to the return type of an overriding method from Java 5 onwards, which allows using a subtype of the return type of the overridden method. This is really useful when the original method returns a general type like java.lang.Object. If you are overriding the clone() method in Java, you can use this feature to return the actual type instead of java.lang.Object, saving the caller from type-casting the cloned object. Here is the actual comment from Rajeev:
Hi Javin,I visit your blog regularly and I found that you missed covariant return which is added in Java 5 in the case of method overriding. When a subclass wants to change the method implementation of an inherited method (an override), the subclass must define a method that matches the inherited version exactly. Or, as of Java 5, you're allowed to change the return type in the overriding method as long as the new return type is a subtype of the declared return type of the overridden (super class) method. Let's look at a covariant return in action:
class Alpha {
Alpha doStuff(char c) {
return new Alpha();
}
}
class Beta extends Alpha {
Beta doStuff(char c) { // legal override in Java 1.5
return new Beta();
}
}
You can see that the Beta class, which overrides the doStuff() method from the Alpha class, returns the Beta type and not the Alpha type. This removes type casting on the client side. See here to learn more about covariant method overriding in Java.
As I said, one good example of this is overriding the clone() method and using the actual type as the return type instead of java.lang.Object, which is also suggested by Joshua Bloch in Effective Java. This is in fact one of the Java best practices for implementing the clone() method. By the way, don't forget to follow Java overloading best practices when doing it in your project.
Further Reading
Java Fundamentals, Part 1 and 2
Java Programming Interview Exposed
Cracking the Code Interview - 189 questions and solutions
16 comments :
Anonymous said...
what about the exceptions while overriding and overloading
Javin @ spring interview questions answers said...
Good point, I have added one section regarding exception handling for overloading and overriding methods in java. thanks for pointing this.
Rajeev said...
Hi Javin,I visit your blog regularly and I found that you missed covariant return which is added in Java5 in the case of method overriding.
When a subclass wants to change the method implementation of an inherited method (an override), the subclass must define a method that matches the inherited
version exactly. Or, as of Java 5, you're allowed to change the return type in the
overriding method as long as the new return type is a subtype of the declared return
type of the overridden (superclass) method.
Let's look at a covariant return in action:
class Alpha {
Alpha doStuff(char c) {
return new Alpha();
}
}
class Beta extends Alpha {
Beta doStuff(char c) { // legal override in Java 1.5
return new Beta();
}
}
Anonymous said...
to add on what you have described, overloading is done in two steps, first JVM finds all the methods which are applicable for a given type of argument and than in second step it pick the most specific method. for exmaple if you have two method one which takes Object clas as parameter and other takes String class as parameter like below
public void show(Object ob){}
public void show(String str){}
and you call show(null) than since both show() method which are overloaded are applicable but Java will pick the most specific one which is the String version because String is more specific than Object.
Cheers
Anonymous said...
Overriding method cannot assign weaker access privileges:
protected in parent
must be kept
public/protected in subclass
Anonymous said...
Most simple difference between overloading and overriding in Java is former is a compile time activity while later is a runtime activity. Overloading is fast , overriding is slow. Overloading uses static binding, overriding uses dynamic binding. private, static and final method can be overloaded but can not be overridden.
Sunil said...
@Rajiv, Thanks for informing about covariant return type, which allows to change return type of overriding method in Java 1.5, does you can do this only on methods which has Object as return type or its also possible to override a method which has return type long with overriding method has return type primitive int ?
Anonymous said...
What is method overloading in Java ?
- multiple method with same name
Why do you need overloading in Java
-because you have method which does same job but with different operands e.g. size() method finding size of Array, Collection and string
When do you use method overloading in Java ?
same as above
Anonymous said...
Example of method overloading and method overriding is not clear to me ? Can you please explain why you overloaded createLoan() method ? Its better you put another example of method overloading and overriding which makes more sense and easy to understand.
shweta said...
i m able to override public static method
class TestB{
public static void mytest()
{
System.out.println("ok in B");
}
}
public class TestA extends TestB{
public static void mytest()
{
System.out.println("ok in A");
}
public static void main(String args[])
{
mytest();
}
}
Vishal said...
Why people compare method overloading to method overriding ? I thing both are completely different operations except similarity in name. Comparing overloading vs overriding is like comparing Orange to apples.
Vivek Hingorani said...
@Shweta: Its not overriding as you havent created any instance and directly calling mytest() from class TestA so it will print "Ok in A" as overriding doesnt come into picture at all..
@Javin: Do let me know if i am wrong :-)
kinjal shah said...
I was asked in an interview if we can overload public static void main(String[] args) method?
ans: Yes we can.
chetan said...
overloading.....method name remains the same only changes will be in the type or number of arguments
eg: public void testmethod()
public void testmethod(int a,int b)
overriding .....method name and argument remains the same only the implementation will change
for overriding to happen
1>inheritance should happen
eg:
public class test1{
public void test(){
system.out.println("running test method in class test1");
}
}
public class test2 extends test1{
public void test(){
system.out.println("running test method in class test2");
}
}
Anonymous said...
Hi Javin, There was a question asked to me in one of the Interviews is- why Overriding method can not throw a broader exception.
Anonymous said...
I have been asked twice in interviews that I have attended - What is good about Java and What is Bad about Java ? Could you please throw some light on this ?
OfflineAudioContext
The OfflineAudioContext interface is an AudioContext interface representing an audio-processing graph built from linked AudioNodes. In contrast to a standard AudioContext, an OfflineAudioContext doesn't render the audio to the device hardware; instead, it generates it, as fast as it can, and outputs the result to an AudioBuffer.
EventTarget BaseAudioContext (en-US) OfflineAudioContext
Constructor
OfflineAudioContext.OfflineAudioContext()
Creates a new OfflineAudioContext instance.
Properties
Also inherits properties from its parent interface, BaseAudioContext.
OfflineAudioContext.length Read only
An integer representing the size of the buffer, in sample-frames.
Event handlers
OfflineAudioContext.oncomplete
An event handler called when processing is terminated, that is, when the complete event - of type OfflineAudioCompletionEvent - is raised, after the event-based version of OfflineAudioContext.startRendering() is used.
Methods
Also inherits methods from its parent interface, BaseAudioContext.
OfflineAudioContext.resume()
Resumes the progression of time in an audio context that has previously been suspended, returning a Promise.
OfflineAudioContext.suspend()
Schedules a suspension of the time progression in the audio context at the specified time, and returns a Promise.
OfflineAudioContext.startRendering()
Starts rendering the audio, taking into account the current connections and the currently scheduled changes. This page covers both the event-based version and the promise-based version.
Example
In this example, we declare both an AudioContext and an OfflineAudioContext object. We use the AudioContext to load an audio track via XHR (AudioContext.decodeAudioData), then the OfflineAudioContext to render the audio into an AudioBufferSourceNode and play the track. After the offline audio graph is set up, we render it to an AudioBuffer using OfflineAudioContext.startRendering().
When the startRendering() promise resolves, rendering has completed and the output AudioBuffer is returned from the promise.
At this point we create another audio context, create an AudioBufferSourceNode inside it, and set its buffer to the AudioBuffer returned by the promise. This is then played as part of a simple standard audio graph.
Note: For a working example, see our offline-audio-context-promise GitHub repo (see the source code too).
// define the online and offline audio contexts
var audioCtx = new AudioContext();
var offlineCtx = new OfflineAudioContext(2,44100*40,44100);
source = offlineCtx.createBufferSource();
// use XHR to load an audio track, and
// decodeAudioData to decode it and OfflineAudioContext to render it
function getData() {
request = new XMLHttpRequest();
request.open('GET', 'viper.ogg', true);
request.responseType = 'arraybuffer';
request.onload = function() {
var audioData = request.response;
audioCtx.decodeAudioData(audioData, function(buffer) {
myBuffer = buffer;
source.buffer = myBuffer;
source.connect(offlineCtx.destination);
source.start();
//source.loop = true;
offlineCtx.startRendering().then(function(renderedBuffer) {
console.log('Rendering completed successfully');
var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var song = audioCtx.createBufferSource();
song.buffer = renderedBuffer;
song.connect(audioCtx.destination);
play.onclick = function() {
song.start();
}
}).catch(function(err) {
console.log('Rendering failed: ' + err);
// Note: The promise should reject when startRendering is called a second time on an OfflineAudioContext
});
});
}
request.send();
}
// Run getData to start the process off
getData();
Specifications
Specification Status Comment
Web Audio API
The definition of 'OfflineAudioContext' in that specification.
Recommendation Initial definition
Browser compatibility
Contents of /code/trunk/doc/pcrepattern.3
Revision 643
Fri Jul 29 15:56:39 2011 UTC (3 years, 2 months ago) by ph10
File size: 115395 byte(s)
Allow all characters except closing parens in MARK:NAME etc.
1 nigel 79 .TH PCREPATTERN 3
2 nigel 63 .SH NAME
3 PCRE - Perl-compatible regular expressions
4 nigel 75 .SH "PCRE REGULAR EXPRESSION DETAILS"
5 nigel 63 .rs
6 .sp
7 ph10 208 The syntax and semantics of the regular expressions that are supported by PCRE
8 are described in detail below. There is a quick-reference syntax summary in the
9 .\" HREF
10 \fBpcresyntax\fP
11 .\"
12 ph10 333 page. PCRE tries to match Perl syntax and semantics as closely as it can. PCRE
13 also supports some alternative regular expression syntax (which does not
14 conflict with the Perl syntax) in order to provide some compatibility with
15 regular expressions in Python, .NET, and Oniguruma.
16 .P
17 Perl's regular expressions are described in its own documentation, and
18 ph10 208 regular expressions in general are covered in a number of books, some of which
19 have copious examples. Jeffrey Friedl's "Mastering Regular Expressions",
20 published by O'Reilly, covers regular expressions in great detail. This
21 description of PCRE's regular expressions is intended as reference material.
22 nigel 75 .P
23 The original operation of PCRE was on strings of one-byte characters. However,
24 ph10 461 there is now also support for UTF-8 character strings. To use this,
25 ph10 456 PCRE must be built to include UTF-8 support, and you must call
26 \fBpcre_compile()\fP or \fBpcre_compile2()\fP with the PCRE_UTF8 option. There
27 is also a special sequence that can be given at the start of a pattern:
28 ph10 412 .sp
29 (*UTF8)
30 ph10 416 .sp
31 ph10 412 Starting a pattern with this sequence is equivalent to setting the PCRE_UTF8
32 option. This feature is not Perl-compatible. How setting UTF-8 mode affects
33 pattern matching is mentioned in several places below. There is also a summary
34 of UTF-8 features in the
35 nigel 63 .\" HTML <a href="pcre.html#utf8support">
36 .\" </a>
37 section on UTF-8 support
38 .\"
39 in the main
40 .\" HREF
41 nigel 75 \fBpcre\fP
42 nigel 63 .\"
43 page.
44 nigel 75 .P
45 ph10 535 Another special sequence that may appear at the start of a pattern or in
46 ph10 518 combination with (*UTF8) is:
47 .sp
48 (*UCP)
49 .sp
50 ph10 535 This has the same effect as setting the PCRE_UCP option: it causes sequences
51 such as \ed and \ew to use Unicode properties to determine character types,
52 instead of recognizing only characters with codes less than 128 via a lookup
53 ph10 518 table.
54 .P
55 ph10 576 If a pattern starts with (*NO_START_OPT), it has the same effect as setting the
56 ph10 579 PCRE_NO_START_OPTIMIZE option either at compile or matching time. There are
57 also some more of these special sequences that are concerned with the handling
58 ph10 576 of newlines; they are described below.
59 .P
60 nigel 77 The remainder of this document discusses the patterns that are supported by
61 PCRE when its main matching function, \fBpcre_exec()\fP, is used.
62 From release 6.0, PCRE offers a second matching function,
63 \fBpcre_dfa_exec()\fP, which matches using a different algorithm that is not
64 ph10 172 Perl-compatible. Some of the features discussed below are not available when
65 ph10 168 \fBpcre_dfa_exec()\fP is used. The advantages and disadvantages of the
66 alternative function, and how it differs from the normal function, are
67 discussed in the
68 nigel 77 .\" HREF
69 \fBpcrematching\fP
70 .\"
71 page.
72 nigel 93 .
73 .
74 ph10 556 .\" HTML <a name="newlines"></a>
75 ph10 227 .SH "NEWLINE CONVENTIONS"
76 .rs
77 .sp
78 PCRE supports five different conventions for indicating line breaks in
79 strings: a single CR (carriage return) character, a single LF (linefeed)
80 character, the two-character sequence CRLF, any of the three preceding, or any
81 Unicode newline sequence. The
82 .\" HREF
83 \fBpcreapi\fP
84 .\"
85 page has
86 .\" HTML <a href="pcreapi.html#newlines">
87 .\" </a>
88 further discussion
89 .\"
90 about newlines, and shows how to set the newline convention in the
91 \fIoptions\fP arguments for the compiling and matching functions.
92 .P
93 It is also possible to specify a newline convention by starting a pattern
94 string with one of the following five sequences:
95 .sp
96 (*CR) carriage return
97 (*LF) linefeed
98 (*CRLF) carriage return, followed by linefeed
99 (*ANYCRLF) any of the three above
100 (*ANY) all Unicode newline sequences
101 .sp
102 ph10 461 These override the default and the options given to \fBpcre_compile()\fP or
103 ph10 456 \fBpcre_compile2()\fP. For example, on a Unix system where LF is the default
104 newline sequence, the pattern
105 ph10 227 .sp
106 (*CR)a.b
107 .sp
108 changes the convention to CR. That pattern matches "a\enb" because LF is no
109 longer a newline. Note that these special settings, which are not
110 Perl-compatible, are recognized only at the very start of a pattern, and that
111 ph10 231 they must be in upper case. If more than one of them is present, the last one
112 is used.
113 .P
114 ph10 514 The newline convention affects the interpretation of the dot metacharacter when
115 PCRE_DOTALL is not set, and also the behaviour of \eN. However, it does not
116 affect what the \eR escape sequence matches. By default, this is any Unicode
117 newline sequence, for Perl compatibility. However, this can be changed; see the
118 description of \eR in the section entitled
119 ph10 231 .\" HTML <a href="#newlineseq">
120 .\" </a>
121 "Newline sequences"
122 .\"
123 ph10 247 below. A change of \eR setting can be combined with a change of newline
124 ph10 246 convention.
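.P
As an illustrative sketch (not a complete program; error handling is
omitted), the next fragment shows that selecting a newline convention with a
compile-time option and selecting it with one of the special sequences above
have the same effect:
.sp
  const char *error;
  int erroffset;
.sp
  /* Both of these give the pattern a CR-only newline convention. */
  pcre *re1 = pcre_compile("a.b", PCRE_NEWLINE_CR,
                           &error, &erroffset, NULL);
  pcre *re2 = pcre_compile("(*CR)a.b", 0,
                           &error, &erroffset, NULL);
.sp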
125 ph10 227 .
126 .
127 nigel 93 .SH "CHARACTERS AND METACHARACTERS"
128 .rs
129 .sp
130 nigel 63 A regular expression is a pattern that is matched against a subject string from
131 left to right. Most characters stand for themselves in a pattern, and match the
132 corresponding characters in the subject. As a trivial example, the pattern
133 nigel 75 .sp
134 nigel 63 The quick brown fox
135 nigel 75 .sp
136 nigel 77 matches a portion of a subject string that is identical to itself. When
137 caseless matching is specified (the PCRE_CASELESS option), letters are matched
138 independently of case. In UTF-8 mode, PCRE always understands the concept of
139 case for characters whose values are less than 128, so caseless matching is
140 always possible. For characters with higher values, the concept of case is
141 supported if PCRE is compiled with Unicode property support, but not otherwise.
142 If you want to use caseless matching for characters 128 and above, you must
143 ensure that PCRE is compiled with Unicode property support as well as with
144 UTF-8 support.
145 .P
146 The power of regular expressions comes from the ability to include alternatives
147 and repetitions in the pattern. These are encoded in the pattern by the use of
148 nigel 75 \fImetacharacters\fP, which do not stand for themselves but instead are
149 nigel 63 interpreted in some special way.
150 nigel 75 .P
151 There are two different sets of metacharacters: those that are recognized
152 nigel 63 anywhere in the pattern except within square brackets, and those that are
153 nigel 93 recognized within square brackets. Outside square brackets, the metacharacters
154 are as follows:
155 nigel 75 .sp
156 \e general escape character with several uses
157 nigel 63 ^ assert start of string (or line, in multiline mode)
158 $ assert end of string (or line, in multiline mode)
159 . match any character except newline (by default)
160 [ start character class definition
161 | start of alternative branch
162 ( start subpattern
163 ) end subpattern
164 ? extends the meaning of (
165 also 0 or 1 quantifier
166 also quantifier minimizer
167 * 0 or more quantifier
168 + 1 or more quantifier
169 also "possessive quantifier"
170 { start min/max quantifier
171 nigel 75 .sp
172 nigel 63 Part of a pattern that is in square brackets is called a "character class". In
173 nigel 75 a character class the only metacharacters are:
174 .sp
175 \e general escape character
176 nigel 63 ^ negate the class, but only if the first character
177 - indicates character range
178 nigel 75 .\" JOIN
179 nigel 63 [ POSIX character class (only if followed by POSIX
180 syntax)
181 ] terminates the character class
182 nigel 75 .sp
183 The following sections describe the use of each of the metacharacters.
184 .
185 nigel 93 .
186 nigel 63 .SH BACKSLASH
187 .rs
188 .sp
189 The backslash character has several uses. Firstly, if it is followed by a
190 ph10 574 character that is not a number or a letter, it takes away any special meaning
191 that character may have. This use of backslash as an escape character applies
192 ph10 579 both inside and outside character classes.
193 nigel 75 .P
194 For example, if you want to match a * character, you write \e* in the pattern.
195 nigel 63 This escaping action applies whether or not the following character would
196 nigel 75 otherwise be interpreted as a metacharacter, so it is always safe to precede a
197 non-alphanumeric with backslash to specify that it stands for itself. In
198 particular, if you want to match a backslash, you write \e\e.
199 .P
200 ph10 574 In UTF-8 mode, only ASCII numbers and letters have any special meaning after a
201 ph10 579 backslash. All other characters (in particular, those whose codepoints are
202 ph10 574 greater than 127) are treated as literals.
203 .P
204 nigel 63 If a pattern is compiled with the PCRE_EXTENDED option, whitespace in the
205 pattern (other than in a character class) and characters between a # outside
206 nigel 91 a character class and the next newline are ignored. An escaping backslash can
207 be used to include a whitespace or # character as part of the pattern.
208 nigel 75 .P
209 nigel 63 If you want to remove the special meaning from a sequence of characters, you
210 nigel 75 can do so by putting them between \eQ and \eE. This is different from Perl in
211 that $ and @ are handled as literals in \eQ...\eE sequences in PCRE, whereas in
212 nigel 63 Perl, $ and @ cause variable interpolation. Note the following examples:
213 nigel 75 .sp
214 nigel 63 Pattern PCRE matches Perl matches
215 nigel 75 .sp
216 .\" JOIN
217 \eQabc$xyz\eE abc$xyz abc followed by the
218 nigel 63 contents of $xyz
219 nigel 75 \eQabc\e$xyz\eE abc\e$xyz abc\e$xyz
220 \eQabc\eE\e$\eQxyz\eE abc$xyz abc$xyz
221 .sp
222 The \eQ...\eE sequence is recognized both inside and outside character classes.
223 ph10 607 An isolated \eE that is not preceded by \eQ is ignored. If \eQ is not followed
224 by \eE later in the pattern, the literal interpretation continues to the end of
225 the pattern (that is, \eE is assumed at the end). If the isolated \eQ is inside
226 a character class, this causes an error, because the character class is not
227 terminated.
228 nigel 75 .
229 .
230 .\" HTML <a name="digitsafterbackslash"></a>
231 .SS "Non-printing characters"
232 .rs
233 .sp
234 nigel 63 A second use of backslash provides a way of encoding non-printing characters
235 in patterns in a visible manner. There is no restriction on the appearance of
236 non-printing characters, apart from the binary zero that terminates a pattern,
237 ph10 456 but when a pattern is being prepared by text editing, it is often easier to use
238 one of the following escape sequences than the binary character it represents:
239 nigel 75 .sp
240 \ea alarm, that is, the BEL character (hex 07)
241 ph10 574 \ecx "control-x", where x is any ASCII character
242 nigel 75 \ee escape (hex 1B)
243 \ef formfeed (hex 0C)
244 ph10 227 \en linefeed (hex 0A)
245 nigel 75 \er carriage return (hex 0D)
246 \et tab (hex 09)
247 ph10 488 \eddd character with octal code ddd, or back reference
248 nigel 75 \exhh character with hex code hh
249 nigel 87 \ex{hhh..} character with hex code hhh..
250 nigel 75 .sp
251 The precise effect of \ecx is as follows: if x is a lower case letter, it
252 nigel 63 is converted to upper case. Then bit 6 of the character (hex 40) is inverted.
253 ph10 574 Thus \ecz becomes hex 1A (z is 7A), but \ec{ becomes hex 3B ({ is 7B), while
254 ph10 579 \ec; becomes hex 7B (; is 3B). If the byte following \ec has a value greater
255 than 127, a compile-time error occurs. This locks out non-ASCII characters in
256 both byte mode and UTF-8 mode. (When PCRE is compiled in EBCDIC mode, all byte
257 values are valid. A lower case letter is converted to upper case, and then the
258 ph10 574 0xc0 bits are flipped.)
259 nigel 75 .P
260 After \ex, from zero to two hexadecimal digits are read (letters can be in
261 nigel 87 upper or lower case). Any number of hexadecimal digits may appear between \ex{
262 and }, but the value of the character code must be less than 256 in non-UTF-8
263 ph10 211 mode, and less than 2**31 in UTF-8 mode. That is, the maximum value in
264 hexadecimal is 7FFFFFFF. Note that this is bigger than the largest Unicode code
265 point, which is 10FFFF.
266 nigel 75 .P
267 ph10 211 If characters other than hexadecimal digits appear between \ex{ and }, or if
268 there is no terminating }, this form of escape is not recognized. Instead, the
269 initial \ex will be interpreted as a basic hexadecimal escape, with no
270 following digits, giving a character whose value is zero.
271 .P
272 nigel 63 Characters whose value is less than 256 can be defined by either of the two
273 nigel 87 syntaxes for \ex. There is no difference in the way they are handled. For
274 example, \exdc is exactly the same as \ex{dc}.
275 nigel 75 .P
276 nigel 91 After \e0 up to two further octal digits are read. If there are fewer than two
277 digits, just those that are present are used. Thus the sequence \e0\ex\e07
278 specifies two binary zeros followed by a BEL character (code value 7). Make
279 sure you supply two digits after the initial zero if the pattern character that
280 follows is itself an octal digit.
281 nigel 75 .P
282 nigel 63 The handling of a backslash followed by a digit other than 0 is complicated.
283 Outside a character class, PCRE reads it and any following digits as a decimal
284 number. If the number is less than 10, or if there have been at least that many
285 previous capturing left parentheses in the expression, the entire sequence is
286 nigel 75 taken as a \fIback reference\fP. A description of how this works is given
287 .\" HTML <a href="#backreferences">
288 .\" </a>
289 later,
290 .\"
291 following the discussion of
292 .\" HTML <a href="#subpattern">
293 .\" </a>
294 parenthesized subpatterns.
295 .\"
296 .P
297 nigel 63 Inside a character class, or if the decimal number is greater than 9 and there
298 have not been that many capturing subpatterns, PCRE re-reads up to three octal
299 nigel 93 digits following the backslash, and uses them to generate a data character. Any
300 nigel 91 subsequent digits stand for themselves. In non-UTF-8 mode, the value of a
301 character specified in octal must be less than \e400. In UTF-8 mode, values up
302 to \e777 are permitted. For example:
303 nigel 75 .sp
304 \e040 is another way of writing a space
305 .\" JOIN
306 \e40 is the same, provided there are fewer than 40
307 nigel 63 previous capturing subpatterns
308 nigel 75 \e7 is always a back reference
309 .\" JOIN
310 \e11 might be a back reference, or another way of
311 nigel 63 writing a tab
312 nigel 75 \e011 is always a tab
313 \e0113 is a tab followed by the character "3"
314 .\" JOIN
315 \e113 might be a back reference, otherwise the
316 nigel 63 character with octal code 113
317 nigel 75 .\" JOIN
318 \e377 might be a back reference, otherwise
319 nigel 63 the byte consisting entirely of 1 bits
320 nigel 75 .\" JOIN
321 \e81 is either a back reference, or a binary zero
322 nigel 63 followed by the two characters "8" and "1"
323 nigel 75 .sp
324 nigel 63 Note that octal values of 100 or greater must not be introduced by a leading
325 zero, because no more than three octal digits are ever read.
326 nigel 75 .P
327 nigel 91 All the sequences that define a single character value can be used both inside
328 and outside character classes. In addition, inside a character class, the
329 ph10 513 sequence \eb is interpreted as the backspace character (hex 08). The sequences
330 ph10 514 \eB, \eN, \eR, and \eX are not special inside a character class. Like any other
331 ph10 513 unrecognized escape sequences, they are treated as the literal characters "B",
332 ph10 514 "N", "R", and "X" by default, but cause an error if the PCRE_EXTRA option is
333 set. Outside a character class, these sequences have different meanings.
334 nigel 75 .
335 .
336 nigel 93 .SS "Absolute and relative back references"
337 .rs
338 .sp
339 ph10 208 The sequence \eg followed by an unsigned or a negative number, optionally
340 enclosed in braces, is an absolute or relative back reference. A named back
341 reference can be coded as \eg{name}. Back references are discussed
342 nigel 93 .\" HTML <a href="#backreferences">
343 .\" </a>
344 later,
345 .\"
346 following the discussion of
347 .\" HTML <a href="#subpattern">
348 .\" </a>
349 parenthesized subpatterns.
350 .\"
351 .
352 .
353 ph10 333 .SS "Absolute and relative subroutine calls"
354 .rs
355 .sp
356 ph10 345 For compatibility with Oniguruma, the non-Perl syntax \eg followed by a name or
357 a number enclosed either in angle brackets or single quotes, is an alternative
358 syntax for referencing a subpattern as a "subroutine". Details are discussed
359 ph10 333 .\" HTML <a href="#onigurumasubroutines">
360 .\" </a>
361 later.
362 .\"
363 ph10 345 Note that \eg{...} (Perl syntax) and \eg<...> (Oniguruma syntax) are \fInot\fP
364 ph10 461 synonymous. The former is a back reference; the latter is a
365 ph10 454 .\" HTML <a href="#subpatternsassubroutines">
366 .\" </a>
367 subroutine
368 .\"
369 call.
370 ph10 333 .
371 .
372 ph10 518 .\" HTML <a name="genericchartypes"></a>
373 nigel 75 .SS "Generic character types"
374 .rs
375 .sp
376 ph10 514 Another use of backslash is for specifying generic character types:
377 nigel 75 .sp
378 ph10 182 \ed any decimal digit
379 nigel 75 \eD any character that is not a decimal digit
380 ph10 178 \eh any horizontal whitespace character
381 ph10 182 \eH any character that is not a horizontal whitespace character
382 nigel 75 \es any whitespace character
383 \eS any character that is not a whitespace character
384 ph10 178 \ev any vertical whitespace character
385 ph10 182 \eV any character that is not a vertical whitespace character
386 nigel 75 \ew any "word" character
387 \eW any "non-word" character
388 .sp
389 ph10 535 There is also the single sequence \eN, which matches a non-newline character.
390 This is the same as
391 ph10 514 .\" HTML <a href="#fullstopdot">
392 .\" </a>
393 ph10 535 the "." metacharacter
394 ph10 514 .\"
395 when PCRE_DOTALL is not set.
396 nigel 75 .P
397 ph10 514 Each pair of lower and upper case escape sequences partitions the complete set
398 of characters into two disjoint sets. Any given character matches one, and only
399 ph10 518 one, of each pair. The sequences can appear both inside and outside character
400 nigel 75 classes. They each match one character of the appropriate type. If the current
401 ph10 518 matching point is at the end of the subject string, all of them fail, because
402 nigel 75 there is no character to match.
403 .P
404 For compatibility with Perl, \es does not match the VT character (code 11).
405 This makes it different from the POSIX "space" class. The \es characters
406 ph10 178 are HT (9), LF (10), FF (12), CR (13), and space (32). If "use locale;" is
407 nigel 91 included in a Perl script, \es may match the VT character. In PCRE, it never
408 ph10 178 does.
409 nigel 75 .P
410 ph10 518 A "word" character is an underscore or any character that is a letter or digit.
411 By default, the definition of letters and digits is controlled by PCRE's
412 low-valued character tables, and may vary if locale-specific matching is taking
413 place (see
414 .\" HTML <a href="pcreapi.html#localesupport">
415 .\" </a>
416 "Locale support"
417 .\"
418 in the
419 .\" HREF
420 \fBpcreapi\fP
421 .\"
422 page). For example, in a French locale such as "fr_FR" in Unix-like systems,
423 or "french" in Windows, some character codes greater than 128 are used for
424 accented letters, and these are then matched by \ew. The use of locales with
425 Unicode is discouraged.
426 ph10 178 .P
427 ph10 518 By default, in UTF-8 mode, characters with values greater than 128 never match
428 \ed, \es, or \ew, and always match \eD, \eS, and \eW. These sequences retain
429 their original meanings from before UTF-8 support was available, mainly for
430 ph10 535 efficiency reasons. However, if PCRE is compiled with Unicode property support,
431 ph10 518 and the PCRE_UCP option is set, the behaviour is changed so that Unicode
432 properties are used to determine character types, as follows:
433 .sp
434 \ed any character that \ep{Nd} matches (decimal digit)
435 \es any character that \ep{Z} matches, plus HT, LF, FF, CR
436 \ew any character that \ep{L} or \ep{N} matches, plus underscore
437 .sp
438 The upper case escapes match the inverse sets of characters. Note that \ed
439 ph10 535 matches only decimal digits, whereas \ew matches any Unicode digit, as well as
440 ph10 518 any Unicode letter, and underscore. Note also that PCRE_UCP affects \eb, and
441 \eB because they are defined in terms of \ew and \eW. Matching these sequences
442 is noticeably slower when PCRE_UCP is set.
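.P
A short sketch in C (illustrative only; error handling omitted) may make the
effect of PCRE_UCP clearer. The subject below is the UTF-8 encoding of
"cafe" with an accented final letter (U+00E9):
.sp
  const char *error;
  int erroffset;
  int ovector[30];
  const char *subject = "caf\exc3\exa9";  /* 5 bytes of UTF-8 */
.sp
  pcre *re = pcre_compile("\e\ew+", PCRE_UTF8,
                          &error, &erroffset, NULL);
  (void)pcre_exec(re, NULL, subject, 5, 0, 0, ovector, 30);
  /* Without PCRE_UCP, ovector[1] is 3: \ew+ stops before the
     accented letter. Compiling with PCRE_UTF8|PCRE_UCP, or
     prefixing the pattern with (*UCP), gives 5 instead. */
.sp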
443 .P
444 ph10 579 The sequences \eh, \eH, \ev, and \eV are features that were added to Perl at
445 ph10 572 release 5.10. In contrast to the other sequences, which match only ASCII
446 characters by default, these always match certain high-valued codepoints in
447 UTF-8 mode, whether or not PCRE_UCP is set. The horizontal space characters
448 are:
449 ph10 178 .sp
450 U+0009 Horizontal tab
451 U+0020 Space
452 U+00A0 Non-break space
453 U+1680 Ogham space mark
454 U+180E Mongolian vowel separator
455 U+2000 En quad
456 U+2001 Em quad
457 U+2002 En space
458 U+2003 Em space
459 U+2004 Three-per-em space
460 U+2005 Four-per-em space
461 U+2006 Six-per-em space
462 U+2007 Figure space
463 U+2008 Punctuation space
464 U+2009 Thin space
465 U+200A Hair space
466 U+202F Narrow no-break space
467 U+205F Medium mathematical space
468 U+3000 Ideographic space
469 .sp
470 The vertical space characters are:
471 .sp
472 U+000A Linefeed
473 U+000B Vertical tab
474 U+000C Formfeed
475 U+000D Carriage return
476 U+0085 Next line
477 U+2028 Line separator
478 U+2029 Paragraph separator
479 nigel 75 .
480 .
481 ph10 231 .\" HTML <a name="newlineseq"></a>
482 nigel 93 .SS "Newline sequences"
483 .rs
484 .sp
485 ph10 231 Outside a character class, by default, the escape sequence \eR matches any
486 ph10 572 Unicode newline sequence. In non-UTF-8 mode \eR is equivalent to the following:
487 nigel 93 .sp
488 (?>\er\en|\en|\ex0b|\ef|\er|\ex85)
489 .sp
490 This is an example of an "atomic group", details of which are given
491 .\" HTML <a href="#atomicgroup">
492 .\" </a>
493 below.
494 .\"
495 This particular group matches either the two-character sequence CR followed by
496 LF, or one of the single characters LF (linefeed, U+000A), VT (vertical tab,
497 U+000B), FF (formfeed, U+000C), CR (carriage return, U+000D), or NEL (next
498 line, U+0085). The two-character sequence is treated as a single unit that
499 cannot be split.
500 .P
501 In UTF-8 mode, two additional characters whose codepoints are greater than 255
502 are added: LS (line separator, U+2028) and PS (paragraph separator, U+2029).
503 Unicode character property support is not needed for these characters to be
504 recognized.
505 .P
506 ph10 231 It is possible to restrict \eR to match only CR, LF, or CRLF (instead of the
507 complete set of Unicode line endings) by setting the option PCRE_BSR_ANYCRLF
508 ph10 247 either at compile time or when the pattern is matched. (BSR is an abbreviation
509 ph10 246 for "backslash R".) This can be made the default when PCRE is built; if this is
510 the case, the other behaviour can be requested via the PCRE_BSR_UNICODE option.
511 It is also possible to specify these settings by starting a pattern string with
512 one of the following sequences:
513 ph10 231 .sp
514 (*BSR_ANYCRLF) CR, LF, or CRLF only
515 (*BSR_UNICODE) any Unicode newline sequence
516 .sp
517 ph10 461 These override the default and the options given to \fBpcre_compile()\fP or
518 ph10 456 \fBpcre_compile2()\fP, but they can be overridden by options given to
519 \fBpcre_exec()\fP or \fBpcre_dfa_exec()\fP. Note that these special settings,
520 which are not Perl-compatible, are recognized only at the very start of a
521 pattern, and that they must be in upper case. If more than one of them is
522 present, the last one is used. They can be combined with a change of newline
523 ph10 518 convention; for example, a pattern can start with:
524 ph10 246 .sp
525 (*ANY)(*BSR_ANYCRLF)
526 .sp
527 ph10 518 They can also be combined with the (*UTF8) or (*UCP) special sequences. Inside
528 a character class, \eR is treated as an unrecognized escape sequence, and so
529 matches the letter "R" by default, but causes an error if PCRE_EXTRA is set.
530 nigel 93 .
531 .
532 nigel 75 .\" HTML <a name="uniextseq"></a>
533 .SS Unicode character properties
534 .rs
535 .sp
536 When PCRE is built with Unicode character property support, three additional
537 ph10 184 escape sequences that match characters with specific properties are available.
538 When not in UTF-8 mode, these sequences are of course limited to testing
539 characters whose codepoints are less than 256, but they do work in this mode.
540 The extra escape sequences are:
541 nigel 75 .sp
542 nigel 87 \ep{\fIxx\fP} a character with the \fIxx\fP property
543 \eP{\fIxx\fP} a character without the \fIxx\fP property
544 \eX an extended Unicode sequence
545 nigel 75 .sp
546 nigel 87 The property names represented by \fIxx\fP above are limited to the Unicode
547 ph10 517 script names, the general category properties, "Any", which matches any
548 character (including newline), and some special PCRE properties (described
549 ph10 535 in the
550 ph10 517 .\" HTML <a href="#extraprops">
551 .\" </a>
552 ph10 535 next section).
553 ph10 517 .\"
554 Other Perl properties such as "InMusicalSymbols" are not currently supported by
555 PCRE. Note that \eP{Any} does not match any characters, so always causes a
556 match failure.
557 nigel 75 .P
558 nigel 87 Sets of Unicode characters are defined as belonging to certain scripts. A
559 character from one of these sets can be matched using a script name. For
560 example:
561 nigel 75 .sp
562 nigel 87 \ep{Greek}
563 \eP{Han}
564 .sp
565 Those that are not part of an identified script are lumped together as
566 "Common". The current list of scripts is:
567 .P
568 Arabic,
569 Armenian,
570 ph10 491 Avestan,
571 nigel 93 Balinese,
572 ph10 491 Bamum,
573 nigel 87 Bengali,
574 Bopomofo,
575 Braille,
576 Buginese,
577 Buhid,
578 Canadian_Aboriginal,
579 ph10 491 Carian,
580 Cham,
581 nigel 87 Cherokee,
582 Common,
583 Coptic,
584 nigel 93 Cuneiform,
585 nigel 87 Cypriot,
586 Cyrillic,
587 Deseret,
588 Devanagari,
589 ph10 491 Egyptian_Hieroglyphs,
590 nigel 87 Ethiopic,
591 Georgian,
592 Glagolitic,
593 Gothic,
594 Greek,
595 Gujarati,
596 Gurmukhi,
597 Han,
598 Hangul,
599 Hanunoo,
600 Hebrew,
601 Hiragana,
602 ph10 491 Imperial_Aramaic,
603 nigel 87 Inherited,
604 ph10 491 Inscriptional_Pahlavi,
605 Inscriptional_Parthian,
606 Javanese,
607 Kaithi,
608 nigel 87 Kannada,
609 Katakana,
610 ph10 491 Kayah_Li,
611 nigel 87 Kharoshthi,
612 Khmer,
613 Lao,
614 Latin,
615 ph10 491 Lepcha,
616 nigel 87 Limbu,
617 Linear_B,
618 ph10 491 Lisu,
619 Lycian,
620 Lydian,
621 nigel 87 Malayalam,
622 ph10 491 Meetei_Mayek,
623 nigel 87 Mongolian,
624 Myanmar,
625 New_Tai_Lue,
626 nigel 93 Nko,
627 nigel 87 Ogham,
628 Old_Italic,
629 Old_Persian,
630 ph10 491 Old_South_Arabian,
631 Old_Turkic,
632 Ol_Chiki,
633 nigel 87 Oriya,
634 Osmanya,
635 nigel 93 Phags_Pa,
636 Phoenician,
637 ph10 491 Rejang,
638 nigel 87 Runic,
639 ph10 491 Samaritan,
640 Saurashtra,
641 nigel 87 Shavian,
642 Sinhala,
643 ph10 491 Sundanese,
644 nigel 87 Syloti_Nagri,
645 Syriac,
646 Tagalog,
647 Tagbanwa,
648 Tai_Le,
649 ph10 491 Tai_Tham,
650 Tai_Viet,
651 nigel 87 Tamil,
652 Telugu,
653 Thaana,
654 Thai,
655 Tibetan,
656 Tifinagh,
657 Ugaritic,
658 ph10 491 Vai,
659 nigel 87 Yi.
660 .P
661 ph10 517 Each character has exactly one Unicode general category property, specified by
662 a two-letter abbreviation. For compatibility with Perl, negation can be
663 specified by including a circumflex between the opening brace and the property
664 name. For example, \ep{^Lu} is the same as \eP{Lu}.
665 nigel 87 .P
666 If only one letter is specified with \ep or \eP, it includes all the general
667 category properties that start with that letter. In this case, in the absence
668 of negation, the curly brackets in the escape sequence are optional; these two
669 examples have the same effect:
670 .sp
671 nigel 75 \ep{L}
672 \epL
673 .sp
674 nigel 87 The following general category property codes are supported:
675 nigel 75 .sp
676 C Other
677 Cc Control
678 Cf Format
679 Cn Unassigned
680 Co Private use
681 Cs Surrogate
682 .sp
683 L Letter
684 Ll Lower case letter
685 Lm Modifier letter
686 Lo Other letter
687 Lt Title case letter
688 Lu Upper case letter
689 .sp
690 M Mark
691 Mc Spacing mark
692 Me Enclosing mark
693 Mn Non-spacing mark
694 .sp
695 N Number
696 Nd Decimal number
697 Nl Letter number
698 No Other number
699 .sp
700 P Punctuation
701 Pc Connector punctuation
702 Pd Dash punctuation
703 Pe Close punctuation
704 Pf Final punctuation
705 Pi Initial punctuation
706 Po Other punctuation
707 Ps Open punctuation
708 .sp
709 S Symbol
710 Sc Currency symbol
711 Sk Modifier symbol
712 Sm Mathematical symbol
713 So Other symbol
714 .sp
715 Z Separator
716 Zl Line separator
717 Zp Paragraph separator
718 Zs Space separator
719 .sp
720 nigel 87 The special property L& is also supported: it matches a character that has
721 the Lu, Ll, or Lt property, in other words, a letter that is not classified as
722 a modifier or "other".
723 nigel 75 .P
724 ph10 211 The Cs (Surrogate) property applies only to characters in the range U+D800 to
725 U+DFFF. Such characters are not valid in UTF-8 strings (see RFC 3629) and so
726 cannot be tested by PCRE, unless UTF-8 validity checking has been turned off
727 (see the discussion of PCRE_NO_UTF8_CHECK in the
728 .\" HREF
729 \fBpcreapi\fP
730 .\"
731 ph10 451 page). Perl does not support the Cs property.
732 ph10 211 .P
733 ph10 451 The long synonyms for property names that Perl supports (such as \ep{Letter})
734 nigel 91 are not supported by PCRE, nor is it permitted to prefix any of these
735 nigel 87 properties with "Is".
736 .P
737 No character that is in the Unicode table has the Cn (unassigned) property.
738 Instead, this property is assumed for any code point that is not in the
739 Unicode table.
740 .P
741 nigel 75 Specifying caseless matching does not affect these escape sequences. For
742 example, \ep{Lu} always matches only upper case letters.
743 .P
744 The \eX escape matches any number of Unicode characters that form an extended
745 Unicode sequence. \eX is equivalent to
746 .sp
747 (?>\ePM\epM*)
748 .sp
749 That is, it matches a character without the "mark" property, followed by zero
750 or more characters with the "mark" property, and treats the sequence as an
751 atomic group
752 .\" HTML <a href="#atomicgroup">
753 .\" </a>
754 (see below).
755 .\"
756 Characters with the "mark" property are typically accents that affect the
757 ph10 185 preceding character. None of them have codepoints less than 256, so in
758 ph10 184 non-UTF-8 mode \eX matches any one character.
759 nigel 75 .P
760 ph10 628 Note that recent versions of Perl have changed \eX to match what Unicode calls
761 an "extended grapheme cluster", which has a more complicated definition.
762 .P
763 nigel 75 Matching characters by Unicode property is not fast, because PCRE has to search
764 a structure that contains data for over fifteen thousand characters. That is
765 why the traditional escape sequences such as \ed and \ew do not use Unicode
766 ph10 535 properties in PCRE by default, though you can make them do so by setting the
767 ph10 518 PCRE_UCP option for \fBpcre_compile()\fP or by starting the pattern with
768 (*UCP).
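.P
The following fragment is a sketch only (error handling omitted; \fIsubject\fP
and \fIsubject_length\fP stand for a UTF-8 string and its length in bytes). A
property escape is used like any other pattern item; here \ep{Greek}+ matches
a run of Greek letters:
.sp
  const char *error;
  int erroffset;
  int ovector[30];
.sp
  pcre *re = pcre_compile("\e\ep{Greek}+", PCRE_UTF8,
                          &error, &erroffset, NULL);
  /* re is NULL, with a message in "error", if PCRE was built
     without Unicode property support. */
  int rc = pcre_exec(re, NULL, subject, subject_length,
                     0, 0, ovector, 30);
.sp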
769 nigel 75 .
770 .
771 ph10 517 .\" HTML <a name="extraprops"></a>
772 .SS PCRE's additional properties
773 .rs
774 .sp
775 ph10 535 As well as the standard Unicode properties described in the previous
776 section, PCRE supports four more that make it possible to convert traditional
777 ph10 517 escape sequences such as \ew and \es and POSIX character classes to use Unicode
778 ph10 518 properties. PCRE uses these non-standard, non-Perl properties internally when
779 PCRE_UCP is set. They are:
780 ph10 517 .sp
781 Xan Any alphanumeric character
782 Xps Any POSIX space character
783 Xsp Any Perl space character
784 Xwd Any Perl "word" character
785 .sp
786 ph10 535 Xan matches characters that have either the L (letter) or the N (number)
787 property. Xps matches the characters tab, linefeed, vertical tab, formfeed, or
788 ph10 517 carriage return, and any other character that has the Z (separator) property.
789 ph10 535 Xsp is the same as Xps, except that vertical tab is excluded. Xwd matches the
790 ph10 517 same characters as Xan, plus underscore.
791 .
792 .
793 ph10 168 .\" HTML <a name="resetmatchstart"></a>
794 .SS "Resetting the match start"
795 .rs
796 .sp
797 ph10 572 The escape sequence \eK causes any previously matched characters not to be
798 included in the final matched sequence. For example, the pattern:
799 ph10 168 .sp
800 foo\eKbar
801 .sp
802 ph10 172 matches "foobar", but reports that it has matched "bar". This feature is
803 ph10 168 similar to a lookbehind assertion
804 .\" HTML <a href="#lookbehind">
805 .\" </a>
806 (described below).
807 .\"
808 ph10 172 However, in this case, the part of the subject before the real match does not
809 have to be of fixed length, as lookbehind assertions do. The use of \eK does
810 ph10 168 not interfere with the setting of
811 .\" HTML <a href="#subpattern">
812 .\" </a>
813 captured substrings.
814 ph10 172 .\"
815 ph10 168 For example, when the pattern
816 .sp
817 (foo)\eKbar
818 .sp
819 ph10 172 matches "foobar", the first substring is still set to "foo".
820 ph10 500 .P
821 ph10 507 Perl documents that the use of \eK within assertions is "not well defined". In
822 PCRE, \eK is acted upon when it occurs inside positive assertions, but is
823 ph10 500 ignored in negative assertions.
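.P
In terms of the \fBpcre_exec()\fP interface, the effect of \eK shows up in the
offsets returned for the whole match. The fragment below is illustrative only
(error handling omitted):
.sp
  const char *error;
  int erroffset;
  int ovector[30];
.sp
  pcre *re = pcre_compile("foo\e\eKbar", 0, &error, &erroffset, NULL);
  int rc = pcre_exec(re, NULL, "foobar", 6, 0, 0, ovector, 30);
  /* rc is 1; ovector[0] is 3 and ovector[1] is 6, so the match
     reported to the caller is "bar", not "foobar". */
.sp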
824 ph10 168 .
825 .
826 nigel 75 .\" HTML <a name="smallassertions"></a>
827 .SS "Simple assertions"
828 .rs
829 .sp
830 nigel 93 The final use of backslash is for certain simple assertions. An assertion
831 nigel 63 specifies a condition that has to be met at a particular point in a match,
832 without consuming any characters from the subject string. The use of
833 nigel 75 subpatterns for more complicated assertions is described
834 .\" HTML <a href="#bigassertions">
835 .\" </a>
836 below.
837 .\"
838 nigel 91 The backslashed assertions are:
839 nigel 75 .sp
840 \eb matches at a word boundary
841 \eB matches when not at a word boundary
842 nigel 93 \eA matches at the start of the subject
843 \eZ matches at the end of the subject
844 also matches before a newline at the end of the subject
845 \ez matches only at the end of the subject
846 \eG matches at the first matching position in the subject
847 nigel 75 .sp
848 ph10 513 Inside a character class, \eb has a different meaning; it matches the backspace
849 ph10 535 character. If any other of these assertions appears in a character class, by
850 ph10 513 default it matches the corresponding literal character (for example, \eB
851 matches the letter B). However, if the PCRE_EXTRA option is set, an "invalid
852 escape sequence" error is generated instead.
853 nigel 75 .P
854 nigel 63 A word boundary is a position in the subject string where the current character
855 nigel 75 and the previous character do not both match \ew or \eW (i.e. one matches
856 \ew and the other matches \eW), or the start or end of the string if the
857 ph10 518 first or last character matches \ew, respectively. In UTF-8 mode, the meanings
858 of \ew and \eW can be changed by setting the PCRE_UCP option. When this is
859 done, it also affects \eb and \eB. Neither PCRE nor Perl has a separate "start
860 of word" or "end of word" metasequence. However, whatever follows \eb normally
861 determines which it is. For example, the fragment \eba matches "a" at the start
862 of a word.
863 nigel 75 .P
864 The \eA, \eZ, and \ez assertions differ from the traditional circumflex and
865 dollar (described in the next section) in that they only ever match at the very
866 start and end of the subject string, whatever options are set. Thus, they are
867 independent of multiline mode. These three assertions are not affected by the
868 PCRE_NOTBOL or PCRE_NOTEOL options, which affect only the behaviour of the
869 circumflex and dollar metacharacters. However, if the \fIstartoffset\fP
870 argument of \fBpcre_exec()\fP is non-zero, indicating that matching is to start
871 at a point other than the beginning of the subject, \eA can never match. The
872 nigel 91 difference between \eZ and \ez is that \eZ matches before a newline at the end
873 of the string as well as at the very end, whereas \ez matches only at the end.
874 nigel 75 .P
875 The \eG assertion is true only when the current matching position is at the
876 start point of the match, as specified by the \fIstartoffset\fP argument of
877 \fBpcre_exec()\fP. It differs from \eA when the value of \fIstartoffset\fP is
878 non-zero. By calling \fBpcre_exec()\fP multiple times with appropriate
879 nigel 63 arguments, you can mimic Perl's /g option, and it is in this kind of
880 nigel 75 implementation where \eG can be useful.
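.P
A minimal sketch of such a "global matching" loop is shown below. It is
illustrative only: error handling is omitted, \fIre\fP, \fIsubject\fP, and
\fIsubject_length\fP are assumed to have been set up already, and no attempt
is made to cope with patterns that can match an empty string (which need
extra care to avoid looping):
.sp
  int start = 0;
  int ovector[30];
.sp
  while (pcre_exec(re, NULL, subject, subject_length,
                   start, 0, ovector, 30) >= 0)
    {
    /* The match runs from ovector[0] to ovector[1]. */
    start = ovector[1];   /* resume after the previous match */
    }
.sp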
881 .P
882 Note, however, that PCRE's interpretation of \eG, as the start of the current
883 nigel 63 match, is subtly different from Perl's, which defines it as the end of the
884 previous match. In Perl, these can be different when the previously matched
885 string was empty. Because PCRE does just one match at a time, it cannot
886 reproduce this behaviour.
887 nigel 75 .P
888 If all the alternatives of a pattern begin with \eG, the expression is anchored
889 nigel 63 to the starting match position, and the "anchored" flag is set in the compiled
890 regular expression.
891 nigel 75 .
892 .
893 .SH "CIRCUMFLEX AND DOLLAR"
894 nigel 63 .rs
895 .sp
896 Outside a character class, in the default matching mode, the circumflex
897 nigel 75 character is an assertion that is true only if the current matching point is
898 at the start of the subject string. If the \fIstartoffset\fP argument of
899 \fBpcre_exec()\fP is non-zero, circumflex can never match if the PCRE_MULTILINE
900 nigel 63 option is unset. Inside a character class, circumflex has an entirely different
901 nigel 75 meaning
902 .\" HTML <a href="#characterclass">
903 .\" </a>
904 (see below).
905 .\"
906 .P
907 nigel 63 Circumflex need not be the first character of the pattern if a number of
908 alternatives are involved, but it should be the first thing in each alternative
909 in which it appears if the pattern is ever to match that branch. If all
910 possible alternatives start with a circumflex, that is, if the pattern is
911 constrained to match only at the start of the subject, it is said to be an
912 "anchored" pattern. (There are also other constructs that can cause a pattern
913 to be anchored.)
914 nigel 75 .P
915 A dollar character is an assertion that is true only if the current matching
916 nigel 63 point is at the end of the subject string, or immediately before a newline
917 nigel 91 at the end of the string (by default). Dollar need not be the last character of
918 the pattern if a number of alternatives are involved, but it should be the last
919 item in any branch in which it appears. Dollar has no special meaning in a
920 character class.
921 nigel 75 .P
922 nigel 63 The meaning of dollar can be changed so that it matches only at the very end of
923 the string, by setting the PCRE_DOLLAR_ENDONLY option at compile time. This
924 nigel 75 does not affect the \eZ assertion.
925 .P
926 nigel 63 The meanings of the circumflex and dollar characters are changed if the
927 nigel 91 PCRE_MULTILINE option is set. When this is the case, a circumflex matches
928 immediately after internal newlines as well as at the start of the subject
929 string. It does not match after a newline that ends the string. A dollar
930 matches before any newlines in the string, as well as at the very end, when
931 PCRE_MULTILINE is set. When newline is specified as the two-character
932 sequence CRLF, isolated CR and LF characters do not indicate newlines.
933 nigel 75 .P
934 nigel 91 For example, the pattern /^abc$/ matches the subject string "def\enabc" (where
935 \en represents a newline) in multiline mode, but not otherwise. Consequently,
936 patterns that are anchored in single line mode because all branches start with
937 ^ are not anchored in multiline mode, and a match for circumflex is possible
938 when the \fIstartoffset\fP argument of \fBpcre_exec()\fP is non-zero. The
939 PCRE_DOLLAR_ENDONLY option is ignored if PCRE_MULTILINE is set.
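.P
The difference can be seen directly through the API. In this illustrative
fragment (error handling omitted), the pattern matches only when
PCRE_MULTILINE is included in the compile options:
.sp
  const char *error;
  int erroffset;
  int ovector[30];
.sp
  pcre *re = pcre_compile("^abc$", PCRE_MULTILINE,
                          &error, &erroffset, NULL);
  int rc = pcre_exec(re, NULL, "def\enabc", 7, 0, 0, ovector, 30);
  /* rc is 1 and ovector[0] is 4: "abc" after the newline is
     matched. Without PCRE_MULTILINE, rc is PCRE_ERROR_NOMATCH. */
.sp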
940 .P
941 nigel 75 Note that the sequences \eA, \eZ, and \ez can be used to match the start and
942 nigel 63 end of the subject in both modes, and if all branches of a pattern start with
943 nigel 91 \eA it is always anchored, whether or not PCRE_MULTILINE is set.
944 nigel 75 .
945 .
946 ph10 514 .\" HTML <a name="fullstopdot"></a>
947 .SH "FULL STOP (PERIOD, DOT) AND \eN"
948 nigel 63 .rs
949 .sp
950 Outside a character class, a dot in the pattern matches any one character in
951 nigel 91 the subject string except (by default) a character that signifies the end of a
952 nigel 93 line. In UTF-8 mode, the matched character may be more than one byte long.
953 nigel 91 .P
954 nigel 93 When a line ending is defined as a single character, dot never matches that
955 character; when the two-character sequence CRLF is used, dot does not match CR
956 if it is immediately followed by LF, but otherwise it matches all characters
957 (including isolated CRs and LFs). When any Unicode line endings are being
958 recognized, dot does not match CR or LF or any of the other line ending
959 characters.
960 .P
961 nigel 91 The behaviour of dot with regard to newlines can be changed. If the PCRE_DOTALL
962 nigel 93 option is set, a dot matches any one character, without exception. If the
963 two-character sequence CRLF is present in the subject string, it takes two dots
964 to match it.
965 nigel 91 .P
966 The handling of dot is entirely independent of the handling of circumflex and
967 dollar, the only relationship being that they both involve newlines. Dot has no
968 special meaning in a character class.
969 ph10 514 .P
970 ph10 579 The escape sequence \eN behaves like a dot, except that it is not affected by
971 ph10 572 the PCRE_DOTALL option. In other words, it matches any character except one
972 that signifies the end of a line.
973 nigel 75 .
974 .
975 .SH "MATCHING A SINGLE BYTE"
976 nigel 63 .rs
977 .sp
978 nigel 75 Outside a character class, the escape sequence \eC matches any one byte, both
979 nigel 93 in and out of UTF-8 mode. Unlike a dot, it always matches any line-ending
980 characters. The feature is provided in Perl in order to match individual bytes
981 ph10 572 in UTF-8 mode. Because it breaks up UTF-8 characters into individual bytes, the
982 rest of the string may start with a malformed UTF-8 character. For this reason,
983 nigel 93 the \eC escape sequence is best avoided.
984 nigel 75 .P
985 PCRE does not allow \eC to appear in lookbehind assertions
986 .\" HTML <a href="#lookbehind">
987 .\" </a>
988 (described below),
989 .\"
990 because in UTF-8 mode this would make it impossible to calculate the length of
991 the lookbehind.
992 .
993 .
994 .\" HTML <a name="characterclass"></a>
995 .SH "SQUARE BRACKETS AND CHARACTER CLASSES"
996 nigel 63 .rs
997 .sp
998 An opening square bracket introduces a character class, terminated by a closing
999 ph10 461 square bracket. A closing square bracket on its own is not special by default.
1000 However, if the PCRE_JAVASCRIPT_COMPAT option is set, a lone closing square
1001 ph10 456 bracket causes a compile-time error. If a closing square bracket is required as
1002 a member of the class, it should be the first data character in the class
1003 (after an initial circumflex, if present) or escaped with a backslash.
1004 nigel 75 .P
1005 nigel 63 A character class matches a single character in the subject. In UTF-8 mode, the
1006 ph10 456 character may be more than one byte long. A matched character must be in the
1007 set of characters defined by the class, unless the first character in the class
1008 nigel 63 definition is a circumflex, in which case the subject character must not be in
1009 the set defined by the class. If a circumflex is actually required as a member
1010 of the class, ensure it is not the first character, or escape it with a
1011 backslash.
1012 nigel 75 .P
1013 nigel 63 For example, the character class [aeiou] matches any lower case vowel, while
1014 [^aeiou] matches any character that is not a lower case vowel. Note that a
1015 nigel 75 circumflex is just a convenient notation for specifying the characters that
1016 are in the class by enumerating those that are not. A class that starts with a
1017 ph10 456 circumflex is not an assertion; it still consumes a character from the subject
1018 nigel 75 string, and therefore it fails if the current pointer is at the end of the
1019 string.
1020 .P
1021 nigel 63 In UTF-8 mode, characters with values greater than 255 can be included in a
1022 nigel 75 class as a literal string of bytes, or by using the \ex{ escaping mechanism.
1023 .P
1024 nigel 63 When caseless matching is set, any letters in a class represent both their
1025 upper case and lower case versions, so for example, a caseless [aeiou] matches
1026 "A" as well as "a", and a caseless [^aeiou] does not match "A", whereas a
1027 nigel 77 caseful version would. In UTF-8 mode, PCRE always understands the concept of
1028 case for characters whose values are less than 128, so caseless matching is
1029 always possible. For characters with higher values, the concept of case is
1030 supported if PCRE is compiled with Unicode property support, but not otherwise.
1031 ph10 456 If you want to use caseless matching in UTF-8 mode for characters 128 and above,
1032 you must ensure that PCRE is compiled with Unicode property support as well as
1033 with UTF-8 support.
1034 nigel 75 .P
1035 nigel 93 Characters that might indicate line breaks are never treated in any special way
1036 when matching character classes, whatever line-ending sequence is in use, and
1037 whatever setting of the PCRE_DOTALL and PCRE_MULTILINE options is used. A class
1038 such as [^a] always matches one of these characters.
1039 nigel 75 .P
1040 nigel 63 The minus (hyphen) character can be used to specify a range of characters in a
1041 character class. For example, [d-m] matches any letter between d and m,
1042 inclusive. If a minus character is required in a class, it must be escaped with
1043 a backslash or appear in a position where it cannot be interpreted as
1044 indicating a range, typically as the first or last character in the class.
1045 nigel 75 .P
1046 nigel 63 It is not possible to have the literal character "]" as the end character of a
1047 range. A pattern such as [W-]46] is interpreted as a class of two characters
1048 ("W" and "-") followed by a literal string "46]", so it would match "W46]" or
1049 "-46]". However, if the "]" is escaped with a backslash it is interpreted as
1050 nigel 75 the end of range, so [W-\e]46] is interpreted as a class containing a range
1051 followed by two other characters. The octal or hexadecimal representation of
1052 "]" can also be used to end a range.
1053 .P
1054 nigel 63 Ranges operate in the collating sequence of character values. They can also be
1055 nigel 75 used for characters specified numerically, for example [\e000-\e037]. In UTF-8
1056 nigel 63 mode, ranges can include characters whose values are greater than 255, for
1057 nigel 75 example [\ex{100}-\ex{2ff}].
1058 .P
1059 nigel 63 If a range that includes letters is used when caseless matching is set, it
1060 matches the letters in either case. For example, [W-c] is equivalent to
1061 nigel 75 [][\e\e^_`wxyzabc], matched caselessly, and in non-UTF-8 mode, if character
1062 ph10 139 tables for a French locale are in use, [\exc8-\excb] matches accented E
1063 nigel 75 characters in both cases. In UTF-8 mode, PCRE supports the concept of case for
1064 characters with values greater than 128 only when it is compiled with Unicode
1065 property support.
1066 .P
1067 ph10 575 The character escape sequences \ed, \eD, \eh, \eH, \ep, \eP, \es, \eS, \ev,
1068 \eV, \ew, and \eW may appear in a character class, and add the characters that
1069 they match to the class. For example, [\edABCDEF] matches any hexadecimal
1070 digit. In UTF-8 mode, the PCRE_UCP option affects the meanings of \ed, \es, \ew
1071 and their upper case partners, just as it does when they appear outside a
1072 character class, as described in the section entitled
1073 .\" HTML <a href="#genericchartypes">
1074 .\" </a>
1075 "Generic character types"
1076 .\"
1077 above. The escape sequence \eb has a different meaning inside a character
1078 class; it matches the backspace character. The sequences \eB, \eN, \eR, and \eX
1079 are not special inside a character class. Like any other unrecognized escape
1080 sequences, they are treated as the literal characters "B", "N", "R", and "X" by
1081 default, but cause an error if the PCRE_EXTRA option is set.
1082 .P
1083 A circumflex can conveniently be used with the upper case character types to
1084 ph10 518 specify a more restricted set of characters than the matching lower case type.
1085 ph10 575 For example, the class [^\eW_] matches any letter or digit, but not underscore,
1086 ph10 579 whereas [\ew] includes underscore. A positive character class should be read as
1087 "something OR something OR ..." and a negative class as "NOT something AND NOT
1088 ph10 575 something AND NOT ...".
1089 nigel 75 .P
1090 The only metacharacters that are recognized in character classes are backslash,
1091 hyphen (only where it can be interpreted as specifying a range), circumflex
1092 (only at the start), opening square bracket (only when it can be interpreted as
1093 introducing a POSIX class name - see the next section), and the terminating
1094 closing square bracket. However, escaping other non-alphanumeric characters
1095 does no harm.
1096 .
1097 .
1098 .SH "POSIX CHARACTER CLASSES"
1099 nigel 63 .rs
1100 .sp
1101 nigel 75 Perl supports the POSIX notation for character classes. This uses names
1102 nigel 63 enclosed by [: and :] within the enclosing square brackets. PCRE also supports
1103 this notation. For example,
1104 nigel 75 .sp
1105 nigel 63 [01[:alpha:]%]
1106 nigel 75 .sp
1107 nigel 63 matches "0", "1", any alphabetic character, or "%". The supported class names
1108 ph10 518 are:
1109 nigel 75 .sp
1110 nigel 63 alnum letters and digits
1111 alpha letters
1112 ascii character codes 0 - 127
1113 blank space or tab only
1114 cntrl control characters
1115 nigel 75 digit decimal digits (same as \ed)
1116 nigel 63 graph printing characters, excluding space
1117 lower lower case letters
1118 print printing characters, including space
1119 ph10 518 punct printing characters, excluding letters and digits and space
1120 nigel 75 space white space (not quite the same as \es)
1121 nigel 63 upper upper case letters
1122 nigel 75 word "word" characters (same as \ew)
1123 nigel 63 xdigit hexadecimal digits
1124 nigel 75 .sp
1125 nigel 63 The "space" characters are HT (9), LF (10), VT (11), FF (12), CR (13), and
1126 space (32). Notice that this list includes the VT character (code 11). This
1127 nigel 75 makes "space" different to \es, which does not include VT (for Perl
1128 nigel 63 compatibility).
1129 nigel 75 .P
1130 nigel 63 The name "word" is a Perl extension, and "blank" is a GNU extension from Perl
1131 5.8. Another Perl extension is negation, which is indicated by a ^ character
1132 after the colon. For example,
1133 nigel 75 .sp
1134 nigel 63 [12[:^digit:]]
1135 nigel 75 .sp
1136 nigel 63 matches "1", "2", or any non-digit. PCRE (and Perl) also recognize the POSIX
1137 syntax [.ch.] and [=ch=] where "ch" is a "collating element", but these are not
1138 supported, and an error is given if they are encountered.
1139 nigel 75 .P
1140 ph10 518 By default, in UTF-8 mode, characters with values greater than 128 do not match
1141 any of the POSIX character classes. However, if the PCRE_UCP option is passed
1142 ph10 535 to \fBpcre_compile()\fP, some of the classes are changed so that Unicode
1143 character properties are used. This is achieved by replacing the POSIX classes
1144 ph10 518 by other sequences, as follows:
1145 .sp
1146 [:alnum:] becomes \ep{Xan}
1147 [:alpha:] becomes \ep{L}
1148 ph10 535 [:blank:] becomes \eh
1149 ph10 518 [:digit:] becomes \ep{Nd}
1150 [:lower:] becomes \ep{Ll}
1151 ph10 535 [:space:] becomes \ep{Xps}
1152 ph10 518 [:upper:] becomes \ep{Lu}
1153 [:word:] becomes \ep{Xwd}
1154 .sp
1155 Negated versions, such as [:^alpha:] use \eP instead of \ep. The other POSIX
1156 classes are unchanged, and match only characters with code points less than
1157 128.
1158 nigel 75 .
1159 .
1160 .SH "VERTICAL BAR"
1161 nigel 63 .rs
1162 .sp
1163 Vertical bar characters are used to separate alternative patterns. For example,
1164 the pattern
1165 nigel 75 .sp
1166 nigel 63 gilbert|sullivan
1167 nigel 75 .sp
1168 nigel 63 matches either "gilbert" or "sullivan". Any number of alternatives may appear,
1169 nigel 91 and an empty alternative is permitted (matching the empty string). The matching
1170 process tries each alternative in turn, from left to right, and the first one
1171 that succeeds is used. If the alternatives are within a subpattern
1172 nigel 75 .\" HTML <a href="#subpattern">
1173 .\" </a>
1174 (defined below),
1175 .\"
1176 "succeeds" means matching the rest of the main pattern as well as the
1177 alternative in the subpattern.
1178 .
1179 .
1180 .SH "INTERNAL OPTION SETTING"
1181 nigel 63 .rs
1182 .sp
1183 The settings of the PCRE_CASELESS, PCRE_MULTILINE, PCRE_DOTALL, and
1184 ph10 231 PCRE_EXTENDED options (which are Perl-compatible) can be changed from within
1185 the pattern by a sequence of Perl option letters enclosed between "(?" and ")".
1186 The option letters are
1187 nigel 75 .sp
1188 nigel 63 i for PCRE_CASELESS
1189 m for PCRE_MULTILINE
1190 s for PCRE_DOTALL
1191 x for PCRE_EXTENDED
1192 nigel 75 .sp
1193 nigel 63 For example, (?im) sets caseless, multiline matching. It is also possible to
1194 unset these options by preceding the letter with a hyphen, and a combined
1195 setting and unsetting such as (?im-sx), which sets PCRE_CASELESS and
1196 PCRE_MULTILINE while unsetting PCRE_DOTALL and PCRE_EXTENDED, is also
1197 permitted. If a letter appears both before and after the hyphen, the option is
1198 unset.
1199 nigel 75 .P
1200 ph10 231 The PCRE-specific options PCRE_DUPNAMES, PCRE_UNGREEDY, and PCRE_EXTRA can be
1201 changed in the same way as the Perl-compatible options by using the characters
1202 J, U and X respectively.
1203 .P
1204 ph10 412 When one of these option changes occurs at top level (that is, not inside
1205 subpattern parentheses), the change applies to the remainder of the pattern
1206 that follows. If the change is placed right at the start of a pattern, PCRE
1207 extracts it into the global options (and it will therefore show up in data
1208 extracted by the \fBpcre_fullinfo()\fP function).
1209 nigel 75 .P
1210 nigel 93 An option change within a subpattern (see below for a description of
1211 ph10 572 subpatterns) affects only that part of the subpattern that follows it, so
1212 nigel 75 .sp
1213 nigel 63 (a(?i)b)c
1214 nigel 75 .sp
1215 nigel 63 matches abc and aBc and no other strings (assuming PCRE_CASELESS is not used).
1216 By this means, options can be made to have different settings in different
1217 parts of the pattern. Any changes made in one alternative do carry on
1218 into subsequent branches within the same subpattern. For example,
1219 nigel 75 .sp
1220 nigel 63 (a(?i)b|c)
1221 nigel 75 .sp
1222 nigel 63 matches "ab", "aB", "c", and "C", even though when matching "C" the first
1223 branch is abandoned before the option setting. This is because the effects of
1224 option settings happen at compile time. There would be some very weird
1225 behaviour otherwise.
1226 ph10 251 .P
1227 \fBNote:\fP There are other PCRE-specific options that can be set by the
1228 application when the compile or match functions are called. In some cases the
1229 ph10 412 pattern can contain special leading sequences such as (*CRLF) to override what
1230 the application has set or what has been defaulted. Details are given in the
1231 section entitled
1232 ph10 251 .\" HTML <a href="#newlineseq">
1233 .\" </a>
1234 "Newline sequences"
1235 .\"
1236 ph10 518 above. There are also the (*UTF8) and (*UCP) leading sequences that can be used
1237 to set UTF-8 and Unicode property modes; they are equivalent to setting the
1238 PCRE_UTF8 and the PCRE_UCP options, respectively.
1239 nigel 75 .
1240 .
1241 .\" HTML <a name="subpattern"></a>
1242 nigel 63 .SH SUBPATTERNS
1243 .rs
1244 .sp
1245 Subpatterns are delimited by parentheses (round brackets), which can be nested.
1246 nigel 75 Turning part of a pattern into a subpattern does two things:
1247 .sp
1248 nigel 63 1. It localizes a set of alternatives. For example, the pattern
1249 nigel 75 .sp
1250 nigel 63 cat(aract|erpillar|)
1251 nigel 75 .sp
1252 ph10 572 matches "cataract", "caterpillar", or "cat". Without the parentheses, it would
1253 match "cataract", "erpillar" or an empty string.
1254 nigel 75 .sp
1255 2. It sets up the subpattern as a capturing subpattern. This means that, when
1256 the whole pattern matches, that portion of the subject string that matched the
1257 subpattern is passed back to the caller via the \fIovector\fP argument of
1258 \fBpcre_exec()\fP. Opening parentheses are counted from left to right (starting
1259 ph10 572 from 1) to obtain numbers for the capturing subpatterns. For example, if the
1260 string "the red king" is matched against the pattern
1261 nigel 75 .sp
1262 nigel 63 the ((red|white) (king|queen))
1263 nigel 75 .sp
1264 nigel 63 the captured substrings are "red king", "red", and "king", and are numbered 1,
1265 2, and 3, respectively.
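.P
The following fragment (illustrative only; error handling omitted) shows how
these numbered substrings are located via the \fIovector\fP argument of
\fBpcre_exec()\fP:
.sp
  const char *error;
  int erroffset;
  int ovector[30];
  const char *subject = "the red king";
.sp
  pcre *re = pcre_compile("the ((red|white) (king|queen))", 0,
                          &error, &erroffset, NULL);
  int rc = pcre_exec(re, NULL, subject, 12, 0, 0, ovector, 30);
  /* rc is 4: the whole match plus three captured substrings.
     Substring n starts at ovector[2*n] and ends at
     ovector[2*n+1], so capture 1 ("red king") begins at
     subject + ovector[2]. */
.sp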
1266 nigel 75 .P
1267 nigel 63 The fact that plain parentheses fulfil two functions is not always helpful.
1268 There are often times when a grouping subpattern is required without a
1269 capturing requirement. If an opening parenthesis is followed by a question mark
1270 and a colon, the subpattern does not do any capturing, and is not counted when
1271 computing the number of any subsequent capturing subpatterns. For example, if
1272 the string "the white queen" is matched against the pattern
1273 nigel 75 .sp
1274 nigel 63 the ((?:red|white) (king|queen))
1275 nigel 75 .sp
1276 nigel 63 the captured substrings are "white queen" and "queen", and are numbered 1 and
1277 nigel 93 2. The maximum number of capturing subpatterns is 65535.
1278 nigel 75 .P
1279 nigel 63 As a convenient shorthand, if any option settings are required at the start of
1280 a non-capturing subpattern, the option letters may appear between the "?" and
1281 the ":". Thus the two patterns
1282 nigel 75 .sp
1283 nigel 63 (?i:saturday|sunday)
1284 (?:(?i)saturday|sunday)
1285 nigel 75 .sp
1286 nigel 63 match exactly the same set of strings. Because alternative branches are tried
1287 from left to right, and options are not reset until the end of the subpattern
1288 is reached, an option setting in one branch does affect subsequent branches, so
1289 the above patterns match "SUNDAY" as well as "Saturday".
1290 nigel 75 .
1291 .
1292 ph10 456 .\" HTML <a name="dupsubpatternnumber"></a>
1293 ph10 175 .SH "DUPLICATE SUBPATTERN NUMBERS"
1294 .rs
1295 .sp
1296 ph10 182 Perl 5.10 introduced a feature whereby each alternative in a subpattern uses
1297 the same numbers for its capturing parentheses. Such a subpattern starts with
1298 (?| and is itself a non-capturing subpattern. For example, consider this
1299 ph10 175 pattern:
1300 .sp
1301 (?|(Sat)ur|(Sun))day
1302 ph10 182 .sp
1303 Because the two alternatives are inside a (?| group, both sets of capturing
1304 parentheses are numbered one. Thus, when the pattern matches, you can look
1305 at captured substring number one, whichever alternative matched. This construct
1306 is useful when you want to capture part, but not all, of one of a number of
1307 alternatives. Inside a (?| group, parentheses are numbered as usual, but the
1308 ph10 175 number is reset at the start of each branch. The numbers of any capturing
1309 ph10 572 parentheses that follow the subpattern start after the highest number used in
1310 any branch. The following example is taken from the Perl documentation. The
1311 numbers underneath show in which buffer the captured content will be stored.
1312 ph10 175 .sp
1313 # before ---------------branch-reset----------- after
1314 / ( a )  (?| x ( y ) z | (p (q) r) | (t) u (v) ) ( z ) /x
1315 # 1            2         2  3        2     3     4
1316 ph10 182 .sp
1317 ph10 488 A back reference to a numbered subpattern uses the most recent value that is
1318 set for that number by any subpattern. The following pattern matches "abcabc"
1319 or "defdef":
1320 ph10 456 .sp
1321 ph10 461 /(?|(abc)|(def))\e1/
1322 ph10 456 .sp
1323 In contrast, a recursive or "subroutine" call to a numbered subpattern always
1324 ph10 461 refers to the first one in the pattern with the given number. The following
1325 ph10 456 pattern matches "abcabc" or "defabc":
1326 .sp
1327 /(?|(abc)|(def))(?1)/
1328 .sp
1329 ph10 459 If a
1330 .\" HTML <a href="#conditions">
1331 .\" </a>
1332 condition test
1333 .\"
1334 for a subpattern's having matched refers to a non-unique number, the test is
1335 true if any of the subpatterns of that number have matched.
1336 .P
1337 An alternative approach to using this "branch reset" feature is to use
1338 ph10 175 duplicate named subpatterns, as described in the next section.
1339 .
1340 .
1341 nigel 75 .SH "NAMED SUBPATTERNS"
1342 nigel 63 .rs
1343 .sp
1344 Identifying capturing parentheses by number is simple, but it can be very hard
1345 to keep track of the numbers in complicated regular expressions. Furthermore,
1346 nigel 75 if an expression is modified, the numbers may change. To help with this
1347 nigel 93 difficulty, PCRE supports the naming of subpatterns. This feature was not
1348 added to Perl until release 5.10. Python had the feature earlier, and PCRE
1349 introduced it at release 4.0, using the Python syntax. PCRE now supports both
1350 ph10 459 the Perl and the Python syntax. Perl allows identically numbered subpatterns to
1351 have different names, but PCRE does not.
1352 nigel 93 .P
1353 In PCRE, a subpattern can be named in one of three ways: (?<name>...) or
1354 (?'name'...) as in Perl, or (?P<name>...) as in Python. References to capturing
1355 nigel 91 parentheses from other parts of the pattern, such as
1356 .\" HTML <a href="#backreferences">
1357 .\" </a>
1358 ph10 488 back references,
1359 nigel 91 .\"
1360 .\" HTML <a href="#recursion">
1361 .\" </a>
1362 recursion,
1363 .\"
1364 and
1365 .\" HTML <a href="#conditions">
1366 .\" </a>
1367 conditions,
1368 .\"
1369 can be made by name as well as by number.
1370 nigel 75 .P
1371 nigel 91 Names consist of up to 32 alphanumeric characters and underscores. Named
1372 nigel 93 capturing parentheses are still allocated numbers as well as names, exactly as
1373 if the names were not present. The PCRE API provides function calls for
1374 extracting the name-to-number translation table from a compiled pattern. There
1375 is also a convenience function for extracting a captured substring by name.
1376 nigel 91 .P
1377 By default, a name must be unique within a pattern, but it is possible to relax
1378 ph10 457 this constraint by setting the PCRE_DUPNAMES option at compile time. (Duplicate
1379 ph10 461 names are also always permitted for subpatterns with the same number, set up as
1380 ph10 457 described in the previous section.) Duplicate names can be useful for patterns
1381 where only one instance of the named parentheses can match. Suppose you want to
1382 match the name of a weekday, either as a 3-letter abbreviation or as the full
1383 name, and in both cases you want to extract the abbreviation. This pattern
1384 (ignoring the line breaks) does the job:
1385 nigel 91 .sp
1386 nigel 93 (?<DN>Mon|Fri|Sun)(?:day)?|
1387 (?<DN>Tue)(?:sday)?|
1388 (?<DN>Wed)(?:nesday)?|
1389 (?<DN>Thu)(?:rsday)?|
1390 (?<DN>Sat)(?:urday)?
1391 nigel 91 .sp
1392 There are five capturing substrings, but only one is ever set after a match.
1393 ph10 182 (An alternative way of solving this problem is to use a "branch reset"
1394 ph10 175 subpattern, as described in the previous section.)
1395 .P
1396 nigel 91 The convenience function for extracting the data by name returns the substring
1397 nigel 93 for the first (and in this example, the only) subpattern of that name that
1398 ph10 461 matched. This saves searching to find which numbered subpattern it was.
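.P
For illustration, the following C program (error checking omitted) uses the
convenience function \fBpcre_get_named_substring()\fP, which is described in
the
.\" HREF
\fBpcreapi\fP
.\"
documentation. The weekday pattern is shortened to three alternatives here,
and PCRE_DUPNAMES is needed because the name DN is used more than once:
.sp
  #include <stdio.h>
  #include <string.h>
  #include <pcre.h>
.sp
  int main(void)
  {
  const char *error, *dn;
  int erroffset, rc;
  int ovector[30];
  const char *subject = "Saturday";
  pcre *re = pcre_compile(
    "(?<DN>Mon|Fri|Sun)(?:day)?|(?<DN>Tue)(?:sday)?|(?<DN>Sat)(?:urday)?",
    PCRE_DUPNAMES, &error, &erroffset, NULL);
  if (re == NULL) return 1;
  rc = pcre_exec(re, NULL, subject, (int)strlen(subject), 0, 0,
    ovector, 30);
  if (rc > 0 &&
      pcre_get_named_substring(re, subject, ovector, rc, "DN", &dn) >= 0)
    {
    printf("abbreviation: %s\en", dn);   /* expected to print "Sat" */
    pcre_free_substring(dn);
    }
  pcre_free(re);
  return 0;
  }
.sp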
1399 ph10 459 .P
1400 ph10 488 If you make a back reference to a non-unique named subpattern from elsewhere in
1401 ph10 459 the pattern, the one that corresponds to the first occurrence of the name is
1402 used. In the absence of duplicate numbers (see the previous section) this is
1403 the one with the lowest number. If you use a named reference in a condition
1404 test (see the
1405 .\"
1406 .\" HTML <a href="#conditions">
1407 .\" </a>
1408 section about conditions
1409 .\"
1410 ph10 461 below), either to check whether a subpattern has matched, or to check for
1411 ph10 459 recursion, all subpatterns with the same name are tested. If the condition is
1412 true for any one of them, the overall condition is true. This is the same
1413 behaviour as testing by number. For further details of the interfaces for
1414 handling named subpatterns, see the
1415 nigel 63 .\" HREF
1416 nigel 75 \fBpcreapi\fP
1417 nigel 63 .\"
1418 documentation.
1419 ph10 385 .P
1420 \fBWarning:\fP You cannot use different names to distinguish between two
1421 ph10 457 subpatterns with the same number because PCRE uses only the numbers when
1422 matching. For this reason, an error is given at compile time if different names
1423 are given to subpatterns with the same number. However, you can give the same
1424 name to subpatterns with the same number, even when PCRE_DUPNAMES is not set.
1425 nigel 75 .
1426 .
1427 nigel 63 .SH REPETITION
1428 .rs
1429 .sp
1430 Repetition is specified by quantifiers, which can follow any of the following
1431 items:
1432 nigel 75 .sp
1433 nigel 63 a literal data character
1434 nigel 93 the dot metacharacter
1435 nigel 75 the \eC escape sequence
1436 the \eX escape sequence (in UTF-8 mode with Unicode properties)
1437 nigel 93 the \eR escape sequence
1438 ph10 572 an escape such as \ed or \epL that matches a single character
1439 nigel 63 a character class
1440 a back reference (see next section)
1441 ph10 637 a parenthesized subpattern (including assertions)
1442 ph10 461 a recursive or "subroutine" call to a subpattern
1443 nigel 75 .sp
1444 nigel 63 The general repetition quantifier specifies a minimum and maximum number of
1445 permitted matches, by giving the two numbers in curly brackets (braces),
1446 separated by a comma. The numbers must be less than 65536, and the first must
1447 be less than or equal to the second. For example:
1448 nigel 75 .sp
1449 nigel 63 z{2,4}
1450 nigel 75 .sp
1451 nigel 63 matches "zz", "zzz", or "zzzz". A closing brace on its own is not a special
1452 character. If the second number is omitted, but the comma is present, there is
1453 no upper limit; if the second number and the comma are both omitted, the
1454 quantifier specifies an exact number of required matches. Thus
1455 nigel 75 .sp
1456 nigel 63 [aeiou]{3,}
1457 nigel 75 .sp
1458 nigel 63 matches at least 3 successive vowels, but may match many more, while
1459 nigel 75 .sp
1460 \ed{8}
1461 .sp
1462 nigel 63 matches exactly 8 digits. An opening curly bracket that appears in a position
1463 where a quantifier is not allowed, or one that does not match the syntax of a
1464 quantifier, is taken as a literal character. For example, {,6} is not a
1465 quantifier, but a literal string of four characters.
1466 nigel 75 .P
1467 nigel 63 In UTF-8 mode, quantifiers apply to UTF-8 characters rather than to individual
1468 nigel 75 bytes. Thus, for example, \ex{100}{2} matches two UTF-8 characters, each of
1469 which is represented by a two-byte sequence. Similarly, when Unicode property
1470 support is available, \eX{3} matches three Unicode extended sequences, each of
1471 which may be several bytes long (and they may be of different lengths).
1472 .P
1473 nigel 63 The quantifier {0} is permitted, causing the expression to behave as if the
1474 ph10 345 previous item and the quantifier were not present. This may be useful for
1475 subpatterns that are referenced as
1476 ph10 335 .\" HTML <a href="#subpatternsassubroutines">
1477 .\" </a>
1478 subroutines
1479 .\"
1480 ph10 572 from elsewhere in the pattern (but see also the section entitled
1481 .\" HTML <a href="#subdefine">
1482 .\" </a>
1483 "Defining subpatterns for use by reference only"
1484 .\"
1485 below). Items other than subpatterns that have a {0} quantifier are omitted
1486 from the compiled pattern.
1487 nigel 75 .P
1488 nigel 93 For convenience, the three most common quantifiers have single-character
1489 abbreviations:
1490 nigel 75 .sp
1491 nigel 63 * is equivalent to {0,}
1492 + is equivalent to {1,}
1493 ? is equivalent to {0,1}
1494 nigel 75 .sp
1495 nigel 63 It is possible to construct infinite loops by following a subpattern that can
1496 match no characters with a quantifier that has no upper limit, for example:
1497 nigel 75 .sp
1498 nigel 63 (a?)*
1499 nigel 75 .sp
1500 nigel 63 Earlier versions of Perl and PCRE used to give an error at compile time for
1501 such patterns. However, because there are cases where this can be useful, such
1502 patterns are now accepted, but if any repetition of the subpattern does in fact
1503 match no characters, the loop is forcibly broken.
1504 nigel 75 .P
1505 nigel 63 By default, the quantifiers are "greedy", that is, they match as much as
1506 possible (up to the maximum number of permitted times), without causing the
1507 rest of the pattern to fail. The classic example of where this gives problems
1508 nigel 75 is in trying to match comments in C programs. These appear between /* and */
1509 and within the comment, individual * and / characters may appear. An attempt to
1510 match C comments by applying the pattern
1511 .sp
1512 /\e*.*\e*/
1513 .sp
1514 nigel 63 to the string
1515 nigel 75 .sp
1516 /* first comment */ not comment /* second comment */
1517 .sp
1518 nigel 63 fails, because it matches the entire string owing to the greediness of the .*
1519 item.
1520 nigel 75 .P
1521 nigel 63 However, if a quantifier is followed by a question mark, it ceases to be
1522 greedy, and instead matches the minimum number of times possible, so the
1523 pattern
1524 nigel 75 .sp
1525 /\e*.*?\e*/
1526 .sp
1527 nigel 63 does the right thing with the C comments. The meaning of the various
1528 quantifiers is not otherwise changed, just the preferred number of matches.
1529 Do not confuse this use of question mark with its use as a quantifier in its
1530 own right. Because it has two uses, it can sometimes appear doubled, as in
1531 nigel 75 .sp
1532 \ed??\ed
1533 .sp
1534 nigel 63 which matches one digit by preference, but can match two if that is the only
1535 way the rest of the pattern matches.
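.P
For illustration, the following C program (error checking omitted) applies the
ungreedy pattern repeatedly to the example string, restarting each call to
\fBpcre_exec()\fP at the end of the previous match. The backslashes are
doubled because the pattern is written as a C string literal, and PCRE_DOTALL
is set so that a comment may span more than one line:
.sp
  #include <stdio.h>
  #include <string.h>
  #include <pcre.h>
.sp
  int main(void)
  {
  const char *error;
  int erroffset, start = 0;
  int ovector[3];     /* no capturing parentheses: one pair of offsets */
  const char *subject =
    "/* first comment */ not comment /* second comment */";
  pcre *re = pcre_compile("/\e\e*.*?\e\e*/", PCRE_DOTALL,
    &error, &erroffset, NULL);
  if (re == NULL) return 1;
  while (pcre_exec(re, NULL, subject, (int)strlen(subject), start, 0,
         ovector, 3) >= 0)
    {
    printf("%.*s\en", ovector[1] - ovector[0], subject + ovector[0]);
    start = ovector[1];     /* continue after the previous match */
    }
  pcre_free(re);
  return 0;
  }
.sp
Each iteration is expected to print one comment; with the greedy pattern the
single match would run from the first "/*" to the last "*/".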
1536 nigel 75 .P
1537 nigel 93 If the PCRE_UNGREEDY option is set (an option that is not available in Perl),
1538 nigel 63 the quantifiers are not greedy by default, but individual ones can be made
1539 greedy by following them with a question mark. In other words, it inverts the
1540 default behaviour.
1541 nigel 75 .P
1542 nigel 63 When a parenthesized subpattern is quantified with a minimum repeat count that
1543 nigel 75 is greater than 1 or with a limited maximum, more memory is required for the
1544 nigel 63 compiled pattern, in proportion to the size of the minimum or maximum.
1545 nigel 75 .P
1546 nigel 63 If a pattern starts with .* or .{0,} and the PCRE_DOTALL option (equivalent
1547 nigel 93 to Perl's /s) is set, thus allowing the dot to match newlines, the pattern is
1548 nigel 63 implicitly anchored, because whatever follows will be tried against every
1549 character position in the subject string, so there is no point in retrying the
1550 overall match at any position after the first. PCRE normally treats such a
1551 nigel 75 pattern as though it were preceded by \eA.
1552 .P
1553 nigel 63 In cases where it is known that the subject string contains no newlines, it is
1554 worth setting PCRE_DOTALL in order to obtain this optimization, or
1555 alternatively using ^ to indicate anchoring explicitly.
1556 nigel 75 .P
1557 nigel 63 However, there is one situation where the optimization cannot be used. When .*
1558 ph10 488 is inside capturing parentheses that are the subject of a back reference
1559 nigel 93 elsewhere in the pattern, a match at the start may fail where a later one
1560 succeeds. Consider, for example:
1561 nigel 75 .sp
1562 (.*)abc\e1
1563 .sp
1564 nigel 63 If the subject is "xyz123abc123" the match point is the fourth character. For
1565 this reason, such a pattern is not implicitly anchored.
1566 nigel 75 .P
1567 nigel 63 When a capturing subpattern is repeated, the value captured is the substring
1568 that matched the final iteration. For example, after
1569 nigel 75 .sp
1570 (tweedle[dume]{3}\es*)+
1571 .sp
1572 nigel 63 has matched "tweedledum tweedledee" the value of the captured substring is
1573 "tweedledee". However, if there are nested capturing subpatterns, the
1574 corresponding captured values may have been set in previous iterations. For
1575 example, after
1576 nigel 75 .sp
1577 nigel 63 /(a|(b))+/
1578 nigel 75 .sp
1579 nigel 63 matches "aba" the value of the second captured substring is "b".
1580 nigel 75 .
1581 .
1582 .\" HTML <a name="atomicgroup"></a>
1583 .SH "ATOMIC GROUPING AND POSSESSIVE QUANTIFIERS"
1584 nigel 63 .rs
1585 .sp
1586 nigel 93 With both maximizing ("greedy") and minimizing ("ungreedy" or "lazy")
1587 repetition, failure of what follows normally causes the repeated item to be
1588 re-evaluated to see if a different number of repeats allows the rest of the
1589 pattern to match. Sometimes it is useful to prevent this, either to change the
1590 nature of the match, or to cause it to fail earlier than it otherwise might, when
1591 the author of the pattern knows there is no point in carrying on.
1592 nigel 75 .P
1593 Consider, for example, the pattern \ed+foo when applied to the subject line
1594 .sp
1595 nigel 63 123456bar
1596 nigel 75 .sp
1597 nigel 63 After matching all 6 digits and then failing to match "foo", the normal
1598 nigel 75 action of the matcher is to try again with only 5 digits matching the \ed+
1599 nigel 63 item, and then with 4, and so on, before ultimately failing. "Atomic grouping"
1600 (a term taken from Jeffrey Friedl's book) provides the means for specifying
1601 that once a subpattern has matched, it is not to be re-evaluated in this way.
1602 nigel 75 .P
1603 nigel 93 If we use atomic grouping for the previous example, the matcher gives up
1604 nigel 63 immediately on failing to match "foo" the first time. The notation is a kind of
1605 special parenthesis, starting with (?> as in this example:
1606 nigel 75 .sp
1607 (?>\ed+)foo
1608 .sp
1609 nigel 63 This kind of parenthesis "locks up" the part of the pattern it contains once
1610 it has matched, and a failure further into the pattern is prevented from
1611 backtracking into it. Backtracking past it to previous items, however, works as
1612 normal.
1613 nigel 75 .P
1614 nigel 63 An alternative description is that a subpattern of this type matches the string
1615 of characters that an identical standalone pattern would match, if anchored at
1616 the current point in the subject string.
1617 nigel 75 .P
1618 nigel 63 Atomic grouping subpatterns are not capturing subpatterns. Simple cases such as
1619 the above example can be thought of as a maximizing repeat that must swallow
1620 nigel 75 everything it can. So, while both \ed+ and \ed+? are prepared to adjust the
1621 nigel 63 number of digits they match in order to make the rest of the pattern match,
1622 nigel 75 (?>\ed+) can only match an entire sequence of digits.
1623 .P
1624 nigel 63 Atomic groups in general can of course contain arbitrarily complicated
1625 subpatterns, and can be nested. However, when the subpattern for an atomic
1626 group is just a single repeated item, as in the example above, a simpler
1627 notation, called a "possessive quantifier", can be used. This consists of an
1628 additional + character following a quantifier. Using this notation, the
1629 previous example can be rewritten as
1630 nigel 75 .sp
1631 \ed++foo
1632 .sp
1633 ph10 208 Note that a possessive quantifier can be used with an entire group, for
1634 example:
1635 .sp
1636 (abc|xyz){2,3}+
1637 .sp
1638 nigel 63 Possessive quantifiers are always greedy; the setting of the PCRE_UNGREEDY
1639 option is ignored. They are a convenient notation for the simpler forms of
1640 nigel 93 atomic group. However, there is no difference in the meaning of a possessive
1641 quantifier and the equivalent atomic group, though there may be a performance
1642 difference; possessive quantifiers should be slightly faster.
1643 nigel 75 .P
1644 nigel 93 The possessive quantifier syntax is an extension to the Perl 5.8 syntax.
1645 Jeffrey Friedl originated the idea (and the name) in the first edition of his
1646 book. Mike McCloskey liked it, so he implemented it when he built Sun's Java
1647 package, and PCRE copied it from there. It ultimately found its way into Perl
1648 at release 5.10.
1649 nigel 75 .P
1650 nigel 93 PCRE has an optimization that automatically "possessifies" certain simple
1651 pattern constructs. For example, the sequence A+B is treated as A++B because
1652 there is no point in backtracking into a sequence of A's when B must follow.
1653 .P
1654 nigel 63 When a pattern contains an unlimited repeat inside a subpattern that can itself
1655 be repeated an unlimited number of times, the use of an atomic group is the
1656 only way to avoid some failing matches taking a very long time indeed. The
1657 pattern
1658 nigel 75 .sp
1659 (\eD+|<\ed+>)*[!?]
1660 .sp
1661 nigel 63 matches an unlimited number of substrings that either consist of non-digits, or
1662 digits enclosed in <>, followed by either ! or ?. When it matches, it runs
1663 quickly. However, if it is applied to
1664 nigel 75 .sp
1665 nigel 63 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
1666 nigel 75 .sp
1667 nigel 63 it takes a long time before reporting failure. This is because the string can
1668 nigel 75 be divided between the internal \eD+ repeat and the external * repeat in a
1669 large number of ways, and all have to be tried. (The example uses [!?] rather
1670 than a single character at the end, because both PCRE and Perl have an
1671 optimization that allows for fast failure when a single character is used. They
1672 remember the last single character that is required for a match, and fail early
1673 if it is not present in the string.) If the pattern is changed so that it uses
1674 an atomic group, like this:
1675 .sp
1676 ((?>\eD+)|<\ed+>)*[!?]
1677 .sp
1678 nigel 63 sequences of non-digits cannot be broken, and failure happens quickly.
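.P
For illustration, one way to observe the difference without a long wait is to
limit the amount of backtracking by setting the \fImatch_limit\fP field in a
\fBpcre_extra\fP block, as described in the
.\" HREF
\fBpcreapi\fP
.\"
documentation. In the following C program (error checking omitted), the first
pattern is expected to hit the limit and return PCRE_ERROR_MATCHLIMIT, while
the atomic version fails quickly with PCRE_ERROR_NOMATCH. The backslashes are
doubled because the patterns are C string literals:
.sp
  #include <stdio.h>
  #include <string.h>
  #include <pcre.h>
.sp
  int main(void)
  {
  const char *error;
  const char *patterns[2] = {
    "(\e\eD+|<\e\ed+>)*[!?]",         /* backtracks heavily */
    "((?>\e\eD+)|<\e\ed+>)*[!?]" };   /* atomic group fails fast */
  int erroffset, i, rc;
  int ovector[30];
  char subject[51];
  pcre_extra extra;
  memset(subject, 'a', 50);           /* 50 non-digits, no final ! or ? */
  subject[50] = 0;
  extra.flags = PCRE_EXTRA_MATCH_LIMIT;
  extra.match_limit = 100000;         /* cap on internal matching steps */
  for (i = 0; i < 2; i++)
    {
    pcre *re = pcre_compile(patterns[i], 0, &error, &erroffset, NULL);
    rc = pcre_exec(re, &extra, subject, 50, 0, 0, ovector, 30);
    printf("pattern %d: rc = %d\en", i, rc);
    pcre_free(re);
    }
  return 0;
  }
.sp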
1679 nigel 75 .
1680 .
1681 .\" HTML <a name="backreferences"></a>
1682 .SH "BACK REFERENCES"
1683 nigel 63 .rs
1684 .sp
1685 Outside a character class, a backslash followed by a digit greater than 0 (and
1686 possibly further digits) is a back reference to a capturing subpattern earlier
1687 (that is, to its left) in the pattern, provided there have been that many
1688 previous capturing left parentheses.
1689 nigel 75 .P
1690 nigel 63 However, if the decimal number following the backslash is less than 10, it is
1691 always taken as a back reference, and causes an error only if there are not
1692 that many capturing left parentheses in the entire pattern. In other words, the
1693 parentheses that are referenced need not be to the left of the reference for
1694 nigel 91 numbers less than 10. A "forward back reference" of this type can make sense
1695 when a repetition is involved and the subpattern to the right has participated
1696 in an earlier iteration.
1697 .P
1698 nigel 93 It is not possible to have a numerical "forward back reference" to a subpattern
1699 whose number is 10 or more using this syntax because a sequence such as \e50 is
1700 interpreted as a character defined in octal. See the subsection entitled
1701 nigel 91 "Non-printing characters"
1702 nigel 75 .\" HTML <a href="#digitsafterbackslash">
1703 .\" </a>
1704 above
1705 .\"
1706 nigel 93 for further details of the handling of digits following a backslash. There is
1707 no such problem when named parentheses are used. A back reference to any
1708 subpattern is possible using named parentheses (see below).
1709 nigel 75 .P
1710 nigel 93 Another way of avoiding the ambiguity inherent in the use of digits following a
1711 ph10 572 backslash is to use the \eg escape sequence. This escape must be followed by an
1712 unsigned number or a negative number, optionally enclosed in braces. These
1713 examples are all identical:
1714 nigel 93 .sp
1715 (ring), \e1
1716 (ring), \eg1
1717 (ring), \eg{1}
1718 .sp
1719 ph10 208 An unsigned number specifies an absolute reference without the ambiguity that
1720 is present in the older syntax. It is also useful when literal digits follow
1721 the reference. A negative number is a relative reference. Consider this
1722 example:
1723 nigel 93 .sp
1724 (abc(def)ghi)\eg{-1}
1725 .sp
1726 The sequence \eg{-1} is a reference to the most recently started capturing
1727 ph10 572 subpattern before \eg, that is, it is equivalent to \e2 in this example.
1728 Similarly, \eg{-2} would be equivalent to \e1. The use of relative references
1729 can be helpful in long patterns, and also in patterns that are created by
1730 joining together fragments that contain references within themselves.
1731 nigel 93 .P
1732 nigel 63 A back reference matches whatever actually matched the capturing subpattern in
1733 the current subject string, rather than anything matching the subpattern
1734 itself (see
1735 .\" HTML <a href="#subpatternsassubroutines">
1736 .\" </a>
1737 "Subpatterns as subroutines"
1738 .\"
1739 below for a way of doing that). So the pattern
1740 nigel 75 .sp
1741 (sens|respons)e and \e1ibility
1742 .sp
1743 nigel 63 matches "sense and sensibility" and "response and responsibility", but not
1744 "sense and responsibility". If caseful matching is in force at the time of the
1745 back reference, the case of letters is relevant. For example,
1746 nigel 75 .sp
1747 ((?i)rah)\es+\e1
1748 .sp
1749 nigel 63 matches "rah rah" and "RAH RAH", but not "RAH rah", even though the original
1750 capturing subpattern is matched caselessly.
1751 nigel 75 .P
1752 ph10 171 There are several different ways of writing back references to named
1753 subpatterns. The .NET syntax \ek{name} and the Perl syntax \ek<name> or
1754 \ek'name' are supported, as is the Python syntax (?P=name). Perl 5.10's unified
1755 back reference syntax, in which \eg can be used for both numeric and named
1756 references, is also supported. We could rewrite the above example in any of
1757 nigel 93 the following ways:
1758 nigel 75 .sp
1759 nigel 93 (?<p1>(?i)rah)\es+\ek<p1>
1760 ph10 171 (?'p1'(?i)rah)\es+\ek{p1}
1761 nigel 91 (?P<p1>(?i)rah)\es+(?P=p1)
1762 ph10 171 (?<p1>(?i)rah)\es+\eg{p1}
1763 nigel 75 .sp
1764 nigel 91 A subpattern that is referenced by name may appear in the pattern before or
1765 after the reference.
1766 .P
1767 nigel 63 There may be more than one back reference to the same subpattern. If a
1768 subpattern has not actually been used in a particular match, any back
1769 ph10 456 references to it always fail by default. For example, the pattern
1770 nigel 75 .sp
1771 (a|(bc))\e2
1772 .sp
1773 ph10 461 always fails if it starts to match "a" rather than "bc". However, if the
1774 PCRE_JAVASCRIPT_COMPAT option is set at compile time, a back reference to an
1775 ph10 456 unset value matches an empty string.
1776 .P
1777 Because there may be many capturing parentheses in a pattern, all digits
1778 following a backslash are taken as part of a potential back reference number.
1779 If the pattern continues with a digit character, some delimiter must be used to
1780 terminate the back reference. If the PCRE_EXTENDED option is set, this can be
1781 whitespace. Otherwise, the \eg{ syntax or an empty comment (see
1782 nigel 75 .\" HTML <a href="#comments">
1783 .\" </a>
1784 "Comments"
1785 .\"
1786 below) can be used.
1787 ph10 488 .
1788 .SS "Recursive back references"
1789 .rs
1790 .sp
1791 nigel 63 A back reference that occurs inside the parentheses to which it refers fails
1792 nigel 75 when the subpattern is first used, so, for example, (a\e1) never matches.
1793 nigel 63 However, such references can be useful inside repeated subpatterns. For
1794 example, the pattern
1795 nigel 75 .sp
1796 (a|b\e1)+
1797 .sp
1798 nigel 63 matches any number of "a"s and also "aba", "ababbaa" etc. At each iteration of
1799 the subpattern, the back reference matches the character string corresponding
1800 to the previous iteration. In order for this to work, the pattern must be such
1801 that the first iteration does not need to match the back reference. This can be
1802 done using alternation, as in the example above, or by a quantifier with a
1803 minimum of zero.
1804 ph10 488 .P
1805 Back references of this type cause the group that they reference to be treated
1806 as an
1807 .\" HTML <a href="#atomicgroup">
1808 .\" </a>
1809 atomic group.
1810 .\"
1811 Once the whole group has been matched, a subsequent matching failure cannot
1812 cause backtracking into the middle of the group.
1813 nigel 75 .
1814 .
1815 .\" HTML <a name="bigassertions"></a>
1816 nigel 63 .SH ASSERTIONS
1817 .rs
1818 .sp
1819 An assertion is a test on the characters following or preceding the current
1820 matching point that does not actually consume any characters. The simple
1821 nigel 75 assertions coded as \eb, \eB, \eA, \eG, \eZ, \ez, ^ and $ are described
1822 .\" HTML <a href="#smallassertions">
1823 .\" </a>
1824 above.
1825 .\"
1826 .P
1827 nigel 63 More complicated assertions are coded as subpatterns. There are two kinds:
1828 those that look ahead of the current position in the subject string, and those
1829 nigel 75 that look behind it. An assertion subpattern is matched in the normal way,
1830 except that it does not cause the current matching position to be changed.
1831 .P
1832 ph10 637 Assertion subpatterns are not capturing subpatterns. If such an assertion
1833 contains capturing subpatterns within it, these are counted for the purposes of
1834 numbering the capturing subpatterns in the whole pattern. However, substring
1835 capturing is carried out only for positive assertions, because it does not make
1836 sense for negative assertions.
1837 .P
1838 ph10 643 For compatibility with Perl, assertion subpatterns may be repeated; though
1839 it makes no sense to assert the same thing several times, the side effect of
1840 capturing parentheses may occasionally be useful. In practice, there are only three
1841 cases:
1842 ph10 637 .sp
1843 (1) If the quantifier is {0}, the assertion is never obeyed during matching.
1844 However, it may contain internal capturing parenthesized groups that are called
1845 from elsewhere via the
1846 .\" HTML <a href="#subpatternsassubroutines">
1847 .\" </a>
1848 subroutine mechanism.
1849 .\"
1850 .sp
1851 (2) If the quantifier is {0,n} where n is greater than zero, it is treated as if it
1852 were {0,1}. At run time, the rest of the pattern match is tried with and
1853 without the assertion, the order depending on the greediness of the quantifier.
1854 .sp
1855 (3) If the minimum repetition is greater than zero, the quantifier is ignored.
1856 The assertion is obeyed just once when encountered during matching.
1857 nigel 75 .
1858 .
1859 .SS "Lookahead assertions"
1860 .rs
1861 .sp
1862 nigel 91 Lookahead assertions start with (?= for positive assertions and (?! for
1863 negative assertions. For example,
1864 nigel 75 .sp
1865 \ew+(?=;)
1866 .sp
1867 nigel 63 matches a word followed by a semicolon, but does not include the semicolon in
1868 the match, and
1869 nigel 75 .sp
1870 nigel 63 foo(?!bar)
1871 nigel 75 .sp
1872 nigel 63 matches any occurrence of "foo" that is not followed by "bar". Note that the
1873 apparently similar pattern
1874 nigel 75 .sp
1875 nigel 63 (?!foo)bar
1876 nigel 75 .sp
1877 nigel 63 does not find an occurrence of "bar" that is preceded by something other than
1878 "foo"; it finds any occurrence of "bar" whatsoever, because the assertion
1879 (?!foo) is always true when the next three characters are "bar". A
1880 nigel 75 lookbehind assertion is needed to achieve the other effect.
1881 .P
1882 nigel 63 If you want to force a matching failure at some point in a pattern, the most
1883 convenient way to do it is with (?!) because an empty string always matches, so
1884 an assertion that requires there not to be an empty string must always fail.
1885 ph10 572 The backtracking control verb (*FAIL) or (*F) is a synonym for (?!).
1886 nigel 75 .
1887 .
1888 .\" HTML <a name="lookbehind"></a>
1889 .SS "Lookbehind assertions"
1890 .rs
1891 .sp
1892 nigel 63 Lookbehind assertions start with (?<= for positive assertions and (?<! for
1893 negative assertions. For example,
1894 nigel 75 .sp
1895 nigel 63 (?<!foo)bar
1896 nigel 75 .sp
1897 nigel 63 does find an occurrence of "bar" that is not preceded by "foo". The contents of
1898 a lookbehind assertion are restricted such that all the strings it matches must
1899 nigel 91 have a fixed length. However, if there are several top-level alternatives, they
1900 do not all have to have the same fixed length. Thus
1901 nigel 75 .sp
1902 nigel 63 (?<=bullock|donkey)
1903 nigel 75 .sp
1904 nigel 63 is permitted, but
1905 nigel 75 .sp
1906 nigel 63 (?<!dogs?|cats?)
1907 nigel 75 .sp
1908 nigel 63 causes an error at compile time. Branches that match different length strings
1909 are permitted only at the top level of a lookbehind assertion. This is an
1910 ph10 572 extension compared with Perl, which requires all branches to match the same
1911 length of string. An assertion such as
1912 nigel 75 .sp
1913 nigel 63 (?<=ab(c|de))
1914 nigel 75 .sp
1915 nigel 63 is not permitted, because its single top-level branch can match two different
1916 ph10 454 lengths, but it is acceptable to PCRE if rewritten to use two top-level
1917 branches:
1918 nigel 75 .sp
1919 nigel 63 (?<=abc|abde)
1920 nigel 75 .sp
1921 ph10 572 In some cases, the escape sequence \eK
1922 ph10 168 .\" HTML <a href="#resetmatchstart">
1923 .\" </a>
1924 (see above)
1925 .\"
1926 ph10 461 can be used instead of a lookbehind assertion to get round the fixed-length
1927 ph10 454 restriction.
1928 ph10 168 .P
1929 nigel 63 The implementation of lookbehind assertions is, for each alternative, to
1930 nigel 93 temporarily move the current position back by the fixed length and then try to
1931 nigel 63 match. If there are insufficient characters before the current position, the
1932 nigel 93 assertion fails.
1933 nigel 75 .P
1934 PCRE does not allow the \eC escape (which matches a single byte in UTF-8 mode)
1935 nigel 63 to appear in lookbehind assertions, because it makes it impossible to calculate
1936 nigel 93 the length of the lookbehind. The \eX and \eR escapes, which can match
1937 different numbers of bytes, are also not permitted.
1938 nigel 75 .P
1939 ph10 454 .\" HTML <a href="#subpatternsassubroutines">
1940 .\" </a>
1941 "Subroutine"
1942 .\"
1943 calls (see below) such as (?2) or (?&X) are permitted in lookbehinds, as long
1944 ph10 461 as the subpattern matches a fixed-length string.
1945 ph10 454 .\" HTML <a href="#recursion">
1946 .\" </a>
1947 Recursion,
1948 .\"
1949 however, is not supported.
1950 .P
1951 nigel 93 Possessive quantifiers can be used in conjunction with lookbehind assertions to
1952 ph10 456 specify efficient matching of fixed-length strings at the end of subject
1953 strings. Consider a simple pattern such as
1954 nigel 75 .sp
1955 nigel 63 abcd$
1956 nigel 75 .sp
1957 nigel 63 when applied to a long string that does not match. Because matching proceeds
1958 from left to right, PCRE will look for each "a" in the subject and then see if
1959 what follows matches the rest of the pattern. If the pattern is specified as
1960 nigel 75 .sp
1961 nigel 63 ^.*abcd$
1962 nigel 75 .sp
1963 nigel 63 the initial .* matches the entire string at first, but when this fails (because
1964 there is no following "a"), it backtracks to match all but the last character,
1965 then all but the last two characters, and so on. Once again the search for "a"
1966 covers the entire string, from right to left, so we are no better off. However,
1967 if the pattern is written as
1968 nigel 75 .sp
1969 nigel 63 ^.*+(?<=abcd)
1970 nigel 75 .sp
1971 nigel 93 there can be no backtracking for the .*+ item; it can match only the entire
1972 nigel 63 string. The subsequent lookbehind assertion does a single test on the last four
1973 characters. If it fails, the match fails immediately. For long strings, this
1974 approach makes a significant difference to the processing time.
1975 nigel 75 .
1976 .
1977 .SS "Using multiple assertions"
1978 .rs
1979 .sp
1980 nigel 63 Several assertions (of any sort) may occur in succession. For example,
1981 nigel 75 .sp
1982 (?<=\ed{3})(?<!999)foo
1983 .sp
1984 nigel 63 matches "foo" preceded by three digits that are not "999". Notice that each of
1985 the assertions is applied independently at the same point in the subject
1986 string. First there is a check that the previous three characters are all
1987 digits, and then there is a check that the same three characters are not "999".
1988 nigel 75 This pattern does \fInot\fP match "foo" preceded by six characters, the first
1989 nigel 63 of which are digits and the last three of which are not "999". For example, it
1990 doesn't match "123abcfoo". A pattern to do that is
1991 nigel 75 .sp
1992 (?<=\ed{3}...)(?<!999)foo
1993 .sp
1994 nigel 63 This time the first assertion looks at the preceding six characters, checking
1995 that the first three are digits, and then the second assertion checks that the
1996 preceding three characters are not "999".
1997 nigel 75 .P
1998 nigel 63 Assertions can be nested in any combination. For example,
1999 nigel 75 .sp
2000 nigel 63 (?<=(?<!foo)bar)baz
2001 nigel 75 .sp
2002 nigel 63 matches an occurrence of "baz" that is preceded by "bar" which in turn is not
2003 preceded by "foo", while
2004 nigel 75 .sp
2005 (?<=\ed{3}(?!999)...)foo
2006 .sp
2007 is another pattern that matches "foo" preceded by three digits and any three
2008 nigel 63 characters that are not "999".
2009 nigel 75 .
2010 .
2011 nigel 91 .\" HTML <a name="conditions"></a>
2012 nigel 75 .SH "CONDITIONAL SUBPATTERNS"
2013 nigel 63 .rs
2014 .sp
2015 It is possible to cause the matching process to obey a subpattern
2016 conditionally or to choose between two alternative subpatterns, depending on
2017 ph10 461 the result of an assertion, or whether a specific capturing subpattern has
2018 ph10 456 already been matched. The two possible forms of conditional subpattern are:
2019 nigel 75 .sp
2020 nigel 63 (?(condition)yes-pattern)
2021 (?(condition)yes-pattern|no-pattern)
2022 nigel 75 .sp
2023 nigel 63 If the condition is satisfied, the yes-pattern is used; otherwise the
2024 no-pattern (if present) is used. If there are more than two alternatives in the
2025 ph10 557 subpattern, a compile-time error occurs. Each of the two alternatives may
2026 ph10 579 itself contain nested subpatterns of any form, including conditional
2027 ph10 557 subpatterns; the restriction to two alternatives applies only at the level of
2028 ph10 579 the condition. This pattern fragment is an example where the alternatives are
2029 ph10 557 complex:
2030 .sp
2031 (?(1) (A|B|C) | (D | (?(2)E|F) | E) )
2032 .sp
2033 nigel 75 .P
2034 nigel 93 There are four kinds of condition: references to subpatterns, references to
2035 recursion, a pseudo-condition called DEFINE, and assertions.
2036 .
2037 .SS "Checking for a used subpattern by number"
2038 .rs
2039 .sp
2040 If the text between the parentheses consists of a sequence of digits, the
2041 ph10 456 condition is true if a capturing subpattern of that number has previously
2042 ph10 461 matched. If there is more than one capturing subpattern with the same number
2043 (see the earlier
2044 ph10 456 .\"
2045 .\" HTML <a href="#recursion">
2046 .\" </a>
2047 section about duplicate subpattern numbers),
2048 .\"
2049 ph10 572 the condition is true if any of them have matched. An alternative notation is
2050 ph10 456 to precede the digits with a plus or minus sign. In this case, the subpattern
2051 number is relative rather than absolute. The most recently opened parentheses
2052 ph10 572 can be referenced by (?(-1), the next most recent by (?(-2), and so on. Inside
2053 ph10 579 loops it can also make sense to refer to subsequent groups. The next
2054 ph10 572 parentheses to be opened can be referenced as (?(+1), and so on. (The value
2055 zero in any of these forms is not used; it provokes a compile-time error.)
2056 nigel 91 .P
2057 Consider the following pattern, which contains non-significant white space to
2058 make it more readable (assume the PCRE_EXTENDED option) and to divide it into
2059 three parts for ease of discussion:
2060 nigel 75 .sp
2061 ( \e( )? [^()]+ (?(1) \e) )
2062 .sp
2063 nigel 63 The first part matches an optional opening parenthesis, and if that
2064 character is present, sets it as the first captured substring. The second part
2065 matches one or more characters that are not parentheses. The third part is a
2066 ph10 572 conditional subpattern that tests whether or not the first set of parentheses
2067 matched. If they did, that is, if the subject started with an opening parenthesis,
2068 nigel 63 the condition is true, and so the yes-pattern is executed and a closing
2069 parenthesis is required. Otherwise, since no-pattern is not present, the
2070 subpattern matches nothing. In other words, this pattern matches a sequence of
2071 nigel 93 non-parentheses, optionally enclosed in parentheses.
2072 ph10 167 .P
2073 ph10 172 If you were embedding this pattern in a larger one, you could use a relative
2074 ph10 167 reference:
2075 .sp
2076 ...other stuff... ( \e( )? [^()]+ (?(-1) \e) ) ...
2077 .sp
2078 This makes the fragment independent of the parentheses in the larger pattern.
2079 nigel 93 .
2080 .SS "Checking for a used subpattern by name"
2081 .rs
2082 nigel 91 .sp
2083 nigel 93 Perl uses the syntax (?(<name>)...) or (?('name')...) to test for a used
2084 subpattern by name. For compatibility with earlier versions of PCRE, which had
2085 this facility before Perl, the syntax (?(name)...) is also recognized. However,
2086 there is a possible ambiguity with this syntax, because subpattern names may
2087 consist entirely of digits. PCRE looks first for a named subpattern; if it
2088 cannot find one and the name consists entirely of digits, PCRE looks for a
2089 subpattern of that number, which must be greater than zero. Using subpattern
2090 names that consist entirely of digits is not recommended.
2091 .P
2092 Rewriting the above example to use a named subpattern gives this:
2093 nigel 91 .sp
2094 nigel 93 (?<OPEN> \e( )? [^()]+ (?(<OPEN>) \e) )
2095 .sp
2096 ph10 461 If the name used in a condition of this kind is a duplicate, the test is
2097 applied to all subpatterns of the same name, and is true if any one of them has
2098 ph10 459 matched.
2099 nigel 93 .
2100 .SS "Checking for pattern recursion"
2101 .rs
2102 .sp
2103 nigel 91 If the condition is the string (R), and there is no subpattern with the name R,
2104 nigel 93 the condition is true if a recursive call to the whole pattern or any
2105 subpattern has been made. If digits or a name preceded by ampersand follow the
2106 letter R, for example:
2107 .sp
2108 (?(R3)...) or (?(R&name)...)
2109 .sp
2110 ph10 456 the condition is true if the most recent recursion is into a subpattern whose
2111 nigel 93 number or name is given. This condition does not check the entire recursion
2112 ph10 461 stack. If the name used in a condition of this kind is a duplicate, the test is
2113 applied to all subpatterns of the same name, and is true if any one of them is
2114 the most recent recursion.
2115 nigel 75 .P
2116 ph10 461 At "top level", all these recursion test conditions are false.
2117 ph10 454 .\" HTML <a href="#recursion">
2118 .\" </a>
2119 ph10 459 The syntax for recursive patterns
2120 ph10 454 .\"
2121 ph10 459 is described below.
2122 nigel 93 .
2123 ph10 572 .\" HTML <a name="subdefine"></a>
2124 nigel 93 .SS "Defining subpatterns for use by reference only"
2125 .rs
2126 .sp
2127 If the condition is the string (DEFINE), and there is no subpattern with the
2128 name DEFINE, the condition is always false. In this case, there may be only one
2129 alternative in the subpattern. It is always skipped if control reaches this
2130 point in the pattern; the idea of DEFINE is that it can be used to define
2131 ph10 461 "subroutines" that can be referenced from elsewhere. (The use of
2132 ph10 454 .\" HTML <a href="#subpatternsassubroutines">
2133 .\" </a>
2134 "subroutines"
2135 .\"
2136 ph10 572 is described below.) For example, a pattern to match an IPv4 address such as
2137 "192.168.23.245" could be written like this (ignore whitespace and line
2138 breaks):
2139 nigel 93 .sp
2140 (?(DEFINE) (?<byte> 2[0-4]\ed | 25[0-5] | 1\ed\ed | [1-9]?\ed) )
2141 \eb (?&byte) (\e.(?&byte)){3} \eb
2142 .sp
2143 The first part of the pattern is a DEFINE group inside which another group
2144 named "byte" is defined. This matches an individual component of an IPv4
2145 address (a number less than 256). When matching takes place, this part of the
2146 ph10 456 pattern is skipped because DEFINE acts like a false condition. The rest of the
2147 pattern uses references to the named group to match the four dot-separated
2148 components of an IPv4 address, insisting on a word boundary at each end.
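.P
For illustration, the following C program (error checking omitted) compiles
this pattern with the PCRE_EXTENDED option, so that the cosmetic white space
is ignored, and tests the address quoted above. The backslashes are doubled
because the pattern is written as a C string literal:
.sp
  #include <stdio.h>
  #include <string.h>
  #include <pcre.h>
.sp
  int main(void)
  {
  const char *error;
  int erroffset, rc;
  int ovector[30];
  const char *subject = "192.168.23.245";
  pcre *re = pcre_compile(
    "(?(DEFINE) (?<byte> 2[0-4]\e\ed | 25[0-5] | 1\e\ed\e\ed | [1-9]?\e\ed) ) "
    "\e\eb (?&byte) (\e\e.(?&byte)){3} \e\eb",
    PCRE_EXTENDED, &error, &erroffset, NULL);
  if (re == NULL) return 1;
  rc = pcre_exec(re, NULL, subject, (int)strlen(subject), 0, 0,
    ovector, 30);
  printf("%s\en", rc >= 0 ? "matched" : "no match");
  pcre_free(re);
  return 0;
  }
.sp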
2149 nigel 93 .
2150 .SS "Assertion conditions"
2151 .rs
2152 .sp
2153 If the condition is not in any of the above formats, it must be an assertion.
2154 nigel 63 This may be a positive or negative lookahead or lookbehind assertion. Consider
2155 this pattern, again containing non-significant white space, and with the two
2156 alternatives on the second line:
2157 nigel 75 .sp
2158 nigel 63 (?(?=[^a-z]*[a-z])
2159 nigel 75 \ed{2}-[a-z]{3}-\ed{2} | \ed{2}-\ed{2}-\ed{2} )
2160 .sp
2161 nigel 63 The condition is a positive lookahead assertion that matches an optional
2162 sequence of non-letters followed by a letter. In other words, it tests for the
2163 presence of at least one letter in the subject. If a letter is found, the
2164 subject is matched against the first alternative; otherwise it is matched
2165 against the second. This pattern matches strings in one of the two forms
2166 dd-aaa-dd or dd-dd-dd, where aaa are letters and dd are digits.
2167 nigel 75 .
2168 .
2169 .\" HTML <a name="comments"></a>
2170 nigel 63 .SH COMMENTS
2171 .rs
2172 .sp
2173 ph10 579 There are two ways of including comments in patterns that are processed by
2174 ph10 562 PCRE. In both cases, the start of the comment must not be in a character class,
2175 nor in the middle of any other sequence of related characters such as (?: or a
2176 subpattern name or number. The characters that make up a comment play no part
2177 in the pattern matching.
2178 .P
2179 nigel 75 The sequence (?# marks the start of a comment that continues up to the next
2180 ph10 562 closing parenthesis. Nested parentheses are not permitted. If the PCRE_EXTENDED
2181 option is set, an unescaped # character also introduces a comment, which in
2182 this case continues to immediately after the next newline character or
2183 character sequence in the pattern. Which characters are interpreted as newlines
2184 is controlled by the options passed to \fBpcre_compile()\fP or by a special
2185 sequence at the start of the pattern, as described in the section entitled
2186 ph10 572 .\" HTML <a href="#newlines">
2187 ph10 556 .\" </a>
2188 "Newline conventions"
2189 .\"
2190 ph10 572 above. Note that the end of this type of comment is a literal newline sequence
2191 in the pattern; escape sequences that happen to represent a newline do not
2192 count. For example, consider this pattern when PCRE_EXTENDED is set, and the
2193 default newline convention is in force:
2194 ph10 556 .sp
2195 abc #comment \en still comment
2196 .sp
2197 ph10 579 On encountering the # character, \fBpcre_compile()\fP skips along, looking for
2198 ph10 556 a newline in the pattern. The sequence \en is still literal at this stage, so
2199 it does not terminate the comment. Only an actual character with the code value
2200 ph10 562 0x0a (the default newline) does so.
2201 nigel 75 .
2202 .
2203 nigel 91 .\" HTML <a name="recursion"></a>
2204 nigel 75 .SH "RECURSIVE PATTERNS"
2205 nigel 63 .rs
2206 .sp
2207 Consider the problem of matching a string in parentheses, allowing for
2208 unlimited nested parentheses. Without the use of recursion, the best that can
2209 be done is to use a pattern that matches up to some fixed depth of nesting. It
2210 nigel 93 is not possible to handle an arbitrary nesting depth.
2211 .P
2212 For some time, Perl has provided a facility that allows regular expressions to
2213 recurse (amongst other things). It does this by interpolating Perl code in the
2214 expression at run time, and the code can refer to the expression itself. A Perl
2215 pattern using code interpolation to solve the parentheses problem can be
2216 created like this:
2217 nigel 75 .sp
2218 $re = qr{\e( (?: (?>[^()]+) | (?p{$re}) )* \e)}x;
2219 .sp
2220 nigel 63 The (?p{...}) item interpolates Perl code at run time, and in this case refers
2221 nigel 93 recursively to the pattern in which it appears.
2222 nigel 75 .P
2223 nigel 93 Obviously, PCRE cannot support the interpolation of Perl code. Instead, it
2224 supports special syntax for recursion of the entire pattern, and also for
2225 individual subpattern recursion. After its introduction in PCRE and Python,
2226 ph10 453 this kind of recursion was subsequently introduced into Perl at release 5.10.
2227 nigel 75 .P
2228 nigel 93 A special item that consists of (? followed by a number greater than zero and a
2229 closing parenthesis is a recursive call of the subpattern of the given number,
2230 ph10 461 provided that it occurs inside that subpattern. (If not, it is a
2231 ph10 454 .\" HTML <a href="#subpatternsassubroutines">
2232 .\" </a>
2233 "subroutine"
2234 .\"
2235 nigel 93 call, which is described in the next section.) The special item (?R) or (?0) is
2236 a recursive call of the entire regular expression.
2237 nigel 87 .P
2238 This PCRE pattern solves the nested parentheses problem (assume the
2239 PCRE_EXTENDED option is set so that white space is ignored):
2240 nigel 75 .sp
2241 ph10 456 \e( ( [^()]++ | (?R) )* \e)
2242 nigel 75 .sp
2243 nigel 63 First it matches an opening parenthesis. Then it matches any number of
2244 substrings which can either be a sequence of non-parentheses, or a recursive
2245 nigel 87 match of the pattern itself (that is, a correctly parenthesized substring).
2246 ph10 461 Finally there is a closing parenthesis. Note the use of a possessive quantifier
2247 ph10 456 to avoid backtracking into sequences of non-parentheses.
2248 nigel 75 .P
2249 nigel 63 If this were part of a larger pattern, you would not want to recurse the entire
2250 pattern, so instead you could use this:
2251 nigel 75 .sp
2252 ph10 456 ( \e( ( [^()]++ | (?1) )* \e) )
2253 nigel 75 .sp
2254 nigel 63 We have put the pattern into parentheses, and caused the recursion to refer to
2255 ph10 172 them instead of the whole pattern.
2256 ph10 166 .P
2257 In a larger pattern, keeping track of parenthesis numbers can be tricky. This
2258 ph10 572 is made easier by the use of relative references. Instead of (?1) in the
2259 pattern above you can write (?-2) to refer to the second most recently opened
2260 parentheses preceding the recursion. In other words, a negative number counts
2261 capturing parentheses leftwards from the point at which it is encountered.
2262 ph10 166 .P
2263 It is also possible to refer to subsequently opened parentheses, by writing
2264 references such as (?+2). However, these cannot be recursive because the
2265 reference is not inside the parentheses that are referenced. They are always
2266 ph10 454 .\" HTML <a href="#subpatternsassubroutines">
2267 .\" </a>
2268 "subroutine"
2269 .\"
2270 calls, as described in the next section.
2271 ph10 166 .P
2272 An alternative approach is to use named parentheses instead. The Perl syntax
2273 for this is (?&name); PCRE's earlier syntax (?P>name) is also supported. We
2274 could rewrite the above example as follows:
2275 nigel 75 .sp
2276 ph10 456 (?<pn> \e( ( [^()]++ | (?&pn) )* \e) )
2277 nigel 75 .sp
2278 nigel 93 If there is more than one subpattern with the same name, the earliest one is
2279 ph10 172 used.
2280 ph10 166 .P
2281 This particular example pattern that we have been looking at contains nested
2282 ph10 456 unlimited repeats, and so the use of a possessive quantifier for matching
2283 strings of non-parentheses is important when applying the pattern to strings
2284 that do not match. For example, when this pattern is applied to
2285 nigel 75 .sp
2286 nigel 63 (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa()
2287 nigel 75 .sp
2288 ph10 456 it yields "no match" quickly. However, if a possessive quantifier is not used,
2289 nigel 63 the match runs for a very long time indeed because there are so many different
2290 ways the + and * repeats can carve up the subject, and all have to be tested
2291 before failure can be reported.
2292 nigel 75 .P
2293 ph10 464 At the end of a match, the values of capturing parentheses are those from
2294 the outermost level. If you want to obtain intermediate values, a callout
2295 function can be used (see below and the
2296 nigel 63 .\" HREF
2297 nigel 75 \fBpcrecallout\fP
2298 nigel 63 .\"
2299 documentation). If the pattern above is matched against
2300 nigel 75 .sp
2301 nigel 63 (ab(cd)ef)
2302 nigel 75 .sp
2303 ph10 464 the value for the inner capturing parentheses (numbered 2) is "ef", which is
2304 the last value taken on at the top level. If a capturing subpattern is not
2305 matched at the top level, its final value is unset, even if it is (temporarily)
2306 set at a deeper level.
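.P
For illustration, this behaviour can be checked with the following C program
(error checking omitted), using the numbered version of the pattern from
above, written without the cosmetic white space; the backslashes are doubled
because the pattern is a C string literal:
.sp
  #include <stdio.h>
  #include <string.h>
  #include <pcre.h>
.sp
  int main(void)
  {
  const char *error;
  int erroffset, rc;
  int ovector[30];
  const char *subject = "(ab(cd)ef)";
  pcre *re = pcre_compile("(\e\e(([^()]++|(?1))*\e\e))", 0,
    &error, &erroffset, NULL);
  if (re == NULL) return 1;
  rc = pcre_exec(re, NULL, subject, (int)strlen(subject), 0, 0,
    ovector, 30);
  if (rc > 2)                          /* expected to print "ef" */
    printf("capture 2: %.*s\en",
      ovector[5] - ovector[4], subject + ovector[4]);
  pcre_free(re);
  return 0;
  }
.sp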
2307 nigel 75 .P
2308 ph10 464 If there are more than 15 capturing parentheses in a pattern, PCRE has to
2309 obtain extra memory to store data during a recursion, which it does by using
2310 \fBpcre_malloc\fP, freeing it via \fBpcre_free\fP afterwards. If no memory can
2311 be obtained, the match fails with the PCRE_ERROR_NOMEMORY error.
2312 .P
2313 nigel 63 Do not confuse the (?R) item with the condition (R), which tests for recursion.
2314 Consider this pattern, which matches text in angle brackets, allowing for
2315 arbitrary nesting. Only digits are allowed in nested brackets (that is, when
2316 recursing), whereas any characters are permitted at the outer level.
2317 nigel 75 .sp
2318 < (?: (?(R) \ed++ | [^<>]*+) | (?R)) * >
2319 .sp
2320 nigel 63 In this pattern, (?(R) is the start of a conditional subpattern, with two
2321 different alternatives for the recursive and non-recursive cases. The (?R) item
2322 is the actual recursive call.
2323 nigel 75 .
2324 .
2325 ph10 453 .\" HTML <a name="recursiondifference"></a>
2326 .SS "Recursion difference from Perl"
2327 .rs
2328 .sp
2329 In PCRE (like Python, but unlike Perl), a recursive subpattern call is always
2330 treated as an atomic group. That is, once it has matched some of the subject
2331 string, it is never re-entered, even if it contains untried alternatives and
2332 ph10 461 there is a subsequent matching failure. This can be illustrated by the
2333 following pattern, which purports to match a palindromic string that contains
2334 ph10 453 an odd number of characters (for example, "a", "aba", "abcba", "abcdcba"):
2335 .sp
2336 ^(.|(.)(?1)\e2)$
2337 .sp
2338 ph10 461 The idea is that it either matches a single character, or two identical
2339 characters surrounding a sub-palindrome. In Perl, this pattern works; in PCRE
2340 ph10 453 it does not if the subject string is longer than three characters. Consider the
2341 subject string "abcba":
2342 .P
2343 ph10 461 At the top level, the first character is matched, but as it is not at the end
2344 ph10 453 of the string, the first alternative fails; the second alternative is taken
2345 and the recursion kicks in. The recursive call to subpattern 1 successfully
2346 matches the next character ("b"). (Note that the beginning and end of line
2347 tests are not part of the recursion).
2348 .P
2349 Back at the top level, the next character ("c") is compared with what
2350 ph10 461 subpattern 2 matched, which was "a". This fails. Because the recursion is
2351 ph10 453 treated as an atomic group, there are now no backtracking points, and so the
2352 entire match fails. (Perl is able, at this point, to re-enter the recursion and
2353 try the second alternative.) However, if the pattern is written with the
2354 alternatives in the other order, things are different:
2355 .sp
2356 ^((.)(?1)\e2|.)$
2357 .sp
2358 ph10 461 This time, the recursing alternative is tried first, and continues to recurse
2359 until it runs out of characters, at which point the recursion fails. But this
2360 time we do have another alternative to try at the higher level. That is the big
2361 ph10 453 difference: in the previous case the remaining alternative is at a deeper
2362 recursion level, which PCRE cannot use.
2363 .P
2364 ph10 572 To change the pattern so that it matches all palindromic strings, not just
2365 those with an odd number of characters, it is tempting to change the pattern to
2366 this:
2367 ph10 453 .sp
2368 ^((.)(?1)\e2|.?)$
2369 .sp
2370 ph10 461 Again, this works in Perl, but not in PCRE, and for the same reason. When a
2371 deeper recursion has matched a single character, it cannot be entered again in
2372 order to match an empty string. The solution is to separate the two cases, and
2373 ph10 453 write out the odd and even cases as alternatives at the higher level:
2374 .sp
2375 ^(?:((.)(?1)\e2|)|((.)(?3)\e4|.))
2376 ph10 461 .sp
2377 If you want to match typical palindromic phrases, the pattern has to ignore all
2378 ph10 453 non-word characters, which can be done like this:
2379 .sp
2380 ph10 461 ^\eW*+(?:((.)\eW*+(?1)\eW*+\e2|)|((.)\eW*+(?3)\eW*+\e4|\eW*+.\eW*+))\eW*+$
2381 ph10 453 .sp
2382 ph10 461 If run with the PCRE_CASELESS option, this pattern matches phrases such as "A
2383 man, a plan, a canal: Panama!" and it works well in both PCRE and Perl. Note
2384 the use of the possessive quantifier *+ to avoid backtracking into sequences of
2385 ph10 453 non-word characters. Without this, PCRE takes a great deal longer (ten times or
2386 more) to match typical phrases, and Perl takes so long that you think it has
2387 gone into a loop.
2388 ph10 456 .P
2389 \fBWARNING\fP: The palindrome-matching patterns above work only if the subject
2390 string does not start with a palindrome that is shorter than the entire string.
2391 For example, although "abcba" is correctly matched, if the subject is "ababa",
2392 PCRE finds the palindrome "aba" at the start, then fails at top level because
2393 the end of the string does not follow. Once again, it cannot jump back into the
2394 recursion to try other alternatives, so the entire match fails.
2395 ph10 453 .
2396 .
2397 nigel 63 .\" HTML <a name="subpatternsassubroutines"></a>
2398 nigel 75 .SH "SUBPATTERNS AS SUBROUTINES"
2399 nigel 63 .rs
2400 .sp
2401 If the syntax for a recursive subpattern reference (either by number or by
2402 name) is used outside the parentheses to which it refers, it operates like a
2403 nigel 93 subroutine in a programming language. The "called" subpattern may be defined
2404 ph10 166 before or after the reference. A numbered reference can be absolute or
2405 relative, as in these examples:
2406 nigel 75 .sp
2407 ph10 166 (...(absolute)...)...(?2)...
2408 (...(relative)...)...(?-1)...
2409 ph10 172 (...(?+1)...(relative)...
2410 ph10 166 .sp
2411 An earlier example pointed out that the pattern
2412 .sp
2413 nigel 75 (sens|respons)e and \e1ibility
2414 .sp
2415 nigel 63 matches "sense and sensibility" and "response and responsibility", but not
2416 "sense and responsibility". If instead the pattern
2417 nigel 75 .sp
2418 nigel 63 (sens|respons)e and (?1)ibility
2419 nigel 75 .sp
2420 nigel 63 is used, it does match "sense and responsibility" as well as the other two
2421 nigel 93 strings. Another example is given in the discussion of DEFINE above.
2422 nigel 87 .P
2423 ph10 464 Like recursive subpatterns, a subroutine call is always treated as an atomic
2424 nigel 87 group. That is, once it has matched some of the subject string, it is never
2425 re-entered, even if it contains untried alternatives and there is a subsequent
2426 ph10 469 matching failure. Any capturing parentheses that are set during the subroutine
2427 ph10 464 call revert to their previous values afterwards.
2428 nigel 93 .P
2429 When a subpattern is used as a subroutine, processing options such as
2430 case-independence are fixed when the subpattern is defined. They cannot be
2431 changed for different calls. For example, consider this pattern:
2432 .sp
2433 ph10 166 (abc)(?i:(?-1))
2434 nigel 93 .sp
2435 It matches "abcabc". It does not match "abcABC" because the change of
2436 processing option does not affect the called subpattern.
2437 nigel 75 .
2438 .
2439 ph10 333 .\" HTML <a name="onigurumasubroutines"></a>
2440 .SH "ONIGURUMA SUBROUTINE SYNTAX"
2441 .rs
2442 .sp
2443 ph10 345 For compatibility with Oniguruma, the non-Perl syntax \eg followed by a name or
2444 a number enclosed either in angle brackets or single quotes, is an alternative
2445 syntax for referencing a subpattern as a subroutine, possibly recursively. Here
2446 ph10 333 are two of the examples used above, rewritten using this syntax:
2447 .sp
2448 (?<pn> \e( ( (?>[^()]+) | \eg<pn> )* \e) )
2449 (sens|respons)e and \eg'1'ibility
2450 .sp
2451 ph10 345 PCRE supports an extension to Oniguruma: if a number is preceded by a
2452 ph10 333 plus or a minus sign it is taken as a relative reference. For example:
2453 .sp
2454 (abc)(?i:\eg<-1>)
2455 .sp
2456 ph10 345 Note that \eg{...} (Perl syntax) and \eg<...> (Oniguruma syntax) are \fInot\fP
2457 ph10 333 synonymous. The former is a back reference; the latter is a subroutine call.
2458 .
2459 .
2460 nigel 63 .SH CALLOUTS
2461 .rs
2462 .sp
2463 Perl has a feature whereby using the sequence (?{...}) causes arbitrary Perl
2464 code to be obeyed in the middle of matching a regular expression. This makes it
2465 possible, amongst other things, to extract different substrings that match the
2466 same pair of parentheses when there is a repetition.
2467 nigel 75 .P
2468 nigel 63 PCRE provides a similar feature, but of course it cannot obey arbitrary Perl
2469 code. The feature is called "callout". The caller of PCRE provides an external
2470 nigel 75 function by putting its entry point in the global variable \fIpcre_callout\fP.
2471 nigel 63 By default, this variable contains NULL, which disables all calling out.
2472 nigel 75 .P
2473 nigel 63 Within a regular expression, (?C) indicates the points at which the external
2474 function is to be called. If you want to identify different callout points, you
2475 can put a number less than 256 after the letter C. The default value is zero.
2476 For example, this pattern has two callout points:
2477 nigel 75 .sp
2478 ph10 155 (?C1)abc(?C2)def
2479 nigel 75 .sp
2480 If the PCRE_AUTO_CALLOUT flag is passed to \fBpcre_compile()\fP, callouts are
2481 automatically installed before each item in the pattern. They are all numbered
2482 255.
2483 .P
2484 During matching, when PCRE reaches a callout point (and \fIpcre_callout\fP is
2485 nigel 63 set), the external function is called. It is provided with the number of the
2486 nigel 75 callout, the position in the pattern, and, optionally, one item of data
2487 originally supplied by the caller of \fBpcre_exec()\fP. The callout function
2488 may cause matching to proceed, to backtrack, or to fail altogether. A complete
2489 description of the interface to the callout function is given in the
2490 nigel 63 .\" HREF
2491 nigel 75 \fBpcrecallout\fP
2492 nigel 63 .\"
2493 documentation.
2494 nigel 93 .
2495 .
2496 ph10 510 .\" HTML <a name="backtrackcontrol"></a>
2497 ph10 235 .SH "BACKTRACKING CONTROL"
2498 ph10 210 .rs
2499 .sp
2500 ph10 211 Perl 5.10 introduced a number of "Special Backtracking Control Verbs", which
2501 ph10 210 are described in the Perl documentation as "experimental and subject to change
2502 ph10 211 or removal in a future version of Perl". It goes on to say: "Their usage in
2503 production code should be noted to avoid problems during upgrades." The same
2504 ph10 210 remarks apply to the PCRE features described in this section.
2505 .P
2506 ph10 341 Since these verbs are specifically related to backtracking, most of them can be
2507 used only when the pattern is to be matched using \fBpcre_exec()\fP, which uses
2508 ph10 345 a backtracking algorithm. With the exception of (*FAIL), which behaves like a
2509 ph10 341 failing negative assertion, they cause an error if encountered by
2510 ph10 210 \fBpcre_dfa_exec()\fP.
2511 .P
2512 ph10 469 If any of these verbs are used in an assertion or subroutine subpattern
2513 ph10 464 (including recursive subpatterns), their effect is confined to that subpattern;
2514 ph10 630 it does not extend to the surrounding pattern, with one exception: a *MARK that
2515 is encountered in a positive assertion \fIis\fP passed back (compare capturing
2516 parentheses in assertions). Note that such subpatterns are processed as
2517 anchored at the point where they are tested.
2518 ph10 445 .P
2519 ph10 211 The new verbs make use of what was previously invalid syntax: an opening
2520 ph10 510 parenthesis followed by an asterisk. They are generally of the form
2521 ph10 512 (*VERB) or (*VERB:NAME). Some may take either form, with differing behaviour,
2522 ph10 510 depending on whether or not an argument is present. A name is a sequence of
2523 letters, digits, and underscores. If the name is empty, that is, if the closing
2524 parenthesis immediately follows the colon, the effect is as if the colon were
2525 not there. Any number of these verbs may occur in a pattern.
2526 .P
2527 ph10 512 PCRE contains some optimizations that are used to speed up matching by running
2528 some checks at the start of each match attempt. For example, it may know the
2529 minimum length of matching subject, or that a particular character must be
2530 present. When one of these optimizations suppresses the running of a match, any
2531 included backtracking verbs will not, of course, be processed. You can suppress
2532 the start-of-match optimizations by setting the PCRE_NO_START_OPTIMIZE option
2533 ph10 577 when calling \fBpcre_compile()\fP or \fBpcre_exec()\fP, or by starting the
2534 pattern with (*NO_START_OPT).
2535 ph10 210 .
2536 ph10 510 .
2537 ph10 210 .SS "Verbs that act immediately"
2538 .rs
2539 .sp
2540 ph10 512 The following verbs act as soon as they are encountered. They may not be
2541 ph10 510 followed by a name.
2542 ph10 210 .sp
2543 (*ACCEPT)
2544 .sp
2545 This verb causes the match to end successfully, skipping the remainder of the
2546 ph10 211 pattern. When inside a recursion, only the innermost pattern is ended
2547 ph10 456 immediately. If (*ACCEPT) is inside capturing parentheses, the data so far is
2548 captured. (This feature was added to PCRE at release 8.00.) For example:
2549 ph10 210 .sp
2550 ph10 447 A((?:A|B(*ACCEPT)|C)D)
2551 ph10 210 .sp
2552 ph10 461 This matches "AB", "AAD", or "ACD"; when it matches "AB", "B" is captured by
2553 ph10 447 the outer parentheses.
2554 ph10 210 .sp
2555 (*FAIL) or (*F)
2556 .sp
2557 ph10 211 This verb causes the match to fail, forcing backtracking to occur. It is
2558 ph10 210 equivalent to (?!) but easier to read. The Perl documentation notes that it is
2559 probably useful only when combined with (?{}) or (??{}). Those are, of course,
2560 Perl features that are not present in PCRE. The nearest equivalent is the
2561 callout feature, as for example in this pattern:
2562 .sp
2563 a+(?C)(*FAIL)
2564 .sp
2565 ph10 211 A match with the string "aaaa" always fails, but the callout is taken before
2566 each backtrack happens (in this example, 10 times).
2567 ph10 210 .
2568 ph10 510 .
2569 .SS "Recording which path was taken"
2570 .rs
2571 .sp
2572 ph10 512 There is one verb whose main purpose is to track how a match was arrived at,
2573 though it also has a secondary use in conjunction with advancing the match
2574 ph10 510 starting point (see (*SKIP) below).
2575 .sp
2576 (*MARK:NAME) or (*:NAME)
2577 .sp
2578 A name is always required with this verb. There may be as many instances of
2579 (*MARK) as you like in a pattern, and their names do not have to be unique.
2580 .P
2581 When a match succeeds, the name of the last-encountered (*MARK) is passed back
2582 to the caller via the \fIpcre_extra\fP data structure, as described in the
2583 .\" HTML <a href="pcreapi.html#extradata">
2584 .\" </a>
2585 section on \fIpcre_extra\fP
2586 .\"
2587 ph10 512 in the
2588 ph10 510 .\" HREF
2589 \fBpcreapi\fP
2590 .\"
2591 documentation. No data is returned for a partial match. Here is an example of
2592 \fBpcretest\fP output, where the /K modifier requests the retrieval and
2593 outputting of (*MARK) data:
2594 .sp
2595 /X(*MARK:A)Y|X(*MARK:B)Z/K
2596 XY
2597 0: XY
2598 MK: A
2599 XZ
2600 0: XZ
2601 MK: B
2602 .sp
2603 ph10 512 The (*MARK) name is tagged with "MK:" in this output, and in this example it
2604 indicates which of the two alternatives matched. This is a more efficient way
2605 ph10 510 of obtaining this information than putting each alternative in its own
2606 capturing parentheses.
2607 .P
2608 ph10 630 If (*MARK) is encountered in a positive assertion, its name is recorded and
2609 passed back if it is the last-encountered. This does not happen for negative
2610 assertions.
2611 .P
2612 ph10 510 A name may also be returned after a failed match if the final path through the
2613 pattern involves (*MARK). However, unless (*MARK) is used in conjunction with
2614 (*COMMIT), this is unlikely to happen for an unanchored pattern because, as the
2615 starting point for matching is advanced, the final check is often with an empty
2616 string, causing a failure before (*MARK) is reached. For example:
2617 .sp
2618 /X(*MARK:A)Y|X(*MARK:B)Z/K
2619 XP
2620 No match
2621 .sp
2622 There are three potential starting points for this match (starting with X,
2623 ph10 512 starting with P, and with an empty string). If the pattern is anchored, the
2624 ph10 510 result is different:
2625 .sp
2626 /^X(*MARK:A)Y|^X(*MARK:B)Z/K
2627 XP
2628 No match, mark = B
2629 .sp
2630 ph10 512 PCRE's start-of-match optimizations can also interfere with this. For example,
2631 if, as a result of a call to \fBpcre_study()\fP, it knows the minimum
2632 ph10 510 subject length for a match, a shorter subject will not be scanned at all.
2633 .P
2634 ph10 512 Note that similar anomalies (though different in detail) exist in Perl, no
2635 doubt for the same reasons. The use of (*MARK) data after a failed match of an
2636 ph10 510 unanchored pattern is not recommended, unless (*COMMIT) is involved.
2637 .
2638 .
2639 ph10 210 .SS "Verbs that act after backtracking"
2640 .rs
2641 .sp
2642 ph10 211 The following verbs do nothing when they are encountered. Matching continues
2643 ph10 510 with what follows, but if there is no subsequent match, causing a backtrack to
2644 the verb, a failure is forced. That is, backtracking cannot pass to the left of
2645 the verb. However, when one of these verbs appears inside an atomic group, its
2646 effect is confined to that group, because once the group has been matched,
2647 there is never any backtracking into it. In this situation, backtracking can
2648 ph10 512 "jump back" to the left of the entire atomic group. (Remember also, as stated
2649 ph10 510 above, that this localization also applies in subroutine calls and assertions.)
2650 .P
2651 These verbs differ in exactly what kind of failure occurs when backtracking
2652 reaches them.
2653 ph10 210 .sp
2654 (*COMMIT)
2655 .sp
2656 ph10 510 This verb, which may not be followed by a name, causes the whole match to fail
2657 outright if the rest of the pattern does not match. Even if the pattern is
2658 unanchored, no further attempts to find a match by advancing the starting point
2659 take place. Once (*COMMIT) has been passed, \fBpcre_exec()\fP is committed to
2660 finding a match at the current starting point, or not at all. For example:
2661 ph10 210 .sp
2662 a+(*COMMIT)b
2663 .sp
2664 ph10 211 This matches "xxaab" but not "aacaab". It can be thought of as a kind of
2665 ph10 512 dynamic anchor, or "I've started, so I must finish." The name of the most
2666 recently passed (*MARK) in the path is passed back when (*COMMIT) forces a
2667 ph10 510 match failure.
2668 .P
2669 ph10 512 Note that (*COMMIT) at the start of a pattern is not the same as an anchor,
2670 unless PCRE's start-of-match optimizations are turned off, as shown in this
2671 ph10 510 \fBpcretest\fP example:
2672 ph10 210 .sp
2673 ph10 510 /(*COMMIT)abc/
2674 xyzabc
2675 0: abc
2676 xyzabc\eY
2677 No match
2678 ph10 210 .sp
2679 ph10 512 PCRE knows that any match must start with "a", so the optimization skips along
2680 ph10 510 the subject to "a" before running the first match attempt, which succeeds. When
2681 the optimization is disabled by the \eY escape in the second subject, the match
2682 ph10 512 starts at "x" and so the (*COMMIT) causes it to fail without trying any other
2683 ph10 510 starting points.
2684 ph10 210 .sp
2685 ph10 510 (*PRUNE) or (*PRUNE:NAME)
2686 .sp
2687 ph10 512 This verb causes the match to fail at the current starting position in the
2688 ph10 510 subject if the rest of the pattern does not match. If the pattern is
2689 unanchored, the normal "bumpalong" advance to the next starting character then
2690 happens. Backtracking can occur as usual to the left of (*PRUNE), before it is
2691 reached, or when matching to the right of (*PRUNE), but if there is no match to
2692 the right, backtracking cannot cross (*PRUNE). In simple cases, the use of
2693 (*PRUNE) is just an alternative to an atomic group or possessive quantifier,
2694 but there are some uses of (*PRUNE) that cannot be expressed in any other way.
2695 The behaviour of (*PRUNE:NAME) is the same as (*MARK:NAME)(*PRUNE) when the
2696 match fails completely; the name is passed back if this is the final attempt.
2697 (*PRUNE:NAME) does not pass back a name if the match succeeds. In an anchored
2698 pattern (*PRUNE) has the same effect as (*COMMIT).
2699 .sp
2700 ph10 210 (*SKIP)
2701 .sp
2702 ph10 510 This verb, when given without a name, is like (*PRUNE), except that if the
2703 pattern is unanchored, the "bumpalong" advance is not to the next character,
2704 but to the position in the subject where (*SKIP) was encountered. (*SKIP)
2705 signifies that whatever text was matched leading up to it cannot be part of a
2706 successful match. Consider:
2707 ph10 210 .sp
2708 a+(*SKIP)b
2709 .sp
2710 ph10 211 If the subject is "aaaac...", after the first match attempt fails (starting at
2711 ph10 210 the first character in the string), the starting point skips on to start the
2712 ph10 211 next attempt at "c". Note that a possessive quantifier does not have the same
2713 ph10 456 effect as this example; although it would suppress backtracking during the
2714 ph10 210 first match attempt, the second attempt would start at the second character
2715 instead of skipping on to "c".
2716 .sp
2717 ph10 510 (*SKIP:NAME)
2718 ph10 211 .sp
2719 ph10 512 When (*SKIP) has an associated name, its behaviour is modified. If the
2720 following pattern fails to match, the previous path through the pattern is
2721 searched for the most recent (*MARK) that has the same name. If one is found,
2722 the "bumpalong" advance is to the subject position that corresponds to that
2723 (*MARK) instead of to where (*SKIP) was encountered. If no (*MARK) with a
2724 matching name is found, normal "bumpalong" of one character happens (the
2725 ph10 510 (*SKIP) is ignored).
2726 .sp
2727 (*THEN) or (*THEN:NAME)
2728 .sp
2729 ph10 579 This verb causes a skip to the next alternation in the innermost enclosing
2730 ph10 550 group if the rest of the pattern does not match. That is, it cancels pending
2731 backtracking, but only within the current alternation. Its name comes from the
2732 observation that it can be used for a pattern-based if-then-else block:
2733 ph10 210 .sp
2734 ( COND1 (*THEN) FOO | COND2 (*THEN) BAR | COND3 (*THEN) BAZ ) ...
2735 .sp
2736 ph10 211 If the COND1 pattern matches, FOO is tried (and possibly further items after
2737 ph10 210 the end of the group if FOO succeeds); on failure the matcher skips to the
2738 ph10 510 second alternative and tries COND2, without backtracking into COND1. The
2739 behaviour of (*THEN:NAME) is exactly the same as (*MARK:NAME)(*THEN) if the
2740 overall match fails. If (*THEN) is not directly inside an alternation, it acts
2741 like (*PRUNE).
2742 ph10 210 .
2743 ph10 551 .P
2744 ph10 579 The above verbs provide four different "strengths" of control when subsequent
2745 ph10 551 matching fails. (*THEN) is the weakest, carrying on the match at the next
2746 alternation. (*PRUNE) comes next, failing the match at the current starting
2747 position, but allowing an advance to the next character (for an unanchored
2748 pattern). (*SKIP) is similar, except that the advance may be more than one
2749 character. (*COMMIT) is the strongest, causing the entire match to fail.
2750 .P
2751 If more than one is present in a pattern, the "strongest" one wins. For example,
2752 consider this pattern, where A, B, etc. are complex pattern fragments:
2753 .sp
2754 (A(*COMMIT)B(*THEN)C|D)
2755 .sp
2756 Once A has matched, PCRE is committed to this match, at the current starting
2757 position. If subsequently B matches, but C does not, the normal (*THEN) action
2758 of trying the next alternation (that is, D) does not happen because (*COMMIT)
2759 overrides.
2760 ph10 210 .
2761 ph10 551 .
2762 nigel 93 .SH "SEE ALSO"
2763 .rs
2764 .sp
2765 ph10 461 \fBpcreapi\fP(3), \fBpcrecallout\fP(3), \fBpcrematching\fP(3),
2766 ph10 456 \fBpcresyntax\fP(3), \fBpcre\fP(3).
2767 ph10 99 .
2768 .
2769 .SH AUTHOR
2770 .rs
2771 .sp
2772 .nf
2773 Philip Hazel
2774 University Computing Service
2775 Cambridge CB2 3QH, England.
2776 .fi
2777 .
2778 .
2779 .SH REVISION
2780 .rs
2781 .sp
2782 .nf
2783 ph10 637 Last updated: 24 July 2011
2784 ph10 607 Copyright (c) 1997-2011 University of Cambridge.
2785 ph10 99 .fi
Thursday, April 22, 2021
HTML5 comes with a nifty feature to show custom form validation messages on form submission (see cover image). This looks different in each browser/OS - but it is a standard HTML5 feature, fully supported in all modern browsers.
This is easy enough to use through javascript:
element.setCustomValidity('That is the wrong name!');
But when using a front end framework like Vue.js or Svelte, this means that we first need to get a reference to the DOM element and make sure that the element is mounted before executing the above JavaScript.
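For comparison, here is a rough sketch of that manual approach in plain JavaScript (the #name id and the validation rule are placeholders for illustration only, not part of the original post):
const input = document.querySelector('#name'); // assumes an <input id="name"> already in the DOM
input.addEventListener('input', () => {
  input.setCustomValidity(input.value !== 'joe' ? 'That is the wrong name!' : '');
});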
Wouldn't it be nice if this could just be set as an attribute directly on each input element? Something like this:
<input validity="That is the wrong name!" />
Here's how you can do just that:
Vue.js
Run before loading your app, to make this globally available:
Vue.directive('validity', function (el, binding) {
  el.setCustomValidity(binding.value ? binding.value : '');
})
In a Vue component (.vue file):
<template>
  <input type="text"
         v-model="name"
         v-validity="name!=='joe'?'That is the wrong name!':''" />
</template>
<script>
export default {
  data: function () {
    return {
      name: ''
    }
  }
}
</script>
Svelte
In a "shared.js" file:
export function Validity(node, val) {
  if (!!val) node.setCustomValidity(val);
  return {
    update(newVal) {
      node.setCustomValidity(newVal ? newVal : '');
    }
  };
}
In a ".svelte" component file:
<input type="text"
       bind:value={name}
       use:Validity={name!=='joe'?'That is the wrong name!':''}>
<script>
  import {Validity} from './shared.js';
  let name='';
</script>
So I'm trying to figure out if it's possible to extract the private key if you have the ciphertext and the IV corresponding to that ciphertext. It's for some kind of a challenge, and I can't really seem to figure it out.
• It's usually called a secret key instead of a private key for symmetric block ciphers such as AES. If anybody takes up this challenge for a well randomized AES key then I would consider them challenged indeed. – Maarten Bodewes Mar 4 '15 at 18:26
No, that's not really possible without blatant flaws in the implementation. Modern modes of operation of ciphers are resistant to attacks even if you know many pairs of plaintext and ciphertext, and the IV is public knowledge; knowing it is the normal case. You also didn't mention which mode of operation was used.
Well, of course you could brute force the key, but that would take more time and computational power than you can ever muster.
Oh, and a small note: AES is a symmetric cipher, so there's just one key, and it is normally not called a private key. The term may be technically true, but it is generally reserved for asymmetric encryption.
We know this is theoretically possible, but the search space is about 2^126 right now, with no sign of that improving. Good luck building a big enough supercomputer (all the computers on the planet combined aren't even close right now).
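(For a rough sense of scale, this is my own back-of-envelope arithmetic rather than part of the original answer: 2^126 is roughly 8.5 x 10^37 keys, so even testing 10^12 keys per second would take on the order of 10^18 years.)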
TypeError: "x" is (not) "y"
Message
TypeError: "x" is (not) "y"
Examples:
TypeError: "x" is undefined
TypeError: "x" is null
TypeError: "undefined" is not an object
TypeError: "x" is not an object or null
TypeError: "x" is not a symbol
Error type
TypeError.
What went wrong?
An unexpected type was encountered. This happens most often with the values undefined or null.
Some methods, such as Object.create() or Symbol.keyFor(), have constraints on the type of value that can be passed as an argument.
Examples
Invalid cases
// undefined and null: values on which
// the substring method
// will not work
var toto = undefined;
toto.substring(1); // TypeError: toto is undefined
var toto = null;
toto.substring(1); // TypeError: toto is null
// Some methods require a value
// of a specific type
var toto = {}
Symbol.keyFor(toto); // TypeError: toto is not a symbol
var toto = "truc"
Object.create(toto); // TypeError: "toto" is not an object or null
Fixing the issue
To fix this issue and rule out the cases where the value is undefined or null, you can, for example, use the typeof operator.
if (typeof toto !== 'undefined') {
// Now we know that toto is
// properly defined and we can proceed.
}
See also
Category: Artificial Intelligence
Today the internet is part of everyone's life: when people want to know about something, they go online and use a search engine to look it up. Among the many search engines available, the most widely used worldwide is Google. When you enter a query in Google's search box, it returns relevant results according to the ranking of webpages. The results load within seconds, and while the action looks very simple, complex processing takes place behind it. Google's search works on a search algorithm known as Hummingbird. Hummingbird is Google's overall search algorithm and encompasses various parts, of which RankBrain is one of the newest.
Google RankBrain is the core component that uses machine learning to determine the most appropriate results for search queries. It is the only live artificial intelligence that Google uses in its search results. Before RankBrain, Google used a more basic algorithm to identify the results for a given query. With RankBrain, an entered query goes through an interpretation model that applies factors such as the user's location, personalization, and the words of the query to discover what the user is really looking for. Using this, Google can deliver more relevant results for the query.
RankBrain's machine-learning aspect is what makes it look different from other updates. To "teach" the RankBrain algorithm to produce useful results, Google first "feeds" it data from a variety of sources. The algorithm then takes it from there, calculating and teaching itself over time to match the variety of signals with the variety of results and to order the search rankings according to those calculations. That is how the RankBrain algorithm functions.
An overview of the RankBrain algorithm
• Google RankBrain is a machine-learning (AI) algorithm that Google uses to sort its search results.
• The algorithm helps Google process and understand search queries.
• It went online in April 2015 and was introduced to the world in October 2015.
• RankBrain is part of the wider Hummingbird algorithm; it does not replace Hummingbird, and it does not operate independently.
• Before RankBrain, all of Google's algorithms were hand-coded, which required engineers to work on them directly; now RankBrain also does this tuning in the background.
• RankBrain modifies the algorithm on its own.
• It is the third most important signal contributing to the result of a search query.
• RankBrain is not a natural language processor (NLP); it relies on a database of entities and vectors of known connections between similar queries in order to return its best guess.
• When a query is not understood, RankBrain infers a meaning, yet the results returned are still based on the data.
Why the RankBrain algorithm is more effective
RankBrain uses artificial intelligence to embed huge amounts of written language into mathematical entities, known as vectors, that a computer can understand. When RankBrain sees a word or phrase that is not familiar, it looks for similar meanings and filters the results accordingly. This makes the algorithm much more effective at handling search queries it has never seen before.
How the RankBrain algorithm works
RankBrain uses so-called entities: a series of databases of the people, places, and things that Google knows about, which seed the algorithm and its machine-learning processes. The words of a query are then broken down into word vectors, using a numerical method to assign those terms an "address", where similar words share similar "addresses." When Google processes an unfamiliar query, RankBrain uses the numerically mapped connections to assume a best fit for the query and returns multiple results related to it. Over time, Google filters the results according to user interaction and machine learning, improving the match between the user's search intent and the search results Google returns. RankBrain's analysis does not simply throw away words such as "and" or "the", as search engines used to do. It is mainly meant to help Google understand queries better and deliver the best results, especially for negation-oriented queries, such as queries using words like "without" or "not."
• RankBrain converts the textual content of search queries into word vectors, known as 'distributed representations'.
• In mathematical space, each distributed representation gets a unique coordinate address; vectors that are close to each other in this space have linguistic similarity (see the small sketch after this list).
• Words are assigned a mathematical address, retrieved based on the query, and located at the best-fit vector; these word "interpretations" are used to return results.
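To make the "coordinate address" idea concrete, here is a toy sketch (my own illustration, not Google's implementation) of how closeness between vectors can stand in for similarity of meaning:
// Toy example: queries as vectors; a higher cosine similarity means closer meaning.
function cosineSimilarity(a, b) {
  let dot = 0, na = 0, nb = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    na += a[i] * a[i];
    nb += b[i] * b[i];
  }
  return dot / (Math.sqrt(na) * Math.sqrt(nb));
}
// Hypothetical 3-dimensional "addresses" for three queries:
const cheapHotel = [0.9, 0.1, 0.3];
const budgetLodging = [0.85, 0.15, 0.35];
const carRepair = [0.1, 0.9, 0.2];
console.log(cosineSimilarity(cheapHotel, budgetLodging)); // close to 1 (similar meaning)
console.log(cosineSimilarity(cheapHotel, carRepair));     // much lower (unrelated)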
RankBrain as an important ranking signal
RankBrain is known as Google's third most significant ranking signal for determining which results appear for a search query. Among the full mix of hundreds of signals, RankBrain is one that feeds into the algorithm deciding what results appear on the Google search page and where they are ranked. This prompted much speculation in the search industry that the system directly assesses the quality of content and websites and ranks them on that basis. Ranking signals influence the results of a given query by including personal or contextual factors, such as the user's search history and location, to produce relevant results. Because RankBrain is a ranking signal, it affects which content is selected and then ranked by the wider Hummingbird algorithm.
• The key point about RankBrain as a ranking signal is that it filters results for a query while taking certain factors into account.
• Much of what is said about RankBrain is still speculation at this point in time.
• It is a method of processing search queries in a way that produces a "best fit" for queries that are unknown to Google.
• RankBrain brings back the most relevant results because it is a processing algorithm that uses machine learning to find the best match for a query when Google is not sure what that query "means".
• RankBrain has little influence when Google is sure about a query's meaning; it becomes necessary when Google is not sure, since only RankBrain can help in that case.
• Initially, RankBrain was present in about 15% of Google queries; it has since been expanded and is involved in almost all queries entered into Google.
• RankBrain affects queries in all languages and all countries. It plays a major role when the query is unique and unknown.
Two ways to optimize for RankBrain
RankBrain, being the third most important ranking factor, plays a vital role in how search engines filter content. This influences search engine optimization, which matters to businesses and marketers, and there are two ways to optimize for it: research the intent behind every keyword and …
AI in search engines
Today the internet is a necessity for people in their day-to-day work. At the same time, almost all business sectors use the internet, and some businesses run on the internet alone. Search engines are essential for all of these businesses and invaluable for people in general. Many people wonder how, when they search for something on Google or any other platform, a list of suggestions related to the typed keywords pops up on the screen. Some may think it is magic, but in fact the search engine follows a structured path across the internet to find all content relevant to the keyword you typed. How is it possible for search engines to find everything related to a keyword? A search engine is essentially a bundle of algorithms built from the ideas of many brilliant people, and the reason this bundle of algorithms can do all of these functions and produce results is that it uses artificial intelligence to search for content related to the keywords people enter.
How does a search engine optimize the results for people?
All search engines use AI (artificial intelligence) mainly to perform an aggressive web search on the keywords people specify. The AI is not only used to search content based on the keywords; several internal sub-processes also run in the algorithm within a fraction of a second, and the retrieved content is separated into several categories, such as
• Quality
• Priority
• Keyword analysis percent
Quality: The search engine algorithm is continually updated, so when people search on a keyword, the web search is done using artificial intelligence. Once the content has been gathered, it is checked for quality and displayed with the highest-quality pages first.
This was a big deal in earlier days: many SEO specialists tried to beat the system with black-hat SEO techniques that made their pages appear as top results whenever the desired keyword was searched. Ranking by quality is preferred because the pages at the top of the results really are high quality, which is what most people want.
Priority: Once the retrieved content has been segregated by quality, it is also displayed by priority, from high-priority content down to low-priority, spam-like content. This lets people get all the keyword-related content as results.
Keyword analysis percentage: In some cases a page is displayed in the results even though the keyword does not match exactly. This happens because, during keyword analysis, the content of that page matches the meaning of the search or one of the keywords, so the page is included in the results.
In this way, search engines use artificial intelligence to a great extent to deliver results, and they also keep improving based on recent algorithm updates.
The impact of using artificial intelligence in search engines:
Initially, AI was used by search engines to improve keyword-based search, but in recent years artificial intelligence has created its own revolution in search engines. The impact of this revolution is significant and has led to several benefits for people; to make this clearer, some of the factors are listed below.
• Keyword target
• Predict human
• Understand the need
• Real time data
Keyword targeting:
Technology keeps evolving, so there are frequent advances in artificial intelligence as well. Previously people searched with text and keywords only; today there are more ways to search, such as Google's voice search. This helps search engines learn how people actually search the web.
Predicting the user:
Once a search engine knows how people search the web, it starts to predict what a person's next move will be and tries to deliver it in advance. The main goal is to hold the user's attention by providing what they need. For example, if a user searches for a movie or a game, the first result is often Wikipedia, which holds the full background on the story, characters, platform, gameplay, and developer; all of this keeps the user on the page longer, which helps the search engine keep refining itself.
Understanding the need:
Having predicted the user's actions, the search engine starts to understand the user's need and delivers results based on it. For instance, when a user searched for something to buy but did not complete the purchase, the search engine will also surface results based on that previous search; this rekindles the user's interest and nudges them toward the earlier decision, helping them complete the purchase and fulfill their need.
Real-time data:
As search engines understand user needs and display content accordingly, purchases also increase, which in turn increases ROI in real-time data.
In this way, the use of artificial intelligence in search engines has changed how they analyze content and interact with users to deliver better search results. As the algorithms keep being updated, search engines also take natural language processing and keyword-related images into account. Because artificial intelligence keeps improving, black-hat SEO techniques no longer pay off in search engines. For a search engine to meet all of a user's needs, it must keep improving its algorithms; only that can satisfy the user's search and give better results.
Onboarding
In the context of identity and access management, onboarding refers to the creation of a new identity or a new account, associated with an existing identity. (It is possible to create a new account and not associate it with any identity, but this is generally considered a poor practice).
The term onboarding derives from the process of passengers boarding a vehicle such as an aircraft or ship. This term is often preferable to terms such as "hiring" since it encompasses different types of users -- employees, contractors, vendors, students, customers, partners, etc.
Hitachi ID Identity Manager can automate various onboarding processes, including those driven by a system of record or a request portal.
Point clustering
Description
This sample is outdated. Clustering has already been implemented in the JS API via the setFeatureReduction() method, so creating a custom layer is unnecessary. Please see the Basic clustering sample.
Point clustering has been implemented in this sample with a custom layer named extras.ClusterLayer. This custom layer subclasses esri.layers.GraphicsLayer. Symbology for clusters is managed using renderers. A class breaks renderer is recommended. For constructor options, public properties as well as layer methods, please refer to the comments in the ClusterLayer.js file.
If using IIS as your web server, please configure your IIS instance to serve JSON.
Code
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="initial-scale=1, maximum-scale=1,user-scalable=no">
<title>Cluster</title>
<link rel="stylesheet" href="https://js.arcgis.com/3.31/dijit/themes/tundra/tundra.css">
<link rel="stylesheet" href="https://js.arcgis.com/3.31/esri/css/esri.css">
<style>
html, body { height: 100%; width: 100%; margin: 0; padding: 0; }
#map{ margin: 0; padding: 0; }
/* center the image in the popup */
.esriViewPopup .gallery { margin: 0 auto !important; }
</style>
<script>
// helpful for understanding dojoConfig.packages vs. dojoConfig.paths:
// http://www.sitepen.com/blog/2013/06/20/dojo-faq-what-is-the-difference-packages-vs-paths-vs-aliases/
var dojoConfig = {
paths: {
extras: location.pathname.replace(/\/[^/]+$/, "") + "/extras"
}
};
</script>
<script src="https://js.arcgis.com/3.31/"></script>
<script>
var map;
require([
"dojo/parser",
"dojo/ready",
"dojo/_base/array",
"esri/Color",
"dojo/dom-style",
"dojo/query",
"esri/map",
"esri/request",
"esri/graphic",
"esri/geometry/Extent",
"esri/symbols/SimpleMarkerSymbol",
"esri/symbols/SimpleFillSymbol",
"esri/symbols/PictureMarkerSymbol",
"esri/renderers/ClassBreaksRenderer",
"esri/layers/GraphicsLayer",
"esri/SpatialReference",
"esri/dijit/PopupTemplate",
"esri/geometry/Point",
"esri/geometry/webMercatorUtils",
"extras/ClusterLayer",
"dijit/layout/BorderContainer",
"dijit/layout/ContentPane",
"dojo/domReady!"
], function(
parser, ready, arrayUtils, Color, domStyle, query,
Map, esriRequest, Graphic, Extent,
SimpleMarkerSymbol, SimpleFillSymbol, PictureMarkerSymbol, ClassBreaksRenderer,
GraphicsLayer, SpatialReference, PopupTemplate, Point, webMercatorUtils,
ClusterLayer
) {
ready(function() {
parser.parse();
var clusterLayer;
var popupOptions = {
"markerSymbol": new SimpleMarkerSymbol("circle", 20, null, new Color([0, 0, 0, 0.25])),
"marginLeft": "20",
"marginTop": "20"
};
map = new Map("map", {
basemap: "oceans",
center: [-117.789, 33.543],
zoom: 13
});
map.on("load", function() {
// hide the popup's ZoomTo link as it doesn't make sense for cluster features
domStyle.set(query("a.action.zoomTo")[0], "display", "none");
// get the latest 1000 photos from instagram/laguna beach
var photos = esriRequest({
url: "data/1000-photos.json",
handleAs: "json"
});
photos.then(addClusters, error);
});
function addClusters(resp) {
var photoInfo = {};
var wgs = new SpatialReference({
"wkid": 4326
});
photoInfo.data = arrayUtils.map(resp, function(p) {
var latlng = new Point(parseFloat(p.lng), parseFloat(p.lat), wgs);
var webMercator = webMercatorUtils.geographicToWebMercator(latlng);
var attributes = {
"Caption": p.caption,
"Name": p.full_name,
"Image": p.image,
"Link": p.link
};
return {
"x": webMercator.x,
"y": webMercator.y,
"attributes": attributes
};
});
// popupTemplate to work with attributes specific to this dataset
var popupTemplate = new PopupTemplate({
"title": "",
"fieldInfos": [{
"fieldName": "Caption",
visible: true
}, {
"fieldName": "Name",
"label": "By",
visible: true
}, {
"fieldName": "Link",
"label": "On Instagram",
visible: true
}]
});
// cluster layer that uses OpenLayers style clustering
clusterLayer = new ClusterLayer({
"data": photoInfo.data,
"distance": 100,
"id": "clusters",
"labelColor": "#fff",
"labelOffset": 10,
"resolution": map.extent.getWidth() / map.width,
"singleColor": "#888",
"singleTemplate": popupTemplate
});
var defaultSym = new SimpleMarkerSymbol().setSize(4);
var renderer = new ClassBreaksRenderer(defaultSym, "clusterCount");
var picBaseUrl = "https://static.arcgis.com/images/Symbols/Shapes/";
var blue = new PictureMarkerSymbol(picBaseUrl + "BluePin1LargeB.png", 32, 32).setOffset(0, 15);
var green = new PictureMarkerSymbol(picBaseUrl + "GreenPin1LargeB.png", 64, 64).setOffset(0, 15);
var red = new PictureMarkerSymbol(picBaseUrl + "RedPin1LargeB.png", 72, 72).setOffset(0, 15);
renderer.addBreak(0, 2, blue);
renderer.addBreak(2, 200, green);
renderer.addBreak(200, 1001, red);
clusterLayer.setRenderer(renderer);
map.addLayer(clusterLayer);
// close the info window when the map is clicked
map.on("click", cleanUp);
// close the info window when esc is pressed
map.on("key-down", function(e) {
if (e.keyCode === 27) {
cleanUp();
}
});
}
function cleanUp() {
map.infoWindow.hide();
clusterLayer.clearSingles();
}
function error(err) {
console.log("something failed: ", err);
}
// show cluster extents...
// never called directly but useful from the console
window.showExtents = function() {
var extents = map.getLayer("clusterExtents");
if ( extents ) {
map.removeLayer(extents);
}
extents = new GraphicsLayer({ id: "clusterExtents" });
var sym = new SimpleFillSymbol().setColor(new Color([205, 193, 197, 0.5]));
arrayUtils.forEach(clusterLayer._clusters, function(c, idx) {
var e = c.attributes.extent;
extents.add(new Graphic(new Extent(e[0], e[1], e[2], e[3], map.spatialReference), sym));
}, this);
map.addLayer(extents, 0);
};
});
});
</script>
</head>
<body>
<div data-dojo-type="dijit/layout/BorderContainer"
data-dojo-props="design:'headline',gutters:false"
style="width: 100%; height: 100%; margin: 0;">
<div id="map"
data-dojo-type="dijit/layout/ContentPane"
data-dojo-props="region:'center'">
</div>
</div>
</body>
</html>
This is my patch patch.1n for perl5.001.
[perl5.git] / ext / Socket / Socket.xs
a0d0e21e
LW
1#include "EXTERN.h"
2#include "perl.h"
3#include "XSUB.h"
4
8e07c86e
AD
5#ifndef VMS
6# ifdef I_SYS_TYPES
7# include <sys/types.h>
8# endif
a0d0e21e 9#include <sys/socket.h>
8e07c86e
AD
10# ifdef I_NETINET_IN
11# include <netinet/in.h>
12# endif
13#include <netdb.h>
14#include <arpa/inet.h>
15#else
16#include "sockadapt.h"
17#endif
a0d0e21e
LW
18
19#ifndef AF_NBS
20#undef PF_NBS
21#endif
22
23#ifndef AF_X25
24#undef PF_X25
25#endif
26
8e07c86e
AD
27#ifndef INADDR_NONE
28#define INADDR_NONE 0xffffffff
29#endif /* INADDR_NONE */
30#ifndef INADDR_LOOPBACK
31#define INADDR_LOOPBACK 0x7F000001
32#endif /* INADDR_LOOPBACK */
33
34
a0d0e21e
LW
35static int
36not_here(s)
37char *s;
38{
39 croak("Socket::%s not implemented on this architecture", s);
40 return -1;
41}
42
43static double
44constant(name, arg)
45char *name;
46int arg;
47{
48 errno = 0;
49 switch (*name) {
50 case 'A':
51 if (strEQ(name, "AF_802"))
52#ifdef AF_802
53 return AF_802;
54#else
55 goto not_there;
56#endif
57 if (strEQ(name, "AF_APPLETALK"))
58#ifdef AF_APPLETALK
59 return AF_APPLETALK;
60#else
61 goto not_there;
62#endif
63 if (strEQ(name, "AF_CCITT"))
64#ifdef AF_CCITT
65 return AF_CCITT;
66#else
67 goto not_there;
68#endif
69 if (strEQ(name, "AF_CHAOS"))
70#ifdef AF_CHAOS
71 return AF_CHAOS;
72#else
73 goto not_there;
74#endif
75 if (strEQ(name, "AF_DATAKIT"))
76#ifdef AF_DATAKIT
77 return AF_DATAKIT;
78#else
79 goto not_there;
80#endif
81 if (strEQ(name, "AF_DECnet"))
82#ifdef AF_DECnet
83 return AF_DECnet;
84#else
85 goto not_there;
86#endif
87 if (strEQ(name, "AF_DLI"))
88#ifdef AF_DLI
89 return AF_DLI;
90#else
91 goto not_there;
92#endif
93 if (strEQ(name, "AF_ECMA"))
94#ifdef AF_ECMA
95 return AF_ECMA;
96#else
97 goto not_there;
98#endif
99 if (strEQ(name, "AF_GOSIP"))
100#ifdef AF_GOSIP
101 return AF_GOSIP;
102#else
103 goto not_there;
104#endif
105 if (strEQ(name, "AF_HYLINK"))
106#ifdef AF_HYLINK
107 return AF_HYLINK;
108#else
109 goto not_there;
110#endif
111 if (strEQ(name, "AF_IMPLINK"))
112#ifdef AF_IMPLINK
113 return AF_IMPLINK;
114#else
115 goto not_there;
116#endif
117 if (strEQ(name, "AF_INET"))
118#ifdef AF_INET
119 return AF_INET;
120#else
121 goto not_there;
122#endif
123 if (strEQ(name, "AF_LAT"))
124#ifdef AF_LAT
125 return AF_LAT;
126#else
127 goto not_there;
128#endif
129 if (strEQ(name, "AF_MAX"))
130#ifdef AF_MAX
131 return AF_MAX;
132#else
133 goto not_there;
134#endif
135 if (strEQ(name, "AF_NBS"))
136#ifdef AF_NBS
137 return AF_NBS;
138#else
139 goto not_there;
140#endif
141 if (strEQ(name, "AF_NIT"))
142#ifdef AF_NIT
143 return AF_NIT;
144#else
145 goto not_there;
146#endif
147 if (strEQ(name, "AF_NS"))
148#ifdef AF_NS
149 return AF_NS;
150#else
151 goto not_there;
152#endif
153 if (strEQ(name, "AF_OSI"))
154#ifdef AF_OSI
155 return AF_OSI;
156#else
157 goto not_there;
158#endif
159 if (strEQ(name, "AF_OSINET"))
160#ifdef AF_OSINET
161 return AF_OSINET;
162#else
163 goto not_there;
164#endif
165 if (strEQ(name, "AF_PUP"))
166#ifdef AF_PUP
167 return AF_PUP;
168#else
169 goto not_there;
170#endif
171 if (strEQ(name, "AF_SNA"))
172#ifdef AF_SNA
173 return AF_SNA;
174#else
175 goto not_there;
176#endif
177 if (strEQ(name, "AF_UNIX"))
178#ifdef AF_UNIX
179 return AF_UNIX;
180#else
181 goto not_there;
182#endif
183 if (strEQ(name, "AF_UNSPEC"))
184#ifdef AF_UNSPEC
185 return AF_UNSPEC;
186#else
187 goto not_there;
188#endif
189 if (strEQ(name, "AF_X25"))
190#ifdef AF_X25
191 return AF_X25;
192#else
193 goto not_there;
194#endif
195 break;
196 case 'B':
197 break;
198 case 'C':
199 break;
200 case 'D':
201 break;
202 case 'E':
203 break;
204 case 'F':
205 break;
206 case 'G':
207 break;
208 case 'H':
209 break;
210 case 'I':
211 break;
212 case 'J':
213 break;
214 case 'K':
215 break;
216 case 'L':
217 break;
218 case 'M':
219 if (strEQ(name, "MSG_DONTROUTE"))
220#ifdef MSG_DONTROUTE
221 return MSG_DONTROUTE;
222#else
223 goto not_there;
224#endif
225 if (strEQ(name, "MSG_MAXIOVLEN"))
226#ifdef MSG_MAXIOVLEN
227 return MSG_MAXIOVLEN;
228#else
229 goto not_there;
230#endif
231 if (strEQ(name, "MSG_OOB"))
232#ifdef MSG_OOB
233 return MSG_OOB;
234#else
235 goto not_there;
236#endif
237 if (strEQ(name, "MSG_PEEK"))
238#ifdef MSG_PEEK
239 return MSG_PEEK;
240#else
241 goto not_there;
242#endif
243 break;
244 case 'N':
245 break;
246 case 'O':
247 break;
248 case 'P':
249 if (strEQ(name, "PF_802"))
250#ifdef PF_802
251 return PF_802;
252#else
253 goto not_there;
254#endif
255 if (strEQ(name, "PF_APPLETALK"))
256#ifdef PF_APPLETALK
257 return PF_APPLETALK;
258#else
259 goto not_there;
260#endif
261 if (strEQ(name, "PF_CCITT"))
262#ifdef PF_CCITT
263 return PF_CCITT;
264#else
265 goto not_there;
266#endif
267 if (strEQ(name, "PF_CHAOS"))
268#ifdef PF_CHAOS
269 return PF_CHAOS;
270#else
271 goto not_there;
272#endif
273 if (strEQ(name, "PF_DATAKIT"))
274#ifdef PF_DATAKIT
275 return PF_DATAKIT;
276#else
277 goto not_there;
278#endif
279 if (strEQ(name, "PF_DECnet"))
280#ifdef PF_DECnet
281 return PF_DECnet;
282#else
283 goto not_there;
284#endif
285 if (strEQ(name, "PF_DLI"))
286#ifdef PF_DLI
287 return PF_DLI;
288#else
289 goto not_there;
290#endif
291 if (strEQ(name, "PF_ECMA"))
292#ifdef PF_ECMA
293 return PF_ECMA;
294#else
295 goto not_there;
296#endif
297 if (strEQ(name, "PF_GOSIP"))
298#ifdef PF_GOSIP
299 return PF_GOSIP;
300#else
301 goto not_there;
302#endif
303 if (strEQ(name, "PF_HYLINK"))
304#ifdef PF_HYLINK
305 return PF_HYLINK;
306#else
307 goto not_there;
308#endif
309 if (strEQ(name, "PF_IMPLINK"))
310#ifdef PF_IMPLINK
311 return PF_IMPLINK;
312#else
313 goto not_there;
314#endif
315 if (strEQ(name, "PF_INET"))
316#ifdef PF_INET
317 return PF_INET;
318#else
319 goto not_there;
320#endif
321 if (strEQ(name, "PF_LAT"))
322#ifdef PF_LAT
323 return PF_LAT;
324#else
325 goto not_there;
326#endif
327 if (strEQ(name, "PF_MAX"))
328#ifdef PF_MAX
329 return PF_MAX;
330#else
331 goto not_there;
332#endif
333 if (strEQ(name, "PF_NBS"))
334#ifdef PF_NBS
335 return PF_NBS;
336#else
337 goto not_there;
338#endif
339 if (strEQ(name, "PF_NIT"))
340#ifdef PF_NIT
341 return PF_NIT;
342#else
343 goto not_there;
344#endif
345 if (strEQ(name, "PF_NS"))
346#ifdef PF_NS
347 return PF_NS;
348#else
349 goto not_there;
350#endif
351 if (strEQ(name, "PF_OSI"))
352#ifdef PF_OSI
353 return PF_OSI;
354#else
355 goto not_there;
356#endif
357 if (strEQ(name, "PF_OSINET"))
358#ifdef PF_OSINET
359 return PF_OSINET;
360#else
361 goto not_there;
362#endif
363 if (strEQ(name, "PF_PUP"))
364#ifdef PF_PUP
365 return PF_PUP;
366#else
367 goto not_there;
368#endif
369 if (strEQ(name, "PF_SNA"))
370#ifdef PF_SNA
371 return PF_SNA;
372#else
373 goto not_there;
374#endif
375 if (strEQ(name, "PF_UNIX"))
376#ifdef PF_UNIX
377 return PF_UNIX;
378#else
379 goto not_there;
380#endif
381 if (strEQ(name, "PF_UNSPEC"))
382#ifdef PF_UNSPEC
383 return PF_UNSPEC;
384#else
385 goto not_there;
386#endif
387 if (strEQ(name, "PF_X25"))
388#ifdef PF_X25
389 return PF_X25;
390#else
391 goto not_there;
392#endif
393 break;
394 case 'Q':
395 break;
396 case 'R':
397 break;
398 case 'S':
399 if (strEQ(name, "SOCK_DGRAM"))
400#ifdef SOCK_DGRAM
401 return SOCK_DGRAM;
402#else
403 goto not_there;
404#endif
405 if (strEQ(name, "SOCK_RAW"))
406#ifdef SOCK_RAW
407 return SOCK_RAW;
408#else
409 goto not_there;
410#endif
411 if (strEQ(name, "SOCK_RDM"))
412#ifdef SOCK_RDM
413 return SOCK_RDM;
414#else
415 goto not_there;
416#endif
417 if (strEQ(name, "SOCK_SEQPACKET"))
418#ifdef SOCK_SEQPACKET
419 return SOCK_SEQPACKET;
420#else
421 goto not_there;
422#endif
423 if (strEQ(name, "SOCK_STREAM"))
424#ifdef SOCK_STREAM
425 return SOCK_STREAM;
426#else
427 goto not_there;
428#endif
429 if (strEQ(name, "SOL_SOCKET"))
430#ifdef SOL_SOCKET
431 return SOL_SOCKET;
432#else
433 goto not_there;
434#endif
435 if (strEQ(name, "SOMAXCONN"))
436#ifdef SOMAXCONN
437 return SOMAXCONN;
438#else
439 goto not_there;
440#endif
441 if (strEQ(name, "SO_ACCEPTCONN"))
442#ifdef SO_ACCEPTCONN
443 return SO_ACCEPTCONN;
444#else
445 goto not_there;
446#endif
447 if (strEQ(name, "SO_BROADCAST"))
448#ifdef SO_BROADCAST
449 return SO_BROADCAST;
450#else
451 goto not_there;
452#endif
453 if (strEQ(name, "SO_DEBUG"))
454#ifdef SO_DEBUG
455 return SO_DEBUG;
456#else
457 goto not_there;
458#endif
459 if (strEQ(name, "SO_DONTLINGER"))
460#ifdef SO_DONTLINGER
461 return SO_DONTLINGER;
462#else
463 goto not_there;
464#endif
465 if (strEQ(name, "SO_DONTROUTE"))
466#ifdef SO_DONTROUTE
467 return SO_DONTROUTE;
468#else
469 goto not_there;
470#endif
471 if (strEQ(name, "SO_ERROR"))
472#ifdef SO_ERROR
473 return SO_ERROR;
474#else
475 goto not_there;
476#endif
477 if (strEQ(name, "SO_KEEPALIVE"))
478#ifdef SO_KEEPALIVE
479 return SO_KEEPALIVE;
480#else
481 goto not_there;
482#endif
483 if (strEQ(name, "SO_LINGER"))
484#ifdef SO_LINGER
485 return SO_LINGER;
486#else
487 goto not_there;
488#endif
489 if (strEQ(name, "SO_OOBINLINE"))
490#ifdef SO_OOBINLINE
491 return SO_OOBINLINE;
492#else
493 goto not_there;
494#endif
495 if (strEQ(name, "SO_RCVBUF"))
496#ifdef SO_RCVBUF
497 return SO_RCVBUF;
498#else
499 goto not_there;
500#endif
501 if (strEQ(name, "SO_RCVLOWAT"))
502#ifdef SO_RCVLOWAT
503 return SO_RCVLOWAT;
504#else
505 goto not_there;
506#endif
507 if (strEQ(name, "SO_RCVTIMEO"))
508#ifdef SO_RCVTIMEO
509 return SO_RCVTIMEO;
510#else
511 goto not_there;
512#endif
513 if (strEQ(name, "SO_REUSEADDR"))
514#ifdef SO_REUSEADDR
515 return SO_REUSEADDR;
516#else
517 goto not_there;
518#endif
519 if (strEQ(name, "SO_REUSEPORT"))
520#ifdef SO_REUSEPORT
521 return SO_REUSEPORT;
522#else
523 goto not_there;
524#endif
525 if (strEQ(name, "SO_SNDBUF"))
526#ifdef SO_SNDBUF
527 return SO_SNDBUF;
528#else
529 goto not_there;
530#endif
531 if (strEQ(name, "SO_SNDLOWAT"))
532#ifdef SO_SNDLOWAT
533 return SO_SNDLOWAT;
534#else
535 goto not_there;
536#endif
537 if (strEQ(name, "SO_SNDTIMEO"))
538#ifdef SO_SNDTIMEO
539 return SO_SNDTIMEO;
540#else
541 goto not_there;
542#endif
543 if (strEQ(name, "SO_TYPE"))
544#ifdef SO_TYPE
545 return SO_TYPE;
546#else
547 goto not_there;
548#endif
549 if (strEQ(name, "SO_USELOOPBACK"))
550#ifdef SO_USELOOPBACK
551 return SO_USELOOPBACK;
552#else
553 goto not_there;
554#endif
555 break;
556 case 'T':
557 break;
558 case 'U':
559 break;
560 case 'V':
561 break;
562 case 'W':
563 break;
564 case 'X':
565 break;
566 case 'Y':
567 break;
568 case 'Z':
569 break;
570 }
571 errno = EINVAL;
572 return 0;
573
574not_there:
575 errno = ENOENT;
576 return 0;
577}
578
8e07c86e 579
a0d0e21e
LW
580MODULE = Socket PACKAGE = Socket
581
582double
583constant(name,arg)
584 char * name
585 int arg
586
8e07c86e
AD
587
588void
589inet_aton(host)
590 char * host
591 CODE:
592 {
593 struct in_addr ip_address;
594 struct hostent * phe;
595
596 if (phe = gethostbyname(host)) {
597 Copy( phe->h_addr, &ip_address, phe->h_length, char );
598 } else {
599 ip_address.s_addr = inet_addr(host);
600 }
601
602 ST(0) = sv_newmortal();
603 if(ip_address.s_addr != INADDR_NONE) {
604 sv_setpvn( ST(0), (char *)&ip_address, sizeof ip_address );
605 }
606 }
607
608void
609inet_ntoa(ip_address_sv)
610 SV * ip_address_sv
611 CODE:
612 {
613 STRLEN addrlen;
614 struct in_addr addr;
615 char * addr_str;
616 char * ip_address = SvPV(ip_address_sv,addrlen);
617 if (addrlen != sizeof(addr)) {
618 croak("Bad arg length for %s, length is %d, should be %d",
619 "Socket::inet_ntoa",
620 addrlen, sizeof(addr));
621 }
622
623 Copy( ip_address, &addr, sizeof addr, char );
624 addr_str = inet_ntoa(addr);
625
626 ST(0) = sv_2mortal(newSVpv(addr_str, strlen(addr_str)));
627 }
628
629void
630pack_sockaddr_in(family,port,ip_address)
631 short family
632 short port
633 char * ip_address
634 CODE:
635 {
636 struct sockaddr_in sin;
637
638 Zero( &sin, sizeof sin, char );
639 sin.sin_family = family;
640 sin.sin_port = htons(port);
641 Copy( ip_address, &sin.sin_addr, sizeof sin.sin_addr, char );
642
643 ST(0) = sv_2mortal(newSVpv((char *)&sin, sizeof sin));
644 }
645
646void
647unpack_sockaddr_in(sin_sv)
648 SV * sin_sv
649 PPCODE:
650 {
651 STRLEN sockaddrlen;
652 struct sockaddr_in addr;
653 short family;
654 short port;
655 struct in_addr ip_address;
656 char * sin = SvPV(sin_sv,sockaddrlen);
657 if (sockaddrlen != sizeof(addr)) {
658 croak("Bad arg length for %s, length is %d, should be %d",
659 "Socket::unpack_sockaddr_in",
660 sockaddrlen, sizeof(addr));
661 }
662
663 Copy( sin, &addr,sizeof addr, char );
664 family = addr.sin_family;
665 port = ntohs(addr.sin_port);
666 ip_address = addr.sin_addr;
667
668 EXTEND(sp, 3);
669 PUSHs(sv_2mortal(newSViv(family)));
670 PUSHs(sv_2mortal(newSViv(port)));
671 PUSHs(sv_2mortal(newSVpv((char *)&ip_address,sizeof ip_address)));
672 }
673
674void
675INADDR_ANY()
676 CODE:
677 {
678 struct in_addr ip_address;
679 ip_address.s_addr = htonl(INADDR_ANY);
680 ST(0) = sv_2mortal(newSVpv((char *)&ip_address,sizeof ip_address ));
681 }
682
683void
684INADDR_LOOPBACK()
685 CODE:
686 {
687 struct in_addr ip_address;
688 ip_address.s_addr = htonl(INADDR_LOOPBACK);
689 ST(0) = sv_2mortal(newSVpv((char *)&ip_address,sizeof ip_address));
690 }
691
692void
693INADDR_NONE()
694 CODE:
695 {
696 struct in_addr ip_address;
697 ip_address.s_addr = htonl(INADDR_NONE);
698 ST(0) = sv_2mortal(newSVpv((char *)&ip_address,sizeof ip_address));
699 }
Cloudera Altus Director provides the simplest way to deploy and manage Cloudera Enterprise in the cloud. It enables customers to unlock the benefits of enterprise-grade Hadoop while leveraging the flexibility, scalability, and affordability of the cloud. It integrates seamlessly with Amazon Web Services (AWS), Google Cloud Platform (GCP), and Microsoft Azure, and provides support to build custom plugins for other public or private cloud environments.
Motivation
While automating the provisioning of a cluster in the cloud using Altus Director, customers often ask how to standardize hostnames in line with internal naming conventions. Each cloud provider has a different approach to naming instances. For example, a typical Amazon EC2 private DNS name looks something like this: ip-12-34-56-78.us-west-2.compute.internal, where the name consists of the internal domain, the service, the region, and a form of the private IPv4 address. These names cannot be registered with an enterprise Active Directory due to the limitations discussed here, in Microsoft's documentation. Furthermore, organizations often standardize hostnames in their Active Directory setup to help them classify servers based on type, purpose, location, or other factors.
In this blog post, we will discuss an approach that addresses the above challenge by using cloud instance metadata and an external utility to generate unique custom hostname(s) dynamically, which will comply with organizational standards. This approach also supports the dynamic sizing of the cluster, which greatly increases the adoption of cloud-based deployments.
Approach
The solution described here involves exposing a REST endpoint that is invoked by the Altus Director during the bootstrapping phase of each instance with a standardized hostname prefix (Example: edh-master). The endpoint generates a series of monotonically increasing numbers which is attached to the hostname-prefix (Example: edh-master-01) to create a unique custom hostname that is registered with the DNS. The counter used during the generation of the numbers is persisted and updated in a backend data store by the utility that serves the REST endpoint.
This solution includes three significant steps:
1. Using Altus Director’s cluster configuration file to define instance metadata
2. Developing a utility to generate unique hostnames
3. Using the instance bootstrap script functionality of Altus Director to invoke the utility service and configure the hostname
custom hostname solution diagram
Define Instance Metadata
One of the best practices while deploying an EDH cluster is to assign hostnames based on the roles deployed on the servers in the environment. Examples: MASTER, GATEWAY, WORKER, and so on. Consequently, the first step is to classify and tag the instances appropriately using the “NodeType” in the Altus Director cluster configuration.
MASTER {
type: r4.8xlarge
image: "${IMAGE}"
bootstrapScriptsPaths: ["${BOOTSTRAP_SCRIPT}"]
tags {
NodeType: "MASTER"
}
}
HBASE-MASTER {
type: r4.8xlarge
image: "${IMAGE}"
bootstrapScriptsPaths: ["${BOOTSTRAP_SCRIPT}"]
tags {
NodeType: "HBASE-MASTER"
}
}
GATEWAY {
type: c4.2xlarge
image: "${IMAGE}"
bootstrapScriptsPaths: ["${BOOTSTRAP_SCRIPT}"]
tags {
NodeType: "GATEWAY"
}
}
WORKER {
type: r4.8xlarge
image: "${IMAGE}"
bootstrapScriptsPaths: ["${BOOTSTRAP_SCRIPT}"]
tags {
NodeType: "WORKER"
}
}
REST Utility Design
The utility, which is implemented using Python and Flask, stores and increments the number of invocations for each hostname prefix. It persists this information in a data store and serves it via a REST endpoint. This makes it possible to determine the starting value to use for increments during subsequent calls for a given hostname prefix.
@app.route("/api/<prefix>")
def main(prefix):
    global urlMap
    key = prefix
    if prefix in urlMap:
        urlMap[prefix] = urlMap[prefix] + 1
        save_obj(urlMap, file_name)
        return '{0:03d}'.format(urlMap[prefix])
    else:
        urlMap[prefix] = 1
        save_obj(urlMap, file_name)
        return '{0:03d}'.format(urlMap[prefix])
For example, if Altus Director is bootstrapping three master instances, each of them will invoke the endpoint as http://<rest-server>/api/edh-master. In response, each of these instances will receive a unique value between 1 and 3. The returned value is then attached to the hostname prefix to form edh-master-01, edh-master-02, and edh-master-03.
If the endpoint is invoked by another instance with a different prefix based on its role in the cluster configuration, edh-worker for example, the utility will maintain a separate counter and return the corresponding value to form edh-worker-01, edh-worker-02, and so on.
This implementation uses a JSON file for storing the counter. For the prefixes stored in the example below, the next call for edh-master would be served a value of 4, edh-hmaster would receive 3, edh-worker would receive 5 and edh-edge would receive 2. During the initial cluster build, the utility will initialize the counters to 0 and increment as needed during subsequent calls.
{
"edh-master": 3,
"edh-hmaster": 2,
"edh-worker": 4,
"edh-edge": 1
}
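The post does not show the persistence helpers used by the endpoint; a minimal sketch of what save_obj and the initial load might look like is below. The helper names, the file name, and the JSON format are assumptions based on the snippets above, not the actual utility:
import json
import os

file_name = 'prefix_counters.json'   # assumed location of the JSON counter store

def save_obj(obj, path):
    # Persist the counter dictionary as JSON so counts survive restarts.
    with open(path, 'w') as f:
        json.dump(obj, f)

def load_obj(path):
    # Load existing counters, or start with an empty map on the first run.
    if os.path.exists(path):
        with open(path) as f:
            return json.load(f)
    return {}

urlMap = load_obj(file_name)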
Bootstrap Script
In Altus Director’s bootstrap script, the cloud-specific CLI tools (aws cli or gcloud cli or azure cli, or other) are installed and used to query the instance metadata. The instance metadata is used to identify the value associated with the “NodeType” tag. Based on the NodeType value the respective REST endpoint is invoked with the hostname prefix. The hostname prefix and the value returned from the REST API call are merged to form a unique hostname that is now compliant with any organizational standards.
A sample mapping of the “NodeType” instance tag and its corresponding Hostname prefix/endpoint is provided below.
Node Type Hostname Prefix REST Endpoint
MASTER edh-master /api/edh-master
HBASE-MASTER edh-hmaster /api/edh-hmaster
GATEWAY edh-gateway /api/edh-gateway
WORKER edh-worker /api/edh-worker
The code snippet below is part of a bootstrap script that was used for a deployment on AWS. AWS CLI is used to retrieve instance metadata that was defined using the cluster configuration. Refer to the AWS documentation here for more information on the metadata retrieval steps used.
## Gather Instance information
INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id)
AVAILABILITY_ZONE=$(curl http://169.254.169.254/latest/meta-data/placement/availability-zone)
AWS_DEFAULT_REGION=${AVAILABILITY_ZONE::-1}
AWS_REGION=${AVAILABILITY_ZONE::-1}
DOMAIN=cloudera.com
## Install AWS CLI
curl -s -O https://bootstrap.pypa.io/get-pip.py
sudo python get-pip.py
sudo pip install --quiet awscli
## Retrieving NodeType tag that was associated in the Director cluster conf
NODE_TYPE=$(aws ec2 describe-instances --instance-ids ${INSTANCE_ID} --region ${AWS_REGION} --output text| grep NodeType | awk -F' ' '{print $3}')
## Identify Hostname prefix based on NodeType
if [[ "${NODE_TYPE}" == "MASTER" ]]; then
PREFIX=edh-master
elif [[ "${NODE_TYPE}" == "HBASE-MASTER" ]]; then
PREFIX=edh-hmaster
elif [[ "${NODE_TYPE}" == "GATEWAY" ]]; then
PREFIX=edh-gateway
elif [[ "${NODE_TYPE}" == "WORKER" ]]; then
PREFIX=edh-worker
else
PREFIX=edh-other
fi
## Base URL of the hostname counter service (the /api/<prefix> route is appended below)
REST_URI=http://kv-server
## Invoking REST Endpoint to obtain a unique number for the hostname
s_number=$(curl -s ${REST_URI}/api/${PREFIX})
custom_hostname=${PREFIX}-${s_number}.${DOMAIN}
echo $custom_hostname > /etc/hostname
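Note that writing to /etc/hostname alone usually only takes effect after a reboot. An extra step along these lines, not part of the original script and assuming a systemd-based image, would apply the name immediately:
## Apply the new hostname right away; fall back to the plain hostname command if hostnamectl is unavailable
hostnamectl set-hostname "${custom_hostname}" 2>/dev/null || hostname "${custom_hostname}"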
Fine Print
The code and examples provided above are for AWS, but the solution could also be implemented for Azure and Google Cloud clusters with necessary modifications. For dynamic cluster sizing, the right starting value for each hostname prefix should be updated in the data store. With unique Node Type/Hostname mappings, the solution can be extended to handle additional roles (Kafka, CDSW, and so on) as well as multiple groupings of similar roles in any high availability/multi-cluster deployment scenario.
The REST API is not a built-in feature of Altus Director, and it can be extended to run using Cloud init scripts or in any multi-threaded application.
Avinash Desireddy is a Senior Solutions Consultant at Cloudera.
Arvind Rajagopal is a Solutions Architect at Cloudera.
Last modified: May 08, 2021
Django-CKEditor: Integrate and Upload Images Easily
In this tutorial, we'll learn how to install and set up Django-CKEditor so that you can easily upload images along with your content.
So let's get started.
Install Django-CKEditor
Install via pip:
pip install django-ckeditor
In settings.py, add 'ckeditor' and 'ckeditor_uploader' to your INSTALLED_APPS.
# Application definition
INSTALLED_APPS = [
...
#libs
'ckeditor',
'ckeditor_uploader',
#Apps
...
]
Also in settings.py, set STATIC_URL, MEDIA_URL, and CKEDITOR_UPLOAD_PATH.
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
CKEDITOR_UPLOAD_PATH = "uploads/"
CKEDITOR_UPLOAD_PATH: Image upload path.
Note: in MEDIA_URL, you must add the first slash / because, without it, the images will not appear.
In urls.py, you need to add some lines:
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('ckeditor/', include('ckeditor_uploader.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
Now, let's create a model to see what the editor looks like.
In models.py:
from django.db import models
from ckeditor_uploader.fields import RichTextUploadingField
class Topic(models.Model):
title = models.CharField(max_length=100)
content = RichTextUploadingField()
def __str__(self):
return self.title
RichTextUploadingField: The editor's field.
To represent our model in the admin interface, add this line in admin.py.
from django.contrib import admin
#models
from .models import *
admin.site.register(Topic)
Django-CKEditor in Admin interface:
(Screenshot of the CKEditor widget on the Topic admin page omitted.)
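If you also want the editor outside the admin, django-ckeditor provides a form widget. A minimal sketch (the form name is an assumption, not from this tutorial):
from django import forms
from ckeditor_uploader.widgets import CKEditorUploadingWidget
from .models import Topic

class TopicForm(forms.ModelForm):
    # Render the content field with the uploading-enabled editor widget
    content = forms.CharField(widget=CKEditorUploadingWidget())

    class Meta:
        model = Topic
        fields = ['title', 'content']
Remember to render form.media in the template so the editor's JavaScript and CSS are loaded.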
How to upload images with Django-CKEditor
To upload an image, open the image dialog from the editor toolbar and use its upload tab to send the file to the server, then insert it into your content. (The original post walks through this with six screenshots, omitted here.)
Display the editor's content in a template
To display the editor's content on an HTML page, you need to do something like this:
<!DOCTYPE html>
<html>
<head>
</head>
<body>
{{obj.content|safe}}
</body>
</html>
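For context, a minimal view that could supply obj to this template; the view name, URL, and template file name are assumptions, not part of the original tutorial:
# views.py
from django.shortcuts import render, get_object_or_404
from .models import Topic

def topic_detail(request, pk):
    # Fetch one Topic and hand it to the template as "obj"
    obj = get_object_or_404(Topic, pk=pk)
    return render(request, 'topic_detail.html', {'obj': obj})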
for more information:
https://github.com/django-ckeditor/django-ckeditor
Before we go further and start building sample solutions in Rust, reaching into the parts we don't know yet, we must finish the basic side quests.
In this side quest our mission is to look at loops. For loops are broadly similar across programming languages, but there are differences, so it's worth our time to see what's at our disposal.
For the win
Rust’s for loop is different than C++ and similar to Ruby for instance. C++:
for( int a = 10; a < 20; a += 1 )
{
cout << "value of a: " << a << endl;
}
Ruby:
for i in 0..5
puts "#{i}"
end
Curiosity: Ruby has a lot of ways to iterate over things.
Now the same loop in Rust:
for x in 0..10 {
println!("{}", x);
}
The for receives the variable (x) that holds the iterator's (0..10) value on each iteration. The iterator itself comes from an expression, in this case 0..10, which means from 0 to 9, since .. is an exclusive range.
Why don't we have the more common index-based form? The idea is to offer one safer way instead of a myriad of options.
we could do this for a While
The other way of iterating in Rust is using while, which in this case is pretty much the same old while. The code below is just a sample; we could achieve the same thing using for.
let mut keep_running: bool = true;
let mut current_speed: i32 = 0;
while keep_running {
    current_speed += 2;
    if current_speed == 10 {
        keep_running = false;
    }
}
When building game engines we normally have a game loop that runs forever. In that case we use the loop keyword, but before we get to the code sample let's also add two keywords: break and continue.
They do exactly what they say: break stops a loop's execution (while, for, loop), whilst continue skips to the next iteration, ignoring whatever comes after it.
let mut is_game_running = false;
loop {
    // something happens and thus the game stops running
    if is_game_running == false { break; }
}
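A small illustration of both keywords together (not from the original post), this time in a for loop:
for x in 0..10 {
    if x % 2 == 0 { continue; } // skip even numbers
    if x > 7 { break; }         // stop the whole loop once we pass 7
    println!("{}", x);          // prints 1, 3, 5, 7
}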
Tip: for loops that should run forever we should always use the loop keyword; the Rust compiler is prepared for that situation if we stick to the rule.
Quest closing
With for, while and loop in our spell tree we can now tackle the last barrier before we start building things for real: learning Strings (piece of cake).
anthony - 11 months ago
YAML Question
Rails - How to avoid repeating same i18n attributes translations
I am building a Rails application using I18n translations.
I have two models (Blog and Event), sharing same attributes (title, content).
In my I18n YAML files, how can I avoid repeating the same keys for each model's attributes and share them instead?
Extract of my actual code:
fr:
activerecord:
attributes:
blog:
title: Titre
content: Contenu
event:
title: Titre
content: Contenu
I also tried to set the attributes as defaults, removing the wrapping model key, without any luck.
fr:
activerecord:
attributes:
title: Titre
content: Contenu
Thanks for your help !
My project:
• Rails 4.2.7.1
• Ruby 2.3.0
Answer Source
A similar question is answered here.
You can achieve this using YAML aliases:
fr:
activerecord:
attributes:
blog: &title_content
title: Titre
content: Contenu
event: *title_content
Refer to YAML aliases for more info.
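As a quick sanity check (a hypothetical Rails console session with the fr locale active, not part of the original answer), both models should now resolve the shared label:
I18n.locale = :fr
Blog.human_attribute_name(:title)   # => "Titre"
Event.human_attribute_name(:title)  # => "Titre"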
fxsvr2.exe
Process name: Logitech Multimedia Server
Application using this process: Logitech Multimedia Server
Recommended: Check your system for invalid registry entries.
What is fxsvr2.exe doing on my computer?
fxsvr2.exe is installed alongside Logitech's range of multimedia video products and should not be terminated unless it is suspected of causing problems. Non-system processes like fxsvr2.exe originate from software you installed on your system. Since most applications store data in your system's registry, it is likely that over time your registry suffers fragmentation and accumulates invalid entries, which can affect your PC's performance. It is recommended that you check your registry to identify slowdown issues.
Is fxsvr2.exe harmful?
This process is considered safe. It is unlikely to pose any harm to your system.
fxsvr2.exe is a safe process
Can I stop or remove fxsvr2.exe?
Most non-system processes that are running can be stopped because they are not involved in running your operating system. Scan your system now to identify unused processes that are using up valuable resources. fxsvr2.exe is used by 'Logitech Multimedia Server'.This is an application created by 'Logitech'. To stop fxsvr2.exe permanently uninstall 'Logitech Multimedia Server' from your system. Uninstalling applications can leave invalid registry entries, accumulating over time.
Is fxsvr2.exe CPU intensive?
This process is not considered CPU intensive. However, running too many processes on your system may affect your PC’s performance. To reduce system overload, you can use the Microsoft System Configuration Utility to manually find and disable processes that launch upon start-up.
Why is fxsvr2.exe giving me errors?
Process related issues are usually related to problems encountered by the application that runs it. A safe way to stop these errors is to uninstall the application and run a system scan to automatically identify any PC issues.
Quantitative Data
Definition of Quantitative Data
Before we begin, what even is quantitative data?
Well without complicating it, quantitative data can be defined as the value of data in the form of numbers, put into a category for the ease of making sense.
Okay, it still sounds complicated, but we use this in our everyday lives, and it honestly is much simpler than you would think.
Some everyday examples of quantitative data and how they may be categorized are:
• How much does that man weigh? (kilograms)
• How tall is that tower? (meters)
• How good is a product? (# of stars given)
• How viral is my content? (# of likes and shares)
Quantitative Data - Length
Measuring a carrot. Should we use inches? Centimeters?
See? Simple, with an everyday use. But why does it seem so complicated?
Well, the complications of quantitative data comes from its usage in making sense of things in a large quantity.
As we will see later on, some examples of large amounts of quantitative data can come from:
• surveys
• polls
• questionnaires
From these sources, you could receive 1 to 1000 to even more than 10 000 points of quantitative data, which can help you make a decision.
An easy example: “750 customers out of 1000 enjoyed their stay at hotel X.”
So we can see that 75% of customers were satisfied with hotel X, so if you had thoughts on staying at hotel X, there’s a good chance that you’re going to enjoy your stay too!
Now that we understand quantitative data a little bit more, let’s go into more depth by discussing the types of quantitative data, their collection methods, their analysis methods, what’s needed to conduct quantitative analysis, and the advantages and disadvantages of quantitative data.
Quantitative Data - Surveys
Surveys – Gathering Quantitative Data
Examples of Quantitative Data
Some common types of quantitative data are:
• Counter: Number data which represents the number of entities. I.e. number of downloads of an application represents the number of users/customers who have downloaded the application.
• Physical Measurements: The measurements of physical objects. I.e. the dimension of a standard office is 10′ x 15′ (150SF).
• Sensory calculation: Specialized mechanisms to convert information into measurable parameters for a reliable and understandable source of information. I.e. the Richter magnitude scale is a scale to measure the strength of earthquakes.
• Projection of data: Using algorithms and formulas to project future data based on current or past data (predictive analysis). I.e. Google Flu Trends (GFT) was a tool created by Google to predict the next flu outbreak based on search queries.
• Quantification of qualitative entities: Putting a number to an otherwise qualitative source of data. I.e. Asking participants to rate their happiness on a scale of 1-10, where 1 is “feeling sad” and 10 is “feeling very happy”.
It definitely feels like a lot of information to take in, but it helps to keep in mind that depending on what your needs are, you will only need to use a few of these types of quantitative data, rather than all of them.
How to Collect Quantitative Data
Quantitative data is concrete, as it is based on logic and mathematics. As such, you can usually establish conclusive results based on the data.
Two ways to collect quantitative data are through:
Surveys: From the time of pencil and paper to today’s online mediums, surveys are a tried and tested method to gather quantitative data. An effective quantitative survey would use mostly closed-ended questions as yes and no data, and data on a Likert scale can easily be understood.
The usage of surveys is key in collecting feedback. They are easily shared and allows you to gauge the perspective of your audience.
One-on-one Interviews: Another traditional method to collect quantitative data, that has moved to both telephone and online platforms.
Interviews offer you the chance to gather extensive data from participants, which surveys find difficult due to participant fatigue.
The common mediums to conduct an interview are via:
• Face-to-face Interviews
• Telephone/Online Interviews
• Computer Interviews
If you decide to use interviews for data collection, an additional benefit is the collection of qualitative data to pair with your quantitative data.
The trade-off is that due to it being more time intensive compared to the survey method, interviews are mostly used for quality data, rather than quantity.
Analysis Methods for Quantitative Data
You’ve collected your data! Now what? Well, its time to analyze it.
Data alone doesn’t mean anything until you look at it and give it meaning. Some methods of analyzing data are:
• Cross-tabulation: This is the most popular method when analyzing quantitative data. Cross-tabulation, as the name implies, analyzes multiple variables and tries to find correlations between them. This helps establish relationships and discriminates between variables.
• Trend analysis: When you're looking at data over a period of time to help you predict future data, then you're analyzing trends. Using this method helps you gather feedback on which values change and which stay the same over time, which in turn helps you predict future outcomes.
• Gap analysis: Using a gap analysis will help you determine how you and your company are doing vs the potential performance. Through this, you will find areas that are not optimized and/or can be improved on.
• SWOT analysis: A well-known analysis, the Strengths, Weaknesses Opportunities and Threats (or SWOT) analysis is similar to the gap analysis in finding out your company’s performance and potential. However, this analysis goes into further detail, which will help in creating business strategies.
• Text Analysis: Raw survey data starts out unstructured but has to be turned into something that makes sense. Text analysis helps by using intelligent tools to structure the data and help you understand it.
Quantitative Data - Analytics
Website Traffic – Number of Visitors
Understanding How To Conduct A Quantitative Data Analysis
To be blunt, raw data means nothing. This is why you need to conduct an analysis; to make sense of it so you can make use of that data.
There are four criteria that you need to understand before conducting an analysis.
Relating Measurement Scales and Variables
There are four types of scales to categorize your data in. The four scales are:
• Nominal – A label for your data. I.e. hair color, place of birth, a genre of music, cuisine of food, the material of cloth.
• Ordinal – Data that comes in a specific order. I.e. Rankings of badminton players, top 10 Japanese restaurants, movie ratings.
• Interval – A numerical scale with no true 0 (values can be negative). I.e. temperature (Celsius & Fahrenheit), income, any value on a Likert scale.
• Ratio – A numerical scale with a true 0 (does not have negative integers). I.e. height, weight, volume, number of donuts.
Knowing the measurements scales will help you organize, arrange, and understand your data.
Using Descriptive Statistics for Data
To further understand your raw data, you should use descriptive statistics. With descriptive statistics, you should find it easier to see patterns within your data.
Descriptive statistics that are often used are listed below (a short worked example follows the list):
• Mean- The average value of specific variables.
• Mode – The most common value in a variable.
• Median – The numerical middle point of a variable.
• Frequency – How many times a specific value is observed on a scale.
• Minimum and Maximum Values- The lowest and highest values on a scale.
• Percentages – A format to express the scores of variables.
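As a small worked example (the numbers are made up, not from any survey mentioned here), Python's standard statistics module can compute these directly:
import statistics

scores = [60, 70, 70, 80, 95]             # a hypothetical sample of ratings

print(statistics.mean(scores))            # mean: 75
print(statistics.mode(scores))            # mode: 70 (most common value)
print(statistics.median(scores))          # median: 70 (middle value)
print(min(scores), max(scores))           # minimum and maximum: 60 95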
Okay, simple to understand. But! You need to put your scales of measurements together with your descriptive stats to fully understand your data!
Deciding Which Measurement Scale To Use
You will have to decide on the measurement scale to use so you can choose a descriptive statistic for your variable.
For example, you can never use a nominal variable with means or medians, as a label doesn't have an average or a midpoint.
Some extra information, descriptive statistics are used to help describe raw data from a sample size, and cannot be applied to a population.
Tabulate and Analyze Your Data
Finally, after you decide which measurement scale and descriptive statistics work best, use tables to represent your data.
Once your data is tabulated, it can be analyzed using the cross-tabulation technique.
Quantitative Data - Tabulated Data
Example of Tabulated Data – Organized Data
Advantages & Disadvantages of Quantitative Data
Advantages
So you’ve learned about what quantitative data is, how to collect it and how to analyze it. But how does quantitative data help you, and how doesn’t it help you?
Well the advantages of quantitative data are:
• Detailed Research: As quantitative data is numerical and statistical in nature, the research you conduct with it can lead to many inferences.
• Minimal Bias: Assuming the data isn’t tampered with, quantitative data will tell you information as it is. Number’s don’t discriminate, but humans may accidentally do so, which could lead to incorrect results.
• Accurate Results: Numbers don’t lie. Quantitative data presented to you is objective by nature and won’t try to trick you (which is why proper analysis is important!)
Disadvantages
Quantitative data is great! But it isn’t perfect. There are limits to what quantitative data can do for you.
The disadvantages of quantitative data are:
• Limited Information: Quantitative data gives you numbers, but what do these numbers mean? Yes, your customer gave you a 7 for satisfaction, but why did they do so? To gain true insight, quantitative data cannot be the only option of data collection.
• Question Bias: Numbers don’t lie, but the intentions behind them may skew your data. To ensure that you’re getting the correct data, you should consider the nature of your questions, and their measurement scale, and analyses while making them.
And that’s it! To recap, this article has explained:
• Types of quantitative data
• Collection methods
• Methods of analyses
• How to conduct quantitative analyses
• Advantages & Disadvantages of quantitative data
Data helps us understand better; but the main problem is that there simply is too much data out there to make sense of.
For your business, check this out to see the different types of data you can use to help understand how your business, products, or even branding is performing.
oscillatingLinearMotion.C
Go to the documentation of this file.
1 /*---------------------------------------------------------------------------*\
2 ========= |
3 \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4 \\ / O peration |
5 \\ / A nd | Copyright (C) 2011-2016 OpenFOAM Foundation
6 \\/ M anipulation |
7 -------------------------------------------------------------------------------
8 License
9 This file is part of OpenFOAM.
10
11 OpenFOAM is free software: you can redistribute it and/or modify it
12 under the terms of the GNU General Public License as published by
13 the Free Software Foundation, either version 3 of the License, or
14 (at your option) any later version.
15
16 OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
23
24 \*---------------------------------------------------------------------------*/
25
26 #include "oscillatingLinearMotion.H"
27 #include "addToRunTimeSelectionTable.H"
28
29 // * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //
30
31 namespace Foam
32 {
33 namespace solidBodyMotionFunctions
34 {
35 defineTypeNameAndDebug(oscillatingLinearMotion, 0);
36 addToRunTimeSelectionTable
37 (
38 solidBodyMotionFunction,
39 oscillatingLinearMotion,
40 dictionary
41 );
42 }
43 }
44
45
46 // * * * * * * * * * * * * * * * * Constructors * * * * * * * * * * * * * * //
47
48 Foam::solidBodyMotionFunctions::oscillatingLinearMotion::oscillatingLinearMotion
49 (
50 const dictionary& SBMFCoeffs,
51 const Time& runTime
52 )
53 :
54 solidBodyMotionFunction(SBMFCoeffs, runTime)
55 {
56 read(SBMFCoeffs);
57 }
58
59
60 // * * * * * * * * * * * * * * * * Destructor * * * * * * * * * * * * * * * //
61
62 Foam::solidBodyMotionFunctions::oscillatingLinearMotion::
63 ~oscillatingLinearMotion()
64 {}
65
66
67 // * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * * //
68
69 Foam::septernion
70 Foam::solidBodyMotionFunctions::oscillatingLinearMotion::transformation() const
71 {
72 scalar t = time_.value();
73
74 const vector displacement = amplitude_*sin(omega_*t);
75
76 quaternion R(1);
77 septernion TR(septernion(-displacement)*R);
78
79 DebugInFunction << "Time = " << t << " transformation: " << TR << endl;
80
81 return TR;
82 }
83
84
85 bool Foam::solidBodyMotionFunctions::oscillatingLinearMotion::read
86 (
87 const dictionary& SBMFCoeffs
88 )
89 {
90     solidBodyMotionFunction::read(SBMFCoeffs);
91
92 SBMFCoeffs_.lookup("amplitude") >> amplitude_;
93 SBMFCoeffs_.lookup("omega") >> omega_;
94
95 return true;
96 }
97
98
99 // ************************************************************************* //
virtual bool read(const dictionary &SBMFCoeffs)
Update properties from given dictionary.
A list of keyword definitions, which are a keyword followed by any number of values (e...
Definition: dictionary.H:137
Ostream & endl(Ostream &os)
Add newline and flush stream.
Definition: Ostream.H:253
Septernion class used to perform translations and rotations in 3D space.
Definition: septernion.H:65
Class to control time during OpenFOAM simulations that is also the top-level objectRegistry.
Definition: Time.H:68
Macros for easy insertion into run-time selection tables.
defineTypeNameAndDebug(axisRotationMotion, 0)
bool read(const char *, int32_t &)
Definition: int32IO.C:85
Base class for defining solid-body motions.
#define DebugInFunction
Report an information message using Foam::Info.
virtual bool read(const dictionary &SBMFCoeffs)=0
Update properties from given dictionary.
Quaternion class used to perform rotations in 3D space.
Definition: quaternion.H:60
virtual septernion transformation() const
Return the solid-body motion transformation septernion.
dimensionedScalar sin(const dimensionedScalar &ds)
#define R(A, B, C, D, E, F, K, M)
addToRunTimeSelectionTable(solidBodyMotionFunction, axisRotationMotion, dictionary)
Namespace for OpenFOAM.
The option 'Line Spacing' doesn't seem to work
Hi all,
I'm new to OpenLP, my background is Song Show Plus 7.
I'm trying to create a custom theme for my church. I noticed that the 'Line Spacing' option has no effect at all. Am I missing something here?
I've tried restarting OpenLP, removing then re-adding the song but still the same.
Other options like Font, color, Bold etc works fine, it's only the Line Spacing that doesn't seem to have any effect.
I'm running OpenLP 2.2.1 on Windows 10.
Blessings,
Sid
Comments
• Dear Sid,
Thank you for your question,
How much did you try to add to Line spacing?
This is measured in pixels, which is a very small unit.
Try setting it to 200 and see if it then has any effect,
you can decrease it afterwards to find the ideal size.
Hope this helps & welcome to the forums!
Blessings,
Azaziah
Make your content easier to read
ClearWebStats
Internet users do not actually read most of the text on web pages, at least according to research by web usability professionals into how people behave on websites. That analysis shows that only about 28% of the text on a page is read, and the more text there is, the lower the percentage that gets read.
Don’t worry about bending and scrolling vertically
There is an old myth that important content should be placed above the "fold" (the term comes from printing and refers to the area of a web page that can be viewed without scrolling), a rule of thumb that web practitioners have repeated for years.
So long pages are bad? Should we cram everything into the top of the template because people don’t want to read anything below the fold?
The answer to those questions is "no", according to the researchers' report. Their results show that page length does not affect the likelihood that a user scrolls down the page.
Place important content on the left side of the page.
The bulk of visitors to most sites grew up in cultures that write from left to right. Accordingly, visitors' attention is directed to the left side of the page for at least 69% of the time they spend on it, according to the results of a study with more than 20 participants.
Spaces in the text affect the level of perception of information.
Good readability improves comprehension and reading speed, and it increases the likelihood that the user will keep reading rather than leave the page. Many factors affect ease of reading, including font choice, font size, line height, text contrast, and white space.
The readability study, conducted with 20 participants, consisted of presenting the same text to the subjects with different settings for the margins around the text and for the line height (the distance between lines). The results showed that text without margins is read faster, but comprehension suffers. The higher reading speed in the absence of margins is explained by the fact that the text and paragraphs are packed more densely, so less time is needed to move the eyes from one line to the next.
Small details make big changes.
Very often, we look at the big picture of web design and ignore small details, especially in the face of time constraints. There are so many other important points to think about that it is very easy to let the small parts of the design go to chance.
But sometimes something small, such as a form button, can have a significant impact on the success of a site. An interface design expert wrote about how removing a button and replacing it with a simple error message allowed users to avoid errors in their work, which ultimately led to an increase in profits of $300 million per year.
Flow Designer also noted the importance of attention to detail. They found that revising the error information page so that it included helpful hint text increased financial returns by 0.5% per month, which ultimately resulted in an additional quarter of a million pounds a year from the site.
Commits
Luke Plant committed 61e8785
Initial import
Files changed (15)
+Luke Plant <[email protected]>
+
+With thanks for contributions from:
+
+ 'nesh' <nesh _at_ studioquattro _dot_ co _dot_ yu>
+Changelog
+=========
+
+Version 1.5 - 2010-12-01
+------------------------
+
+* Re-branded as django-output-validator and packaged properly.
+
+ If you used the previous version, you should drop the old
+ 'validator_validationfailure' table (assuming it doesn't have any data you
+ need, of course). Then go through the installation instructions in the README
+ and update the name/values of the relevant settings.
+
+
+Version 1.4 - 2008-04-28
+------------------------
+
+* Changed maxlength to max_length, as per change in Django.
+* Corrections to instructions (thanks to Gary Wilson)
+* Fixed deprecation warnings (thanks to Gary Wilson)
+
+
+Version 1.3 - 2007-11-05
+------------------------
+
+* Updated for unicodisation of Django.
+
+ This is a BACKWARDS INCOMPATIBLE change.
+
+ The problem was caused by the fact that you used to able to store arbitrary
+ binary data in a TextField, which is no longer possible. As a result, I am
+ using base64 encoding for any pickled objects. I haven't written an upgrade
+ script for the database (since I personally keep the list of failed pages to
+ zero). If you are upgrading from a previous version, any of your existing
+ ValidationFailure objects will be corrupted (the 'request' and 'response' data
+ will be lost). Either deal with the errors before upgrading, or write a
+ conversion script of some kind :-)
+
+Version 1.2 - 2007-04-18
+------------------------
+
+* Fixed bug that occurred when settings.VALIDATOR_APP_IGNORE_PATHS wasn't set
+* Added logic to stop duplicate failures being logged
+
+Version 1.1 - 2005-12-14
+------------------------
+
+* Added optional VALIDATOR_APP_IGNORE_PATHS setting.
+* Added support for mod_python handler - thanks to 'nesh'.
+* Added a setup.py script.
+
+Version 1.0 - 2005-11-19
+------------------------
+* Initial release
+recursive-include . *.html
+include AUTHORS
+include *.rst
+=======================
+Django output validator
+=======================
+
+This app validates all the HTML pages (or other data) that is generated by your
+Django project. This is meant to be used only in development.
+
+Installation
+============
+
+* Run setup.py to install the package into your python path.
+
+* Add "output_validator" to your INSTALLED_APPS setting.
+
+* If you have removed ``"django.template.loaders.app_directories.Loader"`` from
+ your TEMPLATE_LOADERS, you need to add the 'templates' folder to your
+ TEMPLATE_DIRS setting.
+
+* Insert the middleware
+ ``"output_validator.middleware.ValidatorMiddleware"``
+ near the beginning of the middleware list (which means it will get
+ the the response object after everything else). It must be after
+ every middleware that does post-processing, but mustn't be after
+ GZip, since it can't handle gzipped HTML. ( I just disable the GZip
+ middleware for development)
+
+* Alter your URL conf to include the URLs for the validator. You need
+ this line inserted somewhere::
+
+ (r'^validator/', include('output_validator.urls'))
+
+* Add a setting to tell the app where to find the 'validate'
+ executable used for validation. This is a dictionary of mimetypes
+ and corresponding validators, allowing this app to be extended to
+ any other generated content::
+
+ OUTPUT_VALIDATOR_VALIDATORS = {
+ 'text/html': '/usr/bin/validate',
+ 'application/xml+xhtml': '/usr/bin/validate',
+ }
+
+ I usually use a small wrapper for this executable that pops up
+ a message when it fails - the following works for GNOME
+ (if you have the notify-send program installed)::
+
+ #!/bin/sh
+ validate "$1" || {
+ notify-send "Validation failed";
+ }
+
+* Finally, run the django admin script to set up the database tables::
+
+ ./manage.py --settings="yourproject.settings" syncdb
+
+ OR, if you are using South::
+
+ ./manage.py --settings="yourproject.settings" migrate output_validator
+
+* Optionally, set the following settings:
+
+ * OUTPUT_VALIDATOR_IGNORE_PATHS - this is a list of path prefixes that
+ will be ignored. For example, if you have the admin at ``/admin/``
+ you can ignore any errors in the admin with this::
+
+ OUTPUT_VALIDATOR_IGNORE_PATHS = [
+ '/admin/',
+ ]
+
+
+Usage
+=====
+
+When browsing any of your pages in development, all HTML will be validated. If
+it fails, it will be logged. You can see all failures at
+'http://localhost:8000/validator/' (assuming local development and the URL conf
+suggested above). Use the app to delete old failures once they have been fixed.
File output_validator/__init__.py
+
File output_validator/middleware.py
+from output_validator.models import ValidationFailure
+
+
+class ValidatorMiddleware(object):
+ def process_response(self, request, response):
+ if response.status_code == 200:
+ ValidationFailure.do_validation(request, response)
+ return response
File output_validator/migrations/0001_initial.py
+# encoding: utf-8
+import datetime
+from south.db import db
+from south.v2 import SchemaMigration
+from django.db import models
+
+class Migration(SchemaMigration):
+
+ def forwards(self, orm):
+
+ # Adding model 'ValidationFailure'
+ db.create_table('output_validator_validationfailure', (
+ ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
+ ('timestamp', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
+ ('path', self.gf('django.db.models.fields.TextField')()),
+ ('method', self.gf('django.db.models.fields.CharField')(max_length=6)),
+ ('request', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
+ ('response', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
+ ('errors', self.gf('django.db.models.fields.TextField')()),
+ ))
+ db.send_create_signal('output_validator', ['ValidationFailure'])
+
+
+ def backwards(self, orm):
+
+ # Deleting model 'ValidationFailure'
+ db.delete_table('output_validator_validationfailure')
+
+
+ models = {
+ 'output_validator.validationfailure': {
+ 'Meta': {'ordering': "('-timestamp',)", 'object_name': 'ValidationFailure'},
+ 'errors': ('django.db.models.fields.TextField', [], {}),
+ 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
+ 'method': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
+ 'path': ('django.db.models.fields.TextField', [], {}),
+ 'request': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
+ 'response': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
+ 'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
+ }
+ }
+
+ complete_apps = ['output_validator']
File output_validator/migrations/__init__.py
Empty file added.
File output_validator/models.py
+import base64
+import cPickle
+import copy
+import datetime
+import os
+import tempfile
+
+from django.core.handlers.modpython import ModPythonRequest
+from django.db import models
+
+
+class ValidationFailure(models.Model):
+ timestamp = models.DateTimeField("Time", default=datetime.datetime.now)
+ path = models.TextField("Request path")
+ method = models.CharField("HTTP method", max_length=6)
+ request = models.TextField("Request object", default='', blank=True)
+ response = models.TextField("Response object", default='', blank=True)
+ errors = models.TextField("Errors")
+
+ def __repr__(self):
+ return self.method + " " + self.path
+
+ def get_request_formatted(self):
+ import cPickle
+ try:
+ return repr(cPickle.loads(base64.decodestring(self.request)))
+ except EOFError, UnpicklingError:
+ return None
+
+ def get_response(self):
+ import cPickle
+ try:
+ return cPickle.loads(base64.decodestring(self.response))
+ except EOFError, UnpicklingError:
+ return None
+
+ class Meta:
+ ordering = ('-timestamp',)
+
+ class Admin:
+ fields = (
+ (None, {'fields': ('timestamp', 'path', 'method', 'errors')}),
+ )
+
+
+ def do_validation(request, response):
+ """
+ Do validation on response and log if it fails.
+ """
+ from django.conf import settings
+ try:
+ OUTPUT_VALIDATOR_IGNORE_PATHS = settings.OUTPUT_VALIDATOR_IGNORE_PATHS
+ except AttributeError:
+ OUTPUT_VALIDATOR_IGNORE_PATHS = ()
+
+
+ try:
+ content_type = response['Content-Type'].split(';')[0]
+ validator = settings.OUTPUT_VALIDATOR_VALIDATORS[content_type]
+ except KeyError, IndexError:
+ # no content type, or no validator for that content type
+ return
+
+ for ignore_path in OUTPUT_VALIDATOR_IGNORE_PATHS:
+ if request.path.startswith(ignore_path):
+ return
+
+ # first store data in temporary file
+ (tmpfilehandle, tmpfilepath) = tempfile.mkstemp()
+ os.write(tmpfilehandle, response.content)
+ os.close(tmpfilehandle)
+
+ # Now execute validator and get result
+ (child_stdin, child_output) = os.popen4(validator + ' ' + tmpfilepath)
+ errors = child_output.read()
+
+ # Normalise output so that we can eliminate duplicate errors
+ errors = errors.replace(tmpfilepath, '[tmpfilepath]')
+
+ # clean up
+ child_stdin.close()
+ child_output.close()
+ os.unlink(tmpfilepath)
+
+ # Only save if there was an error, and there isn't already
+ # a failure saved at the same URL with identical errors.
+ # (this isn't perfectly watertight -- you could by chance be
+ # generating identical errors with different query strings or
+ # POST data, but it's unlikely).
+
+ if len(errors) > 0 and \
+ ValidationFailure.objects.filter(errors=errors,
+ path=request.path).count() == 0:
+ failure = ValidationFailure(errors=errors)
+ failure.path = request.path
+ qs = request.META.get('QUERY_STRING','')
+ if qs is not None and len(qs) > 0:
+ failure.path += '?' + qs
+ failure.errors = errors
+
+ if isinstance(request, ModPythonRequest):
+ # prepopulate vars
+ request._get_get()
+ request._get_post()
+ request._get_cookies()
+ request._get_files()
+ request._get_meta()
+ request._get_request()
+ request._get_raw_post_data()
+ u = request.user
+ mp = request._req
+ del request._req # get rid of mp_request
+ try:
+ req = copy.deepcopy(request)
+ except Exception, e:
+ req = "Couldn't stash a copy of the request: %s" % str(e)
+ request._req = mp # restore mp_request
+ else:
+ try:
+ req = copy.deepcopy(request)
+ # remove the stuff we can't serialize
+ del req.META['wsgi.errors']
+ del req.META['wsgi.file_wrapper']
+ del req.META['wsgi.input']
+ except Exception, e:
+ # TODO - work out why this happens
+ req = "Couldn't stash a copy of the request: %s" % str(e)
+
+ failure.request = base64.encodestring(cPickle.dumps(req))
+ failure.response = base64.encodestring(cPickle.dumps(response))
+ failure.method = request.META['REQUEST_METHOD']
+ failure.save()
+ do_validation = staticmethod(do_validation)
File output_validator/templates/output_validator/base.html
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+<title>Validation status</title>
+<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+<style type="text/css">
+
+body {
+ margin: 0px 20px;
+ padding: 0px;
+ background-color: white;
+ color: black;
+ font-family: sans;
+}
+
+
+h1
+{
+ text-align: center;
+ font-size: 1.5em;
+ margin: 0px -20px;
+ padding: 10px 0px;
+ background-color: #e8e8ff;
+ border-bottom: 2px solid #8080a0;
+}
+
+h2
+{
+ margin-left: -10px;
+ font-size: 1.2em;
+ color: #000060;
+}
+
+h3
+{
+ margin-left: -5px;
+ font-size: 1.1em;
+}
+
+td, th, table
+{
+ border-collapse: collapse;
+ border: 1px solid #8080a0;
+ padding: 2px;
+}
+
+tr
+{
+ background-color: #e8e8ff;
+}
+
+tr.alt, th
+{
+ background-color: #ffffff;
+}
+
+th, td
+{
+ vertical-align: top;
+}
+
+pre
+{
+ margin: 0px;
+}
+
+</style>
+</head>
+<body>
+<h1>Django project validation monitor</h1>
+<div id="content">
+{% block content %}
+{% endblock %}
+</div>
+</body>
+</html>
File output_validator/templates/output_validator/validationfailure_detail.html
+{% extends "output_validator/base.html" %}
+{% block content %}
+<a href="../">[up]</a>
+<h2>Validation failure details</h2>
+
+<form method="post" action="delete/">{% csrf_token %}<div>
+<input type="submit" name="delete" value="Delete" />
+</div>
+</form>
+
+<table width="100%">
+ <tr>
+ <th scope="row">Time</th>
+ <td>{{ object.timestamp|date:"d M y h:i" }}</td>
+ </tr>
+ <tr>
+ <th scope="row">Request</th>
+ <td>{{ object.method }} {{ object.path }}
+ {% if object.method == 'GET' %}<a href="{{ object.path|escape }}">[go]</a>{% endif %}
+ </td>
+ </tr>
+ <tr>
+ <th scope="row">Errors</th>
+ <td><div><pre>{{ object.errors }}</pre></div></td>
+ </tr>
+ <tr>
+ <th scope="row">Response content</th>
+ <td><div><pre>{{ object.get_response.content|linenumbers }}</pre></div></td>
+ </tr>
+ <tr>
+ <th scope="row">Original request</th>
+ <td><div class="python">{{ object.get_request_formatted }}</div></td>
+ </tr>
+</table>
+{% endblock %}
File output_validator/templates/output_validator/validationfailure_list.html
+{% extends "output_validator/base.html" %}
+{% block content %}
+
+{% if object_list %}
+ <form method="post" action="delete/">{% csrf_token %}
+ <div class="errorsfound"><p>{{ object_list|length }} page(s) with errors</p></div>
+ <div>
+ <table>
+ <tr class="header">
+ <th scope="col">Time</th>
+ <th scope="col">Request</th>
+ <th scope="col">Details</th>
+ <th scope="col">Delete</th>
+ </tr>
+ {% for failure in object_list %}
+ <tr {% if forloop.counter|divisibleby:"2" %}class="alt"{% endif %}>
+ <td>{{ failure.timestamp|date:"d M Y h:i" }}</td>
+ <td><span class="method">{{ failure.method }}</span> <span class="path">{{ failure.path|escape }}</span></td>
+ <td><a href="{{ failure.id }}/">Details</a></td>
+ <td><input type="checkbox" name="deleteitem{{ failure.id }}" /></td>
+ </tr>
+ {% endfor %}
+ </table>
+ <br/>
+ <input type="submit" name="deleteselected" value="Delete selected" />
+ <input type="submit" name="deleteall" value="Delete all" />
+ </div></form>
+{% else %}
+ <div class="noerrorsfound"><p>No errors found.</p></div>
+{% endif %}
+
+{% endblock %}
File output_validator/urls.py
+from django.conf.urls.defaults import *
+from output_validator.models import ValidationFailure
+
+info_dict = {
+ 'queryset': ValidationFailure.objects.all(),
+}
+
+urlpatterns = patterns('',
+ (r'^$',
+ 'django.views.generic.list_detail.object_list',
+ dict(info_dict, allow_empty=True)),
+ (r'^(?P<object_id>\d+)/$',
+ 'django.views.generic.list_detail.object_detail',
+ info_dict),
+ (r'^(?P<object_id>\d+)/delete/$',
+ 'output_validator.views.delete'),
+ (r'^delete/$',
+ 'output_validator.views.bulkdelete'),
+)
File output_validator/views.py
+from django.http import HttpResponseRedirect
+
+from output_validator.models import ValidationFailure
+
+
+def bulkdelete(request):
+ if request.POST:
+ postkeys = request.POST.keys()
+ if 'deleteall' in postkeys:
+ ValidationFailure.objects.all().delete()
+ elif 'deleteselected' in postkeys:
+ for k in postkeys:
+ if k.startswith('deleteitem'):
+ k = k[len('deleteitem'):]
+ try:
+ vf = ValidationFailure.objects.get(id=k)
+ vf.delete()
+ except ValidationFailure.DoesNotExist:
+ pass
+
+ return HttpResponseRedirect("../")
+
+
+def delete(request, object_id):
+ if request.POST:
+ try:
+ vf = ValidationFailure.objects.get(id=object_id)
+ vf.delete()
+ except ValidationFailure.DoesNotExist:
+ pass
+ return HttpResponseRedirect("../../")
+
+#!/usr/bin/env python
+from setuptools import setup, find_packages
+import os
+
+
+def read(*rnames):
+ return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
+
+
+setup(
+ name = "django-output-validator",
+ version = '1.5',
+ packages = ['output_validator'],
+ include_package_data = True,
+
+ author = "Luke Plant",
+ author_email = "[email protected]",
+ url = "http://lukeplant.me.uk/resources/djangovalidator/",
+ description = "App to catch HTML errors (or other errors) in outgoing Django pages.",
+ long_description = (
+ read('README.rst')
+ + "\n\n" +
+ read('CHANGES.rst')
+ ),
+ license = "MIT",
+ keywords = "django HTML XHTML validation validator",
+ classifiers = [
+ "Development Status :: 5 - Production/Stable",
+ "Environment :: Web Environment",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: MIT License"
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Framework :: Django",
+ "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
+ "Topic :: Software Development :: Testing",
+ ]
+)
I have two NSMutableArrays. One array consists of records from the database and the other array consists of records from webservices.
I want to compare each record from the database array with each record in the web services array using a unique key like barcodeID. If the barcodeID is the same, then I want to remove the item from the array. It's like I'm updating my database records: if we get the same records from the web service, I don't want to insert them again.
Please help me I'm unable to break the logic for this. Thanks in advance.
3 Answers
If Product.barcodeID uniquely identifies your objects, then you can use that member to implement -[Product hash] and -[Product isEqual:].
Then you can easily use Product objects in an NSSet. NSSet and NSMutableSet provide several methods to combine sets and remove members.
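A sketch of that idea, assuming Product has an NSString property named barcodeID (this code is illustrative, not from the answer itself):
// In Product.m
- (NSUInteger)hash {
    // Two products with the same barcode hash identically
    return [self.barcodeID hash];
}

- (BOOL)isEqual:(id)object {
    if (self == object) return YES;
    if (![object isKindOfClass:[Product class]]) return NO;
    return [self.barcodeID isEqualToString:((Product *)object).barcodeID];
}
With hash and isEqual: in place, you can build an NSMutableSet from one array and use methods such as minusSet: or member: to drop the duplicates before inserting.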
The brute force method of doing such comparison is for every record in one array is checked with every record in another. If you find it then stop and discard the object. if you do not find it, then you add it to the array. This of course will have a very high time complexity with a worse case scenario is O(n^2). you could shorten this down by using certain data structures inside your database and web service. Maybe storing them in sorted order or through some algorithm.
You should do some research yourself before asking this question. I shall leave you the option to find a way to optimize your code.
Good luck!
This is kind of the idea of the brute force method. As mentioned above this is incredibly slow compared to alternatives.
- (void)myUpdateFunction
{
NSMutableArray *baseDatabaseArray;
NSMutableArray *baseWebServiceArray;
for (int i = 0; i < baseWebServiceArray.count; i++) {
id value = [[baseWebServiceArray objectAtIndex:i] valueForKey:@"barcodeID"];
NSArray *array = [baseDatabaseArray filteredArrayUsingPredicate:[NSPredicate predicateWithFormat:@"barcodeID = %@", value]];
if (array.count > 0)
{
id obj = [array objectAtIndex:0];
[baseDatabaseArray removeObject:obj];
}
[baseDatabaseArray addObject:[baseWebServiceArray objectAtIndex:i]];
}
}
I have been using Magical Record and love it. You have to be using Core Data for this though. Here is what my update code looks like with Magical Record.
- (void)updateDatabase
{
Class class = NSClassFromString(self.managedObjectClassName);
if ([class MR_countOfEntities] > 0) {
for (NSArray *array in self.totalBatches) {
[class MR_updateFromArray:array];
}
} else {
for (NSArray *array in self.totalBatches) {
[class MR_importFromArray:array];
}
}
[self.totalBatches removeAllObjects];
}
If you have any questions about Core Data, or if you need me to walk through the algorithms, feel free to ask.
Structs, pointers and functions - need some help
1. #1
Registered User
Join Date
Feb 2013
Posts
58
Question Structs, pointers and functions - need some help
Hi,
could you please help me?
I have a struct and I want to implement in with pointers and functions.
Q - what is the correct syntax?
for example:
Code:
typedef struct XYZ
{
int x;
int y;
int z;
}XYZ_t;
int func( using the XYZ_t struct)
{
}
int main()
{
XYZ_t *XYZ_t_PTR;
func( using the XYZ_t struct);
}
2. #2
Registered User
Join Date
Jun 2005
Posts
6,299
That question is as unclear as mud.
Your description of wanting to "have a struct" that is implemented "in with pointers and functions" is meaningless.
3. #3
and the hat of wrongness Salem's Avatar
Join Date
Aug 2001
Location
The edge of the known universe
Posts
32,546
Which book are you reading?
Because it isn't any good.
Let me put it this way.
If you had
int *XYZ_t_PTR;
would you still be as confused as you are now?
4. #4
Stoned Witch Barney McGrew's Avatar
Join Date
Oct 2012
Location
astaylea
Posts
420
I have a struct and I want to implement in with pointers and functions.
I don't think anyone can help you with that unless you become more specific about what "in" is.
Q - what is the correct syntax?
for example:
Whatever that example is, it isn't valid C. I think you would benefit from learning the difference between objects and types.
I'm wondering, do you have a reason to pretend that XYZ_t's underlying type isn't struct XYZ?
5. #5
Registered User
Join Date
Feb 2013
Location
Espoo, Finland
Posts
1
I think you mean something like this:
Code:
typedef struct XYZ
{
    int x;
    int y;
    int z;
} XYZ_t;

int func(XYZ_t *X) /* takes a pointer to the struct */
{
    X->x = 1;
    return 0;
}

int main()
{
    XYZ_t *XYZ_t_PTR;   /* a pointer to the struct */
    XYZ_t structXYZ;    /* the struct itself */
    XYZ_t_PTR = &structXYZ;
    func(XYZ_t_PTR);
    return 0;
}
Last edited by Ilkka Mannelin; 02-23-2013 at 02:15 AM.
6. #6
Registered User
Join Date
Mar 2010
Posts
535
Whhhhhhyyyy does everyone get taught to use typedef? It's a convenience to not have to type 'struct' all the time. Just a convenience.
When I first encountered a C struct I was told to use typedef as well, and it confused the hell out of me (because it looks a bit like an anonymous definition, with this mysterious typedef keyword that I don't fully understand). Just seems like the two concepts always get bundled together too early for most people.
Anyway. Mini rant over.
Code:
typedef struct XYZ
{
int x;
int y;
int z;
}XYZ_t;
int func(XYZ_t *s)
{
}
int main()
{
XYZ_t *XYZ_t_PTR;
func(XYZ_t_PTR);
}
Is what I think you're asking. XYZ_t is a type name though not an instance of a struct. Your pointer points to nothing -- uninitialised garbage.
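To tie the answers together, here is a minimal sketch (illustrative only, not from anyone in this thread) that declares an actual struct object, passes its address to a function, and skips the typedef entirely:
#include <stdio.h>

struct XYZ {
    int x;
    int y;
    int z;
};

/* The function takes a pointer to the struct; no typedef is needed. */
void set_x(struct XYZ *p, int value)
{
    p->x = value;
}

int main(void)
{
    struct XYZ s = {0, 0, 0};   /* an actual object, not just a type name */
    set_x(&s, 42);              /* pass its address */
    printf("%d\n", s.x);        /* prints 42 */
    return 0;
}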
|
__label__pos
| 0.812914 |
Sublime Forum
How do I disable word wrapping?
#1
I am working on a website and I noticed that the text wraps if the line of code is too long. How do I disable that?
#2
In your settings:
"word_wrap": "false",
#3
You can also toggle word wrapping from the main menu.
#4
For a command palette lover, I have to say I'm surprised that you didn't mention it :smile:
• ctrl+shift+p
• Word Wrap: Toggle
#5
That’s true, but I have a key binding for this and didn’t know whether there was a palette entry or not, so I was making sure the thing I recommend works :wink:
|
__label__pos
| 0.985288 |
At the moment NetSurf does not share computed styles. Every element on every page has its own unique computed style allocated. Computed styles are quite large. Consider the following page:
It has over twenty thousand element nodes each with its own computed style. But there are less than 40 unique computed styles on the page. This is a massive waste of memory.
Computed Style Sharing
If different elements can reference the same computed styles, we can save a lot of memory.
• Computed styles will become reference counted (see the sketch after this list).
• There are two ways to go about sharing computed styles:
1. Memoising calls to css_get_style()
• We'll use selection callbacks to find out whether a node has a previous sibling with the same element name, classes, etc. If so, the style for the node can be an extra reference to the computed style of the previous sibling.
• Presentational hint gathering for nodes needs to change such that presentational hints for two nodes can be compared.
• Only shares styles between siblings. Could be extended to share between cousins, etc.
• As well as saving memory, this will make selection faster.
• It will be a big saving in cases where styles can be shared, by bypassing the need to iterate over the applicable selector chains.
• In cases where the styles can't be shared, it will have a relatively small cost.
2. Interning computed styles
• Will have a performance cost.
• Much greater scope for sharing computed styles than the above.
• Can even share computed styles between different pages. (NetSurf will be unique in doing that.)
• Fully compatible with the above, so we can do both.
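A minimal sketch of the reference-counting idea in C; the names and layout are purely illustrative and are not NetSurf's or libcss's actual API:
#include <stdlib.h>

struct computed_style {
	unsigned int refcnt;
	/* ... computed property values ... */
};

/* Take an extra reference to an existing style. */
static struct computed_style *style_ref(struct computed_style *s)
{
	s->refcnt++;
	return s;
}

/* Drop a reference; free the style when the last reference goes away. */
static void style_unref(struct computed_style *s)
{
	if (--s->refcnt == 0)
		free(s);
}
With something like this in place, giving a node the same style as its previous sibling becomes a call to style_ref() rather than a fresh allocation, which is exactly the saving described above.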
|
__label__pos
| 0.728828 |
Next: , Previous: Run a Program, Up: List Processing
1.3 Generate an Error Message
Partly so you won't worry if you do it accidentally, we will now give a command to the Lisp interpreter that generates an error message. This is a harmless activity; and indeed, we will often try to generate error messages intentionally. Once you understand the jargon, error messages can be informative. Instead of being called “error” messages, they should be called “help” messages. They are like signposts to a traveller in a strange country; deciphering them can be hard, but once understood, they can point the way.
The error message is generated by a built-in GNU Emacs debugger. We will `enter the debugger'. You get out of the debugger by typing q.
What we will do is evaluate a list that is not quoted and does not have a meaningful command as its first element. Here is a list almost exactly the same as the one we just used, but without the single-quote in front of it. Position the cursor right after it and type C-x C-e:
(this is an unquoted list)
What you see depends on which version of Emacs you are running. GNU Emacs version 22 provides more information than version 20 and before. First, the more recent result of generating an error; then the earlier, version 20 result.
In GNU Emacs version 22, a *Backtrace* window will open up and you will see the following in it:
---------- Buffer: *Backtrace* ----------
Debugger entered--Lisp error: (void-function this)
(this is an unquoted list)
eval((this is an unquoted list))
eval-last-sexp-1(nil)
eval-last-sexp(nil)
call-interactively(eval-last-sexp)
---------- Buffer: *Backtrace* ----------
Your cursor will be in this window (you may have to wait a few seconds before it becomes visible). To quit the debugger and make the debugger window go away, type:
q
Please type q right now, so you become confident that you can get out of the debugger. Then, type C-x C-e again to re-enter it.
Based on what we already know, we can almost read this error message.
You read the *Backtrace* buffer from the bottom up; it tells you what Emacs did. When you typed C-x C-e, you made an interactive call to the command eval-last-sexp. eval is an abbreviation for `evaluate' and sexp is an abbreviation for `symbolic expression'. The command means `evaluate last symbolic expression', which is the expression just before your cursor.
Each line above tells you what the Lisp interpreter evaluated next. The most recent action is at the top. The buffer is called the *Backtrace* buffer because it enables you to track Emacs backwards.
At the top of the *Backtrace* buffer, you see the line:
Debugger entered--Lisp error: (void-function this)
The Lisp interpreter tried to evaluate the first atom of the list, the word ‘this’. It is this action that generated the error message ‘void-function this’.
The message contains the words ‘void-function’ and ‘this’.
The word ‘function’ was mentioned once before. It is a very important word. For our purposes, we can define it by saying that a function is a set of instructions to the computer that tell the computer to do something.
Now we can begin to understand the error message: ‘void-function this’. The function (that is, the word ‘this’) does not have a definition of any set of instructions for the computer to carry out.
The slightly odd word, ‘void-function’, is designed to cover the way Emacs Lisp is implemented, which is that when a symbol does not have a function definition attached to it, the place that should contain the instructions is `void'.
On the other hand, since we were able to add 2 plus 2 successfully, by evaluating (+ 2 2), we can infer that the symbol + must have a set of instructions for the computer to obey and those instructions must be to add the numbers that follow the +.
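You can check the distinction yourself with the built-in function fboundp, which reports whether a symbol has a function definition attached. Position the cursor after each expression and type C-x C-e:
(fboundp 'this)      ; value: nil, no function definition is attached
(fboundp '+)         ; value: t, the symbol + has a built-in definition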
In GNU Emacs version 20, and in earlier versions, you will see only one line of error message; it will appear in the echo area and look like this:
Symbol's function definition is void: this
(Also, your terminal may beep at you—some do, some don't; and others blink. This is just a device to get your attention.) The message goes away as soon as you type another key, even just to move the cursor.
We know the meaning of the word ‘Symbol’. It refers to the first atom of the list, the word ‘this’. The word ‘function’ refers to the instructions that tell the computer what to do. (Technically, the symbol tells the computer where to find the instructions, but this is a complication we can ignore for the moment.)
The error message can be understood: ‘Symbol's function definition is void: this’. The symbol (that is, the word ‘this’) lacks instructions for the computer to carry out.
|
__label__pos
| 0.926563 |
A live video activity requires an ingest URL. Alibaba Cloud ApsaraVideo Live supports creating activity of a triggered type. You can create multiple live activities on the basis of the activity creation rules without using APIs.
Note Terms in the console have been updated, and we will update the documentation as soon as possible. We are sorry for any inconvenience caused.
Descriptions
Ingest URLs can be created in batches according to the rules, and live activities can be performed simultaneously. When performing multiple live activities, note that each domain can process only a limited number of concurrent streams: a maximum of 20 concurrent streams and 10 encoding streams are allowed for each domain name. Therefore, before performing live activities in batches, check whether the current stream limit meets your requirements. If it does not, you can contact us by opening a ticket.
Create multiple ingest URLs
A live video service URL consists of three levels of live video management units, namely the domain name (Domain), application (APPName) and live stream (StreamName). You can create multiple applications (APPName) under each domain name (Domain), and multiple live streams (StreamName) under each application.
Note For more information about the generation rules of the ingest URL, see Ingest URL and streaming URL.
AppName and StreamName can be customized. Different values generate different ingest URL and streaming URLs.
• You can create multiple live streams under one app.
Examples
For example, if an application is named "live", you can create multiple live streams under it. The ingest URLs are:
rtmp://Ingest Domain Name/{live}/{1}? authentication string
rtmp://Ingest Domain Name/{live}/{2}? authentication string
rtmp://Ingest Domain Name/{live}/{3}? authentication string
Note The authorization string is an encrypted string obtained based on the authentication algorithm.
• You can also create multiple live streams for the application.
Note ApsaraVideo Live determines whether the live stream is unique based on stream name (StreamName) instead of application name (AppName). If you set a different application name, you must also make sure that the stream name is different to ensure that the final live stream is different.
Examples
rtmp://Ingest Domain Name/{live1}/{Stream1}? authentication string
rtmp://Ingest Domain Name/{live2}/{Stream2}? authentication string
rtmp://Ingest Domain Name/{live3}/{Stream3}? authentication string
Create multiple streaming URLs
The rules of streaming URLs and the rules of ingest URLs are the same, and the application name (AppName) and stream name (StreamName) of the streaming URLs correspond to the application name (AppName) and stream name (StreamName) of the ingest URLs.
Note For more information about the generation rules of a single streaming URL, see Ingest URL and streaming URL.
Examples
The ingest URL is:
rtmp://Ingest Domain Name/{live}/{3}? authentication string
The corresponding streaming URL is:
http://Streaming Domain Name/{live}/{3}?authentication string
http://Streaming Domain Name/{live}/{3}.flv?authentication string
http://Streaming Domain Name/{live}/{3}.m3u8?authentication string
How do I get a URL after I enable encoding?
Default encoding
The streaming URL is spliced by using different parameters. The encoding streaming URL consists of the original URL and template IDs.
The URL format is: Streaming Domain Name + AppName + StreamName + _ + template ID.
Template name                 LD   SD   HD   UHD
Narrowband HD™ template ID    ld   sd   hd   ud
Examples
If the template ID of the standard-definition (SD) template is sd, you can create streaming URLs in batches as follows:
RTMP format: rtmp://Streaming Domain Name/{AppName}/{StreamName}_sd?authentication string
FLV format: http://Streaming Domain Name/{AppName}/{StreamName}_sd.flv?authentication string
HLS format: http://Streaming Domain Name/{AppName}/{StreamName}_sd.m3u8?authentication string
Custom encoding
You can also customize encoding as needed. In that case, you must configure the template ID of the custom encoding template in the console.
The splicing rules of multiple streaming URLs are as follows:
RTMP format: rtmp://Streaming Domain Name/{AppName}/{StreamName}_templateID?authentication string
FLV format: http://Streaming Domain Name/{AppName}/{StreamName}_templateID.flv?authentication string
HLS format: http://Streaming Domain Name/{AppName}/{StreamName}_templateID.m3u8?authentication string
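As an illustration only (this is not an official SDK or API; the domain, application name, stream name, and authentication string below are placeholders), the splicing rules above can be written in a few lines of Python:
def ingest_url(domain, app, stream, auth):
    # RTMP ingest URL: domain + app + stream + authentication string
    return f"rtmp://{domain}/{app}/{stream}?{auth}"

def streaming_urls(domain, app, stream, auth, template_id=None):
    # Append the transcoding template ID to the stream name when one is used
    name = f"{stream}_{template_id}" if template_id else stream
    return {
        "rtmp": f"rtmp://{domain}/{app}/{name}?{auth}",
        "flv":  f"http://{domain}/{app}/{name}.flv?{auth}",
        "hls":  f"http://{domain}/{app}/{name}.m3u8?{auth}",
    }

print(streaming_urls("play.example.com", "live", "stream1", "auth_key=...", "sd"))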
|
__label__pos
| 0.864811 |
Converting any string into camel case with JavaScript removing whitespace as well
To convert a string into camel case, you lowercase the first letter of the first word and capitalize the first letter of each remaining word, removing the whitespace between them.
Following is the code to convert any string into camel case −
Example
function convertStringToCamelCase(sentence) {
   // Match the first character, capital letters, word-boundary
   // characters and runs of whitespace.
   return sentence.replace(/(?:^\w|[A-Z]|\b\w|\s+)/g,
   function(camelCaseMatch, i) {
      // Whitespace matches coerce to 0, so drop them entirely.
      if (+camelCaseMatch === 0)
         return "";
      // Lowercase the match at offset 0, uppercase all later matches.
      return i === 0 ? camelCaseMatch.toLowerCase() :
         camelCaseMatch.toUpperCase();
   });
}
console.log(convertStringToCamelCase("Add two variables"));
To run the above program, you need to use the following command −
node fileName.js.
Here, my file name is demo104.js.
Output
This will produce the following output −
PS C:\Users\Amit\JavaScript-code> node demo104.js
addTwoVariables
|
__label__pos
| 0.990362 |
Import Step 3: Copy or Import Data
When your template is ready, you may import it into mLIMS. Go to System Settings - Import Animals and Cages. Have you double-checked your template?
Hint: If you have several thousand animals to import, consider doing the import in smaller groups, starting with the oldest animal and working your way up. This will help in error checking!
Method 1: Copy/Paste
Copy all headers and data from your template, and paste into the import box. Select ONE COLUMN for gene and genotype format, and submit.
Method 2: Import from Text File
To import your data as a text file, save your data sheet as a text file.
Select Load Data From a Text File, browse for the file, and select your gene and genotype format. Submit to continue to the next step.
|
__label__pos
| 0.99002 |
0 votes
I'm doing the following in a test:
WHEN_CALLED(cpMac->CreateDispatch(_)).Return(1);
WHEN_CALLED(cpMac->Open(_, _)).Throw(&openFailure);
First one works; the second one calls the real code instead of throwing an exception.
Is there only one WHEN_CALLED allowed?
asked by ed123 (600 points)
1 Answer
0 votes
Hi Ed,
No, there is not only one WHEN_CALLED allowed.
The issue might be something else.
I can't tell you exactly what the issue is; I don't have enough information about your code and test.
If you want to proceed please elaborate more.
Cheers,
Alon Sapozhnikov.
Support Specialist.
answered by Alon_TypeMock (10.4k points)
OK, that's great to know. What information would you need to further investigate this issue?
Thanks,
Ed
Hi Ed,
For starters I will need the class you are faking (including the method Open whose behavior you are changing) and the test itself.
Cheers,
Alon Sapozhnikov.
Support Specialist.
The class I'm faking:
// Machine generated IDispatch wrapper class(es) created with Add Class from Typelib Wizard
// CPmacDevice wrapper class
class CPmacDevice : public COleDispatchDriver
{
public:
CPmacDevice(){} // Calls COleDispatchDriver default constructor
CPmacDevice(LPDISPATCH pDispatch) : COleDispatchDriver(pDispatch) {}
CPmacDevice(const CPmacDevice& dispatchSrc) : COleDispatchDriver(dispatchSrc) {}
// Attributes
public:
// Operations
public:
// IPmacDevice methods
public:
void Open(long dwDevice, BOOL * pbSuccess)
{
static BYTE parms[] = VTS_I4 VTS_PBOOL ;
InvokeHelper(0x1, DISPATCH_METHOD, VT_EMPTY, NULL, parms, dwDevice, pbSuccess);
}
void Close(long dwDevice)
{
static BYTE parms[] = VTS_I4 ;
InvokeHelper(0x2, DISPATCH_METHOD, VT_EMPTY, NULL, parms, dwDevice);
}
void DataStart(long dwDevice, long resolution, long period)
{
static BYTE parms[] = VTS_I4 VTS_I4 VTS_I4 ;
InvokeHelper(0x5, DISPATCH_METHOD, VT_EMPTY, NULL, parms, dwDevice, resolution, period);
}
void DataStop(long dwDevice)
{
static BYTE parms[] = VTS_I4 ;
InvokeHelper(0x6, DISPATCH_METHOD, VT_EMPTY, NULL, parms, dwDevice);
}
void DataCollect(long dwDevice, VARIANT * pvArray, BOOL * pbSuccess)
{
static BYTE parms[] = VTS_I4 VTS_PVARIANT VTS_PBOOL ;
InvokeHelper(0x7, DISPATCH_METHOD, VT_EMPTY, NULL, parms, dwDevice, pvArray, pbSuccess);
}
void DPRBackGroundVar(long dwDevice, BOOL bOn)
{
static BYTE parms[] = VTS_I4 VTS_BOOL ;
InvokeHelper(0x8, DISPATCH_METHOD, VT_EMPTY, NULL, parms, dwDevice, bOn);
}
// IPmacDevice properties
public:
};
The test using MSTEST with VS2019:
#include \"pch.h\"
#include \"CppUnitTest.h\"
#include \"Isolator.h\"
#include \"XrmMotionHandler.h\"
#include \"XrmMotionController.h\"
#include \"XrmTechnosoftMotionController.h\"
#include \"CPmacDevice.h\"
using namespace Microsoft::VisualStudio::CppUnitTestFramework;
namespace XrmMotionUnitTests
{
TEST_CLASS(XrmDeltaTauMotionControllerUnitTests_MockCPmacDeviceTests)
{
public:
TEST_CLASS_INITIALIZE(Setup)
{
static bool bTestCreatedKey = false;
HKEY hKey;
string strKey = \"System\\\\CurrentControlSet\\\\Services\\\\Pmac\\\\pmaceth0\";
string strIPAddressName = \"IPAddress\";
string strIPAddress = \"172.24.68.50\";
DWORD dwType = REG_DWORD;
DWORD dwSize, dwValue;
string strKeyDevice = "System\\CurrentControlSet\\Services\\Pmac\\Device0";
string strLocation = "Location";
DWORD nDataLength = 4;
DWORD nLocationValue = 3; // magic number of some sort
string strEnumeration = "Enumeration";
DWORD nEnumerationValue = 0;
HKEY hKey2;
// set up the pmac registry information so that we can
// proceed with the checks before starting the test
if (RegOpenKeyA(HKEY_LOCAL_MACHINE, strKey.c_str(), &hKey) != ERROR_SUCCESS)
{
bTestCreatedKey = true;
if (RegCreateKeyExA(HKEY_LOCAL_MACHINE, strKey.c_str(), 0, NULL, REG_OPTION_NON_VOLATILE, KEY_ALL_ACCESS, NULL, &hKey, NULL) != ERROR_SUCCESS)
{
Assert::Fail(L\"Failed to create pmaceth0 key in registry\");
}
}
dwSize = 4;
dwValue = htonl(843323564);
if (RegQueryValueEx(hKey, L\"IPAddress\", nullptr, &dwType, (LPBYTE)&dwValue, &dwSize) != ERROR_SUCCESS)
{
if (RegSetKeyValueA(hKey, NULL, strIPAddressName.c_str(), REG_DWORD, (LPBYTE)&dwValue, dwSize) != ERROR_SUCCESS)
{
Assert::Fail(L\"Failed to create IPAddress key in registry\");
}
}
if (RegOpenKeyA(HKEY_LOCAL_MACHINE, strKeyDevice.c_str(), &hKey2) != ERROR_SUCCESS)
{
if (RegCreateKeyExA(HKEY_LOCAL_MACHINE, strKeyDevice.c_str(), 0, NULL, REG_OPTION_NON_VOLATILE, KEY_ALL_ACCESS, NULL, &hKey2, NULL) != ERROR_SUCCESS)
{
Assert::Fail(L\"Failed to create Device0 key in registry\");
}
}
if (RegQueryValueEx(hKey, L\"Location\", nullptr, &dwType, (LPBYTE)&dwValue, &dwSize) != ERROR_SUCCESS)
{
if (RegSetKeyValueA(hKey2, NULL, strLocation.c_str(), REG_DWORD, &nLocationValue, nDataLength) != ERROR_SUCCESS)
{
Assert::Fail(L\"Failed to create Location key in registry\");
}
}
if (RegQueryValueEx(hKey, L\"Enumeration\", nullptr, &dwType, (LPBYTE)&dwValue, &dwSize) != ERROR_SUCCESS)
{
if (RegSetKeyValueA(hKey2, NULL, strEnumeration.c_str(), REG_DWORD, &nEnumerationValue, nDataLength) != ERROR_SUCCESS)
{
Assert::Fail(L\"Failed to create Enumeration key in registry\");
}
}
}
TEST_METHOD(OpenEthernetDeviceWithExceptionThrown)
{
// As a general recommendation, we should always use the
// AAA unit test pattern (Arrange - Act - Assert)
XrmDeltaTauMotionController* pDeltaTau = nullptr;
//**********************************************************
// Arrange (create and/or initialize the test variables specifically for this test case)
//**********************************************************
exception openFailure(\"Open threw an exception\");
CPmacDevice* cpMac = FAKE_ALL<CPmacDevice>();
XrmMotionHandler* motionHandler = XrmMotionHandler::GetTheMotionHandler(true);
int controllerCount = motionHandler->GetTotalMotionControllersDefined();
for (int i = 0; i < controllerCount; i++)
{
if (motionHandler->GetMotionController(i)->GetControllerType() == XrmMotionTypes::DELTA_TAU)
{
pDeltaTau = static_cast<XrmDeltaTauMotionController*>(motionHandler->GetMotionController(i));
WHEN_CALLED(cpMac->CreateDispatch(_)).Return(1);//Throw(&openFailure);
WHEN_CALLED(cpMac->Open(_, _)).Throw(&openFailure);
pDeltaTau->SetCPmacDevice(cpMac);
break;
}
}
//**********************************************************
// Act (execute the test actions)
//**********************************************************
pDeltaTau->Initialize();
//**********************************************************
// Assert (test state/results to ensure outcome is the expected result)
//**********************************************************
}
TEST_CLASS_CLEANUP(Teardown)
{
ISOLATOR_CLEANUP();
}
};
}
Just wondering if there's any more information I need to give you. If not, any thoughts?
Hi Ed,
Unfortunately, it's difficult to reproduce the issue like that.
If you want to proceed, please open a ticket in our Premium Support with all the relevant information; if you can also create a sample project that reproduces the issue, that would be great.
Cheers,
Alon Sapozhnikov.
Support Specialist.
...
|
__label__pos
| 0.945006 |
Effective Go (RU) (Эффективный Go)
Оригинал смотри: https://golang.org/doc/effective_go.html go version go1.7.4
Список дополнительных материалов:
Оглавление
Введение
^
Go - это новый язык программирования. Хотя он заимствует идеи из существующих языков, он обладает необычными свойствами, из-за которых эффективные программы на Go отличаются по своему характеру от программ, написанных на родственных ему языках. Прямолинейный перевод C++ или Java программы в Go вряд ли даст удовлетворительный результат: Java программы пишутся на Java, а не на Go. С другой стороны, если думать о задаче с точки зрения Go, можно получить успешную, но совсем другую программу. Другими словами, чтобы хорошо писать на Go, важно понимать его особенности и идиомы. Также важно знать принятые в Go соглашения - об именовании, форматировании, построении программ и так далее, - чтобы программы, написанные Вами, были простыми для понимания другими программистами Go.
Этот документ даёт примеры для написания чистого, идиоматичного кода на Go. Он дополняет спецификацию языка, Тур по Go и Как писать на Go, которые необходимо прочитать в первую очередь.
^
Примеры
^
Go пакеты исходных кодов предназначены не только в качестве основных библиотек, но и в качестве примеров использования языка. Кроме того, многие пакеты имеют работающие, автономные исполняемые примеры и Вы можете запустить напрямую с помощью страницы golang.org, такие как этот (если необходимо, нажмите на слово "Примеры" чтобы открыть их). Если у Вас есть вопрос о том как решить какую-либо проблему или как что-то реализовать, то документация, исходные коды и примеры в библиотеке могут дать ответ, идею или объяснение.
^
Форматирование
^
Форматирование является наиболее спорным, но не сильно важным вопросом. Люди могут привыкнуть к различным стилям форматирования, но было бы лучше, если бы этого не приходилось делать и меньше времени придавалось этой теме, если бы все использовали одинаковый стиль. Проблема данной утопии в том, как это сделать без длинного руководства по стилю.
В Go мы используем нетипичный подход и передаем машине заботу о форматировании. Программа gofmt (также доступна, как go fmt, которая производит действия на уровне пакета, а не на уровне файлов) читает код на Go и выпускает исходный код со стандартным стилем отступов и вертикальным выравниванием, сохраняет, и при необходимости, переформатирует комментарии. Если Вы хотите знать, как по-новому структурировать код, запустите gofmt; если структура неверна, gofmt поправит Вашу программу (или файл сообщит об ошибке gofmt), не работайте в обход форматирования программой gofmt.
К примеру, нет необходимости тратить время на выравнивание комментариев для полей структур, т.к. gofmt сделает это за Вас. Для данного фрагмента кода
type T struct {
name string // name of the object
value int // its value
}
gofmt произведет выравнивание по колонкам:
type T struct {
name string // name of the object
value int // its value
}
Все стандартные пакеты Go отформатированы с помощью gofmt.
Очень коротко о некоторых деталях форматирования:
Абзац
Мы используем табуляцию для абзацев и gofmt делает это по умолчанию. Используйте пробелы только при острой необходимости.
Длина строки
Go не имеет предела длины строки. Не беспокойтесь о длинных строках. Если строка кажется слишком длинной, прервите ее и добавьте дополнительный отступ (символ табуляции) на новой строке.
Круглые скобки
Go нуждается в меньшем количестве круглых скобок, чем C и Java: управляющие структуры (if, for, switch) не имеют круглых скобок в своём синтаксисе. Кроме того, иерархия приоритетов операторов короче и яснее. К примеру, выражение
x<<8 + y<<16
не нуждается в добавлении пробелов, в отличии от других языков.
^
Комментарии
^
Go использует C-стиль /* */ для блока комментариев и C++-стиль // для однострочных комментариев. Как правило, используются однострочные комментарии. Блок комментариев, в основном, используется при комментировании пакетов, но также для выразительности или отключения большого участка кода.
Программа и веб-сервер - godoc обрабатывает Go исходники пакета для формирования документации. Комментарии, расположенные сразу над объявлением (без дополнительных пустых строк), извлекаются вместе с объявлением для пояснения данного элемента. Характер и стиль комментариев напрямую влияет на качество документации производимой godoc.
Каждый пакет должен иметь комментарий пакета - это блок комментариев предшествующий объявлению пакета. Для пакетов состоящих из нескольких файлов, комментарий пакета может быть расположен в любом из файлов, но только в одном из них. Комментарий пакета должен представлять информацию о пакете в целом. Он будет отображен вначале страницы godoc и должен представлять из себя детальную информацию, которой можно пользоваться.
/*
Package regexp implements a simple library for regular expressions.
The syntax of the regular expressions accepted is:
regexp:
concatenation { '|' concatenation }
concatenation:
{ closure }
closure:
term [ '*' | '+' | '?' ]
term:
'^'
'$'
'.'
character
'[' [ '^' ] character-ranges ']'
'(' regexp ')'
*/
package regexp
Если пакет простой, то комментарий может быть кратким.
// Package path implements utility routines for
// manipulating slash-separated filename paths.
Дополнительное форматирование, к примеру баннер из * (звездочек), не требуется. Шрифт для сформированного результата не обязательно будет моноширинный, поэтому не полагайтесь на пробелы при выравнивании, godoc, также как gofmt, позаботятся об этом. Комментарии интерпретируются как простой текст, поэтому HTML и другие аннотации такие как _эта_ воспроизводятся дословно и поэтому не должны использоваться. Единственное исключение, которое делает godoc, это выделение моноширинным шрифтом участков кода с отступами. Хорошим примером такого исключения является комментарий к пакету fmt.
В зависимости от контекста, godoc не может переформатировать комментарии, поэтому убедитесь, что они выглядят хорошо: используйте правильное правописание, знаки препинания, структуру предложения и т.д.
Любые комментарии внутри пакета, предшествующие объявлению, используются как описание этого объявления. Каждый экспортируемый объект, название которого начинается с большой буквы, должен иметь комментарий.
Лучше всего использовать комментарии в виде полных предложений. Это позволяет производить их автоматическую обработку. Первое предложение должно быть ключевым и начинаться с имени объявления.
// Compile parses a regular expression and returns, if successful,
// a Regexp that can be used to match against text.
func Compile(str string) (*Regexp, error) {
Если комментарий начинается с имени, то godoc может с использоваться совместно с grep. Представьте, что Вы не можете вспомнить имя "Compile", но Вы ищите the parsing function для регулярных выражений и тогда Вы можете выполнить команду:
$ godoc regexp | grep -i parse
Если все комментарии в пакете начинаются с "This function...", grep не сможет помочь с поиском имени. Если же комментарии начинаются с имени, Вы можете увидеть что-то вроде следующего результата, который напомнит Вам о том, что Вы искали.
$ godoc regexp | grep parse
Compile parses a regular expression and returns, if successful, a Regexp
parsed. It simplifies safe initialization of global variables holding
cannot be parsed. It simplifies safe initialization of global variables
$
Синтаксис Go допускает групповое объявление. Для каждой группы констант или переменных может быть представлен один общий комментарий. Однако такое объявление выглядит небрежно.
// Error codes returned by failures to parse an expression.
var (
ErrInternal = errors.New("regexp: internal error")
ErrUnmatchedLpar = errors.New("regexp: unmatched '('")
ErrUnmatchedRpar = errors.New("regexp: unmatched ')'")
...
)
Группировка также может показать взаимосвязи между элементами, к примеру, группа переменных защищенных mutex:
var (
countLock sync.Mutex
inputCount uint32
outputCount uint32
errorCount uint32
)
^
Именование
^
Именование очень важно в Go, как и в других языках. Они имеют семантический эффект: Видимость имени за пределами пакета, определяется по первой букве имени, которая, если является заглавной, то имя будет видно вне это пакета. Именно поэтому стоит уделить время обсуждению соглашения об именовании в программах Go.
Именование пакетов
^
Когда пакет импортируется, имя пакета используется для доступа к его содержимому. После того, как пакет импортирован,
import "bytes"
можно использовать bytes.Buffer. Это полезно, если все, кто использует пакет, могут использовать одно и то же имя, для обращения к его содержимому, подразумевается, что имя пакета должно быть коротким, четким и запоминающимся. В соответствии с соглашением,имена пакетов состоят из одного слова в нижнем регистре; нет необходимости в использовании подчеркиваний или СмешанногоРегистра. При выборе длинного имени пакета, всем, кто будет его использовать, придётся писать это имя. Но не беспокойтесь об уникальности имени. Имя пакета только по умолчанию используется при импорте; оно не должно быть глобально уникальным, и в редких случаях, при импорте может быть указано другое имя. В любом случае, путаница встречается редко, так как имя файла в импорте определяет, какой именно пакет используется.
Согласно другому соглашению, имя пакета является базовым именем его исходного каталога; пакет src/encoding/base64 импортируется как "encoding/base64" и имеет название base64, а не encoding_base64 и не encodingBase64.
Импортирующий пакет будет использовать имя пакета для обозначения его содержимого, поэтому при экспорте может учитываться этот факт, чтобы избежать повторения. (Не используйте import ., это, конечно, может упростить запуск тестов вне пакета, но в других случаях использоваться не должно). Например, тип reader для буферного чтения описанный в пакете bufio называется Reader, а не BufReader, т.к пользователи его видят как bufio.Reader, имя которого кратко и понятно.
Более того, т.к. импортируемые объекты адресуются по имени пакета, следовательно bufio.Reader не будет конфликтовать с io.Reader. Аналогично, функция для создания нового экземпляра объекта ring.Ring, которая объявлена как конструктор в Go, может называться NewRing, но т.к. Ring - это экспортируемый тип из пакета ring, функция-конструктор может называться просто New, которую, можно будет вызвать как ring.New. Используйте структуру пакетов при выборе имен.
Другой короткий пример функция once.Do; once.Do(setup) читается хорошо, и при этом лучше не станет, если ее переименовать в once.DoOrWaitUntilDone(setup). Длинные имена не делают названия более читабельными. В то время как комментарии могут быть более ценным, чем длинные имена.
Геттеры
^
Go не предоставляет автоматическую поддержку геттеров и сеттеров. Но не будет ошибкой создание геттеров и сеттеров самостоятельно, и если это необходимо, то делайте так, но идиоматически нет необходимости добавлять Get в имя геттера. Если у Вас есть поле с именем owner (с маленькой буквы, неэкспортируемое), то геттер может называться Owner (с большой буквы, экспортируемый), а не GetOwner. Использование имен, начинающихся с заглавной буквы, позволяет отделить экспортируемые методы от неэкспортируемых полей. Cеттер, при необходимости, может быть назван SetOwner. Оба примера в следующем коде:
owner := obj.Owner()
if owner != user {
obj.SetOwner(user)
}
Имена интерфейсов
^
По соглашению, интерфейсы с одним методом должны называться как метод с суффиксом -er или подобно этому, для образования существительного: Reader, Writer, Formatter, CloseNotifier и т.д.
Существует целый ряд таких имен, и полезно уважать их и те имена методов, которые они закрепляют. Read, Write, Close, Flush, String и т.д. имеют канонические сигнатуры и значения. Чтобы избежать путаницы, не давайте своему методу ни одно из этих имен, если он не имеет ту же сигнатуру и значение. И наоборот, если ваш тип реализует метод с тем же значением, что и метод хорошо известного типа, дайте ему то же имя и ту же сигнатуру; назовите Ваш метод конвертации в строку String, а не ToString.
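Например, канонический однометодный интерфейс Reader из пакета io выглядит так (фрагмент приведён здесь только для иллюстрации соглашения об именовании):
type Reader interface {
    Read(p []byte) (n int, err error)
}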
MixedCaps
^
В заключении, Go соглашение использует MixedCaps или mixedCaps , а не подчеркивание для имен из нескольких слов.
Точка с запятой
^
Как и в С, грамматика Go формально использует точку с запятой для разделения операций-выражений (инструкций), но в отличии от C, точка с запятой не представлена в исходном коде. Вместо этого, лексер использует простое правило добавления точки с запятой автоматически, при сканировани. Таким образом текст на входе по большей части освобожден от них.
Правило такое. Если последний токен(лексема) перед символом новой строки - идентификатор (который включает такие слова, как int и float64), базовый литерал, такой как число или строковая константа, или один из нижеперечисленных токенов
break continue fallthrough return ++ -- ) }
то, лексер всегда добавляет точку с запятой после него. Вкратце, это может звучать так: "Если новая строка начинается после токена, который может закрывать операцию-выражение, то добавить точку с запятой".
Точка с запятой также может быть опущена сразу перед закрывающей скобкой, таким образом для операции-выражения такой как:
go func() { for { dst <- <-src } }()
точка с запятой не требуется.
Как следствие из правила, вы не можете перенести открывающую скобку управляющих структур (if, for, switch или select) на новую строку. Если перенесете, точка с запятой будет вставлена перед скобкой, которая может стать причиной нежелательных эффектов. Пишите так,
if i < f() {
g()
}
но не так
if i < f() // ошибка!
{ // ошибка!
g()
}
^
Управляющие структуры
^
Управляющие структуры в Go аналогичны тем же структурам в C, но имеют ряд важных отличий. Во-первых нет циклов do и while, есть лишь обобщенный for. Во-вторых, switch более гибкий. В-третьих if и switch имеют опциональную инициализацию переменных, как и в for. В-четвертых, break и continue опционально принимают метку, к которой необходимо перейти. В-пятых, есть новые операторы, такие как типизированный switch и многоканальный select. Синтаксис также немного отличается: отсутствуют круглые скобки в условии, и тело структуры всегда должно быть ограничено фигурными скобками.
^
If
^
В Go простой if выглядит так:
if x > 0 {
return y
}
Обязательные фигурные скобки упрощают написание простых условий if на несколько строк. Это хороший стиль в любом случае, особенно когда тело содержит управляющие операторы, такие как return или break.
Поскольку if и switch допускают инициализацию переменных, то часто можно видеть подобную запись:
if err := file.Chmod(0664); err != nil {
log.Print(err)
return err
}
В библиотеках Go, вы найдёте подобную запись, если if не переходит в следующий блок, т.е. в теле используется break, continue, goto или return, а необязательный else опускается.
f, err := os.Open(name)
if err != nil {
return err
}
codeUsing(f)
В данном примере представлена общая схема, где код защищен от серии ошибок. Код читается хорошо, если выполняется без ошибок, обходя случаи их возникновения. Так как ошибки приводят к завершению выполнения блока с помощью return, то блок else не требуется.
f, err := os.Open(name)
if err != nil {
return err
}
d, err := f.Stat()
if err != nil {
f.Close()
return err
}
codeUsing(f, d)
^
Переопределение и переприсваивание
^
Последний пример предыдущего раздела демонстрирует использование краткой формы объявления переменных :=. Вызов os.Open объявляет сразу две переменных f и err
f, err := os.Open(name)
Несколькими строками ниже вызывается f.Stat,
d, err := f.Stat()
который выглядит как объявления двух переменных d и err. Хотя err присутствует в обоих объявлениях. Это дублирование вполне законно: err объявляется в первом случае, и лишь переприсваивается во втором. Это означает, что f.Stat использует уже существующую переменную err, определенную выше, и просто присваивает ей новое значение.
В объявлении := переменная v может присутствовать, даже если она уже объявлена, при условии:
• если объявление происходит в той же самой области видимости, что и существующая переменная v (если v уже объявлена за пределами видимости, то объявление создаст новую переменную §)
• соответствующее значение, при инициализации, может быть присвоено v
• существует хотя бы одна новая переменная в объявлении, которая будет создана заново
Это необычное свойство - чистая практичность, которая служит для упрощения использования одной переменной err, к примеру, в длинных цепочках if-else. Вы увидите, это используется часто.
§ Нет ничего плохого в том, что в Go область видимости параметров и возвращаемых значений функции - есть само тело функции, хотя они лексически находятся за скобками, ограничивающими тело функции.
^
Оператор For
^
В Go цикл for очень похож, но не такой же как в C. Он унифицирует for и while, при этом отсутствует do-while цикл. Существует 3 различных формы, и только в одной из них используется точка с запятой.
// C-подобный for
for init; condition; post { }
// C-подобный while
for condition { }
// C-подобный for(;;)
for { }
Краткая запись позволяет легко объявить начальные условия прямо в цикле:
sum := 0
for i := 0; i < 10; i++ {
sum += i
}
Если Вы итерируетесь по массиву, срезу, строке или map'у, или читаете из канала, то для управления можно использовать range.
for key, value := range oldMap {
newMap[key] = value
}
Если необходимо использовать только первый элемент диапазона (ключ или индекс), отбросьте второй:
for key := range m {
if key.expired() {
delete(m, key)
}
}
Если вам необходим только второй элемент (значение), то используйте пустой идентификатор (_) в качестве первого элемента:
sum := 0
for _ , value := range array {
sum += value
}
Пустой идентификатор используется в разных случаях и будет описан позже.
Для строк, оператор range выполняет ещё больше работы, к примеру разделяет строку по символам Unicode в соответствии с UTF-8. При ошибочном использование кодировки, побайтово заменяет рунами(rune) U+FFFD. (rune (и одноименный встроенный тип) в терминологии Go используется для работы с символами Unicode. Смотрите детальную информацию в Спецификации языка).
Данный цикл:
for pos, char := range "日本\x80語" { // \x80 is an illegal UTF-8 encoding
fmt.Printf("character %#U starts at byte position %d\n", char, pos)
}
Выводит:
character U+65E5 '日' starts at byte position 0
character U+672C '本' starts at byte position 3
character U+FFFD '�' starts at byte position 6
character U+8A9E '語' starts at byte position 7
И в заключении, в языке Go нет оператора запятая, а ++ и -- являются инструкциями, но не выражениями. Таким образом, если Вам необходимо использовать несколько переменных в цикле for, то Вы можете использовать параллельное определение переменных (без использования ++ и --).
// Reverse a
for i, j := 0, len(a)-1; i < j; i, j = i+1, j-1 {
a[i], a[j] = a[j], a[i]
}
^
Switch(переключатель)
^
В языке Go switch более обобщён, нежели в C. Выражения не обязательно должны быть константами или даже целыми числами, условия проверяются сверху-вниз до нахождения соответствия, и если switch не имеет выражений, то переходит в true. Следовательно, идиоматически возможно записывать if-else-if-else цепочку как switch.
func unhex(c byte) byte {
switch {
case '0' <= c && c <= '9':
return c - '0'
case 'a' <= c && c <= 'f':
return c - 'a' + 10
case 'A' <= c && c <= 'F':
return c - 'A' + 10
}
return 0
}
Автоматический пропуск условий отсутствует, но, при этом, условия могут быть записаны через запятую:
func shouldEscape(c byte) bool {
switch c {
case ' ', '?', '&', '=', '#', '+', '%':
return true
}
return false
}
Несмотря на то, что они не столь распространены в Go, как в некоторых других C-подобных языках, break может быть использован для досрочного прерывания switch. Хотя, иногда, надо прервать внешний (по отношению к switch) цикл, а не сам switch, и в Go это может быть достигнуто путём добавления метки перед циклом, и переходом к этой метке в случае вызова break. В следующем примере представлены оба случая:
Loop:
for n := 0; n < len(src); n += size {
switch {
case src[n] < sizeOne:
if validateOnly {
break
}
size = 1
update(src[n])
case src[n] < sizeTwo:
if n+1 >= len(src) {
err = errShortInput
break Loop
}
if validateOnly {
break
}
size = 2
update(src[n] + src[n+1]<<shift)
}
}
Конечно, continue также допускает использование меток, но только в циклах.
В заключении, метод сравнения байтовых срезов использующий два оператора switch:
// Compare returns an integer comparing the two byte slices,
// lexicographically.
// The result will be 0 if a == b, -1 if a < b, and +1 if a > b
func Compare(a, b []byte) int {
for i := 0; i < len(a) && i < len(b); i++ {
switch {
case a[i] > b[i]:
return 1
case a[i] < b[i]:
return -1
}
}
switch {
case len(a) > len(b):
return 1
case len(a) < len(b):
return -1
}
return 0
}
^
Переключатель типов (Типизированный переключатель, Type switch)
^
switch может быть использован для определения динамических типов интерфейсных переменных. Так, типизированный switch использует синтаксис приведения типов, с ключевым словом type внутри скобок. Если switch объявляет переменную в выражении, то переменная будет иметь соответствующий тип в каждом пункте. Также, идиоматически верно переиспользовать имена переменных для объявления новых переменных с тем же именем, но другим типом в каждом случае:
var t interface{}
t = functionOfSomeType()
switch t := t.(type) {
default:
fmt.Printf("unexpected type %T\n", t) // %T prints whatever type t has
case bool:
fmt.Printf("boolean %t\n", t) // t has type bool
case int:
fmt.Printf("integer %d\n", t) // t has type int
case *bool:
fmt.Printf("pointer to boolean %t\n", *t) // t has type *bool
case *int:
fmt.Printf("pointer to integer %d\n", *t) // t has type *int
}
Функции и методы (Functions, methods)
^
Множественное возвращение результатов
^
Одна из особенностей языка Go состоит в том, что функции и методы могут возвращать несколько значений.
При использовании языка С, передача ошибки производится через отрицательное значение с описанием причины ошибки в "другом" месте.
При использовании языка Go, функция Write может вернуть одновременно и возвращаемое значение и ошибку. Сигнатура метода Write в файлах пакета os:
func (file *File) Write(b []byte) (n int, err error)
и как предусмотрено документацией, он возвращает число записанных байт и ненулевое значение ошибки error, когда n != len(b). Это общий стиль, смотрите также раздел посвящённый ошибкам в качестве примера.
Данный подход избавляет от необходимости передавать указатель на возвращаемое значение для имитации ссылочного параметра. Вот простая функция, которая извлекает число из заданной позиции в байтовом срезе, возвращая это число и следующую позицию.
func nextInt(b []byte, i int) (int, int) {
for ; i < len(b) && !isDigit(b[i]); i++ {
}
x := 0
for ; i < len(b) && isDigit(b[i]); i++ {
x = x*10 + int(b[i]) - '0'
}
return x, i
}
Вы можете использовать её для сканирования чисел во входном срезе b следующим образом:
for i := 0; i < len(b); {
x, i = nextInt(b, i)
fmt.Println(x)
}
Именование параметров результата
^
Возвращаемым "параметрам" в языке Go можно давать имена и это часто используется как входные параметры. Когда они именованы, то они инициализируются нулевым значением необходимого типа в самом начале функции. Если функция, в которой определены именованные параметры, вызывает конструкцию возврата без аргументов, то значения именованных параметров будут использованы ей как возвращаемые значения. Именование не обязательное, но оно может сделать код короче и чище - самодокументированным. Если имя результата будет nextInt, то очевидно что тип результата int.
func nextInt(b []byte, pos int) (value, nextPos int) {
На примере io.ReadFull:
func ReadFull(r Reader, buf []byte) (n int, err error) {
for len(buf) > 0 && err == nil {
var nr int
nr, err = r.Read(buf)
n += nr
buf = buf[nr:]
}
return
}
^
Отсроченный вызов (Defer)
^
В языке Go есть оператор defer, который планирует вызов функции (отложенный вызов) так, чтобы он выполнился непосредственно перед возвратом из функции, содержащей defer. Это необычный, но эффективный способ работы с ситуациями, когда ресурс должен быть освобождён независимо от того, каким путём функция завершается. Канонические примеры - разблокировка mutex или закрытие файла.
// Contents returns the file's contents as a string.
func Contents(filename string) (string, error) {
f, err := os.Open(filename)
if err != nil {
return "", err
}
defer f.Close() // f.Close will run when we're finished.
var result []byte
buf := make([]byte, 100)
for {
n, err := f.Read(buf[0:])
result = append(result, buf[0:n]...) // append is discussed later.
if err != nil {
if err == io.EOF {
break
}
return "", err // f will be closed if we return here.
}
}
return string(result), nil // f will be closed if we return here.
}
Отложенный вызов функции Close имеет два преимущества. Во-первых, он гарантирует, что Вы не забудете закрыть файл - ошибка, которую легко совершить, если позже добавить в функцию новую ветку с return. Во-вторых, закрытие расположено рядом с открытием, что гораздо понятнее, чем размещать его в конце функции.
Аргументы отложенной функции (включая получателя, если функция является методом) вычисляются в момент выполнения defer, а не в момент самого отложенного вызова. Помимо того, что это избавляет от беспокойства о переменных, меняющих значения по ходу выполнения функции, это означает, что одна точка отложенного вызова может отложить выполнение нескольких функций.
Вот простой пример:
for i := 0; i < 5; i++ {
defer fmt.Printf("%d ", i)
}
Отложенные функции выполняются в порядке LIFO, поэтому при возврате из функции этот код напечатает 4 3 2 1 0. Более интересный пример - простая трассировка выполнения функций в программе. Мы могли бы написать пару простых процедур трассировки, например так:
func trace(s string) { fmt.Println("entering:", s) }
func untrace(s string) { fmt.Println("leaving:", s) }
// Use them like this:
func a() {
trace("a")
defer untrace("a")
// do something....
}
Мы можем сделать лучше, используя тот факт, что аргументы отложенных функций вычисляются в момент выполнения defer. Функция трассировки может подготовить аргумент для функции, завершающей трассировку. К примеру:
func trace(s string) string {
fmt.Println("entering:", s)
return s
}
func un(s string) {
fmt.Println("leaving:", s)
}
func a() {
defer un(trace("a"))
fmt.Println("in a")
}
func b() {
defer un(trace("b"))
fmt.Println("in b")
a()
}
func main() {
b()
}
выводит:
entering: b
in b
entering: a
in a
leaving: a
leaving: b
Для программистов привыкших к блочному управлению ресурсами в других языках, функция defer может показаться странной, но интересной и мощной, так как позволяет уйти от блочного управления к управлению в функции. В разделах panic и recover будут также рассматриваться несколько примеров.
^
Данные
^
Созданные с помощью new
^
В Go есть два примитива выделения памяти - встроенные функции new и make. Они делают разные вещи и применяются к разным типам, что может сбивать с толку, но правила просты. Сначала обсудим new. Это встроенная функция, которая выделяет память, но, в отличие от одноимённых функций в некоторых других языках, она не инициализирует память, а лишь заполняет её нулями.
То есть new(T) выделяет обнулённую память под новый элемент типа T и возвращает его адрес - значение типа *T. В терминологии Go, она возвращает указатель на только что выделенное нулевое значение типа T.
TODO Since the memory returned by new is zeroed, it's helpful to arrange when designing your data structures that the zero value of each type can be used without further initialization. This means a user of the data structure can create one with new and get right to work. For example, the documentation for bytes.Buffer states that "the zero value for Buffer is an empty buffer ready to use." Similarly, sync.Mutex does not have an explicit constructor or Init method. Instead, the zero value for a sync.Mutex is defined to be an unlocked mutex. The zero-value-is-useful property works transitively. Consider this type declaration. -
type SyncedBuffer struct {
lock sync.Mutex
buffer bytes.Buffer
}
TODO Values of type SyncedBuffer are also ready to use immediately upon allocation or just declaration. In the next snippet, both p and v will work correctly without further arrangement. -
p := new(SyncedBuffer) // type *SyncedBuffer
var v SyncedBuffer // type SyncedBuffer
Конструкторы и составные литералы
^
Иногда нулевых значений не достаточно и необходимо иметь конструктор, следующий пример взят из пакета os.
func NewFile(fd int, name string) *File {
if fd < 0 {
return nil
}
f := new(File)
f.fd = fd
f.name = name
f.dirinfo = nil
f.nepipe = 0
return f
}
Существует много шаблонов. Мы просто можем использовать составные литералы, которые будут создавать новые сущности каждый раз.
func NewFile(fd int, name string) *File {
if fd < 0 {
return nil
}
f := File{fd, name, nil, 0}
return &f
}
Обратите внимание, что, в отличие от C, совершенно нормально возвращать адрес локальной переменной: память, связанная с переменной, продолжает существовать после возврата из функции. На самом деле, взятие адреса составного литерала выделяет новый экземпляр при каждом вычислении, поэтому мы можем объединить две последние строки:
return &File{fd, name, nil, 0}
Поля составного литерала перечисляются в порядке объявления, и все они должны присутствовать. Однако при явной маркировке элементов парами поле: значение инициализаторы могут идти в любом порядке, а пропущенные поля получают свои нулевые значения. Таким образом, можно объявить:
return &File{fd: fd, name: name}
В предельном случае, если составной литерал не содержит полей вообще, он создаёт нулевое значение данного типа. Выражения new(File) и &File{} эквивалентны.
Составные литералы могут также создавать массивы, срезы и карты, при этом метками служат индексы или ключи карты. В следующих примерах инициализация работает независимо от значений Enone, Eio и Einval, при условии, что они различны.
a := [...]string {Enone: "no error", Eio: "Eio", Einval: "invalid argument"}
s := []string {Enone: "no error", Eio: "Eio", Einval: "invalid argument"}
m := map[int]string{Enone: "no error", Eio: "Eio", Einval: "invalid argument"}
^
Создание с помощью make
^
Возвращаясь к созданию элементов. Встроенная функция make(T, args) служит для других целей нежели new(T). Он создает только срезы, карты и каналы, и возвращают инициализированные (не нулевые) значение типа T (а не *T). Причиной различия для этих трех типов, в том что внутри они представляют из себя структуры данных, которые необходимо инициализировать перед использованием. К примеру, срезы - это трехэлементная структура, содержащая указатель на данные(внутри массив), длину, и емкость, причём пока все элементы не инициализированы - срез нулевой nil. Для срезов, карт и каналов, встроенная команда make инициализирует внутреннюю структуру данных и подготавливает значения к использованию.
К примеру:
make([]int, 10, 100)
создает массив из 100 значений типа int и затем создает структуру среза длинной 10 и емкостью 100 со ссылкой только на первые 10 элементов. (Когда создается слайс, его емкость задавать не обязательно, смотрите раздел посвящённый срезам.) В противоположность, new([]int) возвращает указатель на новый, созданный, заполненный нулями срез, это указатель на значение nil среза.
Эти примеры показывают различие между new и make.
var p *[]int = new([]int) // allocates slice structure; *p == nil; rarely useful
var v []int = make([]int, 100) // the slice v now refers to a new array of 100 ints
// Unnecessarily complex:
var p *[]int = new([]int)
*p = make([]int, 100, 100)
// Idiomatic:
v := make([]int, 100)
Помните, что make применяется только к картам, срезам и каналам и не возвращает указатель. Чтобы получить указатель в явном виде, используйте new или возьмите адрес переменной явно.
^
Массивы
^
Массивы полезны при детальном планировании размещения данных в памяти и иногда помогают избежать лишних выделений, но в первую очередь они являются строительным блоком для срезов, о которых пойдёт речь в следующем разделе.
Основные отличия в работе с массивами между Go и C:
• Массивы - это значения. Присваивание одного массива другому копирует все элементы.
• Если вы передаёте массив в функцию, то передаётся копия массива, а не указатель на него.
• Размер массива является частью массива. Типы [10]int и [20]int разные.
Свойство «массив - это значение» может быть полезным, но и дорогим (с точки зрения производительности); если Вам нужны поведение и эффективность, как в C, можно передавать указатель на массив.
func Sum(a *[3]float64) (sum float64) {
for _, v := range *a {
sum += v
}
return
}
array := [...]float64{7.0, 8.5, 9.1}
x := Sum(&array) // Note the explicit address-of operator
Но даже такой стиль не является идиоматичным для Go. Вместо массивов используйте срезы.
^
Срезы(Slices, слайсы)
^
Срезы это обёртка для массивов и при этом более общий и мощный, и предоставляет собой более удобный интерфейс по управлению данными, в случаях, когда не известно точное количество элементов и необходимо преобразование размера массивов. Большинство программ на языке Go, выполнены с использованием срезов, а не простых массивов.
Срез хранит ссылку на массив и поэтому если приравнять срез к другому срезу, то будет тот же массив. Если срез является аргументом функции, то изменения элементов в срезе будут видны вызывающему данному функцию, это аналогично передаче указателя на базовый массив. В функция Read может принимать в качестве аргумента срез, что равнозначно указателю на массив и длины массива; длина среза указывает верхний предел количество данных которые необходимо прочитать. В данном случае тип File пакета os имеет следующую сигнатуру метода Read:
func (f * File) Read(buf []byte) (n int, err error)
Метод возвращает количество прочитанных байт или если есть, то ошибку. Для чтения первых 32 байт в буфере buf, получить(срезать) часть буфера.
n, err := f.Read(buf[0:32])
Such slicing is common and efficient. In fact, leaving efficiency aside for the moment, the following snippet would also read the first 32 bytes of the buffer.
var n int
var err error
for i := 0; i < 32; i++ {
nbytes, e := f.Read(buf[i:i+1]) // Read one byte.
if nbytes == 0 || e != nil {
err = e
break
}
n += nbytes
}
The length of a slice may be changed as long as it still fits within the capacity of the underlying array. The capacity, reported by the built-in function cap, is the maximum length the slice may assume. Here is a function to append data to a slice: if the data exceeds the capacity, the slice is reallocated, and the resulting slice is returned. The function relies on the fact that len and cap are legal even when applied to a nil slice, for which they return 0.
func Append(slice, data []byte) []byte {
l := len(slice)
if l + len(data) > cap(slice) { // reallocate
// Allocate double what's needed, for future growth.
newSlice := make([]byte, (l+len(data))* 2)
// The copy function is predeclared and works for any slice type.
copy(newSlice, slice)
slice = newSlice
}
slice = slice[0:l+len(data)]
for i, c := range data {
slice[l+i] = c
}
return slice
}
We must return the slice afterwards because, although Append can modify the elements of slice, the slice itself (the run-time data structure holding the pointer, length, and capacity) is passed by value.
The idea of appending to a slice is so useful that it is captured by the built-in append function. To understand that function's design, though, we need a little more information, so we will return to it later.
^
Two-dimensional slices
^
Go's arrays and slices are one-dimensional. To create the equivalent of a 2D array or slice, it is necessary to define an array-of-arrays or a slice-of-slices, like this:
type Transform [3][3]float64 // A 3x3 array, really an array of arrays.
type LinesOfText [][]byte // A slice of byte slices.
Because slices are variable-length, it is possible for each inner slice to have a different length. That is a common situation, as in the LinesOfText example: each line has an independent length.
text := LinesOfText{
[]byte("Now is the time"),
[]byte("for all good gophers"),
[]byte("to bring some fun to the party."),
}
Sometimes it is necessary to allocate a 2D slice, for example when processing rows of pixels. There are two ways to do it:
• The first is to allocate each inner slice independently.
• The second is to allocate a single array and slice it up. Which to use depends on the application: if the slices might grow or shrink, they should be allocated independently to avoid overwriting the next row; if not, it can be more efficient to construct the whole object with a single allocation. For reference, here are sketches of both methods. First, a row at a time:
// Allocate the top-level slice.
picture := make([][]uint8, YSize) // One row per unit of y.
// Loop over the rows, allocating the slice for each row.
for i := range picture {
picture[i] = make([]uint8, XSize)
}
And here it is with a single allocation, sliced into rows:
// Allocate the top-level slice, the same as before.
picture := make([][]uint8, YSize) // One row per unit of y.
// Allocate one large slice to hold all the pixels.
pixels := make([]uint8, XSize*YSize) // Has type []uint8 even though picture is [][]uint8.
// Loop over the rows, slicing each row from the front of the remaining pixels slice.
for i := range picture {
picture[i], pixels = pixels[:XSize], pixels[XSize:]
}
^
Maps
^
Maps are a convenient and powerful built-in data structure that associate values of one type (the key) with values of another type (the element or value). The key can be of any type for which the equality operator is defined, such as integers, floating point and complex numbers, strings, pointers, interfaces (as long as the dynamic type supports equality), structs and arrays. Slices cannot be used as map keys, because equality is not defined on them. Like slices, maps hold references to an underlying data structure: if you pass a map to a function and it changes the contents of the map, the changes will be visible to the caller. Maps can be constructed using the usual composite literal syntax with colon-separated key-value pairs, so it is easy to build them during initialization.
var timeZone = map[string]int{
"UTC": 0*60*60,
"EST": -5*60*60,
"CST": -6*60*60,
"MST": -7*60*60,
"PST": -8*60*60,
}
Assigning and fetching map values looks syntactically just like doing the same for arrays and slices, except that the index does not need to be an integer.
offset := timeZone["EST"]
An attempt to fetch a map value with a key that is not present in the map will return the zero value for the type of the entries in the map. For instance, if the map contains integers, looking up a non-existent key will return 0. A set can be implemented as a map with value type bool: set the map entry to true to put the value in the set, and then test membership by simple indexing.
attended := map[string]bool{
"Ann": true,
"Joe": true,
...
}
if attended[person] { // will be false if person is not in the map
fmt.Println(person, "was at the meeting")
}
Sometimes you need to distinguish a missing entry from a zero value. Is there an entry for "UTC", or is the result 0 because the key is not in the map at all? You can discriminate with a form of multiple assignment.
var seconds int
var ok bool
seconds, ok = timeZone[tz]
For obvious reasons this is called the "comma ok" idiom. In this example, if tz is present, seconds will be set appropriately and ok will be true; if not, seconds will be set to the zero value and ok will be false. Here is a function that puts it together with a nice error report:
func offset(tz string) int {
if seconds, ok := timeZone[tz]; ok {
return seconds
}
log.Println("unknown time zone:", tz)
return 0
}
If we only care whether a key is present, not about the actual value, we can use the blank identifier _ in place of the usual variable for the value.
_, present := timeZone[tz]
To delete a map entry, use the built-in function delete, whose arguments are the map and the key to be deleted. The operation is safe even if the key is already absent from the map.
delete(timeZone, "PDT") // Now on Standard Time
^
Printing
^
Formatted printing in Go uses a style similar to C's printf family but is richer and more general. The functions live in the fmt package and have capitalized names: fmt.Printf, fmt.Fprintf, fmt.Sprintf and so on. The string functions (Sprintf and friends) return a string rather than filling in a provided buffer.
You do not need to provide a format string: each of Printf, Fprintf and Sprintf has a companion pair of functions, for instance Print and Println.
These functions do not take a format string; instead they generate a default format for each argument. The Println versions also insert a blank between arguments and append a newline to the output, while the Print versions add blanks only between operands when neither is a string. In this example each line produces the same output.
fmt.Printf("Hello %d\n", 23)
fmt.Fprint(os.Stdout, "Hello ", 23, "\n")
fmt.Println("Hello", 23)
fmt.Println(fmt.Sprint("Hello ", 23))
The formatted print functions fmt.Fprint and friends take as their first argument any object that implements the io.Writer interface; the values os.Stdout and os.Stderr are familiar instances.
Here things begin to diverge from C. First, numeric formats such as %d do not take flags for signedness or size; instead, the printing routines use the type of the argument to decide these properties.
var x uint64 = 1<<64 - 1
fmt.Printf("%d %x; %d %x\n", x, x, int64(x), int64(x))
prints
18446744073709551615 ffffffffffffffff; -1 -1
If you just want the default conversion, such as decimal for integers, you can use the catchall format %v (for "value"); the result is exactly what Print and Println would produce.
Moreover, that format can print any value, even slices, structs, and maps. Here is a print statement for the time zone map defined in the previous section.
fmt.Printf("%v\n", timeZone) // or just fmt.Println(timeZone)
which gives the output
map[CST:-21600 PST:-28800 EST:-18000 UTC:0 MST:-25200]
The keys of a map may be printed in any order. When printing a struct, the modified format %+v annotates the fields of the structure with their names, and for any value the alternate format %#v prints the value in full Go syntax.
type T struct {
a int
b float64
c string
}
t := &T{ 7, -2.35, "abc\tdef" }
fmt.Printf("%v\n", t)
fmt.Printf("%+v\n", t)
fmt.Printf("%#v\n", t)
fmt.Printf("%#v\n", timeZone)
prints
&{7 -2.35 abc def}
&{a:7 b:-2.35 c:abc def}
&main.T{a:7, b:-2.35, c:"abc\tdef"}
map[string] int{"CST":-21600, "PST":-28800, "EST":-18000, "UTC":0, "MST":-25200}
(Note the ampersands.)
The quoted string format is available through %q when applied to a value of type string or []byte. The alternate format %#q will use backquotes instead if possible. (The %q format also applies to integers and runes, producing a single-quoted rune constant.) Also, %x works on strings, byte arrays and byte slices as well as on integers, generating a long hexadecimal string, and with a space in the format (% x) it puts spaces between the bytes.
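As a quick illustration of those verbs (the string value is arbitrary; the expected output is shown in the comments):
s := "hello"
fmt.Printf("%q\n", s)   // "hello"
fmt.Printf("%#q\n", s)  // `hello`
fmt.Printf("%x\n", s)   // 68656c6c6f
fmt.Printf("% x\n", s)  // 68 65 6c 6c 6f
fmt.Printf("%q\n", 97)  // 'a' (%q on an integer yields a rune constant)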
Another handy format is %T, which prints the type of a value.
fmt.Printf("%T\n", timeZone)
prints
map[string] int
If you want to control the default format for a custom type, all that is required is to define a method with the signature String() string on the type. For our simple type T, that might look like this.
func (t *T) String() string {
return fmt.Sprintf("%d/%g/%q", t.a, t.b, t.c)
}
fmt.Printf("%v\n", t)
which prints in the format
7/-2.35/"abc\tdef"
(If you need to print values of type T as well as pointers to T, the receiver for String must be of the value type; this example used a pointer because that is more efficient and idiomatic for struct types.)
Our String method is able to call Sprintf because the print routines are fully reentrant and can be wrapped this way. There is one important detail to understand about this approach, however: do not construct a String method by calling Sprintf in a way that will recur into your String method indefinitely. This can happen if the Sprintf call attempts to print the receiver directly as a string, which in turn will invoke the method again. It is a common and easy mistake to make, as this example shows.
type MyString string
func (m MyString) String() string {
return fmt.Sprintf("MyString=%s", m) // Error: will recur forever.
}
It is also easy to fix: convert the argument to the basic string type, which does not have the method.
type MyString string
func (m MyString) String() string {
return fmt.Sprintf("MyString=%s", string(m)) // OK: note conversion.
}
Another printing technique is to pass a print routine's arguments directly to another such routine. The signature of Printf uses the type ...interface{} for its final argument to specify that an arbitrary number of parameters can appear after the format.
func Printf(format string, v ...interface{}) (n int, err error) {
Within the function Printf, v acts like a variable of type []interface{}, but if it is passed to another variadic function, it acts like a regular list of arguments. Here is the implementation of the function log.Println we used above. It passes its arguments directly to fmt.Sprintln for the actual formatting.
// Println prints to the standard logger in the manner of fmt.Println.
func Println(v ...interface{}) {
std.Output(2, fmt.Sprintln(v...)) // Output takes parameters (int, string)
}
We write ... after v in the nested call to Sprintln to tell the compiler to treat v as a list of arguments; otherwise it would just pass v as a single slice argument.
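A minimal sketch of the difference (the helper name report is made up for illustration):
func report(v ...interface{}) {
    fmt.Print(fmt.Sprintln(v...)) // expands the arguments: a b 1
    fmt.Print(fmt.Sprintln(v))    // passes v as a single slice: [a b 1]
}
// report("a", "b", 1)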
There is even more to printing than we have covered here; see the godoc documentation for package fmt for the details.
By the way, a ... parameter can have a specific type, for instance ...int for a min function that chooses the least of a list of integers:
func Min(a ...int) int {
min := int(^uint(0) >> 1) // largest int
for _ , i := range a {
if i < min {
min = i
}
}
return min
}
^
Append
^
Now we have the missing piece we needed to explain the design of the built-in function append. Its signature is different from our custom Append function above. Schematically, it looks like this:
func append(slice []T, elements ...T) []T
where T is a placeholder for any given type. You cannot actually write a function in Go where the type T is determined by the caller; that is why append is built in: it needs support from the compiler.
What append does is append the elements to the end of the slice and return the result. The result needs to be returned because, as with our hand-written Append, the underlying array may change. This simple example
x := []int{1,2,3}
x = append(x, 4, 5, 6)
fmt.Println(x)
prints [1 2 3 4 5 6]. So append works a little like Printf, collecting an arbitrary number of arguments.
But what if we wanted to do what our Append does and append a slice to a slice? Easy: use ... at the call site, just as we did in the call to Output above. This snippet produces identical output to the one above.
x := []int{1,2,3}
y := []int{4,5,6}
x = append(x, y...)
fmt.Println(x)
Note that without the ... the code would not compile, because the types would be wrong: y is not of type int.
^
Initialization
^
Initialization in Go is more powerful than in C or C++. Complex structures can be built during initialization, and the ordering issues among initialized objects, even across different packages, are handled correctly.
^
Constants
^
Constants in Go are just that: constant. They are created at compile time, even when defined as locals in functions, and can only be numbers, characters (runes), strings or booleans. Because of the compile-time restriction, the compiler must be able to evaluate the expressions that define them. For instance, 1<<3 is a constant expression, while math.Sin(math.Pi/4) is not, because the call to math.Sin needs to happen at run time.
In Go, enumerated constants are created using the iota enumerator. Since iota can be part of an expression and expressions can be implicitly repeated, it is easy to build intricate sets of values.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import "fmt"
type ByteSize float64
const (
_ = iota // ignore first value by assigning to blank identifier
KB ByteSize = 1 << (10 * iota)
MB
GB
TB
PB
EB
ZB
YB
)
Attaching a String method to a user-defined type makes arbitrary values format themselves for printing. Although you will see it most often applied to structs, this technique is also useful for scalar types such as floating-point types like ByteSize.
func (b ByteSize) String() string {
switch {
case b >= YB:
return fmt.Sprintf("%.2fYB", b/YB)
case b >= ZB:
return fmt.Sprintf("%.2fZB", b/ZB)
case b >= EB:
return fmt.Sprintf("%.2fEB", b/EB)
case b >= PB:
return fmt.Sprintf("%.2fPB", b/PB)
case b >= TB:
return fmt.Sprintf("%.2fTB", b/TB)
case b >= GB:
return fmt.Sprintf("%.2fGB", b/GB)
case b >= MB:
return fmt.Sprintf("%.2fMB", b/MB)
case b >= KB:
return fmt.Sprintf("%.2fKB", b/KB)
}
return fmt.Sprintf("%.2fB", b)
}
The expression YB prints as 1.00YB, while ByteSize(1e13) prints as 9.09TB.
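For instance, assuming the ByteSize type and constants above are in scope, a quick check of that behavior would be:
fmt.Println(YB)             // 1.00YB
fmt.Println(ByteSize(1e13)) // 9.09TB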
The use of Sprintf to implement ByteSize's String method is safe (it does not recur indefinitely) not because of a conversion but because it calls Sprintf with %f, which is not a string format: Sprintf will only call the String method when it wants a string, and %f wants a floating-point value.
^
Variables
^
Variables can be initialized just like constants, but the initializer can be a general expression computed at run time.
var (
home = os.Getenv("HOME")
user = os.Getenv("USER")
gopath = os.Getenv("GOPATH")
)
^
The init function
^
Each source file can define its own niladic init function to set up whatever state is required. (In fact each file can have multiple init functions.) init is called after all the variable declarations in the package have evaluated their initializers, and those are evaluated only after all the imported packages have been initialized.
A common use of init functions is to verify or repair the correctness of the program state before real execution begins.
func init() {
if user == "" {
log.Fatal("$USER not set")
}
if home == "" {
home = "/home/" + user
}
if gopath == "" {
gopath = home + "/go"
}
// gopath may be overridden by --gopath flag on command line.
flag.StringVar(&gopath, "gopath", gopath, "override default GOPATH")
}
^
Methods
^
Pointers vs. values
^
As we saw with ByteSize, methods can be defined for any named type (except a pointer or an interface); the receiver does not have to be a struct.
In the discussion of slices above, we wrote an Append function. We can define it as a method on slices instead. To do this, we first declare a named type to which we can bind the method, and then make the receiver for the method a value of that type.
type ByteSlice []byte
func (slice ByteSlice) Append(data []byte) []byte {
// Body exactly the same as the Append function defined above.
}
This method still has to return the updated slice. We can eliminate that clumsiness by redefining the method to take a pointer to a ByteSlice as its receiver, so the method can overwrite the caller's slice:
func (p *ByteSlice) Append(data []byte) {
slice := *p
// Body as above, without the return.
*p = slice
}
In fact, we can do even better. If we modify our function so it looks like the standard Write method, like this,
func (p *ByteSlice) Write(data []byte) (n int, err error) {
slice := *p
// Again as above.
*p = slice
return len(data), nil
}
then the type *ByteSlice satisfies the standard interface io.Writer, which is handy. For instance, we can print into one:
var b ByteSlice
fmt.Fprintf(&b, "This hour has %d days\n", 7)
We pass the address of a ByteSlice because only *ByteSlice satisfies io.Writer. The rule about pointers vs. values for receivers is that value methods can be invoked on both pointers and values, but pointer methods can only be invoked on pointers.
This rule arises because pointer methods can modify the receiver; invoking them on a value would cause the method to receive a copy of the value, so any modifications would be discarded. The language therefore disallows this mistake. There is a handy exception, though: when the value is addressable, the language takes care of the common case of invoking a pointer method on a value by inserting the address operator automatically.
In our example, the variable b is addressable, so we can call its Write method with just b.Write. The compiler will rewrite that to (&b).Write for us.
By the way, the idea of using Write on a slice of bytes is central to the implementation of bytes.Buffer.
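As a small sketch of that idea, a bytes.Buffer is itself an io.Writer, so the very same Fprintf call works on it (this snippet assumes imports of bytes and fmt):
var buf bytes.Buffer
fmt.Fprintf(&buf, "This hour has %d days\n", 7)
fmt.Print(buf.String()) // retrieve the accumulated text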
^
Interfaces and other types
^
Interfaces
^
Interfaces in Go provide a way to specify the behavior of an object: if something can do this, then it can be used here. We have already seen a couple of simple examples: custom printers can be implemented by a String method, while Fprintf can generate output to anything with a Write method. Interfaces with only one or two methods are common in Go code, such as io.Writer for anything that implements Write.
A type can implement multiple interfaces. For instance, a collection can be sorted by the routines in package sort if it implements sort.Interface, which consists of Len(), Less(i, j int) bool, and Swap(i, j int), and it could also have a custom formatter. Consider the example Sequence:
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"fmt"
"sort"
)
func main() {
seq := Sequence{6, 2, -1, 44, 16}
sort.Sort(seq)
fmt.Println(seq)
}
type Sequence []int
// Methods required by sort.Interface.
func (s Sequence) Len() int {
return len(s)
}
func (s Sequence) Less(i, j int) bool {
return s[i] < s[j]
}
func (s Sequence) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// Method for printing - sorts the elements before printing.
func (s Sequence) String() string {
sort.Sort(s)
str := "["
for i, elem := range s {
if i > 0 {
str += " "
}
str += fmt.Sprint(elem)
}
return str + "]"
}
^
Conversions
^
The String method of Sequence recreates the work that Sprint already does for slices. We can share the effort if we convert the Sequence to a plain []int before calling Sprint.
func (s Sequence) String() string {
sort.Sort(s)
return fmt.Sprint([]int(s))
}
This method is another example of the conversion technique for calling Sprintf safely from a String method. Because the two types (Sequence and []int) are the same if we ignore the type name, it is legal to convert between them. The conversion does not create a new value; it just temporarily acts as though the existing value has a new type. (Other legal conversions, such as from integer to floating point, do create a new value.)
It is an idiom in Go programs to convert the type of an expression to access a different set of methods. For example, we could use the existing type sort.IntSlice:
type Sequence []int
// Method for printing - sorts the elements before printing
func (s Sequence) String() string {
sort.IntSlice(s).Sort()
return fmt.Sprint([]int(s))
}
Now, instead of having Sequence implement multiple interfaces (sorting and printing), we use the ability of a value to be converted to multiple types (Sequence, sort.IntSlice and []int), each of which does some part of the job. That is more unusual in practice, but can be effective.
^
Interface conversions and type assertions
^
Type switches are a form of conversion: they take an interface and, for each case in the switch, in a sense convert it to the type of that case. Here is a simplified version of how the code under fmt.Printf turns a value into a string using a type switch. If the value is already a string, we want the actual string value held by the interface, while if it has a String method we want the result of calling that method.
type Stringer interface {
String() string
}
var value interface{} // Value provided by caller.
switch str := value.(type) {
case string:
return str
case Stringer:
return str.String()
}
The first case finds a concrete value; the second converts the interface into another interface. It is perfectly fine to mix types this way.
What if there is only one type we care about? If we know the value holds a string and we just want to extract it? A one-case type switch would do, but so would a type assertion. A type assertion takes an interface value and extracts from it a value of the specified explicit type. The syntax borrows from the clause opening a type switch, but with an explicit type rather than the type keyword:
value.(typeName)
and the result is a new value with the static type typeName. That type must either be the concrete type held by the interface, or a second interface type that the value can be converted to. To extract the string we know is in the value, we could write:
str := value.(string)
But if it turns out that the value does not hold a string, the program will crash with a run-time error. To guard against that, use the "comma, ok" idiom to test, safely, whether the value is a string:
str, ok := value.(string)
if ok {
fmt.Printf("string value is: %q\n", str)
} else {
fmt.Printf("value is not a string\n")
}
If the type assertion fails, str will still exist and be of type string, but it will have the zero value, an empty string.
As an illustration, here is an if-else statement that is equivalent to the type switch that opened this section.
if str, ok := value.(string); ok {
return str
} else if str, ok := value.(Stringer); ok {
return str.String()
}
^
Generality
^
If a type exists only to implement an interface and will never be used beyond that interface, there is no need to export the type itself. Exporting just the interface makes it clear that the value has no interesting behavior beyond what is described in the interface. It also avoids the need to repeat the documentation on every instance of a common method.
In such cases, the constructor should return an interface value rather than the implementing type. For example, in the hash libraries both crc32.NewIEEE and adler32.New return the interface type hash.Hash32. Substituting the CRC-32 algorithm for Adler-32 in a Go program requires only changing the constructor call; the rest of the code is unaffected by the change of algorithm.
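A minimal sketch of that substitution (the checksum helper is hypothetical; it assumes an import of hash/crc32 or hash/adler32):
func checksum(data []byte) uint32 {
    h := crc32.NewIEEE() // swapping in adler32.New() is the only change needed
    h.Write(data)        // hash.Hash32 embeds io.Writer, so Write is available
    return h.Sum32()
}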
The same approach allows the streaming cipher algorithms in the crypto packages to be separated from the block ciphers they chain together. The Block interface in the crypto/cipher package specifies the behavior of a block cipher, which provides encryption of a single block of data. Then, by analogy with the bufio package, cipher packages that implement this interface can be used to construct streaming ciphers, represented by the Stream interface, without knowing the details of the block encryption.
The crypto/cipher interfaces look like this:
type Block interface {
BlockSize() int
Encrypt(src, dst []byte)
Decrypt(src, dst []byte)
}
type Stream interface {
XORKeyStream(dst, src []byte)
}
Here is the definition of the counter mode (CTR) stream, which turns a block cipher into a streaming cipher; notice that the block cipher's details are abstracted away:
// NewCTR returns a Stream that encrypts/decrypts using the given Block in
// counter mode. The length of iv must be the same as the Block's block size.
func NewCTR(block Block, iv []byte) Stream
NewCTR applies not just to one specific encryption algorithm and data source but to any implementation of the Block interface and any Stream. Because it returns an interface value, replacing CTR encryption with other encryption modes is a localized change: the constructor calls must be edited, but the surrounding code will not notice the difference, since it only sees a Stream.
^
Interfaces and methods
^
Since almost anything can have methods attached, almost anything can satisfy an interface. One illustrative example is in the http package, which defines the Handler interface. Any object that implements Handler can serve HTTP requests.
type Handler interface {
ServeHTTP(ResponseWriter, *Request)
}
ResponseWriter is itself an interface that provides the methods needed to return the response to the client. Those methods include the standard Write method, so an http.ResponseWriter can be used wherever an io.Writer can be used. Request is a struct containing a parsed representation of the request from the client.
For brevity, let's ignore POSTs and assume HTTP requests are always GETs; that simplification does not affect the way the handlers are set up. Here is a complete implementation of a handler that counts the number of times the page is visited.
// Simple counter server.
type Counter struct {
n int
}
func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
ctr.n++
fmt.Fprintf(w, "counter = %d\n", ctr.n)
}
(Keeping with our theme, note how Fprintf can print to an http.ResponseWriter.) For reference, here is how to attach such a server to a node on the URL tree.
import "net/http"
...
ctr := new(Counter)
http.Handle("/counter", ctr)
But why make Counter a struct? An integer is all that is needed. (The receiver needs to be a pointer so the increment is visible to the caller.)
// Simpler counter server.
type Counter int
func (ctr *Counter) ServeHTTP(w http.ResponseWriter, req *http.Request) {
*ctr++
fmt.Fprintf(w, "counter = %d\n", *ctr)
}
What if your program has some internal state that needs to be notified that a page has been visited? Tie a channel to the web page.
// A channel that sends a notification on each visit.
// (Probably want the channel to be buffered.)
type Chan chan *http.Request
func (ch Chan) ServeHTTP(w http.ResponseWriter, req *http.Request) {
ch <- req
fmt.Fprint(w, "notification sent")
}
Finally, suppose we wanted to present on /args the arguments used when invoking the server binary. It is easy to write a function to print the arguments.
func ArgServer() {
fmt.Println(os.Args)
}
How do we turn that into an HTTP server? We could make ArgServer a method of some type whose value we ignore, but there is a cleaner way. Since we can define a method for any type except pointers and interfaces, we can write a method for a function. The http package contains this code:
// The HandlerFunc type is an adapter to allow the use of
// ordinary functions as HTTP handlers. If f is a function
// with the appropriate signature, HandlerFunc(f) is a
// Handler object that calls f.
type HandlerFunc func(ResponseWriter, *Request)
// ServeHTTP calls f(c, req).
func (f HandlerFunc) ServeHTTP(w ResponseWriter, req *Request) {
f(w, req)
}
HandlerFunc is a type with a method, ServeHTTP, so values of that type can serve HTTP requests. Look at the implementation of the method: the receiver is a function, f, and the method calls f. That may seem odd, but it is not that different from, say, the receiver being a channel and the method sending on the channel.
To make ArgServer into an HTTP server, we first modify it to have the right signature.
// Argument server.
func ArgServer(w http.ResponseWriter, req *http.Request) {
fmt.Fprintln(w, os.Args)
}
ArgServer now has the same signature as HandlerFunc, so it can be converted to that type to access its methods, just as we converted Sequence to IntSlice to access IntSlice.Sort. The code to set it up is concise:
http.Handle("/args", http.HandlerFunc(ArgServer))
When someone visits the page /args, the handler installed at that page has value ArgServer and type HandlerFunc. The HTTP server will invoke the ServeHTTP method of that type, with ArgServer as the receiver, which will in turn call ArgServer (via the invocation f(w, req) inside HandlerFunc.ServeHTTP). The arguments will then be displayed.
In this section we have made an HTTP server from a struct, an integer, a channel, and a function, all because interfaces are just sets of methods, which can be defined for (almost) any type.
^
The blank identifier
^
We have mentioned the blank identifier a couple of times already, in the context of for range loops and maps. The blank identifier can be assigned or declared with any value of any type, with the value discarded harmlessly. It is a bit like writing to the Unix /dev/null file: it represents a write-only value to be used as a placeholder where a variable is needed but the actual value is irrelevant. It has uses beyond those we have seen already.
^
The blank identifier in multiple assignment
^
The use of a blank identifier in a for range loop is a special case of a general situation: multiple assignment.
If an assignment requires multiple values on the left side, but one of the values will not be used by the program, a blank identifier on the left-hand side avoids the need to create a dummy variable and makes it clear that the value is to be discarded. For instance, when calling a function that returns a value and an error, but only the error is important, use the blank identifier to discard the irrelevant value.
if _, err := os.Stat(path); os.IsNotExist(err) {
fmt.Printf("%s does not exist\n", path)
}
Occasionally you will see code that discards the error value in order to ignore the error; this is terrible practice. Always check error returns: they are provided for a reason.
// Bad! This code will crash if path does not exist.
fi, _ := os.Stat(path)
if fi.IsDir() {
fmt.Printf("%s is a directory\n", path)
}
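For contrast, one reasonable checked version of the same call might look like this (how to handle the error is up to the program; log.Fatal is just an example):
fi, err := os.Stat(path)
if err != nil {
    log.Fatal(err)
}
if fi.IsDir() {
    fmt.Printf("%s is a directory\n", path)
}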
^
Unused imports and variables
^
It is an error to import a package or to declare a variable without using it. Unused imports bloat the program and slow compilation, while a variable that is initialized but not used is at least a wasted computation and perhaps indicative of a larger bug. When a program is under active development, however, unused imports and variables often arise, and it can be annoying to delete them just to have the compilation proceed, only to need them again later. The blank identifier provides a workaround.
This half-written program has two unused imports (fmt and io) and an unused variable (fd), so it will not compile, but it would be nice to see whether the code so far is correct.
package main
import (
"fmt"
"io"
"log"
"os"
)
func main() {
fd, err := os.Open("test.go")
if err != nil {
log.Fatal(err)
}
// TODO: use fd.
}
To silence complaints about the unused imports, use a blank identifier to refer to a symbol from the imported package. Similarly, assigning the unused variable fd to the blank identifier silences the unused-variable error. This version of the program does compile.
package main
import (
"fmt"
"io"
"log"
"os"
)
var _ = fmt.Printf // For debugging; delete when done.
var _ io.Reader // For debugging; delete when done.
func main() {
fd, err := os.Open("test.go")
if err != nil {
log.Fatal(err)
}
// TODO: use fd.
_ = fd
}
By convention, the global declarations to silence import errors should come right after the imports and be commented, both to make them easy to find and as a reminder to clean things up later.
^
Import for side effect
^
An unused import like fmt or io in the previous example should eventually be used or removed: blank assignments identify code as a work in progress. But sometimes it is useful to import a package only for its side effects, without any explicit use. For example, during its init function the net/http/pprof package registers HTTP handlers that provide debugging information. It has an exported API, but most clients need only the handler registration and access the data through a web page. To import the package only for its side effects, rename the package to the blank identifier:
import _ "net/http/pprof"
This form of import makes clear that the package is being imported for its side effects, because there is no other possible use of the package: in this file, it does not have a name. (If it did, and we did not use that name, the compiler would reject the program.)
^
Interface checks
^
As we saw in the discussion of interfaces above, a type need not declare explicitly that it implements an interface. Instead, a type implements the interface just by implementing the interface's methods. In practice, most interface conversions are static and therefore checked at compile time.
For example, passing an *os.File to a function expecting an io.Reader will not compile unless *os.File implements the io.Reader interface.
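A minimal sketch of such a static check (readAll is a hypothetical helper; it assumes the io, io/ioutil, log and os packages):
// readAll accepts any io.Reader.
func readAll(r io.Reader) ([]byte, error) {
    return ioutil.ReadAll(r)
}
f, err := os.Open("data.txt")
if err != nil {
    log.Fatal(err)
}
defer f.Close()
data, err := readAll(f) // compiles because *os.File implements io.Reader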
Some interface checks do happen at run time, though. One instance is in the encoding/json package, which defines a Marshaler interface. When the JSON encoder receives a value that implements that interface, the encoder invokes the value's marshaling method to convert it to JSON instead of doing the standard conversion.
The encoder checks this property at run time with a type assertion like:
m, ok := val.(json.Marshaler)
If it is necessary only to ask whether a type implements an interface, without actually using the interface itself, perhaps as part of an error check, use the blank identifier to ignore the type-asserted value:
if _, ok := val.(json.Marshaler); ok {
fmt.Printf("value %v of type %T implements json.Marshaler\n", val, val)
}
One place this situation arises is when it is necessary to guarantee within the package implementing a type that it actually satisfies the interface. If a type, for example json.RawMessage, needs a custom JSON representation, it should implement json.Marshaler, but there are no static conversions that would cause the compiler to verify this automatically. If the type inadvertently fails to satisfy the interface, the JSON encoder will still work, but will not use the custom implementation. To guarantee that the implementation is correct, a global declaration using the blank identifier can be used in the package:
var _ json.Marshaler = (*RawMessage)(nil)
In this declaration, the assignment involving a conversion of a *RawMessage to a Marshaler requires that *RawMessage implement Marshaler, and that property is checked at compile time. Should the json.Marshaler interface change, this package will no longer compile, and we will be on notice that it needs to be updated.
The appearance of the blank identifier in this construct indicates that the declaration exists only for the type check, not to create a variable. Do not do this for every type that satisfies an interface, though. By convention, such declarations are only used when there are no static conversions already present in the code, which is a rare event.
^
Embedding
^
Go does not provide the typical, type-driven notion of subclassing, but it does have the ability to "borrow" pieces of an implementation by embedding types within a struct or interface.
Interface embedding is very simple. We have mentioned the io.Reader and io.Writer interfaces before; here are their definitions.
type Reader interface {
Read(p []byte) (n int, err error)
}
type Writer interface {
Write(p []byte) (n int, err error)
}
The io package also exports several other interfaces that specify objects that can implement several such methods. For instance, io.ReadWriter contains both Read and Write. We could specify io.ReadWriter by listing the two methods explicitly, but it is easier and more evocative to embed the two interfaces to form the new one, like this:
// ReadWriter is the interface that combines the Reader and Writer interfaces.
type ReadWriter interface {
Reader
Writer
}
This says just what it looks like: a ReadWriter can do what a Reader does and what a Writer does; it is a union of the embedded interfaces (which must be disjoint sets of methods). Only interfaces can be embedded within interfaces.
The same basic idea applies to structs, but with more far-reaching implications. The bufio package has two struct types, bufio.Reader and bufio.Writer, each of which implements the analogous interfaces from package io. And bufio also implements a buffered reader/writer, which it does by combining a reader and a writer into one struct using embedding: it lists the types within the struct but does not give them field names.
// ReadWriter stores pointers to a Reader and a Writer.
// It implements io.ReadWriter.
type ReadWriter struct {
*Reader // *bufio.Reader
*Writer // *bufio.Writer
}
The embedded elements are pointers to structs and of course must be initialized to point to valid structs before they can be used. The ReadWriter struct could be written as
type ReadWriter struct {
reader *Reader
writer *Writer
}
but then to promote the methods of the fields and to satisfy the io interfaces, we would also need to provide forwarding methods, like this:
func (rw *ReadWriter) Read(p []byte) (n int, err error) {
return rw.reader.Read(p)
}
By embedding the structs directly, we avoid this bookkeeping. The methods of embedded types come along for free, which means that bufio.ReadWriter not only has the methods of bufio.Reader and bufio.Writer, it also satisfies all three interfaces:
• io.Reader,
• io.Writer, and
• io.ReadWriter.
There is an important way in which embedding differs from subclassing. When we embed a type, the methods of that type become methods of the outer type, but when they are invoked, the receiver of the method is the inner type, not the outer one. In our example, when the Read method of a bufio.ReadWriter is invoked, it has exactly the same effect as the forwarding method written out above; the receiver is the reader field of the ReadWriter, not the ReadWriter itself.
Embedding can also be a simple convenience. This example shows an embedded field alongside a regular, named field.
type Job struct {
Command string
*log.Logger
}
The Job type now has the Print, Printf, Println and other methods of *log.Logger. We could have given the Logger a field name, of course, but it is not necessary. And now we can log to a Job:
job.Println("starting now...")
The Logger is a regular field of the Job struct, so we can initialize it in the usual way inside the constructor for Job, like this,
func NewJob(command string, logger *log.Logger) *Job {
return &Job{command, logger}
}
or with a composite literal:
job := &Job{command, log.New(os.Stderr, "Job: ", log.Ldate)}
If we need to refer to an embedded field directly, the type name of the field, ignoring the package qualifier, serves as a field name, as it did in the Read method of our ReadWriter struct. Here, if we needed to access the *log.Logger of a Job variable job, we would write job.Logger, which is useful if we want to refine the methods of Logger.
func (job *Job) Printf(format string, args ...interface{}) {
job.Logger.Printf("%q: %s", job.Command, fmt.Sprintf(format, args...))
}
Embedding types introduces the problem of name conflicts, but the rules to resolve them are simple.
First, a field or method X hides any other item X in a more deeply nested part of the type. If log.Logger contained a field or method called Command, the Command field of Job would dominate it.
Second, if the same name appears at the same nesting level, it is usually an error; it would be erroneous to embed log.Logger if the Job struct contained another field or method called Logger. However, if the duplicate name is never mentioned in the program outside the type definition, it is OK. This qualification provides some protection against changes made to types embedded from outside; there is no problem if a field is added that conflicts with a field in another subtype, as long as neither field is ever used.
^
Concurrency
^
Share by communicating
^
Concurrent programming is a large topic, and there is space here only for some Go-specific highlights.
Concurrent programming in many environments is made difficult by the subtleties required to implement correct access to shared variables.
Go encourages a different approach, in which shared values are passed around on channels and, in fact, never actively shared by separate threads of execution. Only one goroutine has access to the value at any given time, so data races cannot occur, by design. To encourage this way of thinking, it is reduced to a slogan:
Do not communicate by sharing memory; instead, share memory by communicating.
This approach can be taken too far. Reference counts, for instance, may be best done by putting a mutex around an integer variable. But as a high-level approach, using channels to control access makes it easier to write clear, correct programs.
One way to think about this model is to consider a typical single-threaded program running on one CPU: it has no need for synchronization primitives. Now run another such instance; it too needs no synchronization. Now let the two communicate; if the communication itself is the synchronizer, there is still no need for other synchronization. Unix pipelines, for example, fit this model perfectly. Although Go's approach to concurrency originates in Hoare's Communicating Sequential Processes (CSP), it can also be seen as a type-safe generalization of Unix pipes.
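As an illustrative sketch of the slogan (not taken from the original text), here is a counter whose value is owned by a single goroutine and is read and updated only through channels, rather than through a mutex-protected variable:
// counter owns n; other goroutines communicate instead of sharing it.
func counter(inc <-chan int, get chan<- int) {
    n := 0
    for {
        select {
        case d := <-inc:
            n += d // an increment request arrived
        case get <- n: // a reader asked for the current value
        }
    }
}
// Usage: inc, get := make(chan int), make(chan int); go counter(inc, get)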
^
Goroutines
^
They are called goroutines because the existing terms (threads, coroutines, processes, and so on) convey inaccurate connotations. A goroutine has a simple model: it is a function executing concurrently with other goroutines in the same address space. Goroutines are lightweight, costing little more than the allocation of stack space, and the stacks start small, so they are cheap and grow by allocating (and freeing) heap storage as required.
Goroutines are multiplexed onto multiple OS threads, so if one blocks, for example while waiting for I/O, the others continue to run. Their design hides many of the complexities of thread creation and management.
Prefix a function or method call with the go keyword to run the call in a new goroutine. When the call completes, the goroutine exits, silently. (The effect is similar to the Unix shell's & notation for running a command in the background.)
go list.Sort() // run list.Sort concurrently; don't wait for it.
A function literal can be handy in a goroutine invocation.
func Announce(message string, delay time.Duration) {
go func() {
time.Sleep(delay)
fmt.Println(message)
}() // Note the parentheses - must call the function.
}
In Go, function literals are closures: the implementation makes sure the variables referred to by the function survive as long as they are active.
These examples are not very practical because the functions have no way of signaling completion. For that, we need channels.
^
Channels
^
Like maps, channels are allocated with make, and the resulting value acts as a reference to an underlying data structure. If an optional integer parameter is provided, it sets the buffer size of the channel. The default is zero, for an unbuffered or synchronous channel.
ci := make(chan int) // unbuffered channel of integers
cj := make(chan int, 0) // unbuffered channel of integers
cs := make(chan *os.File, 100) // buffered channel of pointers to Files
Unbuffered channels combine communication (the exchange of a value) with synchronization, guaranteeing that the two goroutines are in a known state.
There are lots of nice idioms using channels. Here is one to get us started. In the previous section we launched a sort in the background. A channel can allow the launching goroutine to wait for the sort to complete.
c := make(chan int) // Allocate a channel.
// Start the sort in a goroutine; when it completes, signal on the channel.
go func() {
list.Sort()
c <- 1 // Send a signal; value does not matter.
}()
doSomethingForAWhile()
<-c // Wait for sort to finish; discard sent value.
Receivers always block until there is data to receive. If the channel is unbuffered, the sender blocks until the receiver has received the value. If the channel has a buffer, the sender blocks only until the value has been copied to the buffer; if the buffer is full, this means waiting until some receiver has retrieved a value.
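A tiny sketch of the buffered case (the capacity of 1 is arbitrary):
ch := make(chan int, 1)
ch <- 1           // does not block: the value goes into the buffer
// ch <- 2        // would block here: the buffer is full and nobody is receiving
fmt.Println(<-ch) // 1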
A buffered channel can be used like a semaphore, for instance to limit throughput. In this example, incoming requests are passed to handle, which sends a value into the channel, processes the request, and then receives a value from the channel to ready the "semaphore" for the next consumer. The capacity of the channel buffer limits the number of simultaneous calls to process.
var sem = make(chan int, MaxOutstanding)
func handle(r *Request) {
sem <- 1 // Wait for active queue to drain.
process(r) // May take a long time.
<-sem // Done; enable next request to run.
}
func Serve(queue chan *Request) {
for {
req := <-queue
go handle(req) // Don't wait for handle to finish.
}
}
Once MaxOutstanding handlers are executing process, any more will block trying to send into the filled channel buffer, until one of the existing handlers finishes and receives from the buffer.
This design has a problem, though: Serve creates a new goroutine for every incoming request, even though only MaxOutstanding of them can run at any moment. As a result, the program can consume unlimited resources if the requests come in too fast. We can address that deficiency by changing Serve to gate the creation of the goroutines. Here is an obvious solution, but beware: it has a bug we will fix subsequently.
func Serve(queue chan *Request) {
for req := range queue {
sem <- 1
go func() {
process(req) // Buggy; see explanation below.
<-sem
}()
}
}
The bug is that in a Go for loop, the loop variable is reused for each iteration, so the req variable is shared across all goroutines. That is not what we want: we need to make sure that req is unique for each goroutine. Here is one way to do that, passing the value of req as an argument to the closure in the goroutine:
func Serve(queue chan *Request) {
for req := range queue {
sem <- 1
go func(req *Request) {
process(req)
<-sem
}(req)
}
}
Compare this version with the previous to see the difference in how the closure is declared and run. Another solution is just to create a new variable with the same name, as in this example:
func Serve(queue chan *Request) {
for req := range queue {
req := req // Create new instance of req for the goroutine.
sem <- 1
go func() {
process(req)
<-sem
}()
}
}
It may seem odd to write
req := req
but it is legal and idiomatic in Go to do this. You get a fresh version of the variable with the same name, deliberately shadowing the loop variable locally but unique to each goroutine.
Going back to the general problem of writing the server, another approach that manages resources well is to start a fixed number of handle goroutines, all reading from the request channel. The number of goroutines limits the number of simultaneous calls to process.
This Serve function also accepts a channel on which it will be told to exit; after launching the goroutines it blocks receiving from that channel.
func handle(queue chan *Request) {
for r := range queue {
process(r)
}
}
func Serve(clientRequests chan *Request, quit chan bool) {
// Start handlers
for i := 0; i < MaxOutstanding; i++ {
go handle(clientRequests)
}
<-quit // Wait to be told to exit.
}
^
Channels of channels
^
One of the most important properties of Go is that a channel is a first-class value that can be allocated and passed around like any other. A common use of this property is to implement safe, parallel demultiplexing.
In the example in the previous section, handle was an idealized handler for a request, but we did not define the type it was handling. If that type includes a channel on which to reply, each client can provide its own path for the answer. Here is a schematic definition of type Request.
type Request struct {
args []int
f func([]int) int
resultChan chan int
}
The client provides a function and its arguments, as well as a channel inside the request object on which to receive the answer.
func sum(a []int) (s int) {
for _, v := range a {
s += v
}
return
}
request := &Request{[]int{3, 4, 5}, sum, make(chan int)}
// Send request
clientRequests <- request
// Wait for response.
fmt.Printf("answer: %d\n", <-request.resultChan)
On the server side, the handler function is the only thing that changes.
func handle(queue chan *Request) {
for req := range queue {
req.resultChan <- req.f(req.args)
}
}
This code is a framework for a rate-limited, parallel, non-blocking RPC system, and there is not a mutex in sight.
^
Parallelization
^
Another application of these ideas is to parallelize a calculation across multiple CPU cores. If the calculation can be broken into separate pieces that can execute independently, it can be parallelized, with a channel to signal when each piece completes.
Say we have an expensive operation to perform on a vector of items, and that the value of the operation on each item is independent, as in this idealized example.
type Vector []float64
// Apply the operation to v[i], v[i+1] ... up to v[n-1].
func (v Vector) DoSome(i, n int, u Vector, c chan int) {
for ; i < n; i++ {
v[i] += u.Op(v[i])
}
c <- 1 // signal that this piece is done
}
We launch the pieces independently in a loop, one per CPU. They can complete in any order, but that does not matter; we just count the completion signals on the channel after launching all the goroutines.
const numCPU = 4 // number of CPU cores
func (v Vector) DoAll(u Vector) {
c := make(chan int, numCPU) // Buffering optional but sensible.
for i := 0; i < numCPU; i++ {
go v.DoSome(i*len(v)/numCPU, (i+1)*len(v)/numCPU, u, c)
}
// Drain the channel.
for i := 0; i < numCPU; i++ {
<-c // wait for one task to complete
}
// All done.
}
Rather than creating a constant value for numCPU, we can ask the runtime what value is appropriate. The function runtime.NumCPU returns the number of hardware CPU cores in the machine, so we could write
var numCPU = runtime.NumCPU()
There is also a function runtime.GOMAXPROCS, which reports (or sets) the user-specified number of cores that a Go program can have running simultaneously. It defaults to the value of runtime.NumCPU but can be overridden by setting the similarly named environment variable or by calling the function with a positive number. Calling it with zero just queries the value. Therefore, if we want to honor the user's resource request, we should write
var numCPU = runtime.GOMAXPROCS(0)
Be sure not to confuse the ideas of concurrency (structuring a program as independently executing components) and parallelism (executing calculations in parallel for efficiency on multiple CPUs). Although the concurrency features of Go make some problems easy to structure as parallel computations, Go is a concurrent language, not a parallel one, and not all parallelization problems fit Go's model. For a discussion of the distinction, see the Go blog.
^
A leaky buffer
^
The tools of concurrent programming can even make non-concurrent ideas easier to express. Here is an example abstracted from an RPC package. The client goroutine loops receiving data from some source, perhaps a network. To avoid allocating and freeing buffers, it keeps a free list, and uses a buffered channel to represent it. If the channel is empty, a new buffer gets allocated. Once a buffer is ready, it is sent to the server on serverChan.
var freeList = make(chan *Buffer, 100)
var serverChan = make(chan *Buffer)
func client() {
for {
var b *Buffer
// Grab a buffer if available; allocate if not.
select {
case b = <-freeList:
// Got one; nothing more to do.
default:
// None free, so allocate a new one.
b = new(Buffer)
}
load(b) // Read next message from the net.
serverChan <- b // Send to server.
}
}
The server loop receives each message from the client, processes it, and returns the buffer to the free list.
func server() {
for {
b := <-serverChan // Wait for work.
process(b)
// Reuse buffer if there's room.
select {
case freeList <- b:
// Buffer on free list; nothing more to do.
default:
// Free list full, just carry on.
}
}
}
The client attempts to retrieve a buffer from freeList; if none is available, it allocates a fresh one. The server's send to freeList puts b back on the free list unless the list is full, in which case the buffer is dropped to be reclaimed by the garbage collector.
(The default clauses in the select statements execute when no other case is ready, which means the selects never block.) This implementation builds a leaky-bucket free list in just a few lines, relying on the buffered channel and the garbage collector for bookkeeping.
^
Errors
^
Library routines must often return some sort of error indication to the caller. As mentioned earlier, Go's multivalue return makes it easy to return a detailed error description alongside the normal return value. Using this feature to provide detailed error information is good style. For example, os.Open does not just return a nil pointer on failure; it also returns an error value that describes what went wrong.
By convention, errors have type error, a simple built-in interface.
type error interface {
Error() string
}
A library writer is free to implement this interface with a richer model under the covers, making it possible not only to see the error but also to provide some context. As mentioned, alongside the usual *os.File return value, os.Open also returns an error value. If the file is opened successfully, the error will be nil, but when there is a problem, it will hold an os.PathError:
// PathError records an error and the operation and
// file path that caused it.
type PathError struct {
Op string // "open", "unlink", etc.
Path string // The associated file.
Err error // Returned by the system call.
}
func (e * PathError) Error() string {
return e.Op + " " + e.Path + ": " + e.Err.Error()
}
PathError's Error generates a string like this:
open /etc/passwx: no such file or directory
Such an error, which includes the problematic file name, the operation, and the operating system error it triggered, is useful even if printed far from the call that caused it; it is much more informative than a plain "no such file or directory".
When feasible, error strings should identify their origin, for example by having a prefix naming the operation or package that generated the error. For example, in the image package, the string representation of a decoding error due to an unknown format is "image: unknown format".
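A tiny illustration of that convention (my sketch, not code from the original article): a package-level error value whose string carries the package name as a prefix.
package image

import "errors"

// ErrFormat reports that decoding encountered an unknown format.
// The "image: " prefix identifies the package that produced the error.
var ErrFormat = errors.New("image: unknown format")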
Callers that care about the precise error details can use a type switch or a type assertion to look for specific errors and extract more detail. For PathErrors this might include examining the internal Err field for recoverable failures.
for try := 0; try < 2; try++ {
file, err = os.Create(filename)
if err == nil {
return
}
if e, ok := err.(*os.PathError); ok && e.Err == syscall.ENOSPC {
deleteTempFiles() // Recover some space.
continue
}
return
}
The second if statement here is another type assertion. If it fails, ok will be false and e will be nil. If it succeeds, ok will be true, which means the error was of type *os.PathError, and then so is e, which we can examine for more information about the error.
Panic
The usual way to report an error to a caller is to return an error as an extra return value. The canonical Read method is a well-known instance; it returns a byte count and an error. But what if the error is unrecoverable? Sometimes the program simply cannot continue.
For this purpose there is a built-in function panic that in effect creates a run-time error that will stop the program (but see the next section). The function takes a single argument of arbitrary type, often a string, to be printed as the program dies. It is also a way to indicate that something impossible has happened, such as exiting an infinite loop.
// A toy implementation of cube root using Newton's method.
func CubeRoot(x float64) float64 {
z := x/3 // Arbitrary initial value
for i := 0; i < 1e6; i++ {
prevz := z
z -= (z*z*z-x) / (3*z*z)
if veryClose(z, prevz) {
return z
}
}
// A million iterations has not converged; something is wrong.
panic(fmt.Sprintf("CubeRoot(%g) did not converge", x))
}
This is only an example; real library functions should avoid panic. If the problem can be masked or worked around, it is always better to let things continue to run rather than take down the whole program. One possible counterexample is during initialization: if the library truly cannot set itself up, it might be reasonable to panic.
var user = os.Getenv("USER")
func init() {
if user == "" {
panic("no value for $USER")
}
}
Recover
When panic is called, including implicitly for run-time errors such as indexing a slice out of bounds or failing a type assertion, it immediately stops execution of the current function and begins unwinding the goroutine's stack, running any deferred functions along the way. If that unwinding reaches the top of the stack, the program dies. However, it is possible to use the built-in function recover to regain control of the goroutine and resume normal execution.
A call to recover stops the unwinding and returns the argument passed to panic. Because the only code that runs while unwinding is inside deferred functions, recover is only useful inside deferred functions.
One application of recover is to shut down a failing goroutine inside a server without killing the other executing goroutines.
func server(workChan <-chan *Work) {
for work := range workChan {
go safelyDo(work)
}
}
func safelyDo(work *Work) {
defer func() {
if err := recover(); err != nil {
log.Println("work failed:", err)
}
}()
do(work)
}
In this example, if do(work) panics, the result is logged and the goroutine exits cleanly without disturbing the others. There is no need to do anything else in the deferred closure; calling recover handles the condition completely.
Because recover always returns nil unless called directly from a deferred function, deferred code can call library routines that themselves use panic and recover without failing. As an example, the deferred function in safelyDo might call a logging function before calling recover, and that logging code would run unaffected by the panicking state.
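A sketch (my assumed variation of safelyDo, not from the original article) making that point explicit: the deferred function does some logging before it calls recover, and the log call runs even while the goroutine is panicking.
func safelyDoVerbose(work *Work) {
    defer func() {
        log.Println("finished work item") // runs during unwinding as well
        if err := recover(); err != nil {
            log.Println("work failed:", err)
        }
    }()
    do(work)
}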
With this recovery pattern in place, the do function (and anything it calls) can get out of any bad situation cleanly by calling panic. We can use that idea to simplify error handling in complex software. Let's look at an idealized version of a regexp package, which reports parsing errors by calling panic with a local error type. Here is the definition of Error, an error method, and the Compile function.
// Error is the type of a parse error; it satisfies the error interface.
type Error string
func (e Error) Error() string {
return string(e)
}
// error is a method of *Regexp that reports parsing errors by
// panicking with an Error.
func (regexp *Regexp) error(err string) {
panic(Error(err))
}
// Compile returns a parsed representation of the regular expression.
func Compile(str string) (regexp *Regexp, err error) {
regexp = new(Regexp)
// doParse will panic if there is a parse error.
defer func() {
if e := recover(); e != nil {
regexp = nil // Clear return value.
err = e.(Error) // Will re-panic if not a parse error.
}
}()
return regexp.doParse(str), nil
}
If doParse panics, the recovery block will set the return value to nil (deferred functions can modify named return values). It will then check, in the assignment to err, that the problem was a parse error by asserting that it has the local type Error. If it does not, the type assertion will fail, causing a run-time error that continues the stack unwinding as though nothing had interrupted it. This check means that if something unexpected happens, such as an index out of bounds, the code will still fail even though we are using panic and recover to handle parse errors.
With error handling in place, the error method (because it is a method bound to a type, it is fine, and even natural, for it to have the same name as the built-in error type) makes it easy to report parse errors without worrying about unwinding the parse stack by hand:
if pos == 0 {
re.error("'*' illegal at start of expression")
}
Useful though this pattern is, it should be used only within a package. Parse turns its internal panic calls into error values; it does not expose panics to its client. That is a good rule to follow.
Incidentally, this re-panic idiom changes the panic value if an actual error occurs. However, both the original and the new failure will be presented in the crash report, so the root cause of the problem will still be visible. If you want to display only the original value, you need a little more code to filter unexpected problems and re-panic with the original error.
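A sketch of that extra filtering (my illustration, not from the original article): a variant of Compile that keeps only expected parse errors and re-panics with the original value for anything else.
func Compile(str string) (regexp *Regexp, err error) {
    regexp = new(Regexp)
    defer func() {
        if e := recover(); e != nil {
            perr, ok := e.(Error)
            if !ok {
                panic(e) // not a parse error: re-panic with the original value
            }
            regexp = nil // Clear return value.
            err = perr
        }
    }()
    return regexp.doParse(str), nil
}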
A web server
Let's finish with a complete Go program, a web server. Google provides a service at http://chart.apis.google.com that does automatic formatting of data into charts and graphs. It is hard to use interactively, though, because you need to put the data into the URL as a query. The program here provides a nicer interface to one form of data: given a short piece of text, it calls on the chart server to produce a QR code, encoding the text as a matrix of pixels. That image can be grabbed with a phone camera and interpreted as, for instance, a URL, saving you typing the URL into the phone's tiny keyboard.
Here is the complete program; an explanation follows.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"flag"
"html/template"
"log"
"net/http"
)
var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18
var templ = template.Must(template.New("qr").Parse(templateStr))
func main() {
flag.Parse()
http.Handle("/", http.HandlerFunc(QR))
err := http.ListenAndServe(*addr, nil)
if err != nil {
log.Fatal("ListenAndServe:", err)
}
}
func QR(w http.ResponseWriter, req *http.Request) {
templ.Execute(w, req.FormValue("s"))
}
const templateStr = `
<html>
<head>
<title>QR Link Generator</title>
</head>
<body>
{{if .}}
<img src="http://chart.apis.google.com/chart?chs=300x300&cht=qr&choe=UTF-8&chl={{.}}" />
<br>
{{.}}
<br>
<br>
{{end}}
<form action="/" name=f method="GET"><input maxLength=1024 size=70
name=s value="" title="Text to QR Encode"><input type=submit
value="Show QR" name=qr>
</form>
</body>
</html>`
The pieces up to main should be easy to follow. The one flag sets a default HTTP port for our server. The template variable templ is where the fun happens: it builds an HTML template that the server will execute to display the page; more about that in a moment.
The main function parses the flags and, using the mechanism we talked about above, binds the function QR to the root path of the server. Then http.ListenAndServe is called to start the server; it blocks while the server runs.
QR just receives the request, which contains form data, and executes the template on the data in the form value named s.
The template package html/template is powerful; this program only touches on its capabilities. In essence, it rewrites a piece of HTML text on the fly by substituting elements derived from the data passed to templ.Execute, in this case the form value. Within the template text (templateStr), double-brace-delimited pieces denote template actions. The piece from {{if .}} to {{end}} executes only if the value of the current data item, called . (dot), is non-empty. That is, when the string is empty, this piece of the template is suppressed.
The two snippets {{.}} say to show the data presented to the template (the query string) on the web page. The HTML template package automatically provides appropriate escaping, so the text is safe to display.
The rest of the template string is just the HTML to show when the page loads. If this is too quick an explanation, see the documentation for the template package for a more thorough discussion.
And there you have it: a useful web server in a few lines of code plus some data-driven HTML text. Go is powerful enough to make a lot happen in a few lines.
Homo Deus: A Brief History of Tomorrow
Homo Deus, the sequel to the wildly successful hit Sapiens, by Yuval Harari, aims to chronicle the history of tomorrow and to provide us with a unique and dispassionate view of the future of humanity. In Homo Deus, Harari develops further the strongest idea in Sapiens, the idea that religions (or shared fictions) are the reason why humanity came to dominate the world.
Many things are classified by Harari as religions, from the traditional ones like Christianism, Islamism or Hinduism, to other shared fictions that we tend not to view as religions, such as countries, money, capitalism, or humanism. The ability to share fictions, such as these, created in Homo sapiens the ability to coordinate enormous numbers of individuals in order to create vast common projects: cities, empires and, ultimately, modern technology. This is the idea, proposed in Sapiens, that Harari develops further in this book.
Harari thinks that, with the development of modern technology, humans will doggedly pursue an agenda consisting of three main goals: immortality, happiness and divinity. Humanity will try to become immortal, to live in constant happiness and to be god-like in its power to control nature.
The most interesting part of the book is in the middle, where Harari analyses, in depth, the progressive but effective replacement of ancient religions by the dominant modern religion, humanism. Humanism, the relatively recent idea that there is a unique spark in humans that makes human life sacred and every individual unique, holds that meaning should be sought in the individual choices, views, and feelings of humans. It has almost completely replaced traditional religions (some of them millennia old), which believed that meaning was to be found in ancient scriptures or “divine” sayings.
True, many people still believe in traditional religions, but with the exception of a few extremist sects and states, these religions play a relatively minor role in conducting the business of modern societies. Traditional religions have almost nothing to say about the key ideas that are central to modern societies, the uniqueness of the individual and the importance of the freedom of choice, ideas that led to our current view of democracies and ever-growing market-oriented economies. Being religious, in the traditional sense, is viewed as a personal choice, a choice that must exist because of the essential humanist value of freedom of choice.
Harari’s description of the humanism schism, into the three flavors of liberal humanism, socialist humanism, and evolutionary humanism (Nazism and other similar systems), is interesting and entertaining. Liberal humanism, based on the ideals of free choice, capitalism, and democracy, has been gaining the upper hand in the twentieth century, with occasional relapses, over socialism or enlightened dictatorships.
The last part of the book, where one expects Harari to give us a hint of what may come after humanism, once technology creates systems and machines that make humanist creeds obsolete, is rather disappointing. Instead of presenting us with the promises and threats of transhumanism, he clings to common clichés and rather mundane worries.
Harari firmly believes that there are two types of intelligent systems: biological ones, which are conscious and have, possibly, some other special properties, and the artificial ones, created by technology, which are not conscious, even though they may come to outperform humans in almost every task. According to him, artificial systems may supersede humans in many jobs and activities, and possibly even replace humans as the intelligent species on Earth, but they will never have that unique spark of consciousness that we, humans, have.
This belief leads to two rather short-sighted final chapters, which are little more than a rant against the likes of Facebook, Google, and Amazon. Harari is (and justifiably so) particularly aghast at the new fad, so common these days, of believing that every single human experience should go online, to make it shareable and give it meaning. The downside is that this fad provides data to the all-powerful algorithms that are learning all there is to know about us. I agree with him that this is a worrying trend, but viewing it as the major threat of future technologies is a mistake. There are much more important issues to deal with.
It is not that these chapters are pessimistic, even though they are. It is that, unlike in the rest of Homo Deus (and in Sapiens), in these last chapters Harari’s views seem to be locked inside a narrow and traditionalist view of intelligence, society, and, ultimately, humanity.
Other books, like Superintelligence, What Technology Wants, or The Digital Mind, provide, in my opinion, much more interesting views on what a transhumanist society may come to be.
class DateTimePicker < React::Component::Base
param :record # typically an active_record model, but can be any object really
param :field # attr (read and write) of the object to be updated.
after_mount do
initialize_timepicker
initialize_datepicker
end
def initialize_timepicker
Element["##{input_id}_time"].timepicker({ timeFormat: 'H:i' }.to_n)
Element["##{input_id}_time"].on(:change) do |e|
timepicker_val = e.target.value
time = Time.parse("2000-01-01 #{timepicker_val}").strftime('%H:%M:%S')
update_time(time)
end
end
def initialize_datepicker
Element["##{input_id}_date"].datepicker({
dateFormat: 'mm-dd-yy',
onSelect: ->(date, _i) { update_date(date) }
}.to_n)
end
def input_id
"#{params.record.class.name.downcase}_#{params.field}"
end
def input_name
"#{params.record.class.name.downcase}[#{params.field}]"
end
def current_datetime_or_now
return Time.now if params.record.send(params.field).nil?
Time.parse(params.record.send(params.field))
end
def date_value
return '' if params.record.send(params.field).nil?
Time.parse(params.record.send(params.field)).strftime('%m-%d-%Y')
end
def time_value
return '' if params.record.send(params.field).nil?
Time.parse(params.record.send(params.field)).strftime('%H:%M')
end
def update_date(date)
previous_date = current_datetime_or_now
mon, d, y = date.split('-')
h, m, s = previous_date.strftime('%H:%M:%S').split(':')
params.record.send("#{params.field}=", Time.new(y, mon, d, h, m, s))
return unless Element["##{input_id}_time"].value.blank?
Element["##{input_id}_time"].value = params.record.send(params.field).strftime('%H:%M')
end
def update_time(time)
previous_time = current_datetime_or_now
h, m, s = time.split(':')
y, mon, d = previous_time.strftime('%Y-%m-%d').split('-')
params.record.send("#{params.field}=", Time.new(y, mon, d, h, m, s))
return unless Element["##{input_id}_date"].value.blank?
Element["##{input_id}_date"].value = params.record.send(params.field).strftime('%d-%m-%Y')
end
def date_override
return unless Element["##{input_id}_date"].value.blank?
params.record.send("#{params.field}=", nil)
Element["##{input_id}_time"].value = nil
end
render(:div, class: 'row') do
INPUT(id: input_id, name: input_name, type: :hidden, value: params.record.send(params.field))
DIV(class: 'col-sm-6') do
INPUT(id: "#{input_id}_date", name: "#{input_name}[date]", class: 'form-control datepicker',
defaultValue: date_value, placeholder: 'Date')
.on(:blur) { date_override }
end
DIV(class: 'col-sm-6') do
INPUT(id: "#{input_id}_time", name: "#{input_name}[time]", class: 'form-control timepicker',
defaultValue: time_value, placeholder: 'Time')
end
end
end
Predicting with decision trees using rpart
Jose M Sallan 2022-07-18 7 min read
In this post, I will make a short introduction to decision trees with the rpart package. This package implements the ideas about classification and regression trees presented in Breiman et al. (1983). I will present how rpart can be used for classification and numerical prediction, and how to plot the outcome of rpart using the rpart.plot package.
I will also use the dplyr and ggplot2 for data manipulation and visualization, BAdatasets to access the WineQuality dataset, mlbench to access the BostonHousing dataset and yardstick to obtain classification metrics.
library(dplyr)
library(ggplot2)
library(rpart)
library(rpart.plot)
library(BAdatasets)
library(mlbench)
library(yardstick)
Let’s start with the same set of synthetic data we used for C50:
n <- 400
set.seed(2020)
x <- c(runif(n*0.5, 0, 10), runif(n*0.25, 4, 10), runif(n*0.25, 0, 6))
y <- c(runif(n*0.5, 0, 5), runif(n*0.25, 4, 10), runif(n*0.25, 4, 10))
class <- as.factor(c(rep("a", n*0.5), c(rep("b", n*0.25)), c(rep("c", n*0.25))))
s_data <- tibble(x=x, y=y, class = class)
The rpart function uses the formula and data syntax. We can use rpart.plot to see the result:
dt01 <- rpart(class ~ ., s_data)
rpart.plot(dt01)
This classification is very precise and complex, but also prone to overfitting. We can control how precise and therefore prone to overfitting a rpart tree is with the complexity parameter cp. Any split that does not decrease the overall lack of fit by a factor of cp is not attempted. Small values of cp will create large trees and large values may not produce any tree. The default value of cp is of 0.01. Let’s see the result to increase cp by an order of magnitude:
dt02 <- rpart(class ~ ., s_data, cp = 0.1)
rpart.plot(dt02)
The outcome is similar to the obtained with C50 and can represent a good balance between bias and variance. Other ways of controlling tree size can be found at rpart.control.
Classifying with rpart
Let’s build the wines table of features predicting quality of red and white Portugese wines from data of WineQuality:
data("WineQuality")
red <- WineQuality$red %>%
mutate(type = "red")
white <- WineQuality$white %>%
mutate(type = "white")
wines <- bind_rows(red, white)
wines <- wines %>%
mutate(quality = case_when(quality == 3 ~ 4,
quality == 9 ~ 8,
!quality %in% c(3,9) ~ quality))
wines <- wines %>%
mutate(quality = factor(quality))
Let’s see the tree we obtain with rpart defaults:
dt_wines01 <- rpart(quality ~ ., wines)
rpart.plot(dt_wines01)
This classification looks too simplistic, as it has not been able to predict extreme values of quality. Let’s try a much smaller value of cp:
dt_wines02 <- rpart(quality ~ ., wines, cp = 0.0001)
The resulting tree is too large to plot. Let’s use the predict function with type = "class" to obtain the predicted values for each element of the sample.
table_wines <- data.frame(value = wines$quality, pred = predict(dt_wines02, wines, type = "class"))
Here is the confusion matrix:
conf_mat(table_wines, truth = value, estimate = pred)
## Truth
## Prediction 4 5 6 7 8
## 4 59 29 23 10 0
## 5 103 1693 323 97 15
## 6 72 350 2332 275 60
## 7 11 58 143 685 59
## 8 1 8 15 12 64
And here some classification metrics:
class_metrics <- metric_set(accuracy, precision, recall)
class_metrics(table_wines, truth = value, estimate = pred)
## # A tibble: 3 × 3
## .metric .estimator .estimate
## <chr> <chr> <dbl>
## 1 accuracy multiclass 0.744
## 2 precision macro 0.672
## 3 recall macro 0.562
The classification metrics are not as good as with C50, as rpart does not implement winnowing and boosting. But maybe this classification is less prone to overfitting.
rpart offers a measure of variable importance equal to the sum of the goodness of split measures for each split for which it was the primary variable:
dt_wines02$variable.importance
## alcohol density total sulfur dioxide
## 524.12183 495.09604 417.53959
## residual sugar chlorides volatile acidity
## 367.55782 344.47974 338.19316
## free sulfur dioxide fixed acidity pH
## 333.89703 270.91568 255.05421
## sulphates citric acid type
## 249.26864 242.07769 76.33693
The variables used in early splits tend to have higher variable importance.
Prediction of house prices in Boston Housing with rpart
Let’s see now how can we use rpart for numerical prediction. I will be using the BostonHousing dataset presented by Harrison and Rubinfeld (1978). The target variable is medv, the median value of owner-occupied homes for each Boston census tract.
data("BostonHousing")
BostonHousing %>%
glimpse()
## Rows: 506
## Columns: 14
## $ crim <dbl> 0.00632, 0.02731, 0.02729, 0.03237, 0.06905, 0.02985, 0.08829,…
## $ zn <dbl> 18.0, 0.0, 0.0, 0.0, 0.0, 0.0, 12.5, 12.5, 12.5, 12.5, 12.5, 1…
## $ indus <dbl> 2.31, 7.07, 7.07, 2.18, 2.18, 2.18, 7.87, 7.87, 7.87, 7.87, 7.…
## $ chas <fct> 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,…
## $ nox <dbl> 0.538, 0.469, 0.469, 0.458, 0.458, 0.458, 0.524, 0.524, 0.524,…
## $ rm <dbl> 6.575, 6.421, 7.185, 6.998, 7.147, 6.430, 6.012, 6.172, 5.631,…
## $ age <dbl> 65.2, 78.9, 61.1, 45.8, 54.2, 58.7, 66.6, 96.1, 100.0, 85.9, 9…
## $ dis <dbl> 4.0900, 4.9671, 4.9671, 6.0622, 6.0622, 6.0622, 5.5605, 5.9505…
## $ rad <dbl> 1, 2, 2, 3, 3, 3, 5, 5, 5, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,…
## $ tax <dbl> 296, 242, 242, 222, 222, 222, 311, 311, 311, 311, 311, 311, 31…
## $ ptratio <dbl> 15.3, 17.8, 17.8, 18.7, 18.7, 18.7, 15.2, 15.2, 15.2, 15.2, 15…
## $ b <dbl> 396.90, 396.90, 392.83, 394.63, 396.90, 394.12, 395.60, 396.90…
## $ lstat <dbl> 4.98, 9.14, 4.03, 2.94, 5.33, 5.21, 12.43, 19.15, 29.93, 17.10…
## $ medv <dbl> 24.0, 21.6, 34.7, 33.4, 36.2, 28.7, 22.9, 27.1, 16.5, 18.9, 15…
The syntax for numerical prediction is similar to classification: rpart knows the type of problem by the class of target variable. The predicted value for the observations in a leaf is the average value of the target variable for the observations included in it.
dt_bh <- rpart(medv ~ ., BostonHousing)
rpart.plot(dt_bh)
I have used the default cp = 0.01, obtaining a tree with a reasonable fit. Let’s examine variable importance:
dt_bh$variable.importance
## rm lstat dis indus tax ptratio nox
## 23825.9224 15047.9426 5385.2076 5313.9748 4205.2067 4202.2984 4166.1230
## age crim zn rad b
## 3969.2913 2753.2843 1604.5566 1007.6588 408.1277
To use the yardstick capabilities for numerical prediction, I am storing the real and predicted values of the target variable in a data frame.
table_bh <- data.frame(value = BostonHousing$medv,
pred = predict(dt_bh, BostonHousing))
Here I am obtaining the metrics, which show a reasonable fit.
np_metrics <- metric_set(rsq, rmse, mae)
np_metrics(table_bh, truth = value, estimate = pred)
## # A tibble: 3 × 3
## .metric .estimator .estimate
## <chr> <chr> <dbl>
## 1 rsq standard 0.808
## 2 rmse standard 4.03
## 3 mae standard 2.91
Here is the plot of real versus predicted values. The algorithm provides as many predicted values as terminal nodes of the tree.
ggplot(table_bh, aes(value, pred)) +
geom_point() +
geom_abline(intercept = 0, slope = 1, color = "red") +
theme_minimal() +
labs(x = "real values", y = "predicted values", title = "Prediction of Boston Housing with rpart")
The rpart package implements in R the decision tree techniques presented in the CART book by Breiman et al. (1983). The package can be used for classification and numerical prediction. An effective implementation calls for tuning of some of the parameters of rpart.control, for instance the complexity parameter cp.
References
Session info
## R version 4.2.1 (2022-06-23)
## Platform: x86_64-pc-linux-gnu (64-bit)
## Running under: Linux Mint 19.2
##
## Matrix products: default
## BLAS: /usr/lib/x86_64-linux-gnu/openblas/libblas.so.3
## LAPACK: /usr/lib/x86_64-linux-gnu/libopenblasp-r0.2.20.so
##
## locale:
## [1] LC_CTYPE=es_ES.UTF-8 LC_NUMERIC=C
## [3] LC_TIME=es_ES.UTF-8 LC_COLLATE=es_ES.UTF-8
## [5] LC_MONETARY=es_ES.UTF-8 LC_MESSAGES=es_ES.UTF-8
## [7] LC_PAPER=es_ES.UTF-8 LC_NAME=C
## [9] LC_ADDRESS=C LC_TELEPHONE=C
## [11] LC_MEASUREMENT=es_ES.UTF-8 LC_IDENTIFICATION=C
##
## attached base packages:
## [1] stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] yardstick_0.0.9 mlbench_2.1-3 BAdatasets_0.1.0 rpart.plot_3.1.0
## [5] rpart_4.1.16 ggplot2_3.3.5 dplyr_1.0.9
##
## loaded via a namespace (and not attached):
## [1] Rcpp_1.0.8.3 highr_0.9 plyr_1.8.7 pillar_1.7.0
## [5] bslib_0.3.1 compiler_4.2.1 jquerylib_0.1.4 tools_4.2.1
## [9] digest_0.6.29 gtable_0.3.0 jsonlite_1.8.0 evaluate_0.15
## [13] lifecycle_1.0.1 tibble_3.1.6 pkgconfig_2.0.3 rlang_1.0.2
## [17] cli_3.3.0 DBI_1.1.2 rstudioapi_0.13 yaml_2.3.5
## [21] blogdown_1.9 xfun_0.30 fastmap_1.1.0 withr_2.5.0
## [25] stringr_1.4.0 knitr_1.39 pROC_1.18.0 generics_0.1.2
## [29] vctrs_0.4.1 sass_0.4.1 grid_4.2.1 tidyselect_1.1.2
## [33] glue_1.6.2 R6_2.5.1 fansi_1.0.3 rmarkdown_2.14
## [37] bookdown_0.26 farver_2.1.0 purrr_0.3.4 magrittr_2.0.3
## [41] scales_1.2.0 htmltools_0.5.2 ellipsis_0.3.2 assertthat_0.2.1
## [45] colorspace_2.0-3 labeling_0.4.2 utf8_1.2.2 stringi_1.7.6
## [49] munsell_0.5.0 crayon_1.5.1
all 25 comments
[–]JamesRay757 2 points3 points (0 children)
Better set it on black and white mode to get full effect
[–]tny618 1 point2 points (5 children)
❤This is dope, what apps you use so I can achieve this same look?
[–][deleted] 1 point2 points (4 children)
Apps: Google News, Netflix, Spotify, Snapchat, Instagram, Samsung Notes, Google Play Store, Messages, YouTube
I have swipe-up actions for each app. For instance, swiping up on Netflix opens Hulu, swiping up on YouTube opens YouTube TV, and swiping up on Messages opens my phone app; those are just a few examples.
[–][deleted] 0 points1 point (3 children)
I use nova launcher and kwgt
[–]wsfjs19 0 points1 point (2 children)
Very cool setup man love it. Can you explain how to hide the app labels? All of my icons have the app name under them.
[–][deleted] 1 point2 points (1 child)
I use Nova Launcher! Inside Nova Launcher's settings, go to the first option, which is Home screen, then select Icon layout and disable icon labels. This only disables labels for your home screens; you have to go to a different setting to disable labels everywhere, including the app drawer.
[–]wsfjs19 0 points1 point (0 children)
Thank you
[–]Mathematical_Pie 1 point2 points (1 child)
Where did you get the widget from
[–][deleted] 1 point2 points (0 children)
The widget is "FREE 082" from the "huk kwgt" pack on the Google play store.
[–]Kaboomeow69 1 point2 points (0 children)
Alpha Male popped off on this one
[–]TheOpenSorcerer 0 points1 point (1 child)
Mind sharing the details? I like the simplicity
[–][deleted] 0 points1 point (0 children)
Absolutely what do you want to know?
[–]alxandrthegr8t 0 points1 point (9 children)
Details?
[–][deleted] 0 points1 point (8 children)
What do you want to know?
[–]nevernotbaking 0 points1 point (7 children)
Which icon pack are you using?
[–][deleted] 2 points3 points (6 children)
Icon pack is called : Lines Free
[–][deleted] 1 point2 points (1 child)
I can't find it anywhere on the themes store. Where'd you find it?
Edit: Never mind. Found it. I do have one question. How did you get all of your apps back from the system UI? I can't find mine anywhere.
[–][deleted] 0 points1 point (0 children)
Can you explain your question some more? I don't quite understand what you are asking.
[–]mr_norr 1 point2 points (1 child)
Also can't find this in the theme store using that name
[–][deleted] 2 points3 points (0 children)
[–]Ruck0loc0 -1 points0 points (1 child)
I also can't seem to find it. Been searching for a while now
[–]Los1717 0 points1 point (1 child)
Where did you find that wallpaper? So dope.
[–][deleted] 1 point2 points (0 children)
Here, I made a link to the wallpaper. I found it on r/AMOLEDbackgrounds!
http://imgur.com/a/e7bvhvi
[–]Los1717 0 points1 point (0 children)
Thanks!
// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
#include <array>
#include <cstddef>
#include <cstring>
#include <span>
#include "gtest/gtest.h"
#include "pw_blob_store/blob_store.h"
#include "pw_kvs/crc16_checksum.h"
#include "pw_kvs/fake_flash_memory.h"
#include "pw_kvs/flash_memory.h"
#include "pw_kvs/test_key_value_store.h"
#include "pw_log/log.h"
#include "pw_random/xor_shift.h"
namespace pw::blob_store {
namespace {
class BlobStoreChunkTest : public ::testing::Test {
protected:
BlobStoreChunkTest() : flash_(kFlashAlignment), partition_(&flash_) {}
void InitFlashTo(std::span<const std::byte> contents) {
partition_.Erase();
std::memcpy(flash_.buffer().data(), contents.data(), contents.size());
}
void InitSourceBufferToRandom(uint64_t seed) {
partition_.Erase();
random::XorShiftStarRng64 rng(seed);
rng.Get(source_buffer_);
}
void InitSourceBufferToFill(char fill) {
partition_.Erase();
std::memset(source_buffer_.data(), fill, source_buffer_.size());
}
// Fill the source buffer with random pattern based on given seed, written to
// BlobStore in specified chunk size.
void ChunkWriteTest(size_t chunk_size) {
constexpr size_t kBufferSize = 256;
kvs::ChecksumCrc16 checksum;
char name[16] = {};
snprintf(name, sizeof(name), "Blob%u", static_cast<unsigned>(chunk_size));
BlobStoreBuffer<kBufferSize> blob(
name, partition_, &checksum, kvs::TestKvs());
EXPECT_EQ(Status::OK, blob.Init());
BlobStore::BlobWriter writer(blob);
EXPECT_EQ(Status::OK, writer.Open());
EXPECT_EQ(Status::OK, writer.Erase());
ByteSpan source = source_buffer_;
while (source.size_bytes() > 0) {
const size_t write_size = std::min(source.size_bytes(), chunk_size);
PW_LOG_DEBUG("Do write of %u bytes, %u bytes remain",
static_cast<unsigned>(write_size),
static_cast<unsigned>(source.size_bytes()));
ASSERT_EQ(Status::OK, writer.Write(source.first(write_size)));
source = source.subspan(write_size);
}
EXPECT_EQ(Status::OK, writer.Close());
// Use reader to check for valid data.
BlobStore::BlobReader reader(blob);
ASSERT_EQ(Status::OK, reader.Open());
Result<ConstByteSpan> result = reader.GetMemoryMappedBlob();
ASSERT_TRUE(result.ok());
VerifyFlash(result.value());
EXPECT_EQ(Status::OK, reader.Close());
}
void VerifyFlash(ConstByteSpan verify_bytes) {
// Should be defined as same size.
EXPECT_EQ(source_buffer_.size(), flash_.buffer().size_bytes());
// Can't allow it to march off the end of source_buffer_.
ASSERT_LE(verify_bytes.size_bytes(), source_buffer_.size());
for (size_t i = 0; i < verify_bytes.size_bytes(); i++) {
EXPECT_EQ(source_buffer_[i], verify_bytes[i]);
}
}
static constexpr size_t kFlashAlignment = 16;
static constexpr size_t kSectorSize = 2048;
static constexpr size_t kSectorCount = 2;
static constexpr size_t kBlobDataSize = (kSectorCount * kSectorSize);
kvs::FakeFlashMemoryBuffer<kSectorSize, kSectorCount> flash_;
kvs::FlashPartition partition_;
std::array<std::byte, kBlobDataSize> source_buffer_;
};
TEST_F(BlobStoreChunkTest, ChunkWrite1) {
InitSourceBufferToRandom(0x8675309);
ChunkWriteTest(1);
}
TEST_F(BlobStoreChunkTest, ChunkWrite2) {
InitSourceBufferToRandom(0x8675);
ChunkWriteTest(2);
}
TEST_F(BlobStoreChunkTest, ChunkWrite3) {
InitSourceBufferToFill(0);
ChunkWriteTest(3);
}
TEST_F(BlobStoreChunkTest, ChunkWrite4) {
InitSourceBufferToFill(1);
ChunkWriteTest(4);
}
TEST_F(BlobStoreChunkTest, ChunkWrite5) {
InitSourceBufferToFill(0xff);
ChunkWriteTest(5);
}
TEST_F(BlobStoreChunkTest, ChunkWrite16) {
InitSourceBufferToRandom(0x86);
ChunkWriteTest(16);
}
TEST_F(BlobStoreChunkTest, ChunkWrite64) {
InitSourceBufferToRandom(0x9);
ChunkWriteTest(64);
}
TEST_F(BlobStoreChunkTest, ChunkWrite256) {
InitSourceBufferToRandom(0x12345678);
ChunkWriteTest(256);
}
TEST_F(BlobStoreChunkTest, ChunkWrite512) {
InitSourceBufferToRandom(0x42);
ChunkWriteTest(512);
}
TEST_F(BlobStoreChunkTest, ChunkWrite4096) {
InitSourceBufferToRandom(0x89);
ChunkWriteTest(4096);
}
TEST_F(BlobStoreChunkTest, ChunkWriteSingleFull) {
InitSourceBufferToRandom(0x98765);
ChunkWriteTest(kBlobDataSize);
}
} // namespace
} // namespace pw::blob_store
Numpy
NumPy is the fundamental package for scientific computing with Python. It contains among other things:
• a powerful N-dimensional array object
• sophisticated (broadcasting) functions
• tools for integrating C/C++ and Fortran code
• useful linear algebra, Fourier transform, and random number capabilities
Besides its obvious scientific uses, NumPy can also be used as an efficient multi-dimensional container of generic data. Arbitrary data-types can be defined. This allows NumPy to seamlessly and speedily integrate with a wide variety of databases. More info at: http://www.numpy.org/
In [1]:
import numpy as np
Creating arrays
In [3]:
np.choose?
We can create two arrays from a list:
In [188]:
a = np.array([1, 2])
a
Out[188]:
array([1, 2])
In [189]:
A = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
A
Out[189]:
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
Each numpy.array is a Python object with attributes and methods; let's look at some of them.
the shape
In [190]:
a.shape
Out[190]:
(2,)
In [191]:
A.shape
Out[191]:
(2, 5)
The array type:
In [192]:
a.dtype
Out[192]:
dtype('int64')
They are statically typed, so you cannot assign something of a different type.
In [193]:
a[0] = 'python'
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-193-4f48d9ac26e9> in <module>()
----> 1 a[0] = 'python'
ValueError: invalid literal for int() with base 10: 'python'
The number of elements:
In [194]:
a.size
Out[194]:
2
In [196]:
A.size
Out[196]:
10
Number of bytes per element
In [198]:
a.itemsize
Out[198]:
8
In [199]:
A.itemsize
Out[199]:
8
the number of bytes
In [195]:
a.nbytes
Out[195]:
16
In [197]:
A.nbytes
Out[197]:
80
The number of dimension:
In [14]:
a.ndim
Out[14]:
1
In [15]:
A.ndim
Out[15]:
2
Generating arrays
In [16]:
np.arange(1, 10, 2)
Out[16]:
array([1, 3, 5, 7, 9])
In [3]:
np.arange(1, 2, 0.2)
Out[3]:
array([ 1. , 1.2, 1.4, 1.6, 1.8])
In [17]:
a = np.arange(10)
a
Out[17]:
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
In [19]:
A= np.reshape(a, (5, 2))
A
Out[19]:
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
Generate an array filled with ones:
In [34]:
np.ones((10, 2), dtype=np.float16)
Out[34]:
array([[ 1., 1.],
[ 1., 1.],
[ 1., 1.],
[ 1., 1.],
[ 1., 1.],
[ 1., 1.],
[ 1., 1.],
[ 1., 1.],
[ 1., 1.],
[ 1., 1.]], dtype=float16)
Generate an array filled with zeros:
In [35]:
np.zeros((10, 2), dtype=np.float128)
Out[35]:
array([[ 0.0, 0.0],
[ 0.0, 0.0],
[ 0.0, 0.0],
[ 0.0, 0.0],
[ 0.0, 0.0],
[ 0.0, 0.0],
[ 0.0, 0.0],
[ 0.0, 0.0],
[ 0.0, 0.0],
[ 0.0, 0.0]], dtype=float128)
Split an interval in piecies:
In [23]:
c = np.linspace(0, 10, num=5)
c
Out[23]:
array([ 0. , 2.5, 5. , 7.5, 10. ])
In [30]:
d = np.logspace(2, 3, num=4, base=2)
d
Out[30]:
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Get random numbers:
In [37]:
np.random.rand(2, 5)
Out[37]:
array([[ 0.70923459, 0.36520342, 0.93789577, 0.37788093, 0.88874502],
[ 0.00887959, 0.07073521, 0.15550365, 0.34043936, 0.34841286]])
In [39]:
np.random.randint(0, 9)
Out[39]:
8
In [40]:
np.random.random_integers(0, 9, (2, 5))
Out[40]:
array([[9, 8, 4, 2, 9],
[9, 0, 4, 2, 1]])
Generate diagonal matrix:
In [41]:
np.diag([1, 2, 3, 4])
Out[41]:
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
In [42]:
np.diag([1, 2, 3, 4], k=2)
Out[42]:
array([[0, 0, 1, 0, 0, 0],
[0, 0, 0, 2, 0, 0],
[0, 0, 0, 0, 3, 0],
[0, 0, 0, 0, 0, 4],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
Generating a mesh:
In [31]:
x, y = np.mgrid[0:5, 5:10]
In [32]:
x
Out[32]:
array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 3],
[4, 4, 4, 4, 4]])
In [33]:
y
Out[33]:
array([[5, 6, 7, 8, 9],
[5, 6, 7, 8, 9],
[5, 6, 7, 8, 9],
[5, 6, 7, 8, 9],
[5, 6, 7, 8, 9]])
Basic manipulation
We can multiply the arrays with each other:
In [202]:
a = np.array([1, 2])
a
Out[202]:
array([1, 2])
In [203]:
A = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
A
Out[203]:
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
In [201]:
a + 2
Out[201]:
array([3, 4])
In [204]:
A * 10
Out[204]:
array([[ 0, 10, 20, 30, 40],
[50, 60, 70, 80, 90]])
In [359]:
A * A
Out[359]:
array([[ 0, 1, 4, 9, 16],
[25, 36, 49, 64, 81]])
In [363]:
A.T.shape, a.shape
Out[363]:
((5, 2), (2,))
In [364]:
a * A.T
Out[364]:
array([[ 0, 10],
[ 1, 12],
[ 2, 14],
[ 3, 16],
[ 4, 18]])
In [368]:
C = array([[n + m * 10 for n in range(3)] for m in range(3)])
In [369]:
C
Out[369]:
array([[ 0, 1, 2],
[10, 11, 12],
[20, 21, 22]])
In [370]:
np.dot(C, C)
Out[370]:
array([[ 50, 53, 56],
[350, 383, 416],
[650, 713, 776]])
In [4]:
c = np.arange(1, 4)
c
Out[4]:
array([1, 2, 3])
In [373]:
np.dot(c, C)
Out[373]:
array([80, 86, 92])
In [375]:
np.dot(c, c)
Out[375]:
14
Matrix
In [383]:
m = np.matrix(c).T
m
Out[383]:
matrix([[1],
[2],
[3]])
In [384]:
M = np.matrix(C)
M
Out[384]:
matrix([[ 0, 1, 2],
[10, 11, 12],
[20, 21, 22]])
In [379]:
M * M
Out[379]:
matrix([[ 50, 53, 56],
[350, 383, 416],
[650, 713, 776]])
In [381]:
M * m
Out[381]:
matrix([[ 8],
[ 68],
[128]])
In [382]:
M + M
Out[382]:
matrix([[ 0, 2, 4],
[20, 22, 24],
[40, 42, 44]])
For a complete list of the linear algebra functions, please have a look at the documentation.
Indexing
In [5]:
B = np.reshape(np.arange(40), (5, 8))
B
Out[5]:
array([[ 0, 1, 2, 3, 4, 5, 6, 7],
[ 8, 9, 10, 11, 12, 13, 14, 15],
[16, 17, 18, 19, 20, 21, 22, 23],
[24, 25, 26, 27, 28, 29, 30, 31],
[32, 33, 34, 35, 36, 37, 38, 39]])
Slice
In [6]:
B[:, 1::3]
Out[6]:
array([[ 1, 4, 7],
[ 9, 12, 15],
[17, 20, 23],
[25, 28, 31],
[33, 36, 39]])
In [7]:
B[::2, ::3]
Out[7]:
array([[ 0, 3, 6],
[16, 19, 22],
[32, 35, 38]])
Arrays of integer
In [8]:
B[np.array([0, 0, 0, 2, 2, 2, 4, 4, 4]), np.array([0, 3, 6, 0, 3, 6, 0, 3, 6])]
Out[8]:
array([ 0, 3, 6, 16, 19, 22, 32, 35, 38])
Arrays of boolean
In [9]:
B > 25
Out[9]:
array([[False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False],
[False, False, False, False, False, False, False, False],
[False, False, True, True, True, True, True, True],
[ True, True, True, True, True, True, True, True]], dtype=bool)
In [10]:
B[B > 25]
Out[10]:
array([26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39])
In [11]:
bool_index = (B % 2).astype(np.bool)
In [12]:
bool_index
Out[12]:
array([[False, True, False, True, False, True, False, True],
[False, True, False, True, False, True, False, True],
[False, True, False, True, False, True, False, True],
[False, True, False, True, False, True, False, True],
[False, True, False, True, False, True, False, True]], dtype=bool)
In [13]:
B[bool_index]
Out[13]:
array([ 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33,
35, 37, 39])
In [14]:
B[~bool_index]
Out[14]:
array([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32,
34, 36, 38])
In [350]:
B[~bool_index] *= 10
In [351]:
B
Out[351]:
array([[ 0, 1, 20, 3, 40, 5, 60, 7],
[ 80, 9, 100, 11, 120, 13, 140, 15],
[160, 17, 180, 19, 200, 21, 220, 23],
[240, 25, 260, 27, 280, 29, 300, 31],
[320, 33, 340, 35, 360, 37, 380, 39]])
In [353]:
bool_index.nonzero()
Out[353]:
(array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]),
array([1, 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7]))
In [354]:
B[(~bool_index).nonzero()]
Out[354]:
array([ 0, 20, 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240,
260, 280, 300, 320, 340, 360, 380])
In [355]:
B
Out[355]:
array([[ 0, 1, 20, 3, 40, 5, 60, 7],
[ 80, 9, 100, 11, 120, 13, 140, 15],
[160, 17, 180, 19, 200, 21, 220, 23],
[240, 25, 260, 27, 280, 29, 300, 31],
[320, 33, 340, 35, 360, 37, 380, 39]])
In [15]:
np.where(B % 2)
Out[15]:
(array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4]),
array([1, 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7, 1, 3, 5, 7]))
In [357]:
np.diag(B)
Out[357]:
array([ 0, 9, 180, 27, 360])
In [17]:
which = [1, 0, 1, 0]
choices = [[1, 2, 3, 4], [5, 6, 7, 8]]
np.choose(which, choices)
Out[17]:
array([5, 2, 7, 4])
Add new axis
In [9]:
v = np.array([1,2,3])
v.shape
Out[9]:
(3,)
In [11]:
v[:, np.newaxis]
Out[11]:
array([[1],
[2],
[3]])
In [12]:
v[np.newaxis, :]
Out[12]:
array([[1, 2, 3]])
In [20]:
v[:, np.newaxis] * v[np.newaxis, :]
Out[20]:
array([[1, 2, 3],
[2, 4, 6],
[3, 6, 9]])
In [21]:
v[:, np.newaxis].shape, v[np.newaxis, :].shape
Out[21]:
((3, 1), (1, 3))
Other operations
In [22]:
np.repeat(v, 4)
Out[22]:
array([1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3])
In [23]:
np.concatenate((v, v*10))
Out[23]:
array([ 1, 2, 3, 10, 20, 30])
In [2]:
v = np.arange(10)
np.roll(v, 1)
Out[2]:
array([9, 0, 1, 2, 3, 4, 5, 6, 7, 8])
Vectorize a function
In [24]:
def func(x):
if x>1:
return 5
return x
In [25]:
func(v)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-25-8e4da8fa57ff> in <module>()
----> 1 func(v)
<ipython-input-24-015cd1760d75> in func(x)
1 def func(x):
----> 2 if x>1:
3 return 5
4 return x
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
In [29]:
newfunc = np.vectorize(func)
In [30]:
print(v)
newfunc(v)
[1 2 3]
Out[30]:
array([1, 5, 5])
Import data from a text file
In [45]:
!head Klima_LT_N_daily_1981-2012_8320_Bozen-Bolzano.csv -n 20
AUTONOME PROVINZ PROVINCI
26. Brand- und Ziv 26. P
26.4 - Hydrographi 26.4
Station - stazioneBozen - Bolzano
Nummer - codice :8320
Rechtswert - X UTM677473
Hochwert - Y UTM :5151945
Höhe - quota :254
Zeitraum - periodo1981 2012
Data
Datum Precipitazione NieTemperatura
Temper
massima Maximum minima Minimum
01:01:1981 0,0 -6,0 10,0
02:01:1981 0,0 3,0 10,0
03:01:1981 0,0 -5,0 6,0
04:01:1981 0,4 -4,0 3,0
05:01:1981 0,0 0,0 6,0
Here we have two main problems:
• how to convert the date;
• how to convert the numbers;
We can define two functions that take a string and return a Python object.
In [13]:
import numpy as np
from datetime import datetime as dt
now = dt.now()
In [16]:
now.hour += 5
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-16-383803645475> in <module>()
----> 1 now.hour += 5
AttributeError: attribute 'hour' of 'datetime.datetime' objects is not writable
The module datetime in the standard library has functions to convert an object to a string and a string to an object.
In [3]:
# object => string
dt.strftime(now, "%d:%m:%Y")
Out[3]:
'02:02:2014'
In [4]:
# string => object
dt.strptime('01:01:1981', "%d:%m:%Y")
Out[4]:
datetime.datetime(1981, 1, 1, 0, 0)
We can define our functions using lambda, which is just a short way to define a function:
In [5]:
str2npdatetime = lambda bytes: dt.strptime(bytes.decode(), "%d:%m:%Y")
str2float = lambda bytes: float(bytes.decode().replace(',', '.'))
That is equivalent to writing:
In [17]:
def bytes2date(bytes):
"""Convert bytes to numpy datetime. ::
>>> bytes2date(b'01:01:1981')
'1981-01-01'
"""
return dt.strftime(dt.strptime(bytes.decode(), "%d:%m:%Y"), "%Y-%m-%d")
def bytes2float(bytes):
"""Convert bytes to float. ::
>>> bytes2float(b'2.5')
2.5
"""
return float(bytes.decode().replace(',', '.'))
Check that the function are working:
In [7]:
bytes2date(b'01:01:1981')
Out[7]:
'1981-01-01'
In [8]:
bytes2float(b'2,5')
Out[8]:
2.5
Ok, we are ready to import the csv.
In [18]:
data = np.genfromtxt(# name of the file with the data
"Klima_LT_N_daily_1981-2012_8320_Bozen-Bolzano.csv",
# set the number of rows that we have to skip
skiprows=15,
# set a dictionary giving a converter function for each column
converters={0: bytes2date,
1: bytes2float,
2: bytes2float,
3: bytes2float},
# define the name and type for each
dtype=[('date', 'datetime64[D]'),
('rainfall', 'float'),
('Tmin', 'float'),
('Tmax', 'float')])
In [19]:
data
Out[19]:
array([(datetime.date(1981, 1, 1), 0.0, -6.0, 10.0),
(datetime.date(1981, 1, 2), 0.0, 3.0, 10.0),
(datetime.date(1981, 1, 3), 0.0, -5.0, 6.0), ...,
(datetime.date(2012, 12, 29), 0.0, -1.9, 6.5),
(datetime.date(2012, 12, 30), 0.0, -3.3, 6.7),
(datetime.date(2012, 12, 31), 0.0, -3.4, 7.2)],
dtype=[('date', '<M8[D]'), ('rainfall', '<f8'), ('Tmin', '<f8'), ('Tmax', '<f8')])
In [20]:
data.shape
Out[20]:
(11688,)
In [22]:
data.dtype.fields
Out[22]:
mappingproxy({'Tmin': (dtype('float64'), 16), 'Tmax': (dtype('float64'), 24), 'date': (dtype('<M8[D]'), 0), 'rainfall': (dtype('float64'), 8)})
In [30]:
data[0]
Out[30]:
(datetime.date(1981, 1, 1), 0.0, -6.0, 10.0)
In [31]:
data['date']
Out[31]:
array(['1981-01-01', '1981-01-02', '1981-01-03', ..., '2012-12-29',
'2012-12-30', '2012-12-31'], dtype='datetime64[D]')
In [32]:
data['rainfall'].mean()
Out[32]:
1.9436344969199226
In [33]:
data['rainfall'].std()
Out[33]:
5.8859954441709244
In [34]:
data['rainfall'].max()
Out[34]:
112.0
In [35]:
np.median(data['rainfall'])
Out[35]:
0.0
We can select only certain columns
In [37]:
data[['date', 'Tmin', 'Tmax']]
Out[37]:
array([(datetime.date(1981, 1, 1), -6.0, 10.0),
(datetime.date(1981, 1, 2), 3.0, 10.0),
(datetime.date(1981, 1, 3), -5.0, 6.0), ...,
(datetime.date(2012, 12, 29), -1.9, 6.5),
(datetime.date(2012, 12, 30), -3.3, 6.7),
(datetime.date(2012, 12, 31), -3.4, 7.2)],
dtype=[('date', '<M8[D]'), ('Tmin', '<f8'), ('Tmax', '<f8')])
In [23]:
data[['Tmin', 'Tmax']].mean()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-23-cd4bf427c4ac> in <module>()
----> 1 data[['Tmin', 'Tmax']].mean()
/usr/lib/python3.3/site-packages/numpy/core/_methods.py in _mean(a, axis, dtype, out, keepdims)
60 dtype = mu.dtype('f8')
61
---> 62 ret = um.add.reduce(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
63 if isinstance(ret, mu.ndarray):
64 ret = um.true_divide(
TypeError: cannot perform reduce with flexible type
This is a rec-array; to compute the mean we have to convert it to a regular array, which we can do with:
In [35]:
data_array = data[['rainfall', 'Tmin', 'Tmax']].view(float).reshape(len(data),-1)
In [36]:
data_array.mean(axis=0, )
Out[36]:
array([ 1.9436345 , nan, 18.79189767])
We get nan because a plain mean() does not ignore NaN values.
In [37]:
np.nonzero(np.isnan(data['Tmin']))
Out[37]:
(array([10742, 10834, 10972, 11022, 11043]),)
In [42]:
!head -10770 Klima_LT_N_daily_1981-2012_8320_Bozen-Bolzano.csv | tail -20
24:05:2010 0,0 11,4 29,5
25:05:2010 0,0 13,3 30,5
26:05:2010 0,0 15,9 27,6
27:05:2010 0,0 12,9 22,2
28:05:2010 1,5 11,0 24,0
29:05:2010 0,0 14,1 24,2
30:05:2010 1,3 15,4 22,7
31:05:2010 0,7 --- 22,5
01:06:2010 0,0 11,9 26,1
02:06:2010 0,0 14,6 25,2
03:06:2010 0,0 15,6 26,6
04:06:2010 0,0 12,9 32,8
05:06:2010 0,0 16,2 31,4
06:06:2010 0,0 19,5 32,2
07:06:2010 0,0 19,7 26,1
08:06:2010 0,0 20,4 29,7
09:06:2010 0,0 19,9 31,2
10:06:2010 0,0 20,9 29,9
11:06:2010 0,0 17,2 33,4
12:06:2010 0,0 19,5 31,9
Masked arrays
In [38]:
data_array_ma = np.ma.masked_array(data_array, np.isnan(data_array))
In [39]:
data_array_ma.mean(axis=0, )
Out[39]:
masked_array(data = [1.9436344969199226 6.597629033638557 18.791897672827037],
mask = [False False False],
fill_value = 1e+20)
In [45]:
data_array_ma.std(axis=0, )
Out[45]:
masked_array(data = [5.885995444170924 8.01268084851955 9.408077570471425],
mask = [False False False],
fill_value = 1e+20)
Visualize the data
In [128]:
%matplotlib inline
import matplotlib.pyplot as plt
date = data['date'].astype('O')
fig, ax0 = plt.subplots(figsize=(20,5))
ax1 = ax0.twinx()
ax1.plot(date, data['rainfall'], 'k-', label='rainfall', alpha=0.5)
ax1.axis('tight')
ax1.set_title('$Rainfall/T_{min}/T_{max}$ in Bozen/Bolzano')
ax1.set_xlabel('year')
ax1.set_ylabel('rainfall [mm]')
ax0.plot(date, data['Tmin'], 'b-', label='Tmin', alpha=0.7)
ax0.plot(date, data['Tmax'], 'r-', label='Tmin', alpha=0.7)
ax0.set_ylabel('Temperature [°C]')
ax0.grid()
In [106]:
import datetime as dt
In [163]:
%matplotlib inline
def get_index(date, start, dyears=0, dmonths=0, ddays=0):
indexes = []
dates = []
stop = date[0]
while stop < date[-1]:
stop = dt.date(start.year + dyears, start.month + dmonths, start.day + ddays)
istart = np.where(date == start)[0]
istop = np.where(date == stop)[0]
indexes.append((istart if istart else None, istop if istop else None))
dates.append((start, stop))
start = stop
return indexes, dates
def split_plot(date, data, indexes, dates, figsize=(10, 5)):
date = [obj for obj in date]
fig, axes = plt.subplots(nrows=len(indexes), ncols=1, figsize=figsize)
fig.tight_layout(h_pad=1.)
axes[0].set_title('$Rainfall/T_{min}/T_{max}$ in Bozen/Bolzano')
for (istart, istop), (start, stop), ax0 in zip(indexes, dates, axes):
ax1 = ax0.twinx()
ax1.plot(date[istart:istop], data['rainfall'][istart:istop], 'k-', label='rainfall', alpha=0.5)
#ax1.axis('tight')
ax1.set_xlabel('year')
ax1.set_ylabel('rainfall [mm]')
ax0.plot(date[istart:istop], data['Tmin'][istart:istop], 'b-', label='Tmin', alpha=0.7)
ax0.plot(date[istart:istop], data['Tmax'][istart:istop], 'r-', label='Tmin', alpha=0.7)
ax0.set_ylabel('Temperature [°C]')
limits = [start, stop]
print(limits)
ax0.set_xlim([start, stop])
ax0.grid()
#return fig
dyear = 10
start = dt.date(1980, 1, 1)
indexes, dates = get_index(date, start, dyear)
split_plot(date, data, indexes, dates, figsize=(10, 5))
[datetime.date(1980, 1, 1), datetime.date(1990, 1, 1)]
[datetime.date(1990, 1, 1), datetime.date(2000, 1, 1)]
[datetime.date(2000, 1, 1), datetime.date(2010, 1, 1)]
[datetime.date(2010, 1, 1), datetime.date(2020, 1, 1)]
Save in the native format
The original file in xls format is more than 3 MB:
In [170]:
%%sh
du -h Klima_LT_N_daily_1981-2012_8320_Bozen-Bolzano.xls
3.3M Klima_LT_N_daily_1981-2012_8320_Bozen-Bolzano.xls
The version converted to csv is less than 1 MB:
In [168]:
%%sh
du -h Klima_LT_N_daily_1981-2012_8320_Bozen-Bolzano.csv
892K Klima_LT_N_daily_1981-2012_8320_Bozen-Bolzano.csv
We can save the data in the numpy native format, with:
In [44]:
np.save('bozen.npy', data)
And the file size is less than 400Kb.
In [45]:
%%sh
du -h bozen.npy
368K bozen.npy
To load from the native format use the load function
In [165]:
bozen = np.load('bozen.npy')
In [166]:
bozen
Out[166]:
array([(datetime.date(1981, 1, 1), 0.0, -6.0, 10.0),
(datetime.date(1981, 1, 2), 0.0, 3.0, 10.0),
(datetime.date(1981, 1, 3), 0.0, -5.0, 6.0), ...,
(datetime.date(2012, 12, 29), 0.0, -1.9, 6.5),
(datetime.date(2012, 12, 30), 0.0, -3.3, 6.7),
(datetime.date(2012, 12, 31), 0.0, -3.4, 7.2)],
dtype=[('date', '<M8[D]'), ('rainfall', '<f8'), ('Tmin', '<f8'), ('Tmax', '<f8')])
I have a large piece of data I want to encrypt, and I have a key master_key.
Rather than encrypt the data directly with master_key, I generate a random key item_key, split it in two parts, and encrypt the data with the first half, and use the second half as an auth key in generating an authentication hash with HMAC.
I then encrypt item_key with master_key. This way, if the user were to change their master_key, I would only need to rewrite the item_key, and not the large piece of data.
So the end result of the overall encryption is:
• the ciphertext
• the authentication hash (or tag) of the ciphertext <- HMAC(ciphertext, auth key)
• encrypted item_key
What I overlooked however is that I am generating two ciphertexts here, one of the encrypted data, and one of the encrypted key. But, I only have one auth hash. This is a flaw as I go on to decrypt the encrypted item key using the split master_key without checking its authenticity. I do however check the authenticity of the ciphertext, but that's not as useful.
My challenge now is to use the pieces that I already have to fix this problem. This means I don't want to introduce new keys.
To fix this flaw, I would need to generate an auth hash/tag for both the data ciphertext, and the item key ciphertext. The problem is, I don't have another secret authentication key to spare.
Possible solutions:
1. Do not generate an auth hash for the ciphertext, and instead generate an auth hash only for the encrypted item key, since that is what is being encrypted with the master key. The application promises to never decrypt the actual data ciphertext without first authenticating and decrypting the item key. Will I be roasted by the security community for doing this, even if it's theoretically safe? Is it "bad style"?
2. Derive from the master key an encryption key and an auth key using a PBKDF2 with a small iteration count (less than 3000). I would use this auth key to generate an auth hash for every encrypted item key. (The item key would then be used the same way explained in the beginning: split in half, one half used as encryption key, the other as auth key for the data).
I prefer to go with option 1. Does this sound reasonable? Or are there any other solutions I may be overlooking?
Note: AES-GCM would seem to solve this problem easily, as I do not need auth keys, but I cannot use this method since the platforms I intend to support do not provide native support for this algorithm.
The primary purpose of the MAC in an Encrypt-then-MAC scheme is not to detect attempts to decrypt with the wrong key (although it may accomplish that as a side effect, depending on how the encryption and MAC keys are derived), but rather to make the scheme non-malleable, and thus:
1. protect the user's data from tampering by an attacker who can modify the ciphertext, and
2. protect the encryption part of the scheme from attacks (such as padding oracle attacks against CBC mode encryption) which are based on feeding malformed ciphertext to the decryption algorithm.
Thus, if you want your scheme to resist such attacks, you really do need to apply a MAC to the actual ciphertext that encodes the user's data. Only MACing the (encrypted) item keys will not protect the user's data from tampering. Thus, your "option 1" is definitely insecure.
Your option 2, on the other hand, does appear reasonable, at least as far as you describe it. (I cannot, of course, tell if there are security flaws in the parts that you haven't described, or in the way that you implement the parts you do describe.)
Note that the iterated key stretching aspect of PBKDF2 is only needed when the input is a user-entered password or some other potentially low entropy string. In your case, since you already have a high-entropy master key as input, you may safely reduce the iteration count to 1, or even use a non-iterating KDF such as HKDF.
Alternatively, I don't see any reason why you couldn't just double the length of your master key, and split it into two parts like you currently do for the item keys, except that this will require slightly more storage space for the master key. But since you only have one of those, I doubt that would be a major issue.
If you wish to minimize the overhead of storing and authenticating the item keys, I could suggest something like the following scheme:
1. Store a single base master key (of, say, 256 bits); use HKDF-Expand (or PBKDF2 with an iteration count of 1) to derive a master encryption key and a master MAC key from the base master key.
2. For each item, store a single base item key (again of, say, 256 bits) encrypted with the master encryption key and authenticated with the master authentication key.
3. Derive an item encryption key and an item MAC key from the base item key using HKDF-Expand (or PBKDF2 with an iteration count of 1), exactly the same way as for the master key in step 1.
4. Use the derived item encryption and MAC keys to encrypt and authenticate the user's data.
This should consume no more space than your current scheme (assuming that the MAC tokens aren't longer than 256 bits, which they really have no reason to be), while protecting both the item keys and the items themselves from tampering.
As an extra precaution, you may also wish to include some metadata (such as user and item IDs) as "associated data" in the MAC calculations in steps 2 and 4. This will prevent an attacker from, say, replacing one encrypted item and its item key with the corresponding data and key of another item. Whether such attacks are actually a relevant threat for you will depend on things like what these "items" are actually used for, but since they're easily enough prevented, you might as well do it anyway.
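To make the derivation in steps 1 and 3 concrete, here is a minimal sketch in Python. It assumes the cryptography package, and every name in it (derive_subkeys, base_master_key, and so on) is illustrative rather than part of the scheme above:
import os
from cryptography.hazmat.primitives import hashes, hmac
from cryptography.hazmat.primitives.kdf.hkdf import HKDFExpand

def derive_subkeys(base_key: bytes, context: bytes):
    # Two HKDF-Expand runs with different info strings, as recommended in RFC 5869.
    enc_key = HKDFExpand(algorithm=hashes.SHA256(), length=32,
                         info=context + b"/enc").derive(base_key)
    mac_key = HKDFExpand(algorithm=hashes.SHA256(), length=32,
                         info=context + b"/mac").derive(base_key)
    return enc_key, mac_key

base_master_key = os.urandom(32)                # stored once
master_enc_key, master_mac_key = derive_subkeys(base_master_key, b"master")

base_item_key = os.urandom(32)                  # stored per item, encrypted under master_enc_key
item_enc_key, item_mac_key = derive_subkeys(base_item_key, b"item")

# Encrypt-then-MAC: the tag over a ciphertext would then be computed as
tag = hmac.HMAC(item_mac_key, hashes.SHA256())
tag.update(b"...ciphertext bytes...")
item_tag = tag.finalize()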
$\endgroup$
5
• $\begingroup$ Great answer, thank you. I like the solution you propose. Two questions: 1. When deriving two keys from a KDF, would I just run it twice, once with one iteration, and again with two iterations? And 2. Most HMAC implementations only accept a message and key. Where does associated data come in to play? Is that something I would manually need to implement? $\endgroup$
– Snowman
Mar 9 '17 at 0:43
• $\begingroup$ Actually the answer for 1 seems to be to generate a long key and split it in half. My question then becomes what to use as a salt in both step 1 and step 3 of the proposed solution? $\endgroup$
– Snowman
Mar 9 '17 at 1:01
• $\begingroup$ Also, question #3, for step 1, why couldn't I just take SHA512(master_key) and split that in half? $\endgroup$
– Snowman
Mar 9 '17 at 2:06
• 1
$\begingroup$ @Snowman: The HKDF spec (RFC 5869) has detailed instructions for that: basically, the recommended way is to run HKDF-Expand twice with different info strings. Just generating a single long key and splitting it in half will work too, though. For PBKDF2, you could (slightly mis)use the salt parameter for the same purpose as the HKDF info parameter, possibly combined with an actual salt string (possibly derived e.g. from the user ID, if you don't want to store actual salts). And yes, even just using SHA512 as your KDF should work, but an actual KDF ... $\endgroup$ Mar 9 '17 at 10:47
• $\begingroup$ ... like HKDF is more flexible (you're not limited to 512 bits of output) and lets you say that you're using a standard tool designed for the job. (As for salting, that's really only important when deriving keys from low-entropy sources like passwords, since it prevents a brute-force attacker from testing a single password guess against multiple users at the same time. With a high-entropy random master key as input, you don't really need a salt; however, since what the salt does is let multiple quasi-independent keys be derived from a single input, you could repurpose it as described above.) $\endgroup$ Mar 9 '17 at 10:53
26
$\begingroup$
So as far as I understand, a function $f\colon A \to B$ is surjective if and only if for every $b\in B$ there exists $a\in A$ such that $f(a) = b$.
My question is when is this actually relevant? Couldn't you arbitrarily define the set $B$ so that any elements never "used" are removed from the set, leaving you with a surjective function?
$\endgroup$
• 21
$\begingroup$ It's not the same function then. $\endgroup$ – Bernard Jun 16 '17 at 0:03
• 1
$\begingroup$ I've seen the use of surjectivity in order to establish that a function is bijective. (If a function is surjective and injective, it is bijective). And bijective functions are quite useful. $\endgroup$ – Anthony D'Arienzo Jun 16 '17 at 0:03
• 1
$\begingroup$ Sometimes that is exactly what's done @Calvin. See, for example, topological embeddings. However, even with embeddings, we don't completely disregard $B$. Instead, we recognized that an isomorphic copy of $A$ exists inside of $B$. It is true that "every function is surjective onto its image". However, for instance, if we have a function $f:A \rightarrow B$, it isn't always clear what the image of $A$ actually is. $\endgroup$ – Kaj Hansen Jun 16 '17 at 0:09
• 6
$\begingroup$ @Bernard It's not the same arrow in the category of sets, say, but it is indeed exactly the same set of ordered pairs. $\endgroup$ – Derek Elkins Jun 16 '17 at 5:20
• 4
$\begingroup$ @JairTaylor: I stick to Bourbaki's definition: it's a triple $(E, F,G)$ satisfying $G\subset E\times F$ and a uniqueness condition. $\endgroup$ – Bernard Jun 16 '17 at 8:17
38
$\begingroup$
Yes, we can arbitrarily only look at the range of functions, but this often misses the point. When we study a function, $f: A \rightarrow B$, often we're interested in the properties of $A$ and $B$ just as much as we are the properties of $f$. So, if we want to learn about $B$, and we know that we can do this somehow using surjective function $f$ from $A$ to $B$, just looking at the range of $f$ means we've given up looking at $B$, which is what we wanted to learn about in the first place.
Edit: Since this seems like we're just talking about very basic stuff, here's a very basic property. Let's say we want to know if $A$ and $B$ have the same cardinality. How do we know that? That there exists a bijective function $f: A \rightarrow B$. If we look at the range of $f$ instead of the codomain, we're no longer thinking about the cardinality of $B$, we're thinking about something else all together.
$\endgroup$
• 2
$\begingroup$ Exactly, as my example in the comments illustrates, we note that a topological embedding $f:A \rightarrow f(A) \subseteq B$ is indeed surjective, but the point is that an isomorphic copy of $A$ exists as a subset of $B$. A similar thing can be / is done in every category. $\endgroup$ – Kaj Hansen Jun 16 '17 at 0:15
28
$\begingroup$
The other answers are good, but I'd like to add one thing. Suppose a function $f:\mathbb{R}\to\mathbb{R}$ is given. Is it possible to solve the equation $f(x)=b$, for some particular $b$?
If we know that $f$ is surjective, then we can be sure that a solution exists for any choice of $b$. If not, then we need to worry about whether $b$ is in the range of $f$ or not.
In linear algebra, this comes up a lot. The range of a linear function, given by a matrix $A$, so $f(x)=Ax$, is called the column space of $A$. Sometimes, the column space is the entire codomain, and sometimes it is a subspace. Whether or not a function like that is surjective becomes an interesting question, not only for solving equations, but for answering other questions about the structure of the function.
For example, what if the domain is $\mathbb{R}^4$ and the codomain is $\mathbb{R}^3$? Then what kind of subset of the domain solves the equation $f(x)=(0,0,0)$? If we know that $f$ is surjective, then we can answer that the set mapping to zero is a one-dimensional subspace. If $f$ is not surjective, then the set mapping to zero will have greater dimension.
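For a concrete matrix this is easy to check numerically. A small sketch using NumPy (the matrix below is just an arbitrary example):
import numpy as np

# f(x) = A @ x maps R^4 -> R^3; it is surjective exactly when rank(A) == 3,
# and by rank-nullity the solution set of A x = 0 has dimension 4 - rank(A).
A = np.array([[1., 0., 0., 1.],
              [0., 1., 0., 1.],
              [0., 0., 1., 1.]])

rank = np.linalg.matrix_rank(A)
print("rank:", rank)                           # 3, so the map is onto R^3
print("kernel dimension:", A.shape[1] - rank)  # 1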
$\endgroup$
• $\begingroup$ +1, I used to think like OP, and this is the sort of things that made the concept of surjectivity interesting. $\endgroup$ – Arnaud D. Jun 16 '17 at 8:25
• $\begingroup$ As I recall, this is fundamental in group theory and provides a basis for equivalence relations in A. $\endgroup$ – rrogers Jun 21 '17 at 13:47
5
$\begingroup$
$\newcommand{\Reals}{\mathbf{R}}\newcommand{\Ratls}{\mathbf{Q}}$Mathematical problems often come in the form of,
"Some value $y$ depends deterministically on data $x$; is every prospective value $y$ an actual value?"
Two common formulations are:
• Let $Y$ be a set of prospective values (specified in advance by the context of an external question), $X$ the set of allowable inputs, and $f:X \to Y$ a mapping representing the dependence $y = f(x)$.
The question above means Is $f$ surjective?
• Let $Y \subset Z$ be a set of prospective values, $X$ the set of allowable inputs, and $f:X \to Z$ a mapping representing the dependence $y = f(x)$.
The question above means Is $Y \subset f(X)$?
Here's a selection of five examples, four of them kind of the same:
1. Is every real number the square of some real number?
That is, if $f:\Reals \to \Reals$ is defined by $f(x) = x^{2}$, is $f$ surjective? (Answer: No. For instance, $-1$ is not in the image.)
2. Is every non-negative real number the square of some real number?
That is, if $f:\Reals \to \Reals$ is defined by $f(x) = x^{2}$, is $[0,\infty)$ contained in the image of $f$? (Answer: Yes, though proving this "existence of real square roots" requires non-trivial use of the completeness axiom for the real numbers, even though the result is usually introduced into the curriculum many years prior to a careful analysis course.)
3. Is every positive rational number the square of some rational number?
That is, if $f:\Ratls \to \Ratls$ is defined by $f(x) = x^{2}$, is $[0,\infty) \cap \Ratls$ contained in the image of $f$? (Answer: No. For instance, $2$ is not in the image.)
4. If $y:\Reals \to \Reals$ is a continuous function, does there exist a differentiable function $x:\Reals \to \Reals$ such that $x' = y$?
That is, if $X$ is the set of differentiable, real-valued functions on $\Reals$, and $Y$ is the space of continuous functions, and $Z$ the space of all functions, and if $f(x) = x'$, is $Y$ contained in the image of $X$? (Yes: One of the fundamental theorems of calculus guarantees that every continuous function on $\Reals$ is the derivative of some differentiable function.)
5. Let $(M, g_{0})$ be a compact Kähler manifold. If $\rho$ is a smooth $(1, 1)$-form in the cohomology class $2\pi\, c_{1}(M)$, does there exist a Kähler metric $g$ whose Kähler form is cohomologous to the Kähler form of $g_{0}$ and whose Ricci form is $\rho$?
Analogously to the preceding example, you can imagine that there is a partial differential equation of the abstract form $\rho = f(g)$, and the question amounts to surjectivity of the Ricci curvature operator $f$. The answer turns out to be "yes"; largely for this resolution of the Calabi conjecture, S. T. Yau was awarded the Fields Medal in 1982.
The take-away is, not only is surjectivity interesting, but proving that a specific mapping is surjective can constitute a major work in a distinguished mathematical career.
$\endgroup$
4
$\begingroup$
To fully specify a function requires three things.
• A domain $X$, a set of allowable inputs
• A co-domain $Y$, a set of allowable outputs
• A rule $f$ that, for each $x\in X$ specifies some $f(x) \in Y$.
Failure to include all three means you have failed to properly define a function. For a function to be onto, every element $y\in Y$ must have some $x\in X$ so that $f(x) = y$. A function is not just a rule; it is these three items.
$\endgroup$
• 1
• $\begingroup$ But this raises the OP's question: Why do we care if $f(X) \subsetneq Y$? Why specify $Y$ as the co-domain when we could just restrict the codomain to the image of $f$? $\endgroup$ – fleablood Jun 16 '17 at 0:22
• $\begingroup$ Think about reflexivity of Banach Spaces. That is about the surjectivity of the evaluation map $j: V \rightarrow V^{**}$. $\endgroup$ – ncmathsadist Jun 16 '17 at 0:32
2
$\begingroup$
The definition of a function is not just the rule $f$. Instead, a function is defined by a domain $A$ and a codomain $B$ together with a rule $f$ that takes every $x\in A$ and returns a unique element of $B$. So, if we were to change the set $B$ as you suggest, we are actually changing the function itself, so although the new function will indeed be surjective, it will not be the same function that you started with.
For example, $f(x)=x^2$, with $f:\mathbb{R}\rightarrow \mathbb{R}$ is not surjective, but $f(x)=x^2$, with $f:\mathbb{R}\rightarrow [0,\infty) $ is surjective. The rule and domain are the same for both functions, but the codomains differ, so the two functions are not the same.
As for why the concept of surjectivity is important, one example is that if a function is both surjective and injective (i.e. both 1-1 and onto), then the function is called bijective, and showing that a function is bijective is one of the most common tools in analysis. For example, one way to show that two sets have the same cardinality is to construct a bijection from one set to the other. As another example, a function is invertible if and only if it is bijective.
$\endgroup$
• $\begingroup$ In response to your first paragraph, define $f: \mathbb R\to \mathbb R$ given by $f(x)=x^2$, and $g: \mathbb R\to [0,\infty)$ given by $g(x)=x^2$. How are the sets $f$ and $g$ different? Both are equal to $\{(x,y)\in\mathbb R^2\,\vert\, y=x^2\}$. $\endgroup$ – florence Jun 16 '17 at 0:18
• $\begingroup$ The difference, I think, is that while the first is equal to the set you describe, the second is equal to $\{(x,y)\in \mathbb{R} \times [0,\infty) | \ y=x^2 \}$ $\endgroup$ – M_B Jun 16 '17 at 0:25
• $\begingroup$ It's easy to show that $\{(x,y)\in \mathbb R^2 \, \vert \, y=x^2\}=\{(x,y)\in \mathbb R\times [0,\infty) \, \vert \, y=x^2\}$. In fact, $f=g$. A function $h$ is a set of ordered pairs with the property that $(x,y), (x,z)\in h \implies y=z$; it doesn't contain any information as to what its codomain is intended to be. $\endgroup$ – florence Jun 16 '17 at 0:28
• $\begingroup$ I'm browsing some introductory analysis books, and most definitions say something like: a function from $A$ to $B$ is defined as a subset of the cross product $A\times B $. I see your reasoning, but perhaps the two definitions of function are valid for different purposes. Or perhaps I am mistaken. $\endgroup$ – M_B Jun 16 '17 at 0:36
• 1
$\begingroup$ I see my mistake, thanks Florence, that is a nice explanation. I need to run out for a bit, but I will update my post tonight. $\endgroup$ – M_B Jun 16 '17 at 0:44
21
I'd like to use name__iexact with get_or_create to avoid duplication on user entered fields where possible.
My Provider model has a name field which I use in get_or_create.
The lookup works fine, but when creating an instance for the first time, as in the p1/Timber example below, the name is not saved on the newly created object:
Fails:
>>> p1, c1 = Provider.objects.get_or_create(name__iexact="Timber")
>>> p1, c1
(<Provider: >, True)
>>> p1.name
u''
Works as expected here:
>>> p2, c2 = Provider.objects.get_or_create(name="Cedar")
>>> p2.name, c2
('Cedar', True)
>>> p3, c3 = Provider.objects.get_or_create(name__iexact="Cedar")
>>> p3, c3
(<Provider: Cedar>, False)
>>> Provider.objects.get_or_create(name__iexact="cedar")
(<Provider: Cedar>, False)
Is __iexact incompatible with the create portion of get_or_create, is this expected behavior (and why), or have I run into a Django bug?
20
What you're seeing is the correct behaviour.
get_or_create is shorthand for 'get and return the object matching kwargs, if it doesn't exist, create it using defaults'. Your lookup is looking for an object where name is a case-insensitive match to 'cedar'. That object exists, so it is returned. Nothing more, nothing less.
Now if there was no match, Stéphane is right, and you would need to specify name in the defaults parameter. All lookups containing the lookup separator __ are stripped from the parameters passed to create().
13
According to the documentation, you can try to use the defaults argument (I haven't tried this code):
p1, c1 = Provider.objects.get_or_create(
defaults={'name':"Timber"},
name__iexact="Timber"
)
It makes sense since you can then have the search and the object creation that differs.
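As a sketch of how that call behaves (assuming the same Provider model as in the question), the defaults are used only when nothing matches the case-insensitive lookup:
p1, created = Provider.objects.get_or_create(
    name__iexact="Timber",           # used only for the lookup
    defaults={'name': "Timber"},     # used only if no match is found
)
print(p1.name, created)              # 'Timber' True  -- the name is saved this time

p2, created = Provider.objects.get_or_create(
    name__iexact="timber",
    defaults={'name': "timber"},
)
print(p2.pk == p1.pk, created)       # True False  -- the existing row is returned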
Source code for napari._qt.qthreading
import inspect
import warnings
from functools import partial, wraps
from types import FunctionType, GeneratorType
from typing import (
Callable,
Dict,
List,
Optional,
Sequence,
Type,
TypeVar,
Union,
)
from superqt.utils import _qthreading
from typing_extensions import ParamSpec
from napari.utils.progress import progress
from napari.utils.translations import trans
wait_for_workers_to_quit = _qthreading.WorkerBase.await_workers
class _NotifyingMixin:
def __init__(self: _qthreading.WorkerBase, *args, **kwargs) -> None: # type: ignore
super().__init__(*args, **kwargs) # type: ignore
self.errored.connect(self._relay_error)
self.warned.connect(self._relay_warning)
def _relay_error(self, exc: Exception):
from napari.utils.notifications import notification_manager
notification_manager.receive_error(type(exc), exc, exc.__traceback__)
def _relay_warning(self, show_warn_args: tuple):
from napari.utils.notifications import notification_manager
notification_manager.receive_warning(*show_warn_args)
_Y = TypeVar("_Y")
_S = TypeVar("_S")
_R = TypeVar("_R")
_P = ParamSpec("_P")
[docs] class FunctionWorker(_qthreading.FunctionWorker[_R], _NotifyingMixin): ...
[docs] class GeneratorWorker( _qthreading.GeneratorWorker[_Y, _S, _R], _NotifyingMixin ): ...
# these are re-implemented from superqt just to provide progress
[docs] def create_worker( func: Union[FunctionType, GeneratorType], *args, _start_thread: Optional[bool] = None, _connect: Optional[Dict[str, Union[Callable, Sequence[Callable]]]] = None, _progress: Optional[Union[bool, Dict[str, Union[int, bool, str]]]] = None, _worker_class: Union[ Type[GeneratorWorker], Type[FunctionWorker], None ] = None, _ignore_errors: bool = False, **kwargs, ) -> Union[FunctionWorker, GeneratorWorker]: """Convenience function to start a function in another thread. By default, uses :class:`Worker`, but a custom ``WorkerBase`` subclass may be provided. If so, it must be a subclass of :class:`Worker`, which defines a standard set of signals and a run method. Parameters ---------- func : Callable The function to call in another thread. _start_thread : bool, optional Whether to immediaetly start the thread. If False, the returned worker must be manually started with ``worker.start()``. by default it will be ``False`` if the ``_connect`` argument is ``None``, otherwise ``True``. _connect : Dict[str, Union[Callable, Sequence]], optional A mapping of ``"signal_name"`` -> ``callable`` or list of ``callable``: callback functions to connect to the various signals offered by the worker class. by default None _progress : Union[bool, Dict[str, Union[int, bool, str]]], optional Can be True, to provide indeterminate progress bar, or dictionary. If dict, requires mapping of 'total' to number of expected yields. If total is not provided, progress bar will be indeterminate. Will connect progress bar update to yields and display this progress in the viewer. Can also take a mapping of 'desc' to the progress bar description. Progress bar will become indeterminate when number of yields exceeds 'total'. By default None. _worker_class : Type[WorkerBase], optional The :class`WorkerBase` to instantiate, by default :class:`FunctionWorker` will be used if ``func`` is a regular function, and :class:`GeneratorWorker` will be used if it is a generator. _ignore_errors : bool, optional If ``False`` (the default), errors raised in the other thread will be reraised in the main thread (makes debugging significantly easier). *args will be passed to ``func`` **kwargs will be passed to ``func`` Returns ------- worker : WorkerBase An instantiated worker. If ``_start_thread`` was ``False``, the worker will have a `.start()` method that can be used to start the thread. Raises ------ TypeError If a worker_class is provided that is not a subclass of WorkerBase. TypeError If _connect is provided and is not a dict of ``{str: callable}`` TypeError If _progress is provided and function is not a generator Examples -------- .. code-block:: python def long_function(duration): import time time.sleep(duration) worker = create_worker(long_function, 10) """ # provide our own classes with the notification mixins if not _worker_class: if inspect.isgeneratorfunction(func): _worker_class = GeneratorWorker else: _worker_class = FunctionWorker worker = _qthreading.create_worker( func, *args, _start_thread=False, _connect=_connect, _worker_class=_worker_class, _ignore_errors=_ignore_errors, **kwargs, ) # either True or a non-empty dictionary if _progress: if isinstance(_progress, bool): _progress = {} desc = _progress.get('desc', None) total = int(_progress.get('total', 0)) if isinstance(worker, FunctionWorker) and total != 0: warnings.warn( trans._( "_progress total != 0 but worker is FunctionWorker and will not yield. 
Returning indeterminate progress bar...", deferred=True, ), RuntimeWarning, ) total = 0 with progress._all_instances.events.changed.blocker(): pbar = progress(total=total, desc=desc) worker.started.connect( partial( lambda prog: progress._all_instances.events.changed( added={prog}, removed={} ), pbar, ) ) worker.finished.connect(pbar.close) if total != 0 and isinstance(worker, GeneratorWorker): worker.yielded.connect(pbar.increment_with_overflow) worker.pbar = pbar if _start_thread is None: _start_thread = _connect is not None if _start_thread: worker.start() return worker
[docs] def thread_worker( function: Optional[Callable] = None, start_thread: Optional[bool] = None, connect: Optional[Dict[str, Union[Callable, Sequence[Callable]]]] = None, progress: Optional[Union[bool, Dict[str, Union[int, bool, str]]]] = None, worker_class: Union[ Type[FunctionWorker], Type[GeneratorWorker], None ] = None, ignore_errors: bool = False, ): """Decorator that runs a function in a separate thread when called. When called, the decorated function returns a :class:`WorkerBase`. See :func:`create_worker` for additional keyword arguments that can be used when calling the function. The returned worker will have these signals: - *started*: emitted when the work is started - *finished*: emitted when the work is finished - *returned*: emitted with return value - *errored*: emitted with error object on Exception It will also have a ``worker.start()`` method that can be used to start execution of the function in another thread. (useful if you need to connect callbacks to signals prior to execution) If the decorated function is a generator, the returned worker will also provide these signals: - *yielded*: emitted with yielded values - *paused*: emitted when a running job has successfully paused - *resumed*: emitted when a paused job has successfully resumed - *aborted*: emitted when a running job is successfully aborted And these methods: - *quit*: ask the thread to quit - *toggle_paused*: toggle the running state of the thread. - *send*: send a value into the generator. (This requires that your decorator function uses the ``value = yield`` syntax) Parameters ---------- function : callable Function to call in another thread. For communication between threads may be a generator function. start_thread : bool, optional Whether to immediaetly start the thread. If False, the returned worker must be manually started with ``worker.start()``. by default it will be ``False`` if the ``_connect`` argument is ``None``, otherwise ``True``. connect : Dict[str, Union[Callable, Sequence]], optional A mapping of ``"signal_name"`` -> ``callable`` or list of ``callable``: callback functions to connect to the various signals offered by the worker class. by default None progress : Union[bool, Dict[str, Union[int, bool, str]]], optional Can be True, to provide indeterminate progress bar, or dictionary. If dict, requires mapping of 'total' to number of expected yields. If total is not provided, progress bar will be indeterminate. Will connect progress bar update to yields and display this progress in the viewer. Can also take a mapping of 'desc' to the progress bar description. Progress bar will become indeterminate when number of yields exceeds 'total'. By default None. Must be used in conjunction with a generator function. worker_class : Type[WorkerBase], optional The :class`WorkerBase` to instantiate, by default :class:`FunctionWorker` will be used if ``func`` is a regular function, and :class:`GeneratorWorker` will be used if it is a generator. ignore_errors : bool, optional If ``False`` (the default), errors raised in the other thread will be reraised in the main thread (makes debugging significantly easier). Returns ------- callable function that creates a worker, puts it in a new thread and returns the worker instance. Examples -------- .. code-block:: python @thread_worker def long_function(start, end): # do work, periodically yielding i = start while i <= end: time.sleep(0.1) yield i # do teardown return 'anything' # call the function to start running in another thread. 
worker = long_function() # connect signals here if desired... or they may be added using the # `connect` argument in the `@thread_worker` decorator... in which # case the worker will start immediately when long_function() is called worker.start() """ def _inner(func): @wraps(func) def worker_function(*args, **kwargs): # decorator kwargs can be overridden at call time by using the # underscore-prefixed version of the kwarg. kwargs['_start_thread'] = kwargs.get('_start_thread', start_thread) kwargs['_connect'] = kwargs.get('_connect', connect) kwargs['_progress'] = kwargs.get('_progress', progress) kwargs['_worker_class'] = kwargs.get('_worker_class', worker_class) kwargs['_ignore_errors'] = kwargs.get( '_ignore_errors', ignore_errors ) return create_worker( func, *args, **kwargs, ) return worker_function return _inner if function is None else _inner(function)
_new_worker_qthread = _qthreading.new_worker_qthread def _add_worker_data(worker: FunctionWorker, return_type, source=None): from napari._app_model.injection import _processors cb = _processors._add_layer_data_to_viewer worker.signals.returned.connect( partial(cb, return_type=return_type, source=source) ) def _add_worker_data_from_tuple( worker: FunctionWorker, return_type, source=None ): from napari._app_model.injection import _processors cb = _processors._add_layer_data_tuples_to_viewer worker.signals.returned.connect( partial(cb, return_type=return_type, source=source) ) def register_threadworker_processors(): from functools import partial import magicgui from napari import layers, types from napari._app_model import get_app from napari.types import LayerDataTuple from napari.utils import _magicgui as _mgui app = get_app() for _type in (LayerDataTuple, List[LayerDataTuple]): t = FunctionWorker[_type] magicgui.register_type(t, return_callback=_mgui.add_worker_data) app.injection_store.register( processors={t: _add_worker_data_from_tuple} ) for layer_name in layers.NAMES: _type = getattr(types, f'{layer_name.title()}Data') t = FunctionWorker[_type] magicgui.register_type( t, return_callback=partial(_mgui.add_worker_data, _from_tuple=False), ) app.injection_store.register(processors={t: _add_worker_data})
Refactor: draw fcurve bezt function #108748
Merged
Christoph Lendenfeld merged 4 commits from ChrisLend/blender:graph_editor_draw_refactor into main 2023-06-15 18:20:22 +02:00
View File
@ -819,7 +819,65 @@ static bool fcurve_can_use_simple_bezt_drawing(FCurve *fcu)
return true;
}
/* helper func - draw one repeat of an F-Curve (using Bezier curve approximations) */
static int calculate_bezt_draw_resolution(BezTriple *bezt,
BezTriple *prevbezt,
const int max_bez_resolution,
const bool is_driver)
{
if (is_driver) {
return max_bez_resolution;
}
const int resolution = (int)(5.0f * len_v2v2(bezt->vec[1], prevbezt->vec[1]));
/* NOTE: higher values will crash */
/* TODO: view scale should factor into this someday too... */
return min_ii(resolution, max_bez_resolution);
}
/** Draw a segment from \param prevbezt to \param bezt at the given \param resolution.
This could `return min_ii(resolution, max_bez_resolution)`, making it possible for `resolution` to be `const`.
* immBeginAtMost is expected to be called with enough space for this function to run.
*/
static void draw_bezt(BezTriple *bezt, BezTriple *prevbezt, int resolution, uint pos)
{
float prev_key[2], prev_handle[2], bez_handle[2], bez_key[2];
float data[120];
if (resolution < 2) {
prev_key[0] = prevbezt->vec[1][0];
prev_key[1] = prevbezt->vec[1][1];
immVertex2fv(pos, prev_key);
return;
}
prev_key[0] = prevbezt->vec[1][0];
prev_key[1] = prevbezt->vec[1][1];
prev_handle[0] = prevbezt->vec[2][0];
prev_handle[1] = prevbezt->vec[2][1];
bez_handle[0] = bezt->vec[0][0];
bez_handle[1] = bezt->vec[0][1];
bez_key[0] = bezt->vec[1][0];
bez_key[1] = bezt->vec[1][1];
BKE_fcurve_correct_bezpart(prev_key, prev_handle, bez_handle, bez_key);
BKE_curve_forward_diff_bezier(
prev_key[0], prev_handle[0], bez_handle[0], bez_key[0], data, resolution, sizeof(float[3]));
BKE_curve_forward_diff_bezier(prev_key[1],
prev_handle[1],
bez_handle[1],
bez_key[1],
data + 1,
resolution,
sizeof(float[3]));
for (float *fp = data; resolution; resolution--, fp += 3) {
immVertex2fv(pos, fp);
}
}
/* Helper function - draw one repeat of an F-Curve (using Bezier curve approximations). */
static void draw_fcurve_curve_bezts(
bAnimContext *ac, ID *id, FCurve *fcu, View2D *v2d, uint pos, const bool draw_extrapolation)
{
@ -827,178 +885,136 @@ static void draw_fcurve_curve_bezts(
return;
}
BezTriple *prevbezt = fcu->bezt;
BezTriple *bezt = prevbezt + 1;
float v1[2], v2[2], v3[2], v4[2];
float *fp, data[120];
float fac = 0.0f;
int b = fcu->totvert - 1;
int resol;
float unit_scale, offset;
short mapping_flag = ANIM_get_normalization_flags(ac);
/* apply unit mapping */
/* Apply unit mapping. */
GPU_matrix_push();
unit_scale = ANIM_unit_mapping_get_factor(ac->scene, id, fcu, mapping_flag, &offset);
float offset;
short mapping_flag = ANIM_get_normalization_flags(ac);
const float unit_scale = ANIM_unit_mapping_get_factor(ac->scene, id, fcu, mapping_flag, &offset);
GPU_matrix_scale_2f(1.0f, unit_scale);
GPU_matrix_translate_2f(0.0f, offset);
/* For now, this assumes the worst case scenario, where all the keyframes have
* bezier interpolation, and are drawn at full res.
* This is tricky to optimize, but maybe can be improved at some point... */
immBeginAtMost(GPU_PRIM_LINE_STRIP, (b * 32 + 3));
int b = fcu->totvert - 1;
const int max_bezt_resolution = 32;
immBeginAtMost(GPU_PRIM_LINE_STRIP, (b * max_bezt_resolution + 3));
/* extrapolate to left? */
BezTriple *prevbezt = fcu->bezt;
BezTriple *bezt = prevbezt + 1;
float vertex_position[2];
float fac = 0.0f;
/* Extrapolate to the left? */
if (draw_extrapolation && prevbezt->vec[1][0] > v2d->cur.xmin) {
/* left-side of view comes before first keyframe, so need to extend as not cyclic */
v1[0] = v2d->cur.xmin;
vertex_position[0] = v2d->cur.xmin;
/* y-value depends on the interpolation */
if ((fcu->extend == FCURVE_EXTRAPOLATE_CONSTANT) || (prevbezt->ipo == BEZT_IPO_CONST) ||
(prevbezt->ipo == BEZT_IPO_LIN && fcu->totvert == 1))
{
/* just extend across the first keyframe's value */
v1[1] = prevbezt->vec[1][1];
vertex_position[1] = prevbezt->vec[1][1];
}
else if (prevbezt->ipo == BEZT_IPO_LIN) {
/* extrapolate linear doesn't use the handle, use the next points center instead */
fac = (prevbezt->vec[1][0] - bezt->vec[1][0]) / (prevbezt->vec[1][0] - v1[0]);
fac = (prevbezt->vec[1][0] - bezt->vec[1][0]) / (prevbezt->vec[1][0] - vertex_position[0]);
if (fac) {
fac = 1.0f / fac;
}
v1[1] = prevbezt->vec[1][1] - fac * (prevbezt->vec[1][1] - bezt->vec[1][1]);
vertex_position[1] = prevbezt->vec[1][1] - fac * (prevbezt->vec[1][1] - bezt->vec[1][1]);
}
else {
/* based on angle of handle 1 (relative to keyframe) */
fac = (prevbezt->vec[0][0] - prevbezt->vec[1][0]) / (prevbezt->vec[1][0] - v1[0]);
fac = (prevbezt->vec[0][0] - prevbezt->vec[1][0]) /
(prevbezt->vec[1][0] - vertex_position[0]);
if (fac) {
fac = 1.0f / fac;
}
v1[1] = prevbezt->vec[1][1] - fac * (prevbezt->vec[0][1] - prevbezt->vec[1][1]);
vertex_position[1] = prevbezt->vec[1][1] - fac * (prevbezt->vec[0][1] - prevbezt->vec[1][1]);
}
immVertex2fv(pos, v1);
immVertex2fv(pos, vertex_position);
}
/* if only one keyframe, add it now */
/* If only one keyframe, add it now. */
if (fcu->totvert == 1) {
v1[0] = prevbezt->vec[1][0];
v1[1] = prevbezt->vec[1][1];
immVertex2fv(pos, v1);
vertex_position[0] = prevbezt->vec[1][0];
vertex_position[1] = prevbezt->vec[1][1];
immVertex2fv(pos, vertex_position);
}
/* draw curve between first and last keyframe (if there are enough to do so) */
/* Draw curve between first and last keyframe (if there are enough to do so). */
/* TODO: optimize this to not have to calc stuff out of view too? */
while (b--) {
if (prevbezt->ipo == BEZT_IPO_CONST) {
/* Constant-Interpolation: draw segment between previous keyframe and next,
* but holding same value */
v1[0] = prevbezt->vec[1][0];
v1[1] = prevbezt->vec[1][1];
immVertex2fv(pos, v1);
vertex_position[0] = prevbezt->vec[1][0];
vertex_position[1] = prevbezt->vec[1][1];
immVertex2fv(pos, vertex_position);
v1[0] = bezt->vec[1][0];
v1[1] = prevbezt->vec[1][1];
immVertex2fv(pos, v1);
vertex_position[0] = bezt->vec[1][0];
vertex_position[1] = prevbezt->vec[1][1];
immVertex2fv(pos, vertex_position);
}
else if (prevbezt->ipo == BEZT_IPO_LIN) {
/* Linear interpolation: just add one point (which should add a new line segment) */
v1[0] = prevbezt->vec[1][0];
v1[1] = prevbezt->vec[1][1];
immVertex2fv(pos, v1);
vertex_position[0] = prevbezt->vec[1][0];
vertex_position[1] = prevbezt->vec[1][1];
immVertex2fv(pos, vertex_position);
}
else if (prevbezt->ipo == BEZT_IPO_BEZ) {
/* Bezier-Interpolation: draw curve as series of segments between keyframes
* - resol determines number of points to sample in between keyframes
*/
/* resol depends on distance between points
* (not just horizontal) OR is a fixed high res */
/* TODO: view scale should factor into this someday too... */
if (fcu->driver) {
resol = 32;
}
else {
resol = (int)(5.0f * len_v2v2(bezt->vec[1], prevbezt->vec[1]));
}
if (resol < 2) {
/* only draw one */
v1[0] = prevbezt->vec[1][0];
v1[1] = prevbezt->vec[1][1];
immVertex2fv(pos, v1);
}
else {
/* clamp resolution to max of 32 */
/* NOTE: higher values will crash */
if (resol > 32) {
resol = 32;
}
v1[0] = prevbezt->vec[1][0];
v1[1] = prevbezt->vec[1][1];
v2[0] = prevbezt->vec[2][0];
v2[1] = prevbezt->vec[2][1];
v3[0] = bezt->vec[0][0];
v3[1] = bezt->vec[0][1];
v4[0] = bezt->vec[1][0];
v4[1] = bezt->vec[1][1];
BKE_fcurve_correct_bezpart(v1, v2, v3, v4);
BKE_curve_forward_diff_bezier(v1[0], v2[0], v3[0], v4[0], data, resol, sizeof(float[3]));
BKE_curve_forward_diff_bezier(
v1[1], v2[1], v3[1], v4[1], data + 1, resol, sizeof(float[3]));
for (fp = data; resol; resol--, fp += 3) {
immVertex2fv(pos, fp);
}
}
int resolution = calculate_bezt_draw_resolution(
bezt, prevbezt, max_bezt_resolution, fcu->driver != NULL);
draw_bezt(bezt, prevbezt, resolution, pos);
}
/* get next pointers */
/* Get next pointers. */
prevbezt = bezt;
bezt++;
/* last point? */
/* Last point? */
if (b == 0) {
v1[0] = prevbezt->vec[1][0];
v1[1] = prevbezt->vec[1][1];
immVertex2fv(pos, v1);
vertex_position[0] = prevbezt->vec[1][0];
vertex_position[1] = prevbezt->vec[1][1];
immVertex2fv(pos, vertex_position);
}
}
/* extrapolate to right? (see code for left-extrapolation above too) */
/* Extrapolate to the right? (see code for left-extrapolation above too) */
if (draw_extrapolation && prevbezt->vec[1][0] < v2d->cur.xmax) {
v1[0] = v2d->cur.xmax;
vertex_position[0] = v2d->cur.xmax;
/* y-value depends on the interpolation */
/* y-value depends on the interpolation. */
if ((fcu->extend == FCURVE_EXTRAPOLATE_CONSTANT) || (fcu->flag & FCURVE_INT_VALUES) ||
(prevbezt->ipo == BEZT_IPO_CONST) || (prevbezt->ipo == BEZT_IPO_LIN && fcu->totvert == 1))
{
/* based on last keyframe's value */
v1[1] = prevbezt->vec[1][1];
vertex_position[1] = prevbezt->vec[1][1];
}
else if (prevbezt->ipo == BEZT_IPO_LIN) {
/* extrapolate linear doesn't use the handle, use the previous points center instead */
/* Extrapolate linear doesn't use the handle, use the previous points center instead. */
bezt = prevbezt - 1;
fac = (prevbezt->vec[1][0] - bezt->vec[1][0]) / (prevbezt->vec[1][0] - v1[0]);
fac = (prevbezt->vec[1][0] - bezt->vec[1][0]) / (prevbezt->vec[1][0] - vertex_position[0]);
if (fac) {
fac = 1.0f / fac;
}
v1[1] = prevbezt->vec[1][1] - fac * (prevbezt->vec[1][1] - bezt->vec[1][1]);
vertex_position[1] = prevbezt->vec[1][1] - fac * (prevbezt->vec[1][1] - bezt->vec[1][1]);
}
else {
/* based on angle of handle 1 (relative to keyframe) */
fac = (prevbezt->vec[2][0] - prevbezt->vec[1][0]) / (prevbezt->vec[1][0] - v1[0]);
/* Based on angle of handle 1 (relative to keyframe). */
fac = (prevbezt->vec[2][0] - prevbezt->vec[1][0]) /
(prevbezt->vec[1][0] - vertex_position[0]);
if (fac) {
fac = 1.0f / fac;
}
v1[1] = prevbezt->vec[1][1] - fac * (prevbezt->vec[2][1] - prevbezt->vec[1][1]);
vertex_position[1] = prevbezt->vec[1][1] - fac * (prevbezt->vec[2][1] - prevbezt->vec[1][1]);
}
immVertex2fv(pos, v1);
immVertex2fv(pos, vertex_position);
}
immEnd();
For loops
I need a program with a nested for loop. It asks the user for n, a positive value; if it is negative, the program exits. Once a positive n is entered, it must print an n x n checkerboard pattern made from X and O.
example: positive in 4
XOXO
OXOX
XOXO
OXOX
int n;
cout << "enter positive value:";
cin >> n;
for (int n = 1; n < n++; n++){
for (int n = 0; n < 0; n++){
break;
}
cout << "XOXO" << endl;
}
I tried this but it won't compile; it says
expected initializer befor %
Last edited on
I think there may be something you aren't showing us, but one glaring problem is that you re-define n inside the outer loop, then inside the inner loop. By convention, use:
for (int i = 0; i != something; i++)
{
for (int j =0; j != somethingElse; j++) {}
}
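For completeness, here is a minimal sketch of the kind of program the assignment describes, using two separate loop counters instead of reusing n:
#include <iostream>

int main()
{
    int n;
    std::cout << "enter positive value: ";
    std::cin >> n;

    if (n <= 0)
        return 0;                     // non-positive input ends the program

    for (int row = 0; row < n; row++)
    {
        for (int col = 0; col < n; col++)
        {
            // alternate X and O based on the parity of row + column
            std::cout << (((row + col) % 2 == 0) ? 'X' : 'O');
        }
        std::cout << '\n';
    }
    return 0;
}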
Topic archived. No new replies allowed.
Questions related to the calculus and analysis branches of Mathematica, including, but not limited to, limits, derivatives, integrals, series, and residues.
18
votes
0answers
2k views
How to visualize Riemann surfaces?
In WolframAlpha we can easily visualize Riemann surfaces of arbitrary functions, can we plot the Riemann surface of an arbitrary function using Mathematica and ...
14
votes
0answers
278 views
Why does Mathematica choose branches as it does in this situation?
Consider these integrals: ...
9
votes
0answers
238 views
Is there a way to teach integrate new solutions?
I have an integral which I can solve, but integrate cannot: ...
8
votes
0answers
68 views
Finding simplifying substitutions for an integral involving limits and integrand
[The following is based on a William Lowell Putnam Mathematical Competition problem.] Consider the definite integral: $I = \int\limits_2^4 \frac{\sqrt{\log (9-x)}}{\sqrt{\log (9-x)}+\sqrt{\log ...
7
votes
0answers
86 views
Strange kernel segmentation fault in Integrate
Bug introduced in 8.0 or earlier and persisting through 10.3 Define tointegrate = D[V[y1[t], y2[t]], y2[t]] D[V[y1[t], y2[t]], {y1[t], 2}] and try to find its ...
7
votes
0answers
185 views
Incorrect evaluation for Thue-Morse signed harmonic series
I would like to evaluate $$s = 1 - \frac{1}{2} - \frac{1}{3} + \frac{1}{4} - \frac{1}{5} + \frac{1}{6}+\frac{1}{7}-\frac{1}{8} - ... + \frac{(-1)^{\textrm{binary digit sum}(n-1)}}{n} + ... $$ where ...
5
votes
0answers
163 views
Integral formula for the inverse Laplace transform doesn't work?
The direct implementation of the definition of the inverse Laplace transform using Integrate fails in the following case: ...
5
votes
0answers
110 views
Convoluting inverse square root with Gaussian
I would like to convolute the inverse square root on the interval [0,inf] with a Gaussian function, like so: ...
5
votes
0answers
193 views
Strange Integrate behavior (a bug!)
The following two calculations should give the same result. After all, integration is a linear operation. I have pasted the code below in case you want to play with it. ...
5
votes
0answers
137 views
Calculating a limit with a result that is discontinuous in the parameters
The following limit is left unevaluated (Edit: added the assumption that $\epsilon$ is real thanks to the comment below): ...
4
votes
0answers
102 views
Exploring formal limit definition
I found a few demonstrations on the Wolfram Demonstration Project site that help users to explore the formal definition of a limit. That is, $$\lim_{x\to a}f(x)=L$$ if and only if for every ...
4
votes
0answers
108 views
Integral of DiracDelta giving an unusual answer
I have been getting a number of seemingly inconsistent solutions to integrals of Dirac delta functions in which the integrand evaluates to DiracDelta[0] at one of ...
4
votes
0answers
89 views
MacDonald formula for Modified Bessel Functions
How can I make Mathematica understand these two integrals? $$\int_0^{\infty} e^{-x \cosh{\xi}} d\xi = K_0(x)$$ $$\int_0^{\infty} e^{-\frac{1}{2} \Big( \frac{x y}{u} + u \frac{x^2+y^2}{x y} \Big) } ...
4
votes
0answers
81 views
Integrate yields complex value, while after variable transformation the result is real. Bug?
I have the follwoing integral: Integrate[1/Sqrt[0.7 + 0.3*(1 + z)^3], {z, 0, Infinity}, Assumptions -> z \[Element] Reals] >> -3.36354 - 3.85013 I the ...
4
votes
0answers
230 views
Strange behaviour of MMA in derivatives of some standard functions
There are some peculiar things to be discovered in derivatives of some standard functions in MMA: Strange behaviour Example 1: Abs We have ...
4
votes
0answers
122 views
Linearised Einstein equations
I need to compute the linearised Einstein Equations around a fixed metric $$g_{μν}=\text{Minkowski metric} + h_{μν}$$ which is not the flat metric. Does anyone know a Mathematica package or a ...
4
votes
0answers
304 views
Spherical harmonic derivative
Consider the following substitution Derivative[2, 0][S][th, ph] /. S -> Function[{th, ph}, SphericalHarmonicY[3, 0, th, ph]] which gives correct answer. While ...
4
votes
0answers
157 views
Version 8.0 integrates but Version 9.0.1 doesn't
I am trying to run the following integral in version 9.0, but it fails: ...
3
votes
0answers
80 views
Inconsistent results for symbolic trigonometric integral
I am trying to evaluate (on Mathematica v.9) the integral \begin{equation} \int_0^x [\sin (x) \sin (2 y)-\sin (2 x) \sin (y)]^t \,\mathrm{d}y, \end{equation} where $t$ is an even, positive integer. ...
3
votes
0answers
85 views
Bug or feature? If statement inside double integrals
(taken from http://math.stackexchange.com/questions/1592901/bug-or-feature-if-statement-inside-double-integrals) Generally speaking, $$ \int_c^a f(x) dx = \int_c^{a-b} f(x) dx + \int_{a-b}^a f(x) ...
3
votes
0answers
90 views
Using NIntegrate and DiscretePlot to visualize pseudodifferential operators
In harmonic analysis, pseudodifferential operators are a way to generalize the notions of derivatives, through the use of Fourier transforms. The basic idea being, Let ...
3
votes
0answers
118 views
Function for the Second Derivative Test
I wrote the following function. It is based on Mathematica for Rogawski's Calculus, 2nd Ed, 2007, Based on Mathematica 7. See: http://users.rowan.edu/~hassen/Math_Rogawski_Calc.htm, Chapter 14. I made ...
3
votes
0answers
50 views
Indeterminate expression 0(I*Infinity]) on indefinite integrals on first use only
I do not know if this is new or not, I googled before. Windows 7, 10.1 64 bit: Integrals generate this error message The strange thing also is that the message shows up the first time the command ...
3
votes
0answers
491 views
Symbolic matrix calculus: What's new in Version 9
I have seen some of the related posts, which ask about doing matrix calculation on tensors unknown dimensions. One of the posts mentioned that version 9 has some capability, but it was concerned about ...
3
votes
0answers
103 views
Derivative of generating function (Example from documentation)
In the documentation for GeneratingFunction, the following example is given under Examples -> Properties & Relations -> Derivative: ...
3
votes
0answers
202 views
Is this result wrong because of calculation time? (and more questions about Assumptions)
I am confused with the Integrate Given by Mathematica. First let's see a one-dimensional case: ...
3
votes
0answers
244 views
Numerical-Symbolical Integration (Calculus)
I created a simple numeric-symbolic integration. Here you can use symbolical and numerical techniques at the same time. You can also interpolate numerical integrals. The problem with my function is ...
2
votes
0answers
67 views
How to Mellin transform a complicated Log integrand?
I got a question concerning an integral. I need to know the analytical expression. I have to Mellin transforn a function and the integral is then sth. like this: $$ \int x^{N-1} \frac{Ln(a -x)}{1-x} ...
2
votes
0answers
70 views
Derivative of vector dot product with respect to a vector
I have the expression: Transpose[gvecI, {2, 1}].x (m[1] + m[2]) gvecI and x are [3x1] ...
2
votes
0answers
75 views
Minimax for conditioned UnitStep functions
I made a simplified version of my problem, and I have now: ...
2
votes
0answers
127 views
Converting integral equations to differential equations
I am trying to use Mathematica to convert integrals to differential equations, of any order. An example of an integral equation is given below, in Mathematica code. Could you please advise as to the ...
2
votes
0answers
47 views
Computing an inexact derivative with some terms preserved in exact form
I am beginning to learn Mathematica and have the following question: How can I return a derivative with exact numbers instead of one involving numerical approximations? My original, single variable ...
2
votes
0answers
82 views
Evaluating an integral
Here is the input: Integrate[E^(2 I*t)/(2*Pi*(E^(I*t) - z)), {t, 0, 2 Pi}] and here the output: ...
2
votes
0answers
107 views
How can I do multiple (double) integral with periodic boundary conditions?
I am trying to do a simple double integral $\int_0^l \left[\int_0^l h^3 h_{xxx}dx\right]dx$ with periodic boundary conditions $h[0,\tau]=h[l,\tau]$ $h_x[0,\tau]=h_x[l,\tau]$ ...
2
votes
0answers
154 views
NDSolve is running an extremely long time: how can I save the existing data?
I am trying to solve a PDE by the following code. It takes 1 hour or so to reach t=25.72 but about 20 hours to reach t=25.72404031638060174049337306126853310997. Actually, the time step is extremely ...
2
votes
0answers
77 views
Integrate with Subscript: does this crash your kernel too?
I encountered some strange behaviour running v10.0.1 on a Mac Pro, while working with Integrate and Subscript. Starting from a ...
2
votes
0answers
74 views
Order of integration resolves “Indeterminate expression encountered.”
Bug introduced in 10.0.0 and fixed in 10.0.1 Today I came across the following integral: ...
2
votes
0answers
168 views
Mathematica not evaluating q derivative of Jacobi theta function
Jacobi theta functions, $\theta_a(u,q)$ for $a=1,2,3,4$ are defined in the unit disk $|q|<1$. For some reason that I would like to understand, Mathematica does not evaluate numerically the $q$ ...
2
votes
0answers
98 views
Result of symbolic integration changed drastically by making assumptions
I would like to know the underlying reason for different outcomes for the two integration operation below. One of them includes a few assumptions, otherwise both have the same integrand: ...
2
votes
0answers
176 views
Integration of UnitStep or HeavisideTheta
I am using Mathematica 9. If I evaluate Integrate[Piecewise[{{1 - r, r > 0 && r < R}}, 0], r] then I get ...
2
votes
0answers
239 views
How to prevent simplification of hypergeometric functions resulting from integrations?
Definite integrals from 0 to Infinity over a product of two hypergeometric (including exponential, trigonometric, hyperbolic, ...
2
votes
0answers
255 views
Positive integrand giving negative answer
I'm integrating a positive function f(t) times sin(t) from 0 to pi/5 and get -38. Actually f is slightly negative for a short time (smallest value ~ -0.0005), but far from enough to explain this. ...
2
votes
0answers
211 views
About calculating Integrals
I am trying to Integrate the following Integral (all of the variables are reals) $$\int Exp(-(\cos^{-1}\left[\frac{\text{n}_0 (\text{v}_0-\text{x}_0)+\text{n}_1 (\text{v}_1-\text{x}_1)+\text{n}_2 (u ...
2
votes
0answers
399 views
Simple contour integral with a parameter gone wrong
Bug introduced in 7.0 and fixed in 7.0.1 I run into the following problem, I tried to evaluate a very simple integral: ...
1
vote
0answers
109 views
How do I calculate this integral along a complex line (not a contour) in mathematica?
Given the function: $$f(z) = \frac{\pi ^2 \cot(\pi \sqrt z) \cot(\pi \sqrt {z-a})}{4\sqrt{z^2 - az}}$$ Where a is a positive constant and: $$z = \frac{i}{b} + t $$ $$t>a$$ Where b is also a ...
1
vote
0answers
40 views
When evaluating this integral Mathematica returns two different answers
When evaluating this integral in Mathematica 10.3 Student Edition: ...
1
vote
0answers
57 views
Good coding for integral substituions
This question is aimed at improving both my coding and my understanding of MMa's capabilities. A lot of my code is still hacks and workarounds for things I don't know how to do properly. I want to ...
1
vote
0answers
131 views
A triple integral involving Abs over an ellipsoidal region
I'm a newbie and I'm trying to calculate a triple integral. But Mathematica doesn't output for half an hour and the CPU occupancy rate of my Wolfram doesn't changed when it's calculating. Here is the ...
Smart Contracts
Here, you will get a short introduction to smart contracts.
Smart contracts (also known as cryptocontracts) are often described as a threat to the traditional legal industry, because they are contracts that can execute their terms automatically online. They can be used, for example, when entering a contract with a media outlet on payment per click generated by your homepage: a number of data points are written into the contract so that payment is triggered automatically for every click you generate.
What is smart contracts?
In other words, smart contracts is a protocol that verifies, facilitates and executes a contract digitally and with nearly no human involvement. This way, you can avoid having to use a lawyer or a notary as middle man.
How do you use Smart Contracts?
There are numerous ways to use smart contracts. You can integrate them into code to be replicated and supervised as part of a blockchain network. It is very common when trading with cryptocurrency. One benefit is, that it is very difficult to change or manipulate a smart contract simultaneously verified by many. On the other hand, the visibility of the smart contracts in question to anyone is a drawback.
You can also use a simpler automatic setup, such as Contractbook’s API-solution, but that is not a smart contract, technically speaking.
DEV Community
Ibrahim S
Posted on
POD - Kubernetes
A Pod encapsulates one (or) more containers, shared storage (volumes), and configuration options about how to run the containers.
Here are some key points about Pods in Kubernetes:
Atomic Unit: A Pod represents a single instance of an application in Kubernetes. It's the basic building block for deploying and managing containers.
Single (or) Multiple Containers: A Pod can contain one (or) more containers that are tightly coupled and share certain resources such as networking and storage. These containers are scheduled onto the same node and can communicate with each other via localhost.
Shared Network and Storage: Containers within the same Pod share the same network namespace, including IP address and port space, and they can communicate with each other using localhost. They can also share storage volumes, allowing them to share data.
Lifecycle: Pods have a lifecycle that includes phases such as Pending, Running, Succeeded, Failed, (or) Unknown. Kubernetes manages the lifecycle of Pods, ensuring they are always in the desired state.
Immutable: Pods are immutable once created. If changes are needed, a new Pod is typically created with the updated configuration.
Labels and Selectors: Pods can be labeled with key-value pairs, and selectors are used to identify Pods based on these labels. This allows for grouping and organizing Pods, which is useful for tasks like service discovery and load balancing.
Controllers: While Pods can be created manually, they are typically managed by higher-level controllers like Deployments, StatefulSets (or) DaemonSets. These controllers ensure that a specified number of Pod replicas are running and manage rolling updates, scaling, and self-healing.
Resource Management: Pods can have resource requests and limits specified for CPU and memory usage, allowing Kubernetes to make scheduling decisions based on available resources.
Pod Lifecycle Management: Kubelet handles the lifecycle of pods, including starting, stopping, and restarting containers as necessary to maintain the desired state specified in the pod definition.
We define Pods in Kubernetes using YAML files. Using these YAML files, we can create objects that interact with the Kubernetes API (Pods, Namespaces, Deployments, etc.). Under the hood, kubectl converts the information defined in our YAML file to JSON, which is sent as a request to the Kubernetes API.
apiVersion: v1
kind: Pod
metadata:
name: nginx-1
labels:
name: nginx-1
env: production
spec:
containers:
- name: nginx
image: nginx
To create Kubernetes objects using YAML, we need to set values for the following fields.
apiVersion - This defines the Kubernetes API version. We want to use this YAML file.
kind - This defines what kind of Kubernetes object we want to create.
metadata - This is data that helps us uniquely identify the object that we want to create. Here we can provide a name for our app, as well as apply labels to our object.
spec - This defines the desired state of our object. The format of spec depends on the kind of object; for our Pod file, we provide information about the containers that we want to run in the Pod.
Deploy and interact with our Pods using kubectl
kubectl (kube-control, or as some people call it, kube-cuddle) is the Kubernetes command-line tool. It allows us to run commands against Kubernetes clusters.
Create a Pod using our YAML definition file like this:
kubectl apply -f mypod.yaml
List all of our Pods
kubectl get pods
Pods and Containers are only accessible within the Kubernetes Cluster. Expose a container port externally using kubectl
kubectl port-forward mypod 8080:80
Delete the pod
kubectl delete pod mypod
If the Pod is managed by a Deployment, deleting it only causes the Pod to be destroyed and recreated. To remove it for good, we can also delete the Deployment that manages the Pod.
kubectl delete deployment mydeployment
Ensure that our Pods in Kubernetes are healthy
Kubernetes relies on Probes to determine whether or not a Pod is healthy.
Liveness Probes
Readiness Probes
Startup Probe
Probe https://dev.to/ibrahimsi/probe-kubernetes-o67
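As a rough illustration (my addition, not from the original post), a liveness probe can be declared directly on the container in the Pod spec shown earlier; the probe path and timing values below are assumptions:

apiVersion: v1
kind: Pod
metadata:
  name: nginx-1
spec:
  containers:
  - name: nginx
    image: nginx
    livenessProbe:
      httpGet:
        path: /
        port: 80
      initialDelaySeconds: 5
      periodSeconds: 10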
Kubernetes manages the lifecycle of Pods, ensuring they are always in the desired state. Below is the lifecycle of a Pod in Kubernetes.
✅ 𝗣𝗲𝗻𝗱𝗶𝗻𝗴 ➡ When a pod is created, it enters the Pending state. In this state, the Kubernetes scheduler assigns the pod to a suitable node in the cluster.
The scheduler considers factors like resource availability, node affinity, and pod anti-affinity when making this assignment.
✅ 𝗥𝘂𝗻𝗻𝗶𝗻𝗴 ➡ Once a pod is assigned to a node, it transitions to the Running state. In this state, the pod's containers are created and started, and they begin running on the assigned node.
However, the containers may still be initializing or starting up, so the pod may not be fully ready to serve traffic.
✅ 𝗦𝘂𝗰𝗰𝗲𝗲𝗱𝗲𝗱 ➡ If a pod completes its main task successfully and terminates, it enters the Succeeded state.
This typically happens for batch jobs or one-time tasks. Once in the Succeeded state, the pod remains in this state until it is explicitly deleted. The system can reclaim the resources occupied by the pod.
✅ 𝗙𝗮𝗶𝗹𝗲𝗱 ➡ If a pod encounters an error or its containers fail to start or run, it enters the Failed state.
Struct std::option::IntoIter (stable since 1.0.0)
pub struct IntoIter<A> { /* fields omitted */ }
An iterator over the value in Some variant of an Option.
The iterator yields one value if the Option is a Some, otherwise none.
This struct is created by the Option::into_iter function.
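As a quick illustration (added here, not part of the original page), into_iter lets an Option be consumed as an iterator:

fn main() {
    let some_value: Option<i32> = Some(7);
    let none_value: Option<i32> = None;

    // For `Some`, the iterator yields the contained value exactly once.
    let collected: Vec<i32> = some_value.into_iter().collect();
    assert_eq!(collected, vec![7]);

    // For `None`, the iterator yields nothing.
    let empty: Vec<i32> = none_value.into_iter().collect();
    assert!(empty.is_empty());
}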
Trait Implementations
impl<A> TrustedLen for IntoIter<A>
impl<A> DoubleEndedIterator for IntoIter<A>
impl<A> FusedIterator for IntoIter<A> (since 1.26.0)
impl<A> Iterator for IntoIter<A>
    type Item = A (the type of the elements being iterated over)
impl<A> Debug for IntoIter<A> where A: Debug
impl<A> Clone for IntoIter<A> where A: Clone
impl<A> ExactSizeIterator for IntoIter<A>
Auto Trait Implementations
impl<A> UnwindSafe for IntoIter<A> where A: UnwindSafe
impl<A> RefUnwindSafe for IntoIter<A> where A: RefUnwindSafe
impl<A> Unpin for IntoIter<A> where A: Unpin
impl<A> Send for IntoIter<A> where A: Send
impl<A> Sync for IntoIter<A> where A: Sync
Blanket Implementations
impl<T> From<T> for T
impl<T, U> TryFrom<U> for T where U: Into<T>
    type Error = Infallible (the type returned in the event of a conversion error)
impl<I> IntoIterator for I where I: Iterator
    type Item = <I as Iterator>::Item (the type of the elements being iterated over)
    type IntoIter = I (which kind of iterator are we turning this into?)
impl<T, U> Into<U> for T where U: From<T>
impl<T, U> TryInto<U> for T where U: TryFrom<T>
    type Error = <U as TryFrom<T>>::Error (the type returned in the event of a conversion error)
impl<T> Borrow<T> for T where T: ?Sized
impl<T> BorrowMut<T> for T where T: ?Sized
impl<T> Any for T where T: 'static + ?Sized
impl<T> ToOwned for T where T: Clone
    type Owned = T (the resulting type after obtaining ownership)
Unable to fetch a commit's statuses *uncollapsed* by context
#1
Given a query like
{
repository(owner: "org", name:"repo") {
object(oid: "524d23eee1e7b21e2cd4788cb7e4a354d0a78f13") {
... on Commit {
status {
contexts {
context
state
}
}
}
}
}
}
I’d expect to get a list of every commit status ever published to the commit, i.e., what https://developer.github.com/enterprise/2.13/v3/repos/statuses/#list-statuses-for-a-specific-ref provides. For example:
{
"data": {
"repository": {
"object": {
"status": {
"contexts": [
{
"context": "my-ci",
"state": "success"
},
{
"context": "my-ci",
"state": "pending"
},
{
"context": "my-ci",
"state": "failure"
},
{
"context": "my-ci",
"state": "pending"
},
{
"context": "some-other-thing",
"state": "success"
}
]
}
}
}
}
}
However, it appears that querying for contexts collapses the statuses by context similar to what https://developer.github.com/enterprise/2.13/v3/repos/statuses/#get-the-combined-status-for-a-specific-ref does. Therefore I’m instead seeing:
{
"data": {
"repository": {
"object": {
"status": {
"contexts": [
{
"context": "my-ci",
"state": "success"
},
{
"context": "some-other-thing",
"state": "success"
}
]
}
}
}
}
}
Is there a way to express the query I want without resorting to hitting the REST API? In my particular case I’m dealing with (potentially) hundreds of commits so I’d rather not make an additional request to fetch the status history for each commit.
I’m using Github Enterprise 2.13.12.
|
__label__pos
| 1 |
VoiceOver Accessibility
Apple has made it really easy to add VoiceOver support to iOS Apps. In many cases setting the accessibility label for each interface element in Interface Builder gets 90% of the job done. What I want to cover in this post is the remaining 10% that will ensure an App interacts well with VoiceOver. If you have never considered making your App accessible you should read this great post by Matt Gemmell on Accessibility for iPhone and iPad apps.
What is VoiceOver?
VoiceOver is a technology built into all iOS (and Mac) devices that provides audible aids to the user when navigating and interacting with an App. The UIAccessibility API was introduced with version 3.0 of the SDK to allow the app to provide additional information to VoiceOver. This includes such things as setting the label and optionally a hint that is read by VoiceOver when an element in the user interface is selected. In addition you set accessibility traits for each UI element indicating for example that it acts like a button or whether or not it has been selected.
The easiest way to understand VoiceOver is to try it on some existing apps and get a feel for how it works. Before diving into an example app I will quickly review how to access VoiceOver and some other tools that you will want to use when testing your App.
Toggling VoiceOver Support on a Device
You can turn VoiceOver support on for a device from the Settings application (Settings > General > Accessibility > VoiceOver). However for testing purposes I find it more convenient to be able to quickly switch VoiceOver on and off from within the App. To do that I set the Triple-click Home button option to Toggle VoiceOver (from the Settings > General > Accessibility screen):
With this option enabled you can easily turn VoiceOver on and off at any point by triple-clicking the Home button on the device. This makes it easy to examine specific situations in your App with and without VoiceOver activated. It can also be an education to enable VoiceOver on other Apps to see how well or badly other developers are handling accessibility.
Accessibility Inspector in the iOS Simulator.
The iOS Simulator does not directly support VoiceOver so you will always need to test on a device to see how well your App performs. However the iOS Simulator does have an extremely useful tool for debugging UIAccessibility issues. It is called the Accessibility Inspector and it can be activated on the Simulator from Settings > General > Accessibility. Once enabled you should see the inspector window displayed:
The Accessibility Inspector does not speak to you but it does provide you with information about the currently selected accessible element. The Inspector can be temporarily disabled/enabled by clicking the close control in the upper left corner of the Inspector. This can be useful when you need to navigate around the app as you can selectively enable the inspector to examine each element in your user interface.
TaskTimer - An Example in Adding Accessibility
The rest of this post is going to be a case study in adding VoiceOver support to an example App called TaskTimer. The app is based on the Xcode Master Detail template and is a variation on a task list manager - the variation being that you can time how long a particular task takes. This is a universal app so both iPhone and iPad User Interfaces are supported. The master view is a UITableView containing the list of tasks as shown in the iPhone screenshot below:
The task list follows the usual conventions of a table view in that new tasks can be inserted using the “+” button in the navigation bar and deleted via the “Edit” button. The actual task data is stored in a core data model. A completed task is shown by the green tick and the actual duration (mm:ss) is shown under the task name in the table cell view. The App is somewhat limited in that it only handles task durations up to 59 minutes 59 seconds long but it is good enough for the purposes of this post. Selecting a row in the table shows the detail view for the selected task which allows the task name to be edited. In addition a custom view allows the task timer to be stopped and started via two buttons with the current task duration displayed in a central text label:
As I mentioned this is a Universal app so just for completeness the iPad user interface is shown below in portrait orientation:
UITableView Accessibility
If we use the Accessibility Inspector in the iOS Simulator we can see how well this App performs before we make any modifications. If we first take a look at the Task List table view we can find some UI elements that work fine and others that definitely need some work. The standard Navigation bar elements such as the Edit button, the title and the Add button all work fine without any effort on our part:
Note how the accessibility traits indicate that the first UI element is a button where as the second element is static text. Correctly setting the accessibility traits of an element is key to ensuring that VoiceOver interacts correctly with the App. Standard iOS UIKit elements such as the Navigation Bar are generally already properly configured. Simple UI elements can also be configured in Interface Builder when creating the interface.
Things are not so good if we look at one of the rows in the table for a completed task:
By default the table view cell has combined the main text label and the detailed text label for the accessibility label. The two fields are separated by a comma which means VoiceOver will pause briefly when reading the label. This is fine but VoiceOver does not understand that 00:10 actually means 0 minutes, 10 seconds - it simply reads “zero - ten” which is not very informative. Note also that there is no mention of the status of the task. In this case the green tick indicates that the task has been completed but the VoiceOver interface is unable to present that piece of information to the user.
So to improve the accessibility of this view we should in the first instance address the following issues:
• Ensure that when VoiceOver reads the task details it correctly interprets the duration making it clear that it is a time.
• We should also ensure that the task completion status is indicated for each row in the list
One thing we could also do is add a hint to the table view cell indicating that selecting a row will take the user to the detail task view. In this case I consider that unnecessary as I think it is OK to assume that users understand the basic iOS user interface idioms.
To customise the response from VoiceOver for a UITableView cell we simply need to set the accessibilityLabel property when we configure the cell in the table view controller. In the situation where the task is not yet completed and so does not have a final duration we will stick with the default provided by the cell text label which is the name of the task (task.note). However when the task is complete (task.complete) we want to both indicate the completion status and read the duration in a human friendly way. The following code added to the configureCell:atIndexPath method in UYLTaskListViewController should do the job:
cell.accessibilityLabel = task.note;
if ([task.complete boolValue]) {
NSUInteger minutes = [task.duration integerValue] / 60;
NSUInteger seconds = [task.duration integerValue] % 60;
cell.detailTextLabel.text = [NSString stringWithFormat:@"%02u:%02u",
minutes, seconds];
cell.imageView.image = [UIImage imageNamed:@"checked.png"];
NSString *durationText = [NSString stringWithFormat:@"%@ %@",
task.note,
NSLocalizedString(@"completed in", nil)];
durationText = [durationText stringByAppendingString:
[task.duration stringValueAsTime]];
cell.accessibilityLabel = durationText;
}
To generate the text to represent the duration I have created a category (UYLTimeFormatter) on the NSNumber class to add the method stringValueAsTime since we will need it more than once. An example of the output using the Accessibility Inspector is shown below:
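The category itself is not shown in the post; a minimal sketch of what UYLTimeFormatter could look like is below (the exact phrasing of the string is an assumption, based on the "2 minutes 10 seconds" style value mentioned later):

@interface NSNumber (UYLTimeFormatter)
- (NSString *)stringValueAsTime;
@end

@implementation NSNumber (UYLTimeFormatter)
- (NSString *)stringValueAsTime
{
    // Convert a number of seconds into a VoiceOver-friendly "X minutes Y seconds" string.
    NSUInteger totalSeconds = [self unsignedIntegerValue];
    NSUInteger minutes = totalSeconds / 60;
    NSUInteger seconds = totalSeconds % 60;
    return [NSString stringWithFormat:@"%lu minutes %lu seconds",
            (unsigned long)minutes, (unsigned long)seconds];
}
@end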
Custom View Accessibility
So far adding VoiceOver support has been very easy since it has just involved setting the accessibilityLabel for certain key UI elements. However for the detailed task view things get slightly more complicated. A custom class UYLCounterView is used to implement the numeric counter and the two buttons used to start and stop the counter. By default the buttons will use either the title if set or the name of the UIImage file to provide an accessibility label. In this case I am using images named start.png and stop.png which provides a reasonable default but note that these labels are of course not localised.
A more serious problem though is that the value of the counter is not accessible at all. The problem is that the text is drawn in the view using a drawAtPoint method so there is no way to directly set an accessibilityLabel or other accessibility properties for the counter. The solution is to implement the UIAccessibilityContainer Protocol for the custom view. This allows us to define arbitrary rectangles in the view that represent the elements we want to make accessible. In this case the view contains three elements, the start button, the stop button and the counter that we want to make accessible.
The UIAccessibilityContainer protocol is an informal protocol so we do not need to declare anything in our UYLCounterView class indicating that we are adopting it. What we will need though is a mutable array to hold our accessible elements. The following property is added to the UYLCounterView interface definition:
@property (nonatomic, strong) NSMutableArray *accessibleElements;
We need to create and configure a UIAccessibilityElement object for each of the three elements in our custom view and store them in this array. To avoid unnecessary effort when VoiceOver is not active we will lazily create them in the getter method in the UYLCounterView implementation. The first thing we do therefore in the getter is to check if we have already created the array and if not we allocate it:
- (NSArray *)accessibleElements {
if (_accessibleElements != nil) {
return _accessibleElements;
}
_accessibleElements = [[NSMutableArray alloc] init];
Now we need to create a UIAccessibilityElement and correctly set the accessibility properties for each view element. We need to add the elements to the accessibleElements array in the order they are presented to the user from top-left to bottom-right. So the first element we will create is for the start button:
UIAccessibilityElement *startElement = [[UIAccessibilityElement alloc]
initWithAccessibilityContainer:self];
startElement.accessibilityFrame = [self convertRect:self.startButton.frame
toView:nil];
startElement.accessibilityLabel = NSLocalizedString(@"Start", nil);
startElement.accessibilityTraits = UIAccessibilityTraitButton;
if (self.startButton.enabled == NO)
startElement.accessibilityTraits |= UIAccessibilityTraitNotEnabled;
[_accessibleElements addObject:startElement];
Some notes about each of the above lines of code
• A UIAccessibilityElement is created using the initWithAccessibilityContainer instance method. This method takes a single parameter which is the containing view. In this case the UYLCounterView is the containing view so we can simply use self.
• The accessibilityFrame property is where we specify the area of the view that we want to use for this accessibility element. For the start button this is just the frame of the UIButton but note that the accessibility frame must be specified in screen coordinates. This is an important point to understand, if you use the view coordinate system you will get unexpected results especially when the view rotates (more on handling view rotation later). The easiest way to convert the button frame to the screen coordinates is to use the UIView instance method convertRect:toView: specifying nil as the target view, this will give us a frame in the screen coordinate system.
• Once we have an accessibility element allocated we can set the accessibilityLabel as with standard UIKit controls. We can, if required, also set an accessibilityHint but in this case we will just set the label to “Start”.
• The accessibilityTraits property is this case indicates that this element behaves as a button
• We need to provide VoiceOver with a indication when the button is enabled/disabled. Usually a UIButton will take care of this for us but since we are creating our own accessibility element to represent the button we need to add the UIAccessibilityTraitNotEnabled trait when the button is not enabled.
• Finally we add the element to our array of accessibileElements.
The next element represents the text drawn in the centre of the view for the timer counter. Calculating the frame for the text is complicated by the need to convert between the view and screen coordinate systems. The code to create the accessibilityElement for the counter is as follows:
CGRect frame = [self convertRect:self.accessibilityFrame fromView:nil];
UIAccessibilityElement *counterElement = [[UIAccessibilityElement alloc]
initWithAccessibilityContainer:self];
CGRect textFrame = CGRectInset(frame, UYLCOUNTERVIEW_MARGIN +
self.startButton.bounds.size.width +
UYLCOUNTERVIEW_MARGIN,
UYLCOUNTERVIEW_MARGIN);
counterElement.accessibilityFrame = [self convertRect:textFrame toView:nil];
counterElement.accessibilityLabel = NSLocalizedString(@"Duration", nil);
counterElement.accessibilityValue = [[NSNumber
numberWithInteger:self.secondsCounter]
stringValueAsTime];
counterElement.accessibilityTraits = UIAccessibilityTraitUpdatesFrequently;
[_accessibleElements addObject:counterElement];
Note that in this case we start by converting the accessibilityFrame of the view to our local view coordinate system from the screen coordinate system. We can then calculate the frame for the text using an inset for the margin and size of the buttons. Finally before setting the accessibilityFrame for the element we need to convert back again to screen coordinates.
As well as setting the accessibilityLabel for this element we also set an accessibilityValue string to represent the current value of the counter. We will see how this gets updated later but note that for elements that can have a value which changes (such as volume control) it is better to use the accessibilityLabel to describe the function of the control and accessibilityValue to represent the current value. In this case the label is set to “Duration” and the value will the actual value of the counter (for example “2 minutes 10 seconds”).
Since the value of the counter can update frequently (in this case once a second) we set the UIAccessibilityTraitUpdatesFrequently trait. We will discuss the impact of this attribute and some other options once we have seen the rest of the setup code.
Finally we add the element for the stop button and return the completed array. Since the stop button code is very similar to the code for the start button I will include the code without further comment:
UIAccessibilityElement *stopElement = [[UIAccessibilityElement alloc]
initWithAccessibilityContainer:self];
stopElement.accessibilityFrame = [self convertRect:self.stopButton.frame
toView:nil];
stopElement.accessibilityLabel = NSLocalizedString(@"Stop", nil);
stopElement.accessibilityTraits = UIAccessibilityTraitButton;
if (self.stopButton.enabled == NO)
stopElement.accessibilityTraits |= UIAccessibilityTraitNotEnabled;
[_accessibleElements addObject:stopElement];
return _accessibleElements;
}
With the accessibleElements defined we need to implement three very simple access methods required by the UIAccessibilityContainer protocol. These methods provide an interface to our accessibleElements array:
- (NSInteger)accessibilityElementCount {
return [[self accessibleElements] count];
}
- (id)accessibilityElementAtIndex:(NSInteger)index {
return [[self accessibleElements] objectAtIndex:index];
}
- (NSInteger)indexOfAccessibilityElement:(id)element {
return [[self accessibleElements] indexOfObject:element];
}
To ensure the UIAccessibilityContainer protocol and our newly defined accessibleElements take effect we need to ensure that the UYLCounterView does not itself respond to accessibility requests. To do that we need to implement the isAccessibilityElement method for the custom view and ensure it returns NO:
- (BOOL)isAccessibilityElement {
return NO;
}
We still have a few refinements to make but we already have enough of an implementation to make our custom view accessible. The Accessibility Inspector now gives us a result when we select the counter text as follows:
Updating the Counter Value
When we created the UIAccessibilityElement for the counter text we set the accessibilityValue property to the current value of the counter. Since the counter is updated every second when the counter is running we need to ensure that we also update the accessibilityValue. We can easily do that in the setter for the secondsCounter value maintained by the UYLCounterView:
- (void)setSecondsCounter:(NSUInteger)secondsCounter {
if (secondsCounter > UYLCOUNTERVIEW_LIMIT) {
secondsCounter = UYLCOUNTERVIEW_LIMIT;
}
_secondsCounter = secondsCounter;
if (_accessibleElements) {
UIAccessibilityElement *counterElement = [self.accessibleElements
objectAtIndex:UYLCOUNTERVIEW_ELEMENTINDEX_COUNTERTEXT];
counterElement.accessibilityValue = [[NSNumber
numberWithInteger:secondsCounter]
stringValueAsTime];
}
[self setNeedsDisplay];
}
Note that the accessibleElements array should only be allocated via the getter method we saw previously when VoiceOver is active. We therefore use the ivar to check if it has been allocated before attempting to access it. After retrieving the UIAccessibilityElement for the counter we then set the accessibilityValue using our NSNumber stringValueAsTime category method.
Setting UIButton Traits
The next refinement we need to make is to update the accessibility traits for the two UIButton controls. When the start button is used it disables itself and enables the stop button. We can update the accessibilityTraits of the corresponding UIAccessibilityElement in the method that is triggered by the UIButton action to indicate to VoiceOver which buttons are enabled:
- (void)startAction:(UIButton *)sender {
...
...
if (_accessibleElements) {
UIAccessibilityElement *startElement = [self.accessibleElements
objectAtIndex:UYLCOUNTERVIEW_ELEMENTINDEX_STARTBUTTON];
startElement.accessibilityTraits = UIAccessibilityTraitButton |
UIAccessibilityTraitNotEnabled;
UIAccessibilityElement *stopElement = [self.accessibleElements
objectAtIndex:UYLCOUNTERVIEW_ELEMENTINDEX_STOPBUTTON];
stopElement.accessibilityTraits = UIAccessibilityTraitButton;
}
...
...
}
Likewise when the stop button is used it disables itself so we also need to set the accessibility trait to indicate the new button state:
- (void)stopAction:(UIButton *)sender {
...
if (_accessibleElements) {
UIAccessibilityElement *stopElement = [self.accessibleElements
objectAtIndex:UYLCOUNTERVIEW_ELEMENTINDEX_STOPBUTTON];
stopElement.accessibilityTraits = UIAccessibilityTraitButton |
UIAccessibilityTraitNotEnabled;
}
...
}
Handling View Rotation
I am not sure why, but most accessibility example Apps do not support device orientation changes. I can only suspect that this is due to the extra complexity involved in dealing with the conversion between screen and view coordinate systems. The implementation of the accessibleElements getter in our UYLCounterView takes this conversion into account when calculating the accessibilityFrame for each element. This ensures that the frame is set correctly based on the device orientation at the time the getter is invoked. There is however a problem if the orientation changes once we have calculated the frame. Since we never recalculate these frames they will no longer be correct if the orientation changes. To illustrate the problem this is how the accessibilityFrame for the counter text appears if we rotate from portrait to landscape:
To fix this problem we need to force the accessibility frames to be recalculated when the device orientation changes. The easiest way I have found to do that is to detect the orientation change in the view controller and force the accessibleElements array to be recreated. To detect the orientation change in the UYLTaskViewController we can implement the didRotateFromInterfaceOrientation method:
- (void)didRotateFromInterfaceOrientation:
(UIInterfaceOrientation)fromInterfaceOrientation {
self.taskCounterView.accessibleElements = nil;
}
Now the next time that the accessibleElements array is accessed it will be created from scratch and the frames that are created will be based on the new orientation. We can check that with the inspector with the simulator in landscape:
Notifications
When I covered the creation of the UIAccessibilityElement for the text counter I mentioned that we were using the UIAccessibilityTraitUpdatesFrequently trait but I did not fully describe the effect that this achieves. To fully understand how VoiceOver works in this case you will need to build the example App and install it on a physical device. With the text element selected VoiceOver announces the changing timer value every few seconds. In this case that turns out to be a good choice as the value is changing too quickly for VoiceOver to keep up. In my testing I found that VoiceOver would announce a new value every 4-5 seconds. Note that a new announcement is only made when the value is actually changing so if the time is stopped the announcements also stop. If you take a look at how the StopWatch function within the Apple Clock App works you will find that it also uses a similar technique.
However if we wanted to force VoiceOver to announce every change to the counter view we can tell it that something has changed by posting a notification. The notifications used by VoiceOver are a little different from the usual iOS notifications. To create a notification you need to use a UIKit function named UIAccessibilityPostNotification. This function takes two parameters, the first to specify the notification type and the second an optional notification specific parameter which is usually nil.
To indicate that something on the screen has changed we have two choices for the notification type. The first possibility is to send a UIAccessibilityLayoutChangedNotification to indicate that one or more, but not all, elements on the screen have changed. The other possibility is to send a UIAccessibilityScreenChangedNotification to indicate that the whole screen has changed and VoiceOver should reset. In this case we are only changing the duration text so we could send the layout changed notification by inserting the following function call into the setter method setSecondsCounter:
UIAccessibilityPostNotification(
UIAccessibilityLayoutChangedNotification, nil);
To test this change I also removed the UIAccessibilityTraitUpdatesFrequently trait from the text element. When run on the device VoiceOver does indeed attempt to announce the value of the duration every time it changes. Of course since it takes longer than a second to announce the duration it quickly falls behind which is not very useful. So in this situation where the value of an element is changing faster than VoiceOver can announce the changes it is better to use the UIAccessibilityTraitUpdatesFrequently trait.
Whilst on the subject of notifications there is another option which can be useful when you need to make an announcement for an event that does not update the user interface. You can post a UIAccessibilityAnnouncementNotification which causes VoiceOver to announce the NSString that is passed as the second parameter:
UIAccessibilityPostNotification(
UIAccessibilityAnnouncementNotification, @"Hello World");
Wrapping Up
This has been a long post but I hope that if you have made it this far I have convinced you that adding accessibility support to your App is not difficult. If you do not have custom views it is often trivial and requires minimal coding. Even if you do have custom views creating the necessary accessibility container elements is not much more effort. The key point to remember is that the accessibility frames need to be specified in screen coordinates.
I’ve archived the old Xcode project for this post in my code examples repository:
Gamma Correction
If a material is represented by a base color and some material parameters which are all normalized in the range [0,1], one typically only needs to apply inverse gamma correction on the base color?
Similarly if one uses both a coefficient and texture, which are multiplied, one typically only needs to apply inverse gamma correction to the product of the base color coefficient and texture?
Does one normally perform this inverse gamma correction while packing the GBuffer or while unpacking the GBuffer (with regard to the base color texture of the GBuffer)?
Should one use a sRGB format to avoid having to manually apply the inverse gamma correction?
Edited by matt77hias
1 hour ago, matt77hias said:
If a material is represented by a base color and some material parameters which are all normalized in the range [0,1], one typically only needs to apply inverse gamma correction on the base color?
"Gamma correction" refers to both the encoding and decoding step. To be clear, I find it best to refer to the Linear->sRGB step and the sRGB->Linear step.
Typically anything that's authored by eye requires a sRGB->Linear conversion. This is because the artist who created the data was viewing it on an sRGB monitor. So when they looked at 187/255 on their monitor, they saw a value that appeared to them to be about 50% as bright as 255/255, meaning they're directly painting in sRGB format. Moreover, anything that represents a colour that will be viewed by humans benefits from great compression in sRGB format. When storing linear RGB you need more than 10 bits per channel to not perceive any colour banding, while sRGB achieves this with only 8 bits per channel.
So yes - base colour needs a sRGB->Linear conversion before doing your lighting calculations.
Normal maps definitely do not -- the source data is linear (127 is in the middle), so attempting to decode it when it's not encoded will just bias the normals off to the side :P
Alpha masks, metalness masks, roughness values, probably don't need it... but you can try it to see if artists like the "incorrect" bias. e.g. in some PBR papers you'll see that they choose to expose roughness-squared to their artists instead of roughness, simply because the artists liked that parameter better.
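To make the conversion concrete, here is a rough sketch of the exact sRGB transfer functions in HLSL (my addition, not from the thread; many engines simply approximate them with pow(x, 2.2) and pow(x, 1.0/2.2)):

float SRGBToLinear(float c)
{
    // Piecewise sRGB decode: linear segment near black, power curve elsewhere.
    return (c <= 0.04045) ? (c / 12.92) : pow((c + 0.055) / 1.055, 2.4);
}

float LinearToSRGB(float c)
{
    // Inverse transfer function, e.g. when writing to a non-sRGB swap chain.
    return (c <= 0.0031308) ? (c * 12.92) : (1.055 * pow(c, 1.0 / 2.4) - 0.055);
}

Apply SRGBToLinear to authored colours (such as base colour) before lighting, and LinearToSRGB only at final output; the hardware _SRGB formats perform these same conversions for you.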
9 minutes ago, Hodgman said:
Moreover, anything that represents a colour that will be viewed by humans benefits from great compression in sRGB format.
So in that case the sRGB->linear is done by the hardware and only the coefficient needs to be manually converted. Is the latter normally done on the CPU or shader code?
One thing I do not understand about gamma correction is the portability? If someone uses an old mac with gamma=1.8, how is someone else with a gamma=2.2 able to see what is intended?
3 minutes ago, matt77hias said:
So in that case the sRGB->linear is done by the hardware
If you use one of the blah_SRGB texture formats, then yeah, the hardware will to sRGB->Linear when you read from them, and also do Linear->sRGB when you write to them.
So if you've got sRGB texture maps on your objects, and are copying them into an sRGB GBuffer, the hardware will do [SourceTexture]-sRGB->Linear-[Pixel shader]-Linear->sRGB-[GBuffer]... This seems wasteful, but there's transistors in the HW dedicated to the tasks, so I don't think it's actually that harmful.
3 minutes ago, matt77hias said:
the coefficient needs to be manually converted. Is the latter normally done on the CPU or shader code?
I'm not sure what coefficient you mean?
Any per-material parameters that you're going to put into a cbuffer, but are source from an artist's colour picker -- yeah, you can do sRGB->Linear conversion on the CPU before you place the value into the cbuffer.
4 minutes ago, matt77hias said:
One thing I do not understand about gamma correction is the portability? If someone uses an old mac with gamma=1.8, how is someone else with a gamma=2.2 able to see what is intended?
So, the source data you control. You buy all of your artists an sRGB monitor and use a colour calibrator to ensure that they're all fairly well calibrated. Then you know that your source data is sRGB. Internally you store sRGB data in your GBuffer, because there's magic HW support for it, and your source data is sRGB (so storing linear data in your GBuffer would either be lossy, or require a 16bit channel format...).
For final output (the swap chain), you can create this as a *_SRGB texture and get automatic conversion from linear->sRGB when writing to it... but yeah, this assumes that the user actually has an sRGB / gamma 2.2 monitor.
If you want to give your users a "gamma" slider, which allows them to see the image as you intended, even though they've got a silly gamma 1.8 or gamma 2.4 monitor, then you've got to do a bit of work :) Instead of making the swapchain with an *_SRGB, make a non-SRGB swap chain. This disables the automatic linear->sRGB conversion when writing data into it. Instead, in all of your shaders that directly write into the swap-chain, implement gamma encoding yourself in the shader code -- e.g. return pow( result, rcp_outputGamma )
37 minutes ago, Hodgman said:
If you want to give your users a "gamma" slider, which allows them to see the image as you intended, even though they've got a silly gamma 1.8 or gamma 2.4 monitor, then you've got to do a bit of work :) Instead of making the swapchain with an *_SRGB, make a non-SRGB swap chain. This disables the automatic linear->sRGB conversion when writing data into it. Instead, in all of your shaders that directly write into the swap-chain, implement gamma encoding yourself in the shader code -- e.g. return pow( result, rcp_outputGamma )
Is this the same slider as "move the slider till the image is barely visible", I have seen in some games?
Is it common feature (seem to have a large impact on the design)?
37 minutes ago, Hodgman said:
I'm not sure what coefficient you mean?
I use a coefficient and texture for all my material parameters. If you do not want to vary the parameter between surface positions, just use a white texture and modify the coefficient. Otherwise you need to create lots of single texel textures. (I now also notice that I need two textures if I want to use sRGB textures for colors and RGB textures for the remainder. Except for black and white because they just map to the same values in both spaces.)
Edited by matt77hias
Go JSON Tutorial - Working with JSON Data Using the gabs Package
In the previous post, "Go JSON Tutorial - How to Create and Parse JSON Data", I showed how to create and parse JSON data with encoding/json from the Go standard library.
This article introduces a more convenient third-party Go library, gabs. It is a friendlier wrapper around json.Marshal/json.Unmarshal and map[string]interface{}, designed for working with dynamic or unpredictable JSON data.
I. Installation
go get github.com/Jeffail/gabs
II. Usage
gabs provides a handful of helper functions that make parsing and creating JSON very convenient.
1. Parsing and searching JSON
The core functions are ParseJSON, Path and Search. For example:
package main
import (
"fmt"
"github.com/Jeffail/gabs"
)
func main() {
jsonParsed, _ := gabs.ParseJSON([]byte(`{
"outter":{
"inner":{
"value1":10,
"value2":22
},
"alsoInner":{
"value1":20
}
}
}`))
var value float64
var ok bool
value, ok = jsonParsed.Path("outter.inner.value1").Data().(float64)
fmt.Println("value ==", value, ", ok ==", ok)
// value == 10.0, ok == true
value, ok = jsonParsed.Search("outter", "inner", "value1").Data().(float64)
fmt.Println("value ==", value, ", ok ==", ok)
// value == 10.0, ok == true
value, ok = jsonParsed.Path("does.not.exist").Data().(float64)
fmt.Println("value ==", value, ", ok ==", ok)
// value == 0.0, ok == false
exists := jsonParsed.Exists("outter", "inner", "value1")
fmt.Println("exists ==", exists)
// exists == true
exists = jsonParsed.Exists("does", "not", "exist")
fmt.Println("exists ==", exists)
// exists == false
exists = jsonParsed.ExistsP("does.not.exist")
fmt.Println("exists ==", exists)
// exists == false
}
2. Iterating over objects
Core function: ChildrenMap
jsonParsed, _ := gabs.ParseJSON([]byte(`{"object":{ "first": 1, "second": 2, "third": 3 }}`))
// S is shorthand for Search
children, _ := jsonParsed.S("object").ChildrenMap()
for key, child := range children {
fmt.Printf("key: %v, value: %v\n", key, child.Data())
}
3. Iterating over arrays
Core function: Children. For an array the children are returned in order; for a set of objects the order is random.
...
jsonParsed, _ := gabs.ParseJSON([]byte(`{"array":[ "first", "second", "third" ]}`))
// S is shorthand for Search
children, _ := jsonParsed.S("array").Children()
for _, child := range children {
fmt.Println(child.Data().(string))
}
...
Output:
first
second
third
4. Searching arrays
You can search for a value across the objects in an array directly, as follows:
jsonParsed, _ := gabs.ParseJSON([]byte(`{"array":[ {"value":1}, {"value":2}, {"value":3} ]}`))
fmt.Println(jsonParsed.Path("array.value").String())
Output:
[1,2,3]
5. Generating JSON
Core functions: New, Set
jsonObj := gabs.New()
// or gabs.Consume(jsonObject) to work on an existing map[string]interface{}
jsonObj.Set(10, "outter", "inner", "value")
jsonObj.SetP(20, "outter.inner.value2")
jsonObj.Set(30, "outter", "inner2", "value3")
fmt.Println(jsonObj.String())
Output:
{"outter":{"inner":{"value":10,"value2":20},"inner2":{"value3":30}}}
If you want prettier output (with indentation), you can print it like this:
fmt.Println(jsonObj.StringIndent("", " "))
6. Generating arrays
Core functions: New, Array, ArrayAppend
jsonObj := gabs.New()
jsonObj.Array("foo", "array")
// Or .ArrayP("foo.array")
jsonObj.ArrayAppend(10, "foo", "array")
jsonObj.ArrayAppend(20, "foo", "array")
jsonObj.ArrayAppend(30, "foo", "array")
fmt.Println(jsonObj.String())
Output:
{"foo":{"array":[10,20,30]}}
Building arrays by index is also supported.
jsonObj := gabs.New()
// Create an array with the length of 3
jsonObj.ArrayOfSize(3, "foo")
jsonObj.S("foo").SetIndex("test1", 0)
jsonObj.S("foo").SetIndex("test2", 1)
// Create an embedded array with the length of 3
jsonObj.S("foo").ArrayOfSizeI(3, 2)
jsonObj.S("foo").Index(2).SetIndex(1, 0)
jsonObj.S("foo").Index(2).SetIndex(2, 1)
jsonObj.S("foo").Index(2).SetIndex(3, 2)
fmt.Println(jsonObj.String())
Output:
{"foo":["test1","test2",[1,2,3]]}
7. Converting back to JSON
The simplest example:
jsonParsedObj, _ := gabs.ParseJSON([]byte(`{
"outter":{
"values":{
"first":10,
"second":11
}
},
"outter2":"hello world"
}`))
jsonOutput := jsonParsedObj.String()
// Becomes `{"outter":{"values":{"first":10,"second":11}},"outter2":"hello world"}`
What is this useful for? For example, you can extract just a sub-document of the JSON, as follows:
jsonOutput := jsonParsedObj.Search("outter").String()
// Becomes `{"values":{"first":10,"second":11}}`
8. Merging two containers
Two JSON structures can be merged into one, with conflicts handled automatically. The core function is Merge. Here are two examples:
jsonParsed1, _ := ParseJSON([]byte(`{"outter": {"value1": "one"}}`))
jsonParsed2, _ := ParseJSON([]byte(`{"outter": {"inner": {"value3": "three"}}, "outter2": {"value2": "two"}}`))
jsonParsed1.Merge(jsonParsed2)
// Becomes `{"outter":{"inner":{"value3":"three"},"value1":"one"},"outter2":{"value2":"two"}}`
jsonParsed1, _ := ParseJSON([]byte(`{"array": ["one"]}`))
jsonParsed2, _ := ParseJSON([]byte(`{"array": ["two"]}`))
jsonParsed1.Merge(jsonParsed2)
// Becomes `{"array":["one", "two"]}`
9. Parsing numbers
Under the hood gabs uses the json package from the Go standard library, which parses every number as float64 by default. If you need integer values, use a json.Decoder.
Core function: ParseJSONDecoder
sample := []byte(`{"test":{"int":10, "float":6.66}}`)
dec := json.NewDecoder(bytes.NewReader(sample))
dec.UseNumber()
val, err := gabs.ParseJSONDecoder(dec)
if err != nil {
t.Errorf("Failed to parse: %v", err)
return
}
intValue, err := val.Path("test.int").Data().(json.Number).Int64()
// intValue = 10
How can I iterate the statements in a function using CodeElement / TextPoint / EditPoint? (0 votes, 0 answers, 9 views)
How can I iterate through the lines in a function and then classify its statements using CodeElement? I have a function with a select statement of a particular enumeration, like below: Select case ...

Is there any macro to get the root directory of the TFS source control in Visual Studio? (2 votes, 1 answer, 337 views)
Is there any built-in macro to obtain the TFS source control root directory in Visual Studio? For example, just like $(ProjectDir), I would use $(TFSSourceControlRoot) or something like that? The ...

Build solution only if old in VS Macro IDE (1 vote, 0 answers, 88 views)
Using the Visual Studio Macro IDE, how can I tell if a build is old or outdated? I currently have my F5 remapped to run a custom macro that will go into debugging mode and attach to the browser ...

How can I call a macro with multiple arguments via the Visual Studio IDE? (2 votes, 0 answers, 168 views)
In Visual Studio 2010, if a macro has one argument, you can call it from the Command window, the Find box, or the Immediate window, by typing >Foo bar where Foo is the macro name and bar is a ...

Is there an API-style reference for Visual Studio macros? (3 votes, 1 answer, 222 views)
I would like to automate some common tasks that I do using Visual Studio macros but I can't find a decent API reference for Visual Studio objects accessible through macros. Does such a thing exist? ...

Visual Studio 2010 macro to delete SQL files from a database project (1 vote, 2 answers, 688 views)
I've discovered that in Visual Studio 2010 Professional it is possible to refresh the database for a SQL Server Database Project (this feature is normally only available to the Premium and Ultimate ...

How can I make a Visual Studio macro to attach the debugger to all instances of w3wp.exe? (6 votes, 4 answers, 3k views)
I'm normally developing web apps, and a surprisingly large amount of my work time is spent doing "Ctrl + Alt + P", sorting by Process Name, and picking w3wp.exe to attach my debugger. To make matters ...

How can I turn DTE.ActiveWindow.Selection into the closest CodeElement in a VS2008 macro? (0 votes, 1 answer, 900 views)
I'm working with the Visual Studio 2008 object model as seen here: VS2008 Automation Object Model Chart. I want to act on a CodeElement in a VS2008 macro, one that is chosen by the user's text ...
I try to load a remote image from a server and thanks to a lot of code examples on stackoverflow I have a solution which works in 2 out of 3 images. I don't really know what the problem is with the third picture and sometimes when letting the code run in the debugger the picture is loading. Also if I load the problem picture first the other two pictures are sometimes not loaded.
Here is the code:
public static Drawable getPictureFromURL(Context ctx, String url, final int REQUIRED_SIZE) throws NullPointerException {
//Decode image size
BitmapFactory.Options o = new BitmapFactory.Options();
int scale = 1;
if (o.outWidth > REQUIRED_SIZE) {
scale = (int) Math.pow(2, (int) Math.round(Math.log(REQUIRED_SIZE / (double) Math.max(o.outHeight, o.outWidth)) / Math.log(0.5)));
}
Log.i(Prototype.TAG, "scale: "+scale);
//Decode with inSampleSize
BitmapFactory.Options o2 = new BitmapFactory.Options();
o2.inSampleSize = scale;
Bitmap bmp;
try {
bmp = BitmapFactory.decodeStream((InputStream) Tools.fetch(url), null, o2);
if(bmp!=null)
return new BitmapDrawable(ctx.getResources(), bmp);
else
return null;
} catch (Exception e) {
Log.e(Prototype.TAG, "Exception while decoding stream", e);
return null;
}
}
During debugging I found out that o.outWidth is -1 which indicates an error, but no Exception is thrown, so I can't really tell what went wrong. The InputStream always returned a valid value, and I know that the picture exists on the server.
Best wishes, Daniel
1 Answer (accepted, 15 votes)
I found the answer here and updated the fetch method to:
private static InputStream fetch(String address) throws MalformedURLException,IOException {
HttpGet httpRequest = new HttpGet(URI.create(address) );
HttpClient httpclient = new DefaultHttpClient();
HttpResponse response = (HttpResponse) httpclient.execute(httpRequest);
HttpEntity entity = response.getEntity();
BufferedHttpEntity bufHttpEntity = new BufferedHttpEntity(entity);
InputStream instream = bufHttpEntity.getContent();
return instream;
}
+1, but it seems now that the images are being downloaded in sequence instead of in parallel (I am dl-ing each image on a separate AsyncTask). At least it works.. – kellogs Apr 15 '11 at 15:33
Slovin's Formula Sampling Techniques
When it is not possible to study an entire population (such as the population of the United States), a smaller sample is taken using a random sampling technique. Slovin's formula allows a researcher to sample the population with the desired degree of accuracy. It gives the researcher an idea of how large the sample size needs to be to ensure reasonable accuracy of the results.
TL;DR (Too Long; Didn't Read)
Slovin's formula gives the sample size (n) from the known population size (N) and the acceptable error value (e). Plug the values of N and e into the formula n = N ÷ (1 + Ne²). The resulting value of n is the sample size to use.
When to Use Slovin's Formula
If a sample is taken from a population, a formula must be used to take confidence levels and margins of error into account. When taking statistical samples, sometimes a lot is known about a population, sometimes a little, and sometimes nothing at all. For example, a population may be normally distributed (e.g., heights, weights or IQs), it may have a bimodal distribution (as often happens with class grades in mathematics), or there may be no information about how the population will behave (such as polling college students to get their opinions on the quality of student life). Use Slovin's formula when nothing is known about the behavior of a population.
How to Use Slovin's Formula
Slovin's formula is written as:
n = N ÷ (1 + Ne²)
where n = number of samples, N = total population and e = error tolerance.
To use the formula, first determine the error tolerance. For example, a 95 percent confidence level (giving a margin of error of 0.05) may be accurate enough, or a tighter accuracy of a 98 percent confidence level (a margin of error of 0.02) may be required. Plug the population size and the required margin of error into the formula. The result equals the number of samples required to evaluate the population.
For example, suppose a group of 1,000 city government employees must be surveyed to find out which tools are best suited to their jobs. For this survey a margin of error of 0.05 is considered sufficiently accurate. Using Slovin's formula, the required sample size is n = N ÷ (1 + Ne²):
n = 1,000 ÷ (1 + 1,000 × 0.05 × 0.05) = 286
Therefore, the survey should include 286 employees.
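As a quick illustration (my addition, not part of the original article), the same calculation can be scripted; the numbers are the ones from the example above:

import math

def slovin_sample_size(population, error):
    # Slovin's formula: n = N / (1 + N * e^2), rounded up to a whole respondent.
    return math.ceil(population / (1 + population * error ** 2))

print(slovin_sample_size(1000, 0.05))  # 286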
Limitations of Slovin's Formula
Slovin's formula calculates the number of samples required when the population is too large to sample every member directly. It works for simple random sampling. If the population to be sampled has obvious subgroups, Slovin's formula could be applied to each individual group instead of the whole group. Consider the example problem. If all 1,000 employees work in offices, the survey results would probably reflect the needs of the whole group. If instead 700 of the employees work in offices while the other 300 do maintenance work, their needs will differ. In that case a single survey might not provide the required data, whereas sampling each group would give more accurate results.
© Ciencia http://es.scienceaq.com
Language: en
Version: 2003-04-05 (ubuntu - 07/07/09)
Section: 1 (User commands)
NAME
mysql_tableinfo - creates and populates information tables with the output of SHOW DATABASES, SHOW TABLES (or SHOW TABLE STATUS), SHOW COLUMNS and SHOW INDEX.
This is version 1.1.
SYNOPSIS
mysql_tableinfo [OPTIONS] database_to_write [database_like_wild] [table_like_wild]
Do not backquote (``) database_to_write,
and do not quote ('') database_like_wild or table_like_wild
Examples:
mysql_tableinfo info
mysql_tableinfo info this_db
mysql_tableinfo info %a% b%
mysql_tableinfo info --clear-only
mysql_tableinfo info --col --idx --table-status
DESCRIPTION
mysql_tableinfo asks a MySQL server information about its databases, tables, table columns and index, and stores this in tables called `db`, `tbl` (or `tbl_status`), `col`, `idx` (with an optional prefix specified with --prefix). After that, you can query these information tables, for example to build your admin scripts with SQL queries, like
SELECT CONCAT('CHECK TABLE ', `database`, '.', `table`, ' EXTENDED;') FROM info.tbl WHERE ... ;
as people usually do with some other RDBMS (note: to increase the speed of your queries on the info tables, you may add some index on them).
The database_like_wild and table_like_wild arguments instruct the program to gather information only about databases and tables whose names match these patterns. If the info tables already exist, their rows matching the patterns are simply deleted and replaced by the new ones. That is, old rows not matching the patterns are not touched. If the database_like_wild and table_like_wild arguments are not specified on the command-line they default to "%".
The program :
- does CREATE DATABASE IF NOT EXISTS database_to_write where database_to_write is the database name specified on the command-line.
- does CREATE TABLE IF NOT EXISTS database_to_write.`db`
- fills database_to_write.`db` with the output of SHOW DATABASES LIKE database_like_wild
- does CREATE TABLE IF NOT EXISTS database_to_write.`tbl` (respectively database_to_write.`tbl_status` if the --tbl-status option is on)
- for every found database, fills database_to_write.`tbl` (respectively database_to_write.`tbl_status`) with the output of SHOW TABLES FROM found_db LIKE table_like_wild (respectively SHOW TABLE STATUS FROM found_db LIKE table_like_wild)
- if the --col option is on,
* does CREATE TABLE IF NOT EXISTS database_to_write.`col`
* for every found table,
fills database_to_write.`col` with the output of
SHOW COLUMNS FROM found_tbl FROM found_db
- if the --idx option is on,
* does CREATE TABLE IF NOT EXISTS database_to_write.`idx`
* for every found table,
fills database_to_write.`idx` with the output of
SHOW INDEX FROM found_tbl FROM found_db
Some options may modify this general scheme (see below).
As mentioned, the contents of the info tables are the output of SHOW commands. In fact the contents are slightly more complete :
- the `tbl` (or `tbl_status`) info table
has an extra column which contains the database name,
- the `col` info table
has an extra column which contains the table name,
and an extra column which contains, for each described column,
the number of this column in the table owning it (this extra column
is called `Seq_in_table`). `Seq_in_table` makes it possible for you
to retrieve your columns in sorted order, when you are querying
the `col` table (see the example query after this list).
- the `index` info table
has an extra column which contains the database name.
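For instance (an illustrative query, not from the original man page; the exact column names `table`, `Field` and `Type` are assumptions based on the SHOW COLUMNS output described above), ordering by `Seq_in_table` reproduces each table's column order:

SELECT `table`, `Field`, `Type`
FROM info.col
ORDER BY `table`, `Seq_in_table`;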
Caution: info tables contain certain columns (e.g. Database, Table, Null...) whose names, as they are MySQL reserved words, need to be backquoted (`...`) when used in SQL statements.
Caution: as information fetching and info tables filling happen at the same time, info tables may contain inaccurate information about themselves.
OPTIONS
--clear
Does DROP TABLE on the info tables (only those that the program is going to fill, for example if you do not use --col it won't drop the `col` table) and processes normally. Does not drop database_to_write.
--clear-only
Same as --clear but exits after the DROPs.
--col
Adds columns information (into table `col`).
--idx
Adds index information (into table `idx`).
--prefix prefix
The info tables are named from the concatenation of prefix and, respectively, db, tbl (or tbl_status), col, idx. Do not quote ('') or backquote (``) prefix.
-q, --quiet
Does not warn you about what the script is going to do (DROP TABLE etc) and does not ask for a confirmation before starting.
--tbl-status
Instead of using SHOW TABLES, uses SHOW TABLE STATUS (much more complete information, but slower).
--help
Display helpscreen and exit
-u, --user=#
user for database login if not current user. Give a user who has sufficient privileges (CREATE, ...).
-p, --password=# (INSECURE)
password to use when connecting to server. WARNING: Providing a password on command line is insecure as it is visible through /proc to anyone for a short time.
-h, --host=#
host to connect to
-P, --port=#
port to use when connecting to server
-S, --socket=#
UNIX domain socket to use when connecting to server
WARRANTY
This software is free and comes without warranty of any kind. You should never trust backup software without studying the code yourself. Study the code inside this script and only rely on it if you believe that it does the right thing for you.
Patches adding bug fixes, documentation and new features are welcome.
TO DO
Use extended inserts to be faster (for servers with many databases or tables). But to do that, must care about net-buffer-length.
AUTHOR
2002-06-18 Guilhem Bichot ([email protected])
And all the authors of mysqlhotcopy, which served as a model for the structure of the program.
My Approach to Testing
Someone asked me whether I have a method for generating testing ideas. My answer: "I follow an exploratory approach to simultaneously learn, generate and execute test ideas." This might involve exploring the product a little to learn about how it works; that learning feeds into questions and ideas, and then I might explore some of those ideas further.
I might use sources like the Design Sketch as an oracle (source of truth) for inspiration and ideas, or my mobile testing experience (heuristics/rules of thumb like "accessibility always sucks and it's easy to find bugs there" or "screen rotations often cause bugs"), or things like boundaries to explore. It depends on the context of what I'm testing as to what ideas get generated.
Exploratory Testing
Exploratory testing is a fairly efficient approach to finding bugs. It’s not as if we know where the bugs are in the product beforehand. However, there are always gaps in anyone’s testing approach. It would take an almost infinite amount of time to test all of the possible permutations, so testers also use their risk radar to help home in on high-risk areas.
Maaret Pyhäjärvi has an interesting keynote from SeleniumConf on the intersection of exploratory testing and automation:
She also has a Medium article explaining Exploratory Testing. Here are the sketch notes for her talk:
James Bach has a 10-page article on exploratory testing, but it can be a little dry to read all in one go. I’ll also often reference Elisabeth Hendrickson’s book, “Explore It! Reduce Risk and Increase Confidence with Exploratory Testing”. Elisabeth has also put together a cheat sheet of testing heuristics, but I feel that is more suited to a web frontend testing context.
Mindmaps
I often use mind maps to help generate ideas. For example, when I was at Tyro I created a mind map for generating mobile testing ideas; the idea was that I’d bring it along to planning meetings and it would prompt me to ask questions like, “what about performance testing?”
Everyone does testing on some level; some people are just more practiced at the skills involved than others. For me, testing is my craft.
|
__label__pos
| 0.579329 |
Accenture Oracle Apps Technical Interview Questions
Question | Answers | Views
What is INBOUND and OUTBOUND? (Different types of interfaces) | 17 | 106849
What are profile options? | 4 | 9049
What is multi-org? | 6 | 9000
What are the User PARAMETERS in Reports? | 6 | 10835
What is a Value Set? | 2 | 17496
What is the P2P cycle? | 27 | 74887
If there is any issue, to whom do you report? (This is an important question in every interview.) | 3 | 6663
How many phases of implementation are there, and what are they? | 2 | 5685
Explain the multi-organization structure. | 1 | 6738
What is a responsibility and how do you attach it? | 5 | 13490
How do you create a user and attach it to a responsibility? | 2 | 5157
What is a request group? | 4 | 18954
What is a menu? | 2 | 5555
Cycle from requisition to receiving transaction. | 1 | 4484
How many types of Purchase Order are there, and what are they? | 2 | 5388
Un-Answered Questions
Can we use QTP/UFT for mobile automation testing? (20)
How do you access command-line arguments passed to a Go program? (1)
What are age pyramids? (7)
How do you write a left outer join with a WHERE clause in Oracle? (20)
What are a codec and a DSP? (1)
What is ARP and how does it work? (12)
What is UML and what are its advantages? (39)
Can anyone send the most commonly asked interview questions in the Oil & Gas industry? (1167)
How do you create and consume a web service? (1820)
Why do you want to work at Zensar? (25)
What is the process of creating a quotation using BAPIs? (1056)
Give the calculation steps to design an electromagnet to work on a 230 V, 50 Hz AC supply. Also give the force created by that electromagnet when it is energized. (1229)
How does the interaction between DB2, SQL, and QMF take place? (581)
What is a key-value pair in Hadoop MapReduce? (35)
How does a capacitor work in a DC supply? (414)
|
__label__pos
| 1 |
Enhance your MP3s all you want with iDFX Audio Enhancer. They still suck.
Tuesday, October 20th, 2009
We know the music industry is stuck in a tailspin. We’ve beaten that issue into the ground. But more alarming than the death of record labels is the continuing degradation of musical mediums. We’ve left behind wax cylinders, records, and magnetic tape to embrace digital conversion, Compact Discs, and MP3s. Quality exchanged for convenience. If this trend continues, we’re going to get to the point where we can upload an entire compilation of all the music that has been made, that is being made, and will be made, directly into our brain stems, Matrix-style. But it’ll sound like crap.
MP3s and other “lossy” audio codecs work by getting rid of data that is considered unimportant. Basic audio theory: higher frequency means shorter waveforms, so higher pitches pack in many more waves, and therefore more data, than lower pitches. Human hearing doesn’t go higher than about 20 kHz, and even then, hardly any instruments reach that range. So the creators of MPEG-1 Layer 3 encoding had a stroke of genius: if a range of frequencies adds tons of data but people can’t even hear it, why keep it?
iDFX Audio Enhancer is an add-on to iTunes that claims to “re-encode your current MP3 and AAC files using a patent-pending method that repairs the damage and lost harmonics that occurred during the original encoding process”. That sounds like doublespeak to me for a $40 EQ and extrapolative guesswork, and the demo of iDFX sounds like just that. If you want good-sounding audio, stop buying MP3s. And if you want smaller file sizes, start compressing with FLAC or any number of other lossless codecs.
|
__label__pos
| 0.760465 |
I'm not sure if this is possible at all, but let's say I want to serve different content from the same URL depending on cookies. How can I get Squid to cache based on a combination of a cookie and the URL?
Example: I want Squid to cache per user account; URLs alone are not good enough, but using a user cookie plus the URL would work.
|
__label__pos
| 0.977141 |
R/textTools.R
Defines functions ctxpand cleantext textcounter
Documented in cleantext ctxpand textcounter
#' Text Counter
#'
#' @description Counts total prevalence of a set of items in each of a set of texts.
#' @param counted character vector of items to search for in the texts.
#' @param texts character vector of to-be-searched text.
#' @param words logical. Default FALSE. Does \code{counted} contain words, or sequences of characters?
#' @param fixed logical. Default TRUE. Use literal characters instead of regular expressions?
#' @param num_mc_cores integer Number of cores for parallelization. Default is parallel::detectCores().
#' @return numeric vector as long as \code{texts} indicating total frequencies of \code{counted} items.
#' @keywords internal
#'
textcounter<-function (counted, texts, words=FALSE, fixed = TRUE, num_mc_cores = parallel::detectCores()) {
if(words){
counts<-unlist(parallel::mclapply(texts,function(x) sum(unlist(x)%in%counted), mc.cores=num_mc_cores))
}else {
counts <- rep(0, length(texts))
for (x in counted) {
counts <- counts + unlist(sapply(gregexpr(x, texts, fixed = fixed),
function(z) ifelse(z[1] == (-1), 0, length(z))))
}
}
return(counts)
}
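As a quick illustration of how textcounter behaves, here is a minimal usage sketch (not part of the package source; the example texts and search terms are made up):
# count literal substring matches per text (fixed = TRUE is the default)
texts <- c("thanks so much for your help", "please send the report")
textcounter(c("please", "thanks"), texts, num_mc_cores = 1)
# returns c(1, 1): one match in each text
# with words = TRUE, each element of texts should already be a vector of tokens
textcounter(c("please", "thanks"), strsplit(texts, " "), words = TRUE, num_mc_cores = 1)
# again returns c(1, 1), this time counting whole-word matches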
#' Clean Text
#' @description Basic text cleaning
#' @param text character text to be cleaned
#' @param language string. Default "english".
#' @param stop.words logical. Default TRUE
#' @return a character vector
#' @keywords internal
cleantext<-function (text, language = "english", stop.words = TRUE) {
text <- tolower(text)
if (language == "english") {
text <- ctxpand(text)
}
text <- gsub("[[:punct:]]", " ", text)
text <- gsub("[[:cntrl:]]", " ", text)
if (length(stop.words) > 1) {
text <- tm::removeWords(text, stop.words)
}
else if (stop.words) {
text <- tm::removeWords(text, tm::stopwords(language))
}
text <- tm::removeNumbers(text)
text <- tm::stripWhitespace(text)
return(as.character(text))
}
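A minimal sketch of what cleantext does to a single string (this assumes the tm package is installed, since the function calls tm::removeWords, tm::stopwords, tm::removeNumbers and tm::stripWhitespace):
cleantext("I'm testing 2 texts, e.g. this one!")
# contractions and abbreviations are expanded first, then punctuation,
# stop words and numbers are stripped, giving roughly "testing texts eg one"
# (the exact output depends on tm's English stop word list)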
#' Contraction Expander
#' @description Expands Contractions
#' @param text a character vector of texts.
#' @return a character vector
#' @keywords internal
ctxpand<-function(text){
text<-sapply(text, function(x) gsub("let's", "let us", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("i'm", "i am", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("won't", "will not", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("can't", "cannot", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("shan't", "shall not", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("'d", " would", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("'ve", " have", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("'s", " is", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("'ll", " will", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("'re", " are", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("n't", " not", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("u.s.a.", "usa", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("u.s.", "usa", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("e.g.", "eg", x, fixed=TRUE))
text<-sapply(text, function(x) gsub("i.e.", "ie", x, fixed=TRUE))
return(text)
}
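ctxpand on its own just applies the fixed-string substitutions above, for example (a minimal sketch):
ctxpand(c("let's go", "they won't stay", "she'll call"))
# returns "let us go", "they will not stay", "she will call"
# (as a named character vector, since sapply keeps the inputs as names)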
|
__label__pos
| 0.999478 |
Erd Multiplicity
For example, the subtleties of a natural language such as French are such that it is a prohibitively complex task to formally document its syntax, semantics and pronunciation. base type) defines the state and behavior common for a given type and lets the subclasses (a. ManyToOne annotation on the corresponding persistent property or field. UMLet is a free, open-source UML tool with a simple user interface: draw UML diagrams fast, build sequence and activity diagrams from plain text, export diagrams to eps, pdf, jpg, svg, and clipboard, share diagrams using Eclipse, and create new, custom UML elements. This standard UML diagram symbol legend shows the symbols and notations used in UML diagram documentation. An entity is a real-world item or concept that exists on its own. This is an IT assignment that deals with entities, business rules, ERD models. How to use demoralize in a sentence. Two other pieces of information are also commonly relayed in an ERD: cardinality (i. In such cases, all foreign keys will also need to include all the columns in the composite key. Multiplicity in Entity Relationships. It covers symbols for all UML diagram types, including UML class diagram, UML collaboration d. and Zeitouni, O. 4 Entity–relationship cardinalities⌘ 1. maximizing multiplicity of the total constant energy system. The following CSDL defines the PublishedBy association shown in the diagram above:. n / D 1 N X n N X p a jn a 1 1 D 1 N X p a N a. 1/ implies that 5 1 N XN n D 1. Draw Fully-Attributed ERD If you introduced new entities and attributes in step 8, you need to redraw the entity relationship diagram. See [23] Erd}os and R enyi [16] showed that, for almost all graphs, this is the \shortest. x standard is ominously silent on this…), most of the relations are zero or more (* in UML parlance) except where explicitly stated differently. Proof: If (0;f) is an eigenpair then 0 = 1 2 P i;j N w i;j(f i f j) 2. Constraints for describing limitations that can be assigned to roles in a NIAM view. Multiplicity element defines some collection of elements, and includes both multiplicity as well as specification of order and uniqueness of the collection elements. The ER diagrams are used for designing database schemas. 3) one can deduce that (0. 5 Different diagramming conventions⌘ 1. There is no official way in an ERD to indicate that data is spread out over several data bases or files system -- largely because it is a tool for integrating data onto one system. In this tutorial, I will show you how to create an ER diagram with Microsoft SQL Server Management Studio (SSMS) 16. (Entity-Relationship diagram) Dept. UML design and business analysis tool for modeling, documenting, reverse engineering, building and maintaining object-oriented software systems, fast and intuitive. Self-referencing table in entity framework means in order to store some hierarchical data you can say that a self referencing table is a table where the primary key (PK) of a table is also known a foreign key. An Entity Relationship Diagram (ERD) is a visual representation of different entities within a system and how they relate to each other. Initially, when defining requirements, the approach to modeling using an ERD or UML is very similar Class Diagram - a diagram consisting of classes (i. Looking above there are two diagrams the right hand one uses ERD symbols for the various relations whereas the equivalent UML diagram makes use of UML equivalent symbols for the associations. The Following Information May. 
Vatche Ishakian, D ora Erd}os, Evimaria Terzi, Azer Bestavros, A Framework for the. 0 that offers bug fixes and has improved its compatibility with the latest operating systems, i. Database Mcq question are important for technical exam and interview. I then generated the ERD from this model, and instead of it copying the multiplicities, it made all the associations many to many – any suggestions on how I could make that work? This is a question we’ve received from our customer:. ERD s are not UML diagrams, and an association is called a relationship, multiplicity is called cardinality, and generalization. Code First can't determine which class is dependent and when you execute to build the model, an exception is thrown. When designing your database, an entity-relationship diagram (ER or ERD) is an excellent way to visually lay out your plan. physical database by means of its Entity Relationship Diagram (ERD). Culture & Society Culture and society are also remarkably complex. Removing Redundant Multiplicity Constraints In Uml Class with regard to Er Diagram Multiplicity By admin On October 18, 2019 Er Diagram Multiplicity – This is one of the types of ER Diagram. For example, one company will have one or more employees, but each employee works for one company only. We named our instance of the Open edX platform Lagunita, after the name of a cherished lake bed on the Stanford campus, a favorite gathering place of students. Erd}os, who pioneered the study of these multiplicity constants in [E62] proved, by a simple application of Ramsey’s Theorem, that c t r(t;t) t 1; where r(t;t) is just the standard diagonal Ramsey number. benefit analysis report,” “draw ERD,” “prepare class diagram”, “draw activity diagram for Accept Orders Process”, “code client/server web-enabled order system”, etc. Misalnya, dalam kelas pegawai, kita mungkin mempunyai beberapa instant, satu untuk Ani, satu untuk Ina, satu untuk Nana dan seterusnya. Usage models focuses on solution from the users prospectives. What’s referential integrity? Referential integrity is a relational database concept in which multiple tables share a relationship based on the data stored in the tables, and that relationship must remain consistent. InvalidOperationException: 'Unable to determine the principal end of an association between the types 'EFDemo. Represent the multiplicity for each relationship in the ER diagrams created in (b). First Normal form is the first step of normalization. what are steps to make entity relationship diagram ? it,s very difficult to me create entity relationship diagraṃ erd and i know that there are steps to make it. edu [email protected] It has a name, which is a. Goodman’s theorem implies Erd}os’s conjecture is true for v = 3. The cardinality of a relationship is the number of instances of entity B that can be associated with entity A. ERD : Entity Relation Diagram. x standard is ominously silent on this…), most of the relations are zero or more (* in UML parlance) except where explicitly stated differently. Le us write ordp(n) for the multiplicity of p in n; then n = Y. Note, that [UML 2. Bluehost - Top rated web hosting provider - Free 1 click installs For blogs, shopping carts, and more. The diagonal F-threshold (i. •A pseudograph may include loops, – Erd ős number. There are serious reasons to believe that the multiplicity M n of the largest part of a random partition of n behaves asymptotically in a much simpler way than many other partition statistics. 
data dictionary: A data dictionary is a collection of descriptions of the data objects or items in a data model for the benefit of programmers and others who need to refer to them. The fundamental difference between generalization and specialization is that Generalization is a bottom-up approach. Initially, when defining requirements, the approach to modeling using an ERD or UML is very similar Class Diagram - a diagram consisting of classes (i. Multiplicity. It is intuitively true for t= 2, and it is also true for t= 3 follows from a result of Goodman’s work [15]. so please help me to know it. Tuliskan masing masing atribut. " Physics Letters B. Removing Redundant Multiplicity Constraints In Uml Class with regard to Er Diagram Multiplicity By admin On October 18, 2019 Er Diagram Multiplicity - This is one of the types of ER Diagram. Initially, when defining requirements, the approach to modeling using an ERD or UML is very similar Class Diagram - a diagram consisting of classes (i. It is one of the most misunderstood relationships which describes the number of instances allowed for a particular element by providing an inclusive non-negative integers interval. Hubungan Inheritance. Phones) are allowed I represented by a. One of the motivations behind the conjecture is the fact that the conjecture is true for any t while random graph is assumed. Synonyms for various include diverse, diversified, different, differing, varied, varying, dissimilar, disparate, assorted and distinctive. Algebraic multiplicity. N A and N B are fixed), W A is a function only of the variable n A and W B is a function only of the variable n B, the numbers of particles in the ε=1 level in systems A and B. Entity Relationship Diagrams (ERD) have been widely used for modeling relational databases. Vatche Ishakian, D ora Erd}os, Evimaria Terzi, Azer Bestavros, A Framework for the. Where the model does not follow convention, the Fluent API can be used to configure the correct relationship between entities. According to the above, the book object consists of page object or pages. In general, you can have a whole hierarchy of special types. Chen pada tahun 1976. Cross-species transmission of zoonotic coronaviruses (CoVs) can result in pandemic disease outbreaks. Some subclasses of multiplicity element are structural feature , operation , parameter , pin. Object Oriented Data Modeling (Database) mcq questions and answers with easy and logical explanations for various competitive examination, interview and entrance test. D students and some of my. An association can be named. ERD TypeSet: Used for ERD imports: Specifies the TypeSet, i. edu [email protected] Get a free domain name, real NON-outsourced 24/7 support, and superior speed. It is not a detailed survey and it doesn't hope to answer any questions. Tuliskan masing masing atribut. Entity Relationship Diagram (ERD) An Entity Relationship Diagram (ERD) is a data modeling technique that creates a graphical representation of the entities, and the relationships between entities, within an information system. One to One Relationship using Data Annotations. However, extending UML by introducing profile for modeling relational data can replace ERD, which is a very tempting approach since developers then could reuse their UML tools for database modeling. Note, that [UML 2. 
equilibria { including multiplicity of the type discussed above { while avoiding the vast complexity and additional equilibrium multiplicity that arises purely from the ability to condition on ne details of network structure. 1) A seminal result in non-Hermitian random matrix theory is the circular law, which describes the asymptotic global distribution of the spectrum for matrices with i. There is a minimum cardinality and a maximum cardinality for each relationship, with an unspecified maximum cardinality being shown as N. Usage models. 2, 566{625. In this note, we merge these two approaches and construct better graphs for both problems. The Guth-Katz result on the Erd}os distance problem 110 x3. They require the cooperation of many. Nama class, asosiasi, metode. 6 Draw Key-Based ERD Now add them (the primary key attributes) to your ERD. To obtain conclusive results, real-time systems, which exhibit a more complex dynamic behavior than non-real-time systems, were selected as the focus of the experiment. It features a simple yet powerful editor that allows you to create Class Diagram quickly and easily. 19 Identifying Important Associations. Draw an entity-relationship diagram showing the items you identified. In an ERD the special types are shown with a 0. The eigenvalues of J +qI are (q+1)2 with multiplicity 1, and q with multiplicity q2+q. The term used in ERD modeling to describe things about which the system needs to store information. Erd‰os and RØnyi (1959), who proposed a random graph model where links are formed (2010) and Mele (2017) circumvent the multiplicity issue by consider-. November/December 2011. This extends investigations of Erdos and Nicolas, who treated the case F(T)=T(T +1). Multiplicity is the same as what concept for an ERD? a) Relationship b) Attribute c) Entity d) Cardinality. The eigenvalues of J +qI are (q+1)2 with multiplicity 1, and q with multiplicity q2+q. Neonates, both mouse and human, are prone to mount Th2-biased responses, which for mice was shown to be due to epigenetic. multiplicity. org: http://www. It is helpful for communicating ideas to a wide range. Tabel Multiplicity. D ora Erd}os, Vatche Ishakian, Andrei Lapets, Evimaria Terzi, Azer Bestavros, The Filter Place-ment Problem and its Application to Minimizing Information Multiplicity, International Conference on Very Large DataBases (VLDB), Istanbul, Turkey, August 2012 C2. After Cohen [6], we have (1. Cornelius. But first off, let’s get the answer to the question why we should use naming conventions. Question Is every number a jump for multigraphs of bounded multiplicity? Again Erd}os conjectured that the answer was yes. ER-model based diagrams (ERD) consist of these main components: relation, entity and attributes. org/v/BlxL/. Download link: https://www. If you use a tool like Systems Architect, redrawing the diagram is relatively easy. (ISBN: 9780441788385) from Amazon's Book Store. (b)De s i g n a r e l a ti o n a l s c h e ma fo r th e d a ta b a s e. ii ii HALAMAN PERSETUJUAN Nama : Ria Achmalia NIM : 2008-53-112 Bidang Studi : Sistem Informasi S-1 Judul Skripsi : Sistem Informasi Pemesanan Tiket Pesawat. This both generalizes and provides. the multiplicity and the mass (size of the system) conservation reads P s sn s = S. In a previous post we learned a whole bunch about the most common database relationship that exists, the one-to-many relationship. 
Addressed issues include estimation of time-frequency energy density (matching pursuit and spectrogram), choice of resampling statistics to test the hypothesis of change in one small region (resel), and correction for multiplicity (false discovery rate). SQL Normalization Examples : In this section i would like to give you example of this normalization technique. Cardinality can be of different types: One to one – When each entity in each entity set can take part only once in the relationship, the cardinality is one to one. Erd‰os and RØnyi (1959), who proposed a random graph model where links are formed (2010) and Mele (2017) circumvent the multiplicity issue by consider-. An entity is a class of similar objects in the model, each entity is depicted in the form of rectangle and has the name expressed by a noun. base type) defines the state and behavior common for a given type and lets the subclasses (a. How to use multiplicity in a sentence. This was an. What is cardinality, Types With Example IN DBMS: In the context of databases, cardinality refers to the distinctiveness of information values contained in a column. It is one of the most misunderstood relationships which describes the number of instances allowed for a particular element by providing an inclusive non-negative integers interval. As ArchiMate 2. It represents one-to-many or many-to-one or many-to-many or many-to-one relationships. It is important to describe not just the multiplicity but also the range of possible values of the multiplicity (the minimum and maximum multiplicity). Nama class, package, asosiasi B. So what is EER? In some cases, you may want to opt for an enhanced entity-relationship diagram (EER or EERD), which includes a few more elements than ER diagrams do. This both generalizes and provides. NET Entity Framework uses a domain-specific language (DSL) called conceptual schema definition language to define conceptual models. sparxsystems. Multiplicity E. Once a database is normalized, relationships between the data in multiple tables must be established. Each end of an association is a role having ; Multiplicity. When show? If memory of relationship needs to be preserved for some duration. Psychomotor retardation is a central feature of depression which includes motor and cognitive impairments. Thus, for this particular partition, the probability of choosing a part that has multiplicity three is 1/3. Multiplicity. 3 Entity–relationship elements⌘ 1. Erd}os and H. A requirement to participate; Denoted with a bold line Multiplicity = Cardinality + Participation. ERD ini dirancang untuk menggambarkan persepsi dari pemakai dan berisi obyek-obyek dasar yang disebut entity dan hubungan antar entity-entity tersebut yang disebut relationship. au The UML Data Model Profile. This can be zero or more (0. By Geoffrey Sparks, www. Removing Redundant Multiplicity Constraints In Uml Class with regard to Er Diagram Multiplicity By admin On October 18, 2019 Er Diagram Multiplicity - This is one of the types of ER Diagram. An Entity Relationship Diagram page in the Diagram Toolbox Entity Relationship element and relationship entries in the 'Toolbox Shortcut' menu and Quick Linker Enterprise Architect also provides transformation templates to transform Entity Relationship diagrams into Data Modeling diagrams, and vice versa. 
ERD enhanced respiratory disease FI-RSV formalin inactivated RSV GMO genetically modified organism ICP immune correlate of protection LMIC low- and middle-income countries LRTI lower respiratory tract infection(s) MCB master cell bank MOI multiplicity of infection MPL 3-O-desacyl-4′-monophosphoryl lipid A. The multiplicity of the Publisher end is one (1) and the multiplicity of the Book end is many (*). Revise your diagram to eliminate many-to-many relationships, and tag all foreign keys. Note, that [UML 2. When you do so, try to rearrange it so no lines cross by putting the entities with the most relationships in the middle. so please help me to know it. Object Oriented Data Modeling (Database) mcq questions and answers with easy and logical explanations for various competitive examination, interview and entrance test. au Introduction. First Normal form is the first step of normalization. An ERD shows a single database -- or a part of a larger database. Describe the activities in the different phases of the object-oriented development life cycle. The ER diagrams are used for designing database schemas. emphasises the multiplicity of dimensions of wellbeing; it also shows the need to understand processes that result in deprivation and marginalisation. Erd cardinality 1. data dictionary: A data dictionary is a collection of descriptions of the data objects or items in a data model for the benefit of programmers and others who need to refer to them. The Entity-Relationship Diagram. ERD : Entity Relation Diagram. An entity-relationship diagram (also known as ERD) depicts the data model of a system (or its part) using entities that represent data types and relationships that define the dependencies between entities. au The UML Data Model Profile. (ISBN: 9780441788385) from Amazon's Book Store. A definition of complexity with examples. ∗Computer Science and Artificial Intelligence Laboratory, Massachusetts Institute of Technology, Cambridge, Massachusetts, USA, [email protected] Entity-relationship diagrams (ERD) are essential to modeling anything from simple to complex databases, but the shapes and notations used can be very confusing. One to One Relationship using Data Annotations. In an ERD the special types are shown with a 0. Once a database is normalized, relationships between the data in multiple tables must be established. Entities in a DBMS ERD Contain Meta-data 3. Integrating ERD and UML Concepts When Teaching Data Modeling Traci A. Draw Key. One-to-One. Question Is every number a jump for multigraphs of bounded multiplicity? Again Erd}os conjectured that the answer was yes. counting a zero with multiplicity mρ exactly mρ times in this sum. Graph data closes the gap between the way humans and computers view the world. 1) A seminal result in non-Hermitian random matrix theory is the circular law, which describes the asymptotic global distribution of the spectrum for matrices with i. A minimum of one and a maximum of one instance of this class are associated with an instance of the other related class (indicates a mandatory class). ERD ini dirancang untuk menggambarkan persepsi dari pemakai dan berisi obyek-obyek dasar yang disebut entity dan hubungan antar entity-entity tersebut yang disebut relationship. Place multiplicity notations near the ends of an association. Usage models focuses on solution from the users prospectives. These symbols indicate the number of instances of one class linked to one instance of the other class. 
feature 78 rosary hall in cleveland fights addiction betsy taylor 86 remembering fr. After folding and assembly of nascent secretory proteins in the endoplasmic reticulum (ER), the coat protein complex II (COPII) selects folded cargo for export in membrane-bound vesicles. In this model, it is the di erence between the number of bonds of the parent cluster and of the sub-clusters in their ground states. the datatype naming convention, used to designate the datatypes of ERD Domains. For example, one company will have one or more employees, but each employee works for one company only. Sebelum kita membuat ERD ada baiknya kita berkenalan dulu dengan segala sesuatu yang berhubungan dengan ERD. The theory of grain boundary (the interface between crystallites, GB) structure has a long history1 and the concept of GBs undergoing phase transformations was proposed 50 years ago2,3. An association class, which is essentially a class attached to an association, is used to model an association as a UML class. name (what relationship the objects have) 3. READ MORE on opentextbc. In an entity relationship diagram (ERD), an entity type is represented by a name in a box. 1), or 1 to a specific number (1. In the example just mentioned, the relationship to Order from the perspective of LineItem is many-to-one. We address a general problem to find explicit formulae, for such intertwining operators in the setting of multiplicity-free branching laws for reductive symmetric pairs. This is an IT assignment that deals with entities, business rules, ERD models. 1 multiplicity 1. The concept of cover was rst introduced by Erd os in the 1930’s (cf. We named our instance of the Open edX platform Lagunita, after the name of a cherished lake bed on the Stanford campus, a favorite gathering place of students. the most important is u all can draw all entities and their attribute in erd yado not forget the name of relationship and multiplicityand to be safe, arrange according to the entity you found in sentence in the case study. (Entity-Relationship diagram) Dept. The Following Information May. These entities can have attributes that define its properties. Multiplicity memberikan gambaran ebuah instant yang akan ditampung dalam kelas. VP-UML CE supports not only UML but also ERD and SysML. 0, ERD, ORMD, SysML. Goodman’s theorem implies Erd}os’s conjecture is true for v = 3. Software Ideas Modeler is a data modeling ERD tool that allows you to draw and model standard ERD diagrams or ERD diagram by Chen notation. The side of the relationship with a multiplicity of many is usually depicted by an asterisk (*) or an infinity symbol (∞). multiplicity problem through a comparison between the two methods. Entity Framework is an object-relational mapper that enables. Synonyms for various include diverse, diversified, different, differing, varied, varying, dissimilar, disparate, assorted and distinctive. Indeed, there are countless ways in which data can be converted into better medical diagnostic tools, more effective therapeutics, and improved productivity for clinicians. so please help me to know it. When I checked the models done by our designer, I found something like this: I am not a technical person but would it not mean that many customers have one. * Sedikitnya hanya satu bagian Setiap diagram Class memiliki Class (ke las), association, dan multiplicity. SEJARAH ERD Model Entity Relationship diperkenalkan pertama kali oleh P. WhitestarUML. Database modeling techniques. 
We start with some remarks about the arithmetic function. 3) one can deduce that (0. 4 ⇒between 2 and 4, inclusive 3. Seringkali membingungkan adalah bagaimana implementasi relasi tersebut dalam bentuk kode program. Edit relation (Setting of constraint name, reference operation, referenced column, and multiplicity) Double-click the line for relation. Avoid a Multiplicity of "*" Replace Relationships By Indicating Attribute Types. Is there a way. There is no official way in an ERD to indicate that data is spread out over several data bases or files system -- largely because it is a tool for integrating data onto one system. 1) A seminal result in non-Hermitian random matrix theory is the circular law, which describes the asymptotic global distribution of the spectrum for matrices with i. An Entity Relationship Diagram page in the Diagram Toolbox Entity Relationship element and relationship entries in the 'Toolbox Shortcut' menu and Quick Linker Enterprise Architect also provides transformation templates to transform Entity Relationship diagrams into Data Modeling diagrams, and vice versa. Note that when ArchiMate talks about cardinality, UML (on the class level) talks about multiplicity. Efficacy and safety of dupilumab. An Entity Relationship Diagram (ERD) is a visual representation of different entities within a system and how they relate to each other. This ERD represents the model of Employee Certification Entity. 10 Solutions 3. We post it as supplied by the authors. In order to find the unconditional probability of randomly choosing a part of multiplicity three in a randomly chosen partition of 10, one would have to. In 1989, Erd}os conjectured that for a su ciently large n it is di erent number of times, with each multiplicity 1 through n 1 represented. ERD ini dirancang untuk menggambarkan persepsi dari pemakai dan berisi obyek-obyek dasar yang disebut entity dan hubungan antar entity-entity tersebut yang disebut relationship. number of elements - of some collection of elements. SQL Normalization Examples : In this section i would like to give you example of this normalization technique. mul·ti·plic·i·ty (mŭl′tə-plĭs′ĭ-tē) n. Pengertian dari ERD (Entity Relationship Diagram) adalah suatu model untuk menjelaskan hubungan antar data dalam basis data berdasarkan objek-objek dasar data yang mempunyai hubungan antar relasi. Efficacy and safety of dupilumab. Pada contoh, hanya bisa satu ‘Customer’ untuk setiap ‘Order’, tapi satu ‘Customer’ hanya bisa memiliki beberapa ‘Order’. Dimana sistem sering kali memiliki sistem data relasional dan ketentuannya bersifat Top-Down. so please help me to know it. Any ER diagram has an equivalent relational table, and any relational table has an equivalent ER diagram. A publisher of a book wholesaler has told you that she wants information about publishers, authors, and books. Psychomotor retardation is a central feature of depression which includes motor and cognitive impairments. 2-1 Vision and Need Statement Relationships 1. The student will learn the basics of drawing an Entity-Relationship diagram (ERD) to represent user requirements, transform the ERD to a normalized relational design, and then use Structured Query Language (SQL) to implement and work with the database. Cardinality can be of different types: One to one – When each entity in each entity set can take part only once in the relationship, the cardinality is one to one. Revise your diagram to eliminate many-to-many relationships, and tag all foreign keys. 
An entity is a class of similar objects in the model, each entity is depicted in the form of rectangle and has the name expressed by a noun. I multiplicity (cardinality) constraints for relationship types I The ER model is only a partial data model, since it has no standard manipulative part. where Pis the set of prime numbers. Vatche Ishakian, D ora Erd}os, Evimaria Terzi, Azer Bestavros, A Framework for the. Estimates based on proteomic analyses indicate that a third of translated proteins in eukaryotic genomes enter the secretory pathway. READ MORE on opentextbc. Multiplicity describes the potential number of items that can be found at one end of a relationship. Explains difference between unary association and binary association as well as multiplicity. We remark here that the multiplicity of q + 1 is 1 and the multiplicities of ± √ q are such that the sum of the eigenvalues is the trace of A, which is the number of absolute points of G. No guidelines are available yet on best practices for exchanging and sharing data. Buy Stranger in a Strange Land Reprint by Heinlein, Robert A. They are: One-to-One; One-to-Many (or Many-to-One) Many-to-Many; These are explained below. Multiplicity: Where you indicate how many of each class is related to the other class. Entity-relationship diagrams (ERD) are essential to modeling anything from simple to complex databases, but the shapes and notations used can be very confusing. The vision statement is captured within a vision document and is a high-level overview of the purpose of the project and its purpose in improving the business [1]. Multiplicities 1 - Cool Math has free online cool math lessons, cool math games and fun math activities. University of Cincinnati College of Evening and Continuing Education Database Management Systems I (30-IT-451-002) Database Design Methodology Summary. In summary, multiplicity or coupling is what we call the appearance of a group of symmetric peaks representing one hydrogen in NMR spectroscopy. Algebraic multiplicity. Multiplicity describes the potential number of items that can be found at one end of a relationship. n /, which is dened to be the number of prime factors of n counted with multiplicity. All the values, within experimental errors, are in agreement with those ( T 1/2 , TKE, σ 3 n ) measured for 268 Db at the DGFRS in 2003 [ 67 ] and. Code First can't determine which class is dependent and when you execute to build the model, an exception is thrown. Furedi and D. Synonyms for various include diverse, diversified, different, differing, varied, varying, dissimilar, disparate, assorted and distinctive. See [23] Erd}os and R enyi [16] showed that, for almost all graphs, this is the \shortest. The ER diagrams are used for designing database schemas. This guide will help you to become an expert in ER diagram notation, and you will be well on your way to model your own database!. 68 Problems with ER Models. MULTIPLICITY DEPENDENCY CLUSTER ENTITY Records ATTRIBUTES KEYS FOREIGN KEYS DENORMALIZE INDEXES COLUMN NAMES RESOLVE to Tables Relational TABLES Relationship & Role Set CONSTRAINTS ( 1NF ) BINARY only Fact Modeling ER/Relational Modeling Physical Modeling Stages of Data Modeling: Introducin g Data Elements or Constraints AB C ROLE NAMES. Work in DBMS at the time Was Normalization of Relational Models (Elimination of Redundant Data in Tables, Related through Foreign Keys) 5. 
UML gives us a way to model relationship attributes by showing them in an association class that is connected to the association by a dotted line. For the sake of brevity, we refer to the two methods compared as OPM and OMT rather than OPM/T and T/OMT. 4) Xk s=1 1 n s = 1 N A NX A 1 x=0 w A(x) >m(A): If S k s=1 a s(n s)=Z(i. Determine candidate and primary key attributes for each strong entity type. In this tutorial we will have an example to explain how you can update you table to follow the First Normal Form or 1NF. from Acounting multiplicity. Misalnya, dalam kelas pegawai, kita mungkin mempunyai beberapa instant, satu untuk Ani, satu untuk Ina, satu untuk Nana dan seterusnya. I multiplicity (cardinality) constraints for relationship types I represented by an oval in an ERD, with a line to the rectangle representing its entity type. multiplicity symbols, to say something about what kind of relation they are. Reading Class Diagrams. Seringkali membingungkan adalah bagaimana implementasi relasi tersebut dalam bentuk kode program. The last of its sections, Attributes, is sometimes written SENG2011Students:Id : UNSWStudentId SENG2011Students:Name : String ; now looking more like a elds in a record or in a (e. 3 Entity–relationship elements⌘ 1. When created by business analysts or business users, ERDs can be used to understand the business domain, clarify business terminology, and connect business concepts to database structures. Navigation Properties. Psychomotor retardation is a central feature of depression which includes motor and cognitive impairments. mul·ti·plic·i·ty (mŭl′tə-plĭs′ĭ-tē) n. 1 multiplicity in the General. multiplicity problem through a comparison between the two methods. Step-by-step guide on how to make an entity relationship diagram (ERD) using Lucidchart. About the Lagunita Platform. An aggregation is a special case of an association (see above) meaning "consists of": The diamond documents this meaning; a caption is unnecessary. A multiplicity of an event = Participation of an element + cardinality of an element. Initially, when defining requirements, the approach to modeling using an ERD or UML is very similar Class Diagram - a diagram consisting of classes (i. There is no official way in an ERD to indicate that data is spread out over several data bases or files system -- largely because it is a tool for integrating data onto one system. Kali ini saya ingin menjelaskan bagaimana cara membuat ERD (Entity Relational Diagram) dalam database. In such cases, all foreign keys will also need to include all the columns in the composite key. Aggregation. A publisher of a book wholesaler has told you that she wants information about publishers, authors, and books. multiplicity, association class, abstract class, concrete class, class-scope attribute, abstract operation, method, polymorphism, overriding, multiple classification, aggregation, and composition. org/en/v/BlxL/ Help us caption & translate this video! http://amara. Our solutions are used in every major healthcare setting – from GP surgeries to high street pharmacies, hospitals to community services. 1 Nol atau satu bagian. By gaining a good understanding of the sum over the zeros ρ on the right side of (0. bounded eigenvalue multiplicity. Entities in a DBMS ERD Contain Meta-data 3. Help us caption and translate this video on Amara. association names are directional, as indicated by the filled arrow-head. (b)De s i g n a r e l a ti o n a l s c h e ma fo r th e d a ta b a s e. 
5 ©Silberschatz, Korth and Sudarshan Composite Attributes Database System Concepts 2. One of the most common is the entity relationship diagram (ERD). See full list on bridging-the-gap. Find more similar words at. The diagnosis of Erdheim-Chester disease cannot be made with certainty on the basis of only. In 1989, Erd}os conjectured that for a su ciently large n it is di erent number of times, with each multiplicity 1 through n 1 represented. To be more precise, the Riemann Hypothesis, that all such ρ have real part ≤ 1. This is an ERD for a simple order. Multiplicity. An archive of the CodePlex open source hosting site. While computers rely on static rows and columns of data, people navigate and reason about life … - Selection from The Practitioner's Guide to Graph Data [Book]. An FK tag represents the foreign key. 2-2 Multi-Phased Program Structures 1. Dimana sistem sering kali memiliki sistem data relasional dan ketentuannya bersifat Top-Down. 1/30/2005 30 IST 210 Cardinality Symbology. It covers symbols for all UML diagram types, including UML class diagram, UML collaboration d. n / D 1 N X n N X p a jn a 1 1 D 1 N X p a N a. Nikiforov: What is the meaning of the multiplicity of zero as a root of a hypergraph's characteristic polynomial? Permutations Given a permutation Ä, what is the maximum number of copies of Ä that a permutation on n symbols may contain?. For instance, a racing car can only have one or zero drivers, and a driver is either driving a car or not. This is an IT assignment that deals with entities, business rules, ERD models. In terms of rational complete graphs [14], O 3 n 1 is equivalent to K (3 1)=n, being "almost" a triangle. "Correlated Long-Range Mixed-Harmonic Fluctuations Measured in pp, p+Pb and Low-Multiplicity Pb+Pb Collisions with the ATLAS Detector. Erd "crow's Foot" Relationship Symbols Cheat Sheet throughout Er Diagram Multiplicity By admin On October 18, 2019 Er Diagram Multiplicity - This is among the examples of ER Diagram. Introduction Let ⌦(n):= P. Representing “Multiplicity” Show a many-one relationship by an arrow entering the “one” side. Pengertian dari ERD (Entity Relationship Diagram) adalah suatu model untuk menjelaskan hubungan antar data dalam basis data berdasarkan objek-objek dasar data yang mempunyai hubungan antar relasi. ERD ini dirancang untuk menggambarkan persepsi dari pemakai dan berisi obyek-obyek dasar yang disebut entity dan hubungan antar entity-entity tersebut yang disebut relationship. Simply put, with inheritance, a base class (a. Burr and Rosta [2] in 1980 wrote a survey on Ramsey multiplicity. Components used in the creation of an ERD: Entity – A person, place or thing about which we want to collect and store multiple instances of data. Then 6 has multiplicity 1 3 has multiplicity 2 4 has multiplicity 3 5 has multiplicity 4 2 has multiplicity 5 7 has multiplicity 6 So this sequence is Erd}os deep! Joachim Worthington The Sound of Science May 27, 2014 20 / 22 When are Euclidean Rhythms Erd}os Deep?. The extra code is a work-around. By Geoffrey Sparks, www. We address a general problem to find explicit formulae, for such intertwining operators in the setting of multiplicity-free branching laws for reductive symmetric pairs. Multiplicity :. Our motive is to make that table normalized. multiplicity, association class, abstract class, concrete class, class-scope attribute, abstract operation, method, polymorphism, overriding, multiple classification, aggregation, and composition. 
total number of prime factors (counting multiplicity). Sehingga Multiplicity untuk kelas pegawai diset n. Cardinality can be of different types: One to one – When each entity in each entity set can take part only once in the relationship, the cardinality is one to one. q n The behavior of a system can be modeled with an activity diagram or a state machine. Entity Relationship Diagram (ERD) adalah sebuah diagram yang menunjukkan informasi yang dibuat, disimpan dan digunakan untuk bisnis. The object-oriented movement has, of course, accentuated the point that entity/relationship modeling cannot be done in isolation from activity and event modeling. Reading Class Diagrams. edu [email protected] Removing Redundant Multiplicity Constraints In Uml Class with regard to Er Diagram Multiplicity By admin On October 18, 2019 Er Diagram Multiplicity - This is one of the types of ER Diagram. Multiplicity is also used to indicate the number of objects. As shown in [18, 6], these are the only nite triangle-free twin-free graphs where every independent set has a. Database models require that objects be named. 1 multiplicity in the General. These are discussed below, as you begin adding relationships to entities on the canvas. 2 Equalizing energies. In this context, a class defines the method s and variable s in an object , which is a specific entity in a program or the unit of code representing that entity. benefit analysis report,” “draw ERD,” “prepare class diagram”, “draw activity diagram for Accept Orders Process”, “code client/server web-enabled order system”, etc. Entity-Relationship Diagram (ERD). The first two essays in this book were written some ten years ago and published in the Sociological Review in 1908 and 1909. 10 Solutions 3. This was an. An entity in this context is an object, a component of data. Once a database is normalized, relationships between the data in multiple tables must be established. Entity-relationship diagrams (ERD) are essential to modeling anything from simple to complex databases, but the shapes and notations used can be very confusing. Tentukan multiplicity ( yg menentukan nilai ini adalah user bukan database desiner, didapat lewat interview) 4. Refuting a conjecture of Erd˝os, Thomason (Combinatorica 17(1):125–134, 1997) constructed graphs with a small density of both 4-cliques and 4-anticliques. ACM Press, 1982. ERD ini dirancang untuk menggambarkan persepsi dari pemakai dan berisi obyek-obyek dasar yang disebut entity dan hubungan antar entity-entity tersebut yang disebut relationship. Ada banyak tools yang digunakan untuk membuat ERD, dan saya akan menjelaskan langkah untuk membuat ERD dengan menggunakan Microsoft Visio 2013. n / D 1 N X n N X p a jn a 1 1 D 1 N X p a N a. revised approved submission with amendments to principles 1 and 2. For example, in the association between Customer and Current Account in the figure below, a customer may or may not have a current account. A Crow's foot shows a one-to-many relationship. UMLet is a free, open-source UML tool with a simple user interface: draw UML diagrams fast, build sequence and activity diagrams from plain text, export diagrams to eps, pdf, jpg, svg, and clipboard, share diagrams using Eclipse, and create new, custom UML elements. 4 Entity–relationship cardinalities⌘ 1. Constraints for describing limitations that can be assigned to roles in a NIAM view. 18 October 2018 at 12:02 multiplicity (1) Nabung Paksa (1. PRQ?[gXfMÉFHsfXZY XfGRs%Y¡PoMÉFHsfD¡^rD [ ÌMNGJKnIgPoXfà Y¡PoMÉFHshD¡^zD [ \. 
Multiplicity describes the potential number of items that can be found at one end of a relationship. In UML it is a measure of the number of links in a particular association between one object and one or more other objects, expressed as an inclusive interval of non-negative integers, and it is one of the most commonly misunderstood parts of a class diagram. Entity-relationship diagrams (ERDs) use different vocabulary for the same ideas: ERDs are not UML diagrams, an association is called a relationship, multiplicity is called cardinality, and generalization has its own notation. (ArchiMate likewise talks about cardinality where UML, at the class level, talks about multiplicity.) An entity is the term used in ERD modeling for a thing about which the system needs to store information; an entity set is a collection of similar entities; and the main components of an ERD are entities, relationships, and attributes. ERDs serve as high-level logical data models when developing a conceptual design for a database.

Three kinds of entity relationships cover most models: one-to-one, one-to-many (or many-to-one), and many-to-many. For example, a customer may or may not have a current account (multiplicity 0..1); one company has one or more employees, but each employee works for one company only (one-to-many); a student has a unique student id and can enroll in any number of classes; and in a warehouse where each storage bin holds a single widget, StorageBin and Widget are one-to-one. In a relational database a one-to-many relationship is typically realized with a foreign key, which can be created either through the SSMS GUI or T-SQL; in JPA, the many side is marked with the @ManyToOne annotation on the corresponding persistent property or field.

When we read or create UML class diagrams we constantly deal with relationships between classes: an association means that one class uses another and is usually accompanied by a multiplicity, while generalization-specialization expresses a general-versus-specific relationship in which subtypes provide specialized versions of the supertype's state and behavior. What is often confusing is how those relationships are implemented in program code. Class diagrams also cover association classes, abstract and concrete classes, class-scope attributes, abstract operations, polymorphism, overriding, aggregation, and composition.

Outside data modeling, "multiplicity" has other technical meanings, for example the multiplicity (coupling) of symmetric peak groups in NMR spectroscopy and the multiplicity of roots or eigenvalues in mathematics.
I'd like to use Vertex Buffer Objects (VBOs) to improve the rendering of somewhat complicated models in my OpenGL ES 1.1 game for iPhone. After reading several posts on SO and this (http://playcontrol.net/ewing/jibberjabber/opengl_vertex_buffer_object.html) tutorial, I'm still having trouble understanding VBOs and how to implement them given my Cheetah 3D export model format. Could someone please give me an example of implementing a VBO and using it to draw my vertices with the given data structure, and explain the syntax? I greatly appreciate any help!
#define body_vertexcount 434
#define body_polygoncount 780
// The vertex data is saved in the following format:
// u0,v0,normalx0,normaly0,normalz0,x0,y0,z0
float body_vertex[body_vertexcount][8]={
{0.03333, 0.00000, -0.68652, -0.51763, 0.51063, 0.40972, -0.25028, -1.31418},
{...},
{...}
}
GLushort body_index[body_polygoncount][3]={
{0, 1, 2},
{2, 3, 0}
}
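To make the layout concrete, here is a minimal struct that I'm assuming matches that interleaved format (the field names are mine, it relies on tight packing with no padding, and vertexStruct is the name referenced in the drawing code further down):
#include <OpenGLES/ES1/gl.h>   /* iOS header providing GLfloat; adjust the include on other platforms */
/* One interleaved vertex: 8 GLfloats = 32 bytes.
   Byte offsets within the struct: texcoord at 0, normal at 8, position at 20. */
typedef struct {
    GLfloat texcoord[2];   /* u, v       -> offset 0  */
    GLfloat normal[3];     /* nx, ny, nz -> offset 8  */
    GLfloat position[3];   /* x, y, z    -> offset 20 */
} vertexStruct;
/* The exported array can then be viewed as vertices, e.g.
   const vertexStruct *verts = (const vertexStruct *)body_vertex; */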
I've written the following code with the help of Chapter 9 of Pro OpenGL ES (Apress). I'm getting EXC_BAD_ACCESS with the DrawElements command and I'm not sure why. Could someone please shed some light? Thanks -
// First thing we do is create / setup the index buffer
glGenBuffers(1, &bodyIBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bodyIBO);
// For contrast, instead of glBufferSubData and glMapBuffer,
// we can directly supply the data in one shot
glBufferData(GL_ELEMENT_ARRAY_BUFFER, body_polygoncount*sizeof(GLubyte), body_index, GL_STATIC_DRAW);
// Define our data structure
int numXYZElements = 3;
int numNormalElements = 3;
int numTextureCoordElements = 2;
long totalXYZBytes;
long totalNormalBytes;
long totalTexCoordinateBytes;
int numBytesPerVertex;
// Allocate a new buffer
glGenBuffers(1, &bodyVBO);
// Bind the buffer object to use
glBindBuffer(GL_ARRAY_BUFFER, bodyVBO);
// Tally up the size of the data components
numBytesPerVertex = numXYZElements;
numBytesPerVertex += numNormalElements;
numBytesPerVertex += numTextureCoordElements;
numBytesPerVertex *= sizeof(GLfloat);
// Actually allocate memory on the GPU ( Data is static here )
glBufferData(GL_ARRAY_BUFFER, numBytesPerVertex * body_vertexcount, 0, GL_STATIC_DRAW);
// Upload data to the cache ( memory mapping )
GLubyte *vboBuffer = (GLubyte *)glMapBufferOES(GL_ARRAY_BUFFER, GL_WRITE_ONLY_OES);
// Calculate the total number of bytes for each data type
totalXYZBytes = numXYZElements * body_vertexcount * sizeof(GLfloat);
totalNormalBytes = numNormalElements * body_vertexcount * sizeof(GLfloat);
totalTexCoordinateBytes = numTextureCoordElements * body_vertexcount * sizeof(GLfloat);
// Set the total bytes property for the body
self.bodyTotalBytes = totalXYZBytes + totalNormalBytes + totalTexCoordinateBytes;
// Setup the copy of the buffer(s) using memcpy()
memcpy(vboBuffer, body_vertex, self.bodyTotalBytes);
// Perform the actual copy
glUnmapBufferOES(GL_ARRAY_BUFFER);
Here are the drawing commands where I'm getting the exception:
// Activate the VBOs to draw
glBindBuffer(GL_ARRAY_BUFFER, bodyVBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bodyIBO);
// Setup drawing
glMatrixMode(GL_MODELVIEW);
glEnable(GL_TEXTURE_2D);
glClientActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D,lightGreyInt);
// Setup pointers
glVertexPointer(3, GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 0 );
glTexCoordPointer(2, GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 12 );
glNormalPointer(GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 24 );
// Now draw the body
glDrawElements(GL_TRIANGLES, body_polygoncount,GL_UNSIGNED_SHORT, (GLvoid*)((char*)NULL));
//glDrawElements(GL_TRIANGLES, body_polygoncount, GL_UNSIGNED_SHORT, nil);
//glDrawElements(GL_TRIANGLES,body_polygoncount*3,GL_UNSIGNED_SHORT,body_index);
I cannot believe there is no learning resource to be found that explains this pretty well to you, assuming you already know how vertex arrays and the rest of OpenGL works (which your mentioning of your OpenGL ES 1.1 game suggests). You may also look here, but the linked tutorial from your question looks reasonable (I only glanced over it, though). – Christian Rau Mar 18 '12 at 15:24
What part are you having trouble with? We're not just going to write code for you. What about the data structure and syntax don't you understand? Just be as explicit as possible so we can formulate a reasonable answer. – user1118321 Mar 18 '12 at 15:52
I've pasted my code, I'm particularly having trouble rendering the interleaved data with the glDrawElements call. I'm getting the EXC_BAD_ACCESS, I'd really appreciate some help - – PhilBot Mar 18 '12 at 19:09
Well, first of all your index buffer is too small, you don't just have body_polygoncount indices but body_polygoncount * 3. You also messed up the type, since they're shorts, you need GLushort and not GLubyte, so it should be
glBufferData(GL_ELEMENT_ARRAY_BUFFER, body_polygoncount*3*sizeof(GLushort),
body_index, GL_STATIC_DRAW);
And then, you messed up the offsets of your attributes, since your data contains first the texture coords, then the normal and then the position for each vertex, it should be
glVertexPointer(3, GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 20 ); //3rd, after 5*4 byte
glTexCoordPointer(2, GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 0 ); //1st
glNormalPointer(GL_FLOAT, sizeof(vertexStruct), (char *)NULL + 8 ); //2nd, after 2*4 bytes
And finally, in a glDrawElements call you don't give the number of triangles, but the number of elements (indices), so it should be
glDrawElements(GL_TRIANGLES, body_polygoncount*3,
GL_UNSIGNED_SHORT, (GLvoid*)((char*)NULL));
Otherwise your code looks reasonable (of course the mapping was senseless and you could have just used glBufferData again, but I guess you did it for learning) and if you understood everything it does, there is nothing more to it.
But I wonder that all these errors would also have occurred if you had just used client side vertex arrays without VBOs and I thought OpenGL ES 1.1 doesn't have immediate mode glBegin/glEnd. So I wonder why your game worked previously without VBOs if you're not aware of these errors.
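For completeness, here is one way the corrected setup and draw code could look with the three fixes rolled together. This is only a sketch under the same assumptions as above (the body_vertex/body_index arrays from the question, an interleaved layout of 8 GLfloats per vertex, and a single glBufferData upload instead of the mapping step); the client-state enables are shown explicitly even though your project may already set them elsewhere:
/* Index buffer: 3 GLushort indices per triangle. */
glGenBuffers(1, &bodyIBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bodyIBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
             body_polygoncount * 3 * sizeof(GLushort),
             body_index, GL_STATIC_DRAW);
/* Vertex buffer: 8 GLfloats (u, v, nx, ny, nz, x, y, z) per vertex. */
glGenBuffers(1, &bodyVBO);
glBindBuffer(GL_ARRAY_BUFFER, bodyVBO);
glBufferData(GL_ARRAY_BUFFER,
             body_vertexcount * 8 * sizeof(GLfloat),
             body_vertex, GL_STATIC_DRAW);
/* Drawing: byte offsets are texcoord 0, normal 8, position 20. */
glBindBuffer(GL_ARRAY_BUFFER, bodyVBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bodyIBO);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, 8 * sizeof(GLfloat), (const GLvoid *)0);
glNormalPointer(GL_FLOAT, 8 * sizeof(GLfloat), (const GLvoid *)(2 * sizeof(GLfloat)));
glVertexPointer(3, GL_FLOAT, 8 * sizeof(GLfloat), (const GLvoid *)(5 * sizeof(GLfloat)));
glDrawElements(GL_TRIANGLES, body_polygoncount * 3, GL_UNSIGNED_SHORT, (const GLvoid *)0);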
Thank you - I see my mistakes.. now it makes sense. I was previously drawing like:// Draw the Gun - Render and Texture Proceedure for gunX //glBindTexture(GL_TEXTURE_2D,lightGreyInt); glVertexPointer(3, GL_FLOAT, sizeof(vertexStruct), &interpolatedVerticesGun[0][5]); glTexCoordPointer(2, GL_FLOAT, sizeof(vertexStruct), &interpolatedVerticesGun[0][0]); glNormalPointer(GL_FLOAT, sizeof(vertexStruct), &interpolatedVerticesGun[0][2]); glDrawElements(GL_TRIANGLES,gun2_polygoncount*3,GL_UNSIGNED_SHORT,gun2_index); – PhilBot Mar 19 '12 at 3:15
Augmented coefficient vectors
1. Feb 22, 2012 #1
What exactly does throwing the constant into your normal-vector tuple represent, versus a normal vector without the constant? (Coefficient vector = normal vector, right?)
My notes don't provide an explanation for this. I can visualize what parallel normal vectors look like, but then they talk about parallel or non-parallel augmented coefficient vectors, which is where I get lost.
Thanks.
3. Feb 22, 2012 #2
Ok cancel that. Misread a bunch of stuff.
4. Feb 22, 2012 #3
Deveno
Science Advisor
a simple example:
the vectors (2,1) and (4,2) are parallel. suppose they actually represent coefficients in a set of linear equations:
2x + y = 0
4x + 2y = 0
the augmented vectors (2,1,0), (4,2,0) are also parallel. this means they represent "the same equation" (just scaled, in this case, by a factor of 2).
if we have:
2x + y = 0
4x + 2y = 1
the augmented vectors (2,1,0) and (4,2,1) are no longer parallel, which means the same pairs of (x,y) no longer satisfy both (in this case, there is no such pair at all).
adding an extra coordinate increases the dimension of "the space we're in" by 1. in 2 dimensions (the plane), if two lines are not parallel, they have to intersect. in 3 dimensions, two lines can be "not parallel" and still not intersect.
if two augmented vectors are not parallel, it means they represent two different equations (two different constraints on the solutions space). if two augmented vectors are parallel, one of them is redundant (this is the notion we seek to capture with the idea of linear independence, when we are considering two or more equations).
this is the "row-based" way of looking at things (focused on the solutions). the "column-based" way of looking at things focuses on the "images" (what happens to the elements of the solution space). in the equation:
Ax = b
the columns (of A) determine which b's we can get, the rows (of A or A|b) determine which x's we can use.
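to make the two cases above easier to compare at a glance, here are the same systems written as augmented matrices and row-reduced (nothing new, just a restatement of the example):
% parallel augmented rows (2,1,0) and (4,2,0): the second equation is a scaled
% copy of the first, so there is really only one constraint.
\[
\left[\begin{array}{cc|c} 2 & 1 & 0 \\ 4 & 2 & 0 \end{array}\right]
\;\sim\;
\left[\begin{array}{cc|c} 2 & 1 & 0 \\ 0 & 0 & 0 \end{array}\right]
\qquad \text{(infinitely many solutions)}
\]
% non-parallel augmented rows (2,1,0) and (4,2,1): row reduction leaves the
% impossible equation 0 = 1, so no pair (x,y) satisfies both.
\[
\left[\begin{array}{cc|c} 2 & 1 & 0 \\ 4 & 2 & 1 \end{array}\right]
\;\sim\;
\left[\begin{array}{cc|c} 2 & 1 & 0 \\ 0 & 0 & 1 \end{array}\right]
\qquad \text{(no solution)}
\]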
Algebra 2 Unit 2 Practice
LESSON

1. Consider a rectangle that has a perimeter of 80 cm.
a. Write a function A(l) that represents the area of the rectangle with length l.
b. Graph the function A(l). Use an appropriate scale and label the axes.
c. Is an area of 6 cm² possible? How do you know? What is the length and width of the rectangle?
d. What are the dimensions of a rectangle with a perimeter of 80 cm and an area of 300 cm²?
e. What are the reasonable domain and range of A(l)? Express your answers as inequalities, in interval notation, and in set notation.
f. What is the greatest area that the rectangle can have? Explain. Give the dimensions of the rectangle with the greatest area and describe its shape.

2. A rectangle has a perimeter of 160 cm. What is the maximum area?
A. 160 cm²  B. 800 cm²  C. ___ cm²  D. ___ cm²

3. Make sense of problems. Madison purchased 40 ft of fencing to build a corral for her horses. If each horse requires 600 ft² of space, what is the maximum number of horses Madison can put in the corral she builds with the fencing? Explain.

4. Chance has 60 ft of fencing to build a dog pen. He plans to build the pen using one side of a ___-ft-long building. He will use all of the fencing for the other three sides of the pen. Use the area function for this rectangle to determine the area of the pen.

5. How is the maximum value of a quadratic function represented on the graph of the function?

LESSON 7-

6. Factor by copying and completing the graphic organizer. Then check
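For item 1a (and 1f), a sketch of the standard setup, which follows directly from the stated perimeter of 80 cm:
% Perimeter 80 cm: 2l + 2w = 80, so w = 40 - l.
\[
A(l) = l(40 - l) = -l^{2} + 40l, \qquad 0 < l < 40 .
\]
% Completing the square gives A(l) = -(l - 20)^2 + 400, so the greatest possible
% area is 400 cm^2, reached when l = w = 20 cm (the rectangle is a square).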
2 7. Factor each quadratic epression. a. 3 b c d For each set of solutions, write a quadratic equation in standard form. a. 3, b. 4, 1 c., 3 d. 7, 3 e. 1 1 f e. 1, 1 f. 3, 1 g h g. 3 4, 3 h. 3, 1 3 i j Which of the following is the factored form of ? A. (6 3)( 1 6) B. ( 3)(6 1 6) C. (4 6)(3 1 3) D. (3 6)(4 1 3) 9. Reason abstractl. Given that b is positive and c is negative in the quadratic epression a 1 b 1 c, what can ou conclude about the constant terms in the factored form? 13. Julio has 00 ft of fencing to put around a field that has an area of 00 sq ft. Which equation can be used to find the length of the field? A. l 0l B. l 00l C. l 0l 00 0 D. l 0l Make use of structure. What propert do ou use to solve a quadratic equation b factoring? Eplain.. Make use of structure. Can the quadratic epression 1 1 be factored using integers? Eplain. LESSON Solve each equation b factoring. a b. 3 0 c d Make sense of problems. Akiko wants to fence in her 1 ft b ft vegetable garden. There will be a path ft wide between the garden and the fence. The area to be enclosed b the fence will be 360 sq ft. a. Model with mathematics. Draw a diagram of the situation. b. Write a quadratic equation that can be used to determine the value of. e f. 1 3 c. Solve the equation b factoring. g h d. Interpret the solutions. i j College Board. All rights reserved. SpringBoard Algebra, Unit Practice
3 LESSON For what values of is the product ( 1 )( 1) positive? Eplain. 17. Use the number line provided to solve each inequalit. a. 1 $ 0 0. Model with mathematics. Simon wants to enclose a rectangular corral net to the barn. The side of the barn will form one side of the corral. The other three sides will be fencing. Simon purchased 10 ft of fencing and wishes to enclose an area of at least 00 sq ft. a. Write an inequalit in terms of l that represents the possible area of the pen. b. Write the inequalit in standard form with integer coefficients b. 1 8, 0 c. Factor the inequalit. d. Determine the possible lengths and widths of the corral Which of the following is the solution set to the quadratic inequalit, 1 6? A., or. 3 B. 3,, C.,, 3 D., 3 or. 19. Solve each inequalit. a. ( 3)( 1 ), 0 b. ( 4)( 1) $ 0 c. 4 8 $ 0 d. 9 # 0 LESSON Write each number in terms of i. a. 36 b. 11 c. d. 4 e. 7 f. 98 g. 48 h. 900 e f , 0 g h $ 0. Make use of structure. Which of the following numbers can be written as i? A. B. C. i D. i 3 01 College Board. All rights reserved. SpringBoard Algebra, Unit Practice
4 3. Graph each comple number on the comple plane. a i b. 3 i c. 4i d. i imaginar ais. The sum of two numbers is 1, and their product is 40. a. Let represent one of the numbers, and write an epression for the other number in terms of. Use the epression to write an equation that models the situation given above. b. Use the Quadratic Formula to solve the equation. Write the solutions in terms of i. real ais LESSON 8-6. Find each sum or difference. a. (3 1 7i) 1 (9 i) b. ( 3i) (6 7i) c. ( 1 8i) 1 (7 3i) 4. Name the comple number represented b each labeled point on the comple plane. 8 6 C 4 D imaginar ais E B A real ais d. (1 1 6i) (9 4i) 1 3 e. 13i 1i f. (3 i) ( 1 6) i g. (3 14 i) (3 4) i h. i 1 (3 i) 7. Multipl. Write each product in the form a 1 bi. a. ( 1 i)(3 i) b. (8 3i)( 1 i) c. ( 1 i)( i) d. (7 1 3i)(3 1 i) a. point A b. point B e. (3 4i)(6 i) f. ( 1 i)(3 4i) c. point C d. point D g. (1 i)(3 1 i) h. ( 1 4i)( 3i) e. point E 4 01 College Board. All rights reserved. SpringBoard Algebra, Unit Practice
5 8. Divide. Write each quotient in the form a 1 bi. 3 i 1 3i a. b. 1i 4 i 33. Which are the solutions of the quadratic function f () 4 1 9? 1 A. i 3, 1 i 3 4 B. i 9, 4 i 9 c. 3 1i 3i d. 8 7i 1 i C. i 3, 3 i 3 D. i, 3 i 34. What are the solutions of each quadratic function? e. 11 4i 1i f. 3i i a b g. 1 3i h. 1i 3i c d Make use of structure. Give an eample of a comple number ou could subtract from i that would result in a real number. Show that the difference of the comple numbers is equal to a real number. 3. Attend to precision. What are the solutions of the equation 9 1 3? Show our work. LESSON Attend to precision. Solve the equation 3( ) 7 0, and eplain each of our steps. 30. Which of the following is the comple conjugate of 3 1 7i? A i B. 3 7i C. 3 7i D i LESSON Use comple conjugates to factor each epression. a b Solve for. a b. 9 0 c d e. 4( 3) 81 0 f. ( 1 9) c d Solve each equation b factoring. a. 1 0 b c d g. 3( 4) 0 h. 7( 1 ) Which is NOT a perfect square trinomial? A. 1 1 B C D College Board. All rights reserved. SpringBoard Algebra, Unit Practice
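The division items above ask for quotients written in the form a + bi; the standard technique is to multiply numerator and denominator by the conjugate of the denominator. A generic worked example (the numbers are my own, not taken from the worksheet):
\[
\frac{3 - i}{1 + 2i}
  = \frac{(3 - i)(1 - 2i)}{(1 + 2i)(1 - 2i)}
  = \frac{3 - 6i - i + 2i^{2}}{1 - (2i)^{2}}
  = \frac{1 - 7i}{5}
  = \frac{1}{5} - \frac{7}{5}\,i .
\]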
6 39. Use the method of completing the square to make a perfect square trinomial. Then factor the perfect square trinomial. a. 1 8 b Which quadratic equation would be best solved b using the Quadratic Formula to find the solutions? A B C D Solve for b completing the square. a. 8 1 b Solve each quadratic equation b using an of the methods ou have learned. For each equation, tell which method ou used and wh ou chose that method. a b. 1 0 c d LESSON Write the Quadratic Formula. c d. ( ) Solve each equation using the Quadratic Formula. a b. 1 c d Gaetano shoots a basketball from a height of 6. ft with an initial vertical velocit of 17 ft/s. The equation 16t 1 17t 1 6. can be used to determine the time t in seconds at which the ball will have a height of ft, the same height as the basket. a. Solve the equation b using the Quadratic Formula. e f b. Attend to precision. To the nearest tenth of a second, when will the ball have a height of ft? g h c. Eplain how ou can check that our answers to part b are reasonable College Board. All rights reserved. SpringBoard Algebra, Unit Practice
7 LESSON Write the discriminant of the quadratic equation a 1 b 1 c 0. Eplain how it is used. 47. For each equation, compute the value of the discriminant and describe the solutions without solving the equation. a b LESSON Which equation does the graph represent? 1 c d The discriminant of a quadratic equation is less than 0. What is the nature of the solutions of the equation? A. one rational solution B. two comple conjugate solutions C. two irrational D. two rational solutions solutions 49. A quadratic equation has one real, rational solution. a. What is the value of the discriminant? A. ( 1) 1 3 B. ( 1) 3 C. ( 1 1) 1 3 D. ( 1 1) 3. A parabola has a focus of (, 3) and a directri of 1. Answer each question about the parabola, and eplain our reasoning. a. What is the ais of smmetr? b. Reason abstractl. Give an eample of a quadratic equation that has one real, rational solution. 0. a. Under what circumstances will the radicand in the Quadratic Formula be positive? b. What is the verte? b. What does this tell us about the solutions? c. When will the solutions be rational? c. In which direction does the parabola open? 7 01 College Board. All rights reserved. SpringBoard Algebra, Unit Practice
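The lesson above asks for the discriminant and how it is used; for reference, these are the standard facts (assuming rational coefficients), not something taken from the worksheet itself:
\[
a x^{2} + b x + c = 0 \ (a \neq 0), \qquad D = b^{2} - 4ac .
\]
% D > 0 and a perfect square:      two real, rational solutions (two x-intercepts)
% D > 0 and not a perfect square:  two real, irrational solutions (two x-intercepts)
% D = 0:                           one real, rational (repeated) solution (one x-intercept)
% D < 0:                           two complex conjugate solutions (no x-intercepts)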
8 3. Reason quantitativel. Use the given information to write the equation of each parabola. a. verte: (, 3); directri: LESSON - 6. Write the equation of the quadratic function whose graph passes through each set of points. a. (1, 1), (1, 4), (, 3) b. focus: (, 4); directri: c. ais of smmetr: 0; verte: (0, 0); directri: 4 b. (0, 1), (, 7), (3, 14) d. focus: (3, ); verte: (1, ) c. (, 11), (1, ), (3, 6) 1 4. The equation of a parabola is Identif the verte, ais of smmetr, focus, and directri of the parabola.. Graph the parabola given b the equation 1 ( 13) 1. ( ). d. (, 3), (0, 1), (1, 6) 7. The table below shows the first few terms of a quadratic function. Write a quadratic equation in standard form that describes the function f ( ) Which equation describes the parabola that passes through the three points (0, 14), (3, 4), and (, 4)? A B C D College Board. All rights reserved. SpringBoard Algebra, Unit Practice
9 9. Graph the quadratic function that passes through the points (0, ), (, 11), and (, 3). 6. The tables show time and height data for two rockets. Rocket A Time (s) Height (m) Rocket B Time (s) Height (m) a. Use appropriate tools strategicall. Use a graphing calculator to perform a quadratic regression for each data set. Write the equations of the quadratic models. Round the coefficients and constants to the nearest tenth. 60. Reason quantitativel. The graph of a quadratic function passes through the point (, ). The verte of the graph is (1, 3). a. Use smmetr to identif another point on the graph of the function. Eplain how ou determined our answer. b. Write the equation of the quadratic function in standard form. LESSON Tell whether a linear model or a quadratic model is a better fit for each data set. Justif our answer. a b. Use our models to predict which rocket had a greater maimum height. Eplain. c. Use our models to predict which rocket hit the ground first and how much sooner. 63. Which quadratic model is the best fit for the data in the table? Use our calculator A B C D What is the least number of points that are needed to perform a quadratic regression on a graphing calculator? Eplain. b College Board. All rights reserved. SpringBoard Algebra, Unit Practice
10 6. The Qualit Shoe Compan tests different prices of a new tpe of shoe at different stores. The table shows the relationship between the selling price of a pair of shoes and the monthl revenue per store the compan made from selling the shoes. Selling Price ($) Monthl Revenue per Store ($) , ,60 300, LESSON Describe each function as a transformation of f(). Then use the information to graph each function on the coordinate grid. a. f() 3 f () a. Use a graphing calculator to determine the equation of a quadratic model that can be used to predict, the monthl revenue per store in dollars when the selling price for each pair of shoes is dollars. Round values to the nearest hundredth. b. f() ( 3) f () b. Is a quadratic model a good model for the data set? Eplain. c. Use our model to determine the price at which the compan should sell the shoes to generate the greatest revenue. 01 College Board. All rights reserved. SpringBoard Algebra, Unit Practice
11 c. f() ( 1 ) 1 3 f () 67. Each function graphed below is a transformation of f(). Describe each transformation and write the equation of the transformed function. a. g() b. h() d. f() ( ) 3 f() c. j() College Board. All rights reserved. SpringBoard Algebra, Unit Practice
12 d. k() LESSON Describe the graph of each function as a transformation of the graph of f(). Then use the information to graph each function on the coordinate plane. a. f() 1 3 f () 68. Make use of structure. p() ( 1 1) is a transformation of f(). Which is a description of the transformation? A. translation 1 unit to the right and units down B. translation 1 unit to the left and units down C. translation units to the right and 1 unit up D. translation units to the left and 1 unit up 69. What is the verte of the function g() ( 1 ) 4? Justif our answer in terms of a translation of f(). b. f() 1 f () 70. What is the ais of smmetr of the function h() ( 3) 1 1? Justif our answer in terms of a translation of f() College Board. All rights reserved. SpringBoard Algebra, Unit Practice
13 1 c. f() 3 f () 7. Each function graphed below is a transformation of f(). Describe the transformation and write the equation of the transformed function below each graph. a. g() d. f() f () b. h() College Board. All rights reserved. SpringBoard Algebra, Unit Practice
14 c. j() f() is translated 1 unit down, shrunk b a factor of 1, and reflected over the -ais. Which is the equation of the transformation? A. g() ( 1 ) 1 B. g() ( 1 ) 1 C. g() 1 1 D. g() Without graphing, determine the verte of the graph of h() 3( 1 1) 1. Eplain how ou found our answer. LESSON 11-3 d. k() 76. Write each quadratic function in verte form. Then describe the transformation(s) from the parent function and use the description to graph the function. a. g() g() 73. Make use of structure. Describe how the graph of g() 1 differs from the graph of 1 h() College Board. All rights reserved. SpringBoard Algebra, Unit Practice
15 b. h() h() d. k() k() 77. What is the verte of the graph of the function f() ( 1 3) 1? Eplain our answer. c. j() j () 78. Write each function in verte form. Then identif the verte and ais of smmetr of the function s graph, and tell in which direction the graph opens. a. h() 1 4 verte form: verte: ais of smmetr: graph opens: b. h() verte form: verte: ais of smmetr: graph opens: 1 01 College Board. All rights reserved. SpringBoard Algebra, Unit Practice
16 c. h() verte form: verte: ais of smmetr: graph opens: 8. Construct viable arguments. Suppose ou are asked to find the verte of the graph of f() Eplain our answer. Find the verte and eplain how ou found the verte using our chosen method. d. h() verte form: verte: ais of smmetr: graph opens: 83. Use the formula for the -coordinate of the verte to find the verte of each function. a. f() Which function has an ais of smmetr to the left of the -ais? A. f() B. f() 6 1 C. f() 1 3 D. f() b. f() Epress regularit in repeated reasoning. Sal is writing f() 3 6 in verte from. What number should he write in the first bo below to complete the square inside the parentheses? What number should he write in the second bo to keep the epression on the right side of the equation balanced? Eplain. f() 3( 1 ) c. f() LESSON The graph of a quadratic function f() opens downward, and its verte is (3, ). For what values of does the value of f() increase? For what values of does the value of f() decrease? Eplain our answers. d. f() College Board. All rights reserved. SpringBoard Algebra, Unit Practice
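Item 83 relies on the formula for the x-coordinate of the vertex; for reference (a standard fact, not taken from the worksheet):
\[
f(x) = ax^{2} + bx + c \quad\Longrightarrow\quad
\text{vertex at } \left(-\frac{b}{2a},\; f\!\left(-\frac{b}{2a}\right)\right),
\]
% and the axis of symmetry is the vertical line x = -b/(2a).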
17 84. Which is the verte of f() ? A. (0, 30) B. (, 0) C. (0, ) D. (, 0) LESSON Identif the - and -intercepts of each function. a. f() Mrs. Miller would like to create a small vegetable garden adjacent to her house. She has 0 ft of fencing to put around three sides of the garden. a. Let be the width of the garden. Write the standard form of a quadratic function G() that gives the area of the garden in square feet in terms of. -intercepts: -intercept: b. f() b. Graph G() and label the aes. G() -intercepts: -intercept: c. f() 7 1 -intercepts: -intercept: c. What is the verte of the graph of G()? What do the coordinates of the verte represent in this situation? d. f() d. Reason quantitativel. What are the dimensions of the garden that ield the maimum area? Eplain our answer. -intercepts: -intercept: College Board. All rights reserved. SpringBoard Algebra, Unit Practice
18 87. What is the -intercept of a quadratic function? How man -intercepts can a quadratic function have? c. Give the reasonable domain and range of T(), assuming that the tour compan does not want to lose mone b selling the tickets. Eplain how ou determined the reasonable domain and range. 88. Which of the following are the -intercepts of f() 6 4? A. and B. and 0 C. 1 and 3 D. and 3 d. Make sense of problems. What selling price for the tickets would maimize the tour compan s profit? Eplain our answer. 89. When does the graph of a quadratic function have onl one -intercept? 90. You can bu a 4-hour ticket for the Hop-On Hop-Off bus tour in London for 0. (The basic unit of mone in the United Kingdom is the pound,.) The tour compan is considering increasing the cost of the ticket to increase the profit. If the tickets are too epensive, the will not have an customers. The function T() 1 0 models the profit the tour compan makes b selling tickets for pounds each. a. What is the -intercept of the graph of T(), and what is its significance? LESSON For each function, identif the verte, -intercept, -intercept(s), and ais of smmetr. Graph the function. Identif whether the function has a maimum or minimum and give its value. a. f() 7 1 verte: -intercept: -intercept(s): ais of smmetr: ma or min: f() b. What are the -intercepts of the graph of T(), and what is their significance? College Board. All rights reserved. SpringBoard Algebra, Unit Practice
19 b. f() verte: -intercept: -intercept(s): ais of smmetr: 9. Make sense of problems. Consider the London bus tour compan function T() 1 0 whose graph is below T() ma or min: f() a. Based on the model, what selling price(s) would result in a profit of 300? Eplain how ou determined our answer. b. Could the tour compan make 00? Eplain. c. If the tour compan sells the tickets for each, how much profit can it epect to make? Eplain how ou determined our answer. 93. Eplain how to find the -intercept of the quadratic function f() without graphing the function. 94. If a parabola opens down, then the -coordinate of the verte is the A. minimum value B. ais of smmetr C. -intercept D. maimum value 9. Suppose ou are given the verte of a parabola. How can ou find the ais of smmetr? College Board. All rights reserved. SpringBoard Algebra, Unit Practice
20 LESSON What does the discriminant of a quadratic function tell ou about the -intercepts of the graph of the function? 97. The discriminant of a quadratic equation is greater than zero but not a perfect square. What is the nature of the solutions? How man -intercepts will the graph of the equation have? b value of the discriminant: nature of the solutions: -intercepts: 98. For each equation, find the value of the discriminant and describe the nature of the solutions. Then graph the related function and find the -intercepts if the eist. a value of the discriminant: nature of the solutions: -intercepts: c value of the discriminant: nature of the solutions: -intercepts: 0 01 College Board. All rights reserved. SpringBoard Algebra, Unit Practice
21 d value of the discriminant: nature of the solutions: -intercepts: LESSON 1-1. Solve each quadratic inequalit b graphing. a. $ b If the discriminant is zero, what is true about the solution(s) of the quadratic equation? A. There are two comple conjugate solutions. B. There are two real solutions. C. There is one, rational solution. D. There are no solutions. 0. Construct viable arguments. A quadratic equation has two comple conjugate solutions. What can ou conclude about the value of the discriminant of the equation? 1 01 College Board. All rights reserved. SpringBoard Algebra, Unit Practice
22 c. # Which inequalit s solutions are shown in the graph? A. # B., C. $ D d., Which of the following is NOT a solution of the inequalit $ 3 1? A. (0, ) B. (3, 3) C. (, 1) D. (1, 9) 01 College Board. All rights reserved. SpringBoard Algebra, Unit Practice
23 4. Graph the quadratic inequalit, Then state whether each ordered pair is a solution of the inequalit. LESSON Graph each sstem. Write the solution(s) if an. a a. (0, ) b. (3, 4) c. (3, 6) d. (4, 1). Model with mathematics. Foresters use a measure known as diameter at breast height (DBH) to measure trees for logging. To find the DBH, the use diameter tape or a caliper to measure the tree at a height of 4. feet (breast height) from the ground to determine the diameter. A tree of a certain species should have a cross-sectional area of at least square feet at breast height for it to be logged. Suppose that mature trees for this species do not have a cross-sectional area of more than 30 square feet at this height. b a. Write the function for the DBH in terms of the radius, r. Is this a linear function or a quadratic function? Eplain. b. Write the function for the area which is acceptable for logging in terms of the radius, r. Is this a linear function or a quadratic function? What is the reasonable domain and range of the function? 3 01 College Board. All rights reserved. SpringBoard Algebra, Unit Practice
24 c. 1 ( 4) 1 7. Critique the reasoning of others. Mari claims that a sstem of a linear equation and a quadratic equation can have one solution. Zell sas that the sstem has to have two solutions. Who is correct? Eplain using a sstem and a graph as an eample. d. 1 ( 3) The demand function for a product is f() 3 1. The suppl function is g() Use a graphing calculator to determine the solution(s) to the sstem. A. (64.9, 13.3) and (0., 8.3) B. (13.3, 64.9) and (8.3, 0.) C. (13.3, 64.9) and (8.3, 0.) D. (64.9, 13.3) and (0., 8.3) 4 01 College Board. All rights reserved. SpringBoard Algebra, Unit Practice
25 9. Aaron sells T-shirts at the Jazz Festival in New Orleans. He decides to lower the price of the T-shirts. a. How might this affect the demand for the T-shirts? LESSON Epress regularit in repeated reasoning. Write a sstem of equations that consists of one linear equation and one quadratic equation. Eplain how ou would solve the sstem algebraicall. b. Will he realize an increase in profit? Eplain. 11. Find the real solutions of each sstem algebraicall. a b c. What will be the break-even point, the point where revenue from sales covers the cost? Eplain. c d Use appropriate tools strategicall. Cathie wrote the following sstem of equations to model a problem in her research project. f( ) g ( ) Sketch a graph of the sstem and identif the solution(s) How man real solutions does the following sstem have? A. none B. one C. two D. infinitel man 114. Describe the solutions of the sstem of equations from Item 11b. 11. Confirm the solutions to Item 11c b graphing. Describe the solution(s). 01 College Board. All rights reserved. SpringBoard Algebra, Unit Practice
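The last lesson above asks you to write a system consisting of one linear and one quadratic equation and to solve it algebraically; here is a minimal sketch with a system of my own choosing (not one of the worksheet's systems), solved by substitution:
\[
\begin{cases} y = x + 1 \\ y = x^{2} - 1 \end{cases}
\quad\Longrightarrow\quad
x^{2} - 1 = x + 1
\;\Longrightarrow\;
x^{2} - x - 2 = 0
\;\Longrightarrow\;
(x - 2)(x + 1) = 0 ,
\]
% so x = 2 or x = -1, giving the intersection points (2, 3) and (-1, 0).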
Ready To Go On? Skills Intervention 5-1 Using Transformations to Graph Quadratic Functions
Ready To Go On? Skills Intervention 5-1 Using Transformations to Graph Quadratic Functions Read To Go On? Skills Intervention 5-1 Using Transformations to Graph Quadratic Functions Find these vocabular words in Lesson 5-1 and the Multilingual Glossar. Vocabular quadratic function parabola verte
More information
LESSON #24 - POWER FUNCTIONS COMMON CORE ALGEBRA II
LESSON #24 - POWER FUNCTIONS COMMON CORE ALGEBRA II 1 LESSON #4 - POWER FUNCTIONS COMMON CORE ALGEBRA II Before we start to analze polnomials of degree higher than two (quadratics), we first will look at ver simple functions known as power functions. The
More information
3.1-Quadratic Functions & Inequalities
3.1-Quadratic Functions & Inequalities 3.1-Quadratic Functions & Inequalities Quadratic Functions: Quadratic functions are polnomial functions of the form also be written in the form f ( ) a( h) k. f ( ) a b c. A quadratic function ma Verte
More information
3.1 Graph Quadratic Functions
3.1 Graph Quadratic Functions 3. Graph Quadratic Functions in Standard Form Georgia Performance Standard(s) MMA3b, MMA3c Goal p Use intervals of increase and decrease to understand average rates of change of quadratic functions. Your
More information
f(x) = 2x 2 + 2x - 4
f(x) = 2x 2 + 2x - 4 4-1 Graphing Quadratic Functions What You ll Learn Scan the tet under the Now heading. List two things ou will learn about in the lesson. 1. Active Vocabular 2. New Vocabular Label each bo with the terms
More information
REVIEW KEY VOCABULARY REVIEW EXAMPLES AND EXERCISES
REVIEW KEY VOCABULARY REVIEW EXAMPLES AND EXERCISES Etra Eample. Graph.. 6. 7. (, ) (, ) REVIEW KEY VOCABULARY quadratic function, p. 6 standard form of a quadratic function, p. 6 parabola, p. 6 verte, p. 6 ais of smmetr, p. 6 minimum, maimum value, p.
More information
Study Guide and Intervention
Study Guide and Intervention 6- NAME DATE PERID Stud Guide and Intervention Graphing Quadratic Functions Graph Quadratic Functions Quadratic Function A function defined b an equation of the form f () a b c, where a 0 b Graph of a
More information
LESSON #28 - POWER FUNCTIONS COMMON CORE ALGEBRA II
LESSON #28 - POWER FUNCTIONS COMMON CORE ALGEBRA II 1 LESSON #8 - POWER FUNCTIONS COMMON CORE ALGEBRA II Before we start to analze polnomials of degree higher than two (quadratics), we first will look at ver simple functions known as power functions. The
More information
5. Determine the discriminant for each and describe the nature of the roots.
5. Determine the discriminant for each and describe the nature of the roots. 4. Quadratic Equations Notes Day 1 1. Solve by factoring: a. 3 16 1 b. 3 c. 8 0 d. 9 18 0. Quadratic Formula: The roots of a quadratic equation of the form A + B + C = 0 with a 0 are given by the following
More information
Algebra II Notes Unit Five: Quadratic Functions. Syllabus Objectives: 5.1 The student will graph quadratic functions with and without technology.
Algebra II Notes Unit Five: Quadratic Functions. Syllabus Objectives: 5.1 The student will graph quadratic functions with and without technology. Sllabus Objectives:.1 The student will graph quadratic functions with and without technolog. Quadratic Function: a function that can be written in the form are real numbers Parabola: the U-shaped graph
More information
Algebra 2 Unit 1 Practice
Algebra 2 Unit 1 Practice Algebra Unit Practice LESSON - Use this information for Items. Aaron has $ to rent a bike in the cit. It costs $ per hour to rent a bike. The additional fee for a helmet is $ for the entire ride.. Write
More information
Mini-Lecture 8.1 Solving Quadratic Equations by Completing the Square
Mini-Lecture 8.1 Solving Quadratic Equations by Completing the Square Mini-Lecture 8.1 Solving Quadratic Equations b Completing the Square Learning Objectives: 1. Use the square root propert to solve quadratic equations.. Solve quadratic equations b completing the square.
More information
Eam Name algebra final eam review147 aam032020181t4highschool www.alvarezmathhelp.com MULTIPLE CHOICE. Choose the one alternative that best completes the statement or answers the question. Solve the equation.
More information
f(x) Determine whether each function has a maximum or minimum value, and find that value. Then state the domain and range of the function.
f(x) Determine whether each function has a maximum or minimum value, and find that value. Then state the domain and range of the function. NAME DATE PERID 4-1 Practice Graphing Quadratic Functions Complete parts a c for each quadratic function. a. Find the -intercept, the equation of the ais of smmetr, and the -coordinate of the verte. b.
More information
MATH 115: Review for Chapter 3
MATH 115: Review for Chapter 3 MATH : Review for Chapter Can ou use the Zero-Product Propert to solve quadratic equations b factoring? () Solve each equation b factoring. 6 7 8 + = + ( ) = 8 7p ( p ) p ( p) = = c = c = + Can ou solve
More information
Honors Math 2 Unit 1 Test #2 Review 1
Honors Math 2 Unit 1 Test #2 Review 1 Honors Math Unit 1 Test # Review 1 Test Review & Study Guide Modeling with Quadratics Show ALL work for credit! Use etra paper, if needed. Factor Completely: 1. Factor 8 15. Factor 11 4 3. Factor 1 4.
More information
Algebra 2 Semester Exam Review
Algebra 2 Semester Exam Review Algebra Semester Eam Review 7 Graph the numbers,,,, and 0 on a number line Identif the propert shown rs rs r when r and s Evaluate What is the value of k k when k? Simplif the epression 7 7 Solve the equation
More information
Polynomial and Rational Functions
Polynomial and Rational Functions Polnomial and Rational Functions Figure -mm film, once the standard for capturing photographic images, has been made largel obsolete b digital photograph. (credit film : modification of work b Horia Varlan;
More information
4 B. 4 D. 4 F. 3. How can you use the graph of a quadratic equation to determine the number of real solutions of the equation?
4 B. 4 D. 4 F. 3. How can you use the graph of a quadratic equation to determine the number of real solutions of the equation? 3.1 Solving Quadratic Equations COMMON CORE Learning Standards HSA-SSE.A. HSA-REI.B.b HSF-IF.C.8a Essential Question Essential Question How can ou use the graph of a quadratic equation to determine the
More information
Lesson 9.1 Using the Distance Formula
Lesson 9.1 Using the Distance Formula Lesson. Using the Distance Formula. Find the eact distance between each pair of points. a. (0, 0) and (, ) b. (0, 0) and (7, ) c. (, 8) and (, ) d. (, ) and (, 7) e. (, 7) and (8, ) f. (8, ) and (, 0)
More information
2 nd Semester Final Exam Review Block Date
2 nd Semester Final Exam Review Block Date Algebra 1B Name nd Semester Final Eam Review Block Date Calculator NOT Allowed Graph each function. 1 (10-1) 1. (10-1). (10-1) 3. (10-1) 4. 3 Graph each function. Identif the verte, ais of smmetr, and
More information
Additional Factoring Examples:
Additional Factoring Examples: Honors Algebra -3 Solving Quadratic Equations by Graphing and Factoring Learning Targets 1. I can solve quadratic equations by graphing. I can solve quadratic equations by factoring 3. I can write a quadratic
More information
Characteristics of Quadratic Functions
Characteristics of Quadratic Functions . Characteristics of Quadratic Functions Essential Question What tpe of smmetr does the graph of f() = a( h) + k have and how can ou describe this smmetr? Parabolas and Smmetr Work with a partner. a. Complete
More information
Writing Quadratic Functions in Standard Form
Writing Quadratic Functions in Standard Form Chapter Summar Ke Terms standard form (general form) of a quadratic function (.1) parabola (.1) leading coefficient (.) second differences (.) vertical motion model (.3) zeros (.3) interval (.3) open interval
More information
Lesson 7.1 Polynomial Degree and Finite Differences
Lesson 7.1 Polynomial Degree and Finite Differences Lesson 7.1 Polnomial Degree and Finite Differences 1. Identif the degree of each polnomial. a. 1 b. 0. 1. 3. 3 c. 0 16 0. Determine which of the epressions are polnomials. For each polnomial, state its
More information
Functions and Their Graphs
Functions and Their Graphs Functions and Their Graphs 015 College Board. All rights reserved. Unit Overview In this unit you will study polynomial and rational functions, their graphs, and their zeros. You will also learn several
More information
MAT 1033C -- Martin-Gay Intermediate Algebra Chapter 8 (8.1, 8.2, 8.5, 8.6) Practice for the Exam
MAT 1033C -- Martin-Gay Intermediate Algebra Chapter 8 (8.1, 8.2, 8.5, 8.6) Practice for the Exam MAT 33C -- Martin-Ga Intermediate Algebra Chapter 8 (8.1 8. 8. 8.6) Practice for the Eam Name Date Da/Time: SHORT ANSWER. Write the word or phrase that best completes each statement or answers the question.
More information
Mth Quadratic functions and quadratic equations
Mth Quadratic functions and quadratic equations Mth 0 - Quadratic functions and quadratic equations Name Find the product. 1) 8a3(2a3 + 2 + 12a) 2) ( + 4)( + 6) 3) (3p - 1)(9p2 + 3p + 1) 4) (32 + 4-4)(2-3 + 3) ) (4a - 7)2 Factor completel. 6) 92-4 7)
More information
APPLIED ALGEBRA II SEMESTER 1 EXAM ITEM SPECIFICATION SHEET & KEY
APPLIED ALGEBRA II SEMESTER 1 EXAM ITEM SPECIFICATION SHEET & KEY APPLIED ALGEBRA II SEMESTER 1 EXAM ITEM SPECIFICATION SHEET & KEY Constructed Response # Objective Sllabus Objective NV State Standard 1 Graph a polnomial function. 1.1.7.1 Analze graphs of polnomial functions
More information
SHORT ANSWER. Write the word or phrase that best completes each statement or answers the question
SHORT ANSWER. Write the word or phrase that best completes each statement or answers the question Midterm Review 0 Precalculu Name SHORT ANSWER. Write the word or phrase that best completes each statement or answers the question ) A graph of a function g is shown below. Find g(0). (-, ) (-, 0) - -
More information
MATH 91 Final Study Package Name
MATH 91 Final Study Package Name MATH 91 Final Stud Package Name Solve the sstem b the substitution method. If there is no solution or an infinite number of solutions, so state. Use set notation to epress the solution set. 1) - = 1 1)
More information
The Quadratic Formula
The Quadratic Formula - The Quadratic Formula Content Standard Reviews A.REI..b Solve quadratic equations by... the quadratic formula... Objectives To solve quadratic equations using the Quadratic Formula To determine the number
More information
(TPP #3) Test Preparation Practice. Algebra Holt Algebra 1. Name Date Class
(TPP #3) Test Preparation Practice. Algebra Holt Algebra 1. Name Date Class Test Preparation Practice Algebra 1 Solve each problem. Choose the best answer for each question and record our answer on the Student Answer Sheet. Figures are not drawn to scale 1. Jack budgets $35 for
More information
Quadratic Functions ESSENTIAL QUESTIONS EMBEDDED ASSESSMENTS
Quadratic Functions ESSENTIAL QUESTIONS EMBEDDED ASSESSMENTS Quadratic Functions 5 01 College Board. All rights reserved. Unit Overview In this unit ou will stud a variet of was to solve quadratic functions and sstems of equations and appl our learning to analzing
More information
The Quadratic Formula VOCABULARY
The Quadratic Formula VOCABULARY - The Quadratic Formula TEKS FOCUS TEKS ()(F) Solve quadratic and square root equations. TEKS ()(G) Display, eplain, and justify mathematical ideas and arguments using precise mathematical language in
More information
Solving Linear-Quadratic Systems
Solving Linear-Quadratic Systems 36 LESSON Solving Linear-Quadratic Sstems UNDERSTAND A sstem of two or more equations can include linear and nonlinear equations. In a linear-quadratic sstem, there is one linear equation and one quadratic
More information
College Algebra ~ Review for Test 2 Sections
College Algebra ~ Review for Test 2 Sections College Algebra ~ Review for Test Sections. -. Use the given graphs of = a + b to solve the inequalit. Write the solution set in interval notation. ) - + 9 8 7 6 (, ) - - - - 6 7 8 - Solve the inequalit
More information
REVIEW PACKET FOR END OF COURSE EXAM
REVIEW PACKET FOR END OF COURSE EXAM Math H REVIEW PACKET FOR END OF COURSE EXAM DO NOT WRITE ON PACKET! Do on binder paper, show support work. On this packet leave all fractional answers in improper fractional form (ecept where appropriate
More information
2 nd Semester Final Exam Review Block Date
2 nd Semester Final Exam Review Block Date Algebra 1B Name nd Semester Final Eam Review Block Date Calculator NOT Allowed Graph each function. Identif the verte and ais of smmetr. 1 (10-1) 1. (10-1). 3 (10-) 3. 4 7 (10-) 4. 3 6 4 (10-1) 5. Predict
More information
Write each expression in terms of i : Add: (3 4i) (5 7i) (3 5) ( 4 7)i. 8 3i. Subtract: (3 4i) (5 7i) (3 4i) ( 5 7i) Find each product:
Write each expression in terms of i : Add: (3 4i) (5 7i) (3 5) ( 4 7)i. 8 3i. Subtract: (3 4i) (5 7i) (3 4i) ( 5 7i) Find each product: 7_Ch09_online 7// 0:7 AM Page 9-0 9-0 CHAPTER 9 Quadratic Equations SECTION 9. Comple Numbers DEFINITIONS AND CONCEPTS EXAMPLES The imaginar number i is defined as Write each epression in terms of i :
More information
3 Polynomial and Rational Functions
3 Polynomial and Rational Functions 3 Polnomial and Rational Functions 3.1 Quadratic Functions and Models 3.2 Polnomial Functions and Their Graphs 3.3 Dividing Polnomials 3.4 Real Zeros of Polnomials 3.5 Comple Zeros and the Fundamental
More information
UNIT #9 ROOTS AND IRRATIONAL NUMBERS REVIEW QUESTIONS
UNIT #9 ROOTS AND IRRATIONAL NUMBERS REVIEW QUESTIONS Answer Key Name: Date: UNIT #9 ROOTS AND IRRATIONAL NUMBERS REVIEW QUESTIONS Part I Questions. Which of the following is the value of 6? () 6 () 4 () (4). The epression is equivalent to 6 6 6 6 () () 6
More information
MULTIPLE CHOICE. Choose the one alternative that best completes the statement or answers the question.
MULTIPLE CHOICE. Choose the one alternative that best completes the statement or answers the question. Math 2 Stud Guide-Chapters 8 and 9 Name Date: Time: MULTIPLE CHOICE. Choose the one alternative that best completes the statement or answers the question. Find all square roots of the number. ) 600 9,
More information
2.3 Quadratic Functions
2.3 Quadratic Functions 88 Linear and Quadratic Functions. Quadratic Functions You ma recall studing quadratic equations in Intermediate Algebra. In this section, we review those equations in the contet of our net famil of functions:
More information
Answers. Chapter Warm Up. Sample answer: The graph of h is a translation. 3 units right of the parent linear function.
Answers. Chapter Warm Up. Sample answer: The graph of h is a translation. 3 units right of the parent linear function. Chapter. Start Thinking As the string V gets wider, the points on the string move closer to the -ais. This activit mimics a vertical shrink of a parabola... Warm Up.. Sample answer: The graph of f is a
More information
Quadratic Graphs and Their Properties
Quadratic Graphs and Their Properties - Think About a Plan Quadratic Graphs and Their Properties Physics In a physics class demonstration, a ball is dropped from the roof of a building, feet above the ground. The height h (in feet) of the
More information
Review for Intermediate Algebra (MATD 0390) Final Exam Oct 2009
Review for Intermediate Algebra (MATD 0390) Final Exam Oct 2009 Review for Intermediate Algebra (MATD 090) Final Eam Oct 009 Students are epected to know all relevant formulas, including: All special factoring formulas Equation of a circle All formulas for linear equations
More information
Fair Game Review. Chapter 9. Find the square root(s) ± Find the side length of the square. 7. Simplify Simplify 63.
Fair Game Review. Chapter 9. Find the square root(s) ± Find the side length of the square. 7. Simplify Simplify 63. Name Date Chapter 9 Find the square root(s). Fair Game Review... 9. ±. Find the side length of the square.. s. s s Area = 9 ft s Area = 0. m 7. Simplif 0. 8. Simplif. 9. Simplif 08. 0. Simplif 88. Copright
More information
Unit 10 - Graphing Quadratic Functions
Unit 10 - Graphing Quadratic Functions Unit - Graphing Quadratic Functions PREREQUISITE SKILLS: students should be able to add, subtract and multipl polnomials students should be able to factor polnomials students should be able to identif
More information
Unit 2 Notes Packet on Quadratic Functions and Factoring
Unit 2 Notes Packet on Quadratic Functions and Factoring Name: Period: Unit Notes Packet on Quadratic Functions and Factoring Notes #: Graphing quadratic equations in standard form, verte form, and intercept form. A. Intro to Graphs of Quadratic Equations: a
More information
Graph Quadratic Functions in Standard Form
Graph Quadratic Functions in Standard Form TEKS 4. 2A.4.A, 2A.4.B, 2A.6.B, 2A.8.A Graph Quadratic Functions in Standard Form Before You graphed linear functions. Now You will graph quadratic functions. Wh? So ou can model sports revenue, as in
More information
Name Class Date. Identify the vertex of each graph. Tell whether it is a minimum or a maximum.
Name Class Date. Identify the vertex of each graph. Tell whether it is a minimum or a maximum. Practice Quadratic Graphs and Their Properties Identify the verte of each graph. Tell whether it is a minimum or a maimum. 1. y 2. y 3. 2 4 2 4 2 2 y 4 2 2 2 4 Graph each function. 4. f () = 3 2 5. f ()
More information
Final Exam Review Part 2 #1 Page 1 / 21
Final Exam Review Part 2 #1 Page 1 / 21 Final Eam Review Part #1 Intermediate Algebra / MAT 135 Spring 017 Master ( Master Templates) Student Name/ID: v 1. Solve for, where is a real number. v v + 1 + =. Solve for, where is a real number. +
More information
2.1 Evaluate and Graph Polynomial
2.1 Evaluate and Graph Polynomial 2. Evaluate and Graph Polnomial Functions Georgia Performance Standard(s) MM3Ab, MM3Ac, MM3Ad Your Notes Goal p Evaluate and graph polnomial functions. VOCABULARY Polnomial Polnomial function Degree of
More information
Mth 95 Module 4 Chapter 8 Spring Review - Solving quadratic equations using the quadratic formula
Mth 95 Module 4 Chapter 8 Spring Review - Solving quadratic equations using the quadratic formula Mth 95 Module 4 Chapter 8 Spring 04 Review - Solving quadratic equations using the quadratic formula Write the quadratic formula. The NUMBER of REAL and COMPLEX SOLUTIONS to a quadratic equation ( a b
More information
Maintaining Mathematical Proficiency: find the x-intercept of the graph of each linear equation; find the distance between the two points.
Practice A (Lesson 8): match the function with its graph; explain how the graph of g can be obtained from the graph of f.
Algebra II Notes, Quadratic Functions Unit 3.1-3.2: Graphing Quadratic Functions. Math Background: previously, you identified and graphed linear functions, applied transformations to parent functions, and graphed quadratic ...
Systems of Linear Equations: Monetary Systems Overload, Lesson 3-1. Learning Targets: use graphing, substitution, and elimination to solve systems of linear equations in two variables; formulate systems of linear ...
TEST REVIEW: QUADRATIC EQUATIONS. 1. Which equation does not represent a quadratic function? 2. Which of the following statements is true about the graph of the function? ...
Summary, Review, and Test. 56. Galileo's telescope brought about revolutionary changes in astronomy. A comparable leap in our ability to observe the universe took place as a result of the Hubble Space Telescope.
MAC 1105 PRACTICE FINAL EXAM (College Algebra). 3. Solve: 3x 8 < 7; write your answer using interval notation and graph your solution on the number line. *Note: this exam is provided as practice only; it was based on a book previously used for this course, so you should not only study these problems in preparing for ...
8-10 Nonlinear Systems (CC.9-12.A.REI.7): solve a simple system consisting of a linear equation and a quadratic equation in two variables algebraically and graphically (no solution, one solution, or two solutions); solve the system by graphing and check your answer.
10.4 Nonlinear Inequalities and Systems of Inequalities. Objectives: 1. graph a nonlinear inequality; 2. graph a system of nonlinear inequalities. Concept Extensions: for the exercises below, see the Concept Check in this section; without graphing, how can you tell that the graph of ...
LESSON #42 - INVERSES OF FUNCTIONS AND FUNCTION NOTATION, PART 2, COMMON CORE ALGEBRA II. You will recall from Unit 1 that in order to find the inverse of a function, you must switch x and y and solve for y. Also, ...
Section 5.5 Complex Numbers. Objective(s): perform operations with complex numbers. Essential Question: tell whether the statement is true or false, and justify your answer: every complex number ...
Algebra 1 Unit 9: Quadratic Equations, Part 1. Lesson calendar from Tuesday 4/4 through Friday 4/14; Day 1: Quadratic ...
MATH 60 Review Problems for Final Exam. Scientific calculators only; graphing calculators not allowed; no class notes permitted. Evaluate the expression for the given values: 1) m + 3 for m = 3; 2) m^2 - n^2 ...
MATH 115: Final Exam Review. Can you find the distance between two points and the midpoint of a line segment? (1.1) Consider the points A and B: (a) find the distance between A and B; (b) find the ...
Lesson 2.1 Assignment: Shape and Structure, Forms of Quadratic Functions. 1. Analyze the graph of the quadratic function. a. The standard form of a quadratic function is f(x) = ax^2 + bx + c. What possible ...
Mathematics 2201 Midterm Exam Review, covering Radicals, Quadratic Functions (Chapter 6), and Quadratic Equations (Chapter 7). Sample items: Evaluate ...; Express as an entire radical ...; What is the ...
PRINCIPLES OF MATHEMATICS 11, Chapter 2 Quadratic Functions, Lesson 1: Graphs of Quadratic Functions (2.1). A. Quadratic functions: a quadratic function is an equation that can be written in the following form, where a, b, and c are constants and a is not 0 ...
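The excerpt stops just before stating the form; the form usually given at this point (a standard fact, supplied here only as a hedged completion) is
$$y = ax^2 + bx + c, \qquad a, b, c \ \text{constants},\ a \neq 0,$$
so, for example, $y = 2x^2 - 3x + 1$ is quadratic while $y = 3x + 1$ (where $a = 0$) is not.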
7.2 Connecting Intercepts and Linear Factors. Essential Question: How are the x-intercepts of a quadratic function and its linear factors related? Explore: Connecting Factors and ...
Chapter 5: Quadratic Equations and Functions (Algebra 2 note sheet key). 5.1 Modeling Data with Quadratic Functions; Quadratic Functions and Their Graphs. Definition: standard form of a quadratic function ...
Math 3 Unit 3, Day 1 - Factoring Review (Unit 3 Pt 1, Honors, Page 1). I. Greatest Common Factor (GCF), with examples. II. Difference of Two Squares: take the square root of the first and last terms and write the product ( - )( + ). Examples ...
1.3 Absolute Value and Piecewise-Defined Functions ("Absolutely Piece-ful" activity). Suggested learning strategies: Activating Prior Knowledge, Create Representations, Quickwrite. Graph both pieces: y = ... for x < 3 and y = ... + 7 for ...
Final Exam Review Part 2 #4: Intermediate Algebra / MAT 135, Fall Master (Prof. Fleischner). 1. Solve for x, where x is a real number. 2. Solve for x, where x is a real number. 3. Solve for x, ...
Systems of Linear and Quadratic Equations: Check Skills You'll Need; Solve by Graphing (solve the following system by graphing and check your answer). NY Learning Standards for Mathematics: A.A. solve a system of one linear and one quadratic equation in two variables, where only factoring is required; A.G.9 solve systems of linear and quadratic equations ...
Fair Game Review, Chapter 8 (Big Ideas Math Algebra, Record and Practice Journal). Graph the linear equation (1-6). Fair Game Review (continued): Evaluate the ... Copyright Big Ideas Learning, LLC.
math0300004301, FALL: developmental mathematics (Sullivan, 1e). TSI practice exam review 1, problems 131-180, plus 34 TSI questions for elementary and intermediate algebra; www.alvarezmathhelp.com; InteractMath.
Chapter 5 Maintaining Mathematical Proficiency. Graph the equation (1-6); solve the inequality and graph the solution (7-10).
Chapter 10: Quadratic Equations and Functions, Section 10.1: Graph y = ax^2 + c. Vocabulary: quadratic function, parabola, parent quadratic function, vertex, axis of symmetry. Example 1: make a table, ...
Radical sign; radicand: the number beneath the radical sign. Syllabus Objective 9.4: the student will solve quadratic equations using graphic and algebraic techniques, including the quadratic formula, square roots, factoring, completing the square, and graphing.
Properties of the Graph of a Quadratic Function (10.2 Graph y = ax^2 + bx + c): the graph has a vertex with x-coordinate -b/(2a). Before: you graphed simple quadratic functions. Now: you will graph general quadratic functions. Why? So you can investigate a cable's height, as in Example 4. Key vocabulary: minimum value ...
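A short derivation of the vertex property quoted in this entry (the standard completing-the-square argument, added for reference):
$$y = ax^2 + bx + c = a\left(x + \frac{b}{2a}\right)^2 + c - \frac{b^2}{4a},$$
so the vertex lies at $x = -\dfrac{b}{2a}$, giving a minimum value when $a > 0$ and a maximum value when $a < 0$.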
Quadratic Functions and Transformations. For the exercises, choose the correct letter. 1. What is the vertex of the function? 2. Which is the graph of the function f(x)? ...
20.2 Connecting Intercepts and Linear Factors. Essential Question: How are the x-intercepts of a quadratic function and its linear factors related? Explore: Connecting Factors and ...
CCGPS Coordinate Algebra, Unit (Semester 1): Reasoning with Equations and Quantities. Understand solving equations as a process of reasoning and explain the reasoning; MCC9-12.A.REI.1: explain ...
8.4 An Introduction to Functions: Linear Functions, Applications, and Models. If we let x denote the number of gallons pumped, then the price y in dollars can be read from a table of values at $1.70 per gallon. We often describe one quantity in terms of another; for example, the growth of a plant is related to the amount of light it receives, ...
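A minimal sketch of the linear model this excerpt sets up, assuming the repeated $1.70 figure is the per-gallon price (an assumption; the original table did not carry over intact):
$$y = 1.70\,x \quad\text{(price in dollars for } x \text{ gallons)}, \qquad\text{e.g. } x = 5 \ \Rightarrow\ y = 8.50.$$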
LESSON #11 - FORMS OF A LINE, COMMON CORE ALGEBRA II. Linear functions come in a variety of forms; the two shown below have been introduced in Common Core Algebra I and Common Core Geometry. TWO COMMON FORMS ...
LESSON #12 - FORMS OF A LINE, COMMON CORE ALGEBRA II. Linear functions come in a variety of forms; the two shown below have been introduced in Common Core Algebra I and Common Core Geometry. TWO COMMON FORMS ...
Unit 4 Practice Problem ANSWERS, Section 4.1A. 1) Parabola. 2) a. roots/zeros; b. axis of symmetry; c. substitute x = 0 into the equation to find the y-intercept. ... Maximum ...
CHAPTER 8 Quadratic Equations, Functions, and Inequalities. Sections: Solving Quadratic Equations: Factoring and Special Forms; Completing the Square; ...
Equations and Inequalities. Chapter outline: The Rectangular Coordinate Systems and Graphs; Linear Equations in One Variable; Models and Applications; Complex Numbers; Quadratic Equations; Other ...
Honors Algebra 2 ~ Spring 2014, Unit 3: Quadratic Functions and Equations. NC objectives covered: define and compute with complex numbers; operate with algebraic expressions (polynomial, rational, complex fractions); ...
Grade 11 Functions (MCR3U), Unit 1: Algebraic & Quadratic Functions. Topics and homework schedule: Order of Operations; Exponent Laws Review; Review of ...
4-1 Polynomial Functions. Objectives: determine roots of polynomial equations; apply the Fundamental Theorem of Algebra. INVESTMENTS: Many grandparents invest in the stock market for their grandchildren's college ...
MATH College Algebra, Review for Test 2. For f(x) = x^2 + 4x + 5, give (a) the x-intercept(s), (b) the y-intercept, (c) both coordinates of the vertex, and (d) the equation of the axis of symmetry.
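A worked version of this review item for $f(x) = x^2 + 4x + 5$ (reading the first term as $x^2$, which the request for a vertex suggests; the exponent is an assumption):
$$f(x) = x^2 + 4x + 5 = (x + 2)^2 + 1,$$
so the vertex is $(-2, 1)$, the axis of symmetry is $x = -2$, the $y$-intercept is $f(0) = 5$, and since $b^2 - 4ac = 16 - 20 = -4 < 0$ there are no real $x$-intercepts.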
TRANSFORMATIONS OF f(x) = x^2 (2.1.1-2.1.2). Students investigate the general equation for a family of quadratic functions, discovering ways to shift and change the graphs. Additionally, they learn how to graph ...
Chapter 9 Notes, Alg. 1H, 9-A1 (Lesson 9-3): Solving Quadratic Equations by Finding the Square Root and Completing the Square. Find the square root: take the square root of ... Ex: solve by finding square ...
Questions on more advanced topics of number theory. Consider first if (elementary-number-theory) might be a more appropriate tag before adding this tag.
1
vote
1answer
71 views
Looking for references on results on powers of primes dividing $y^n-1$
For a prime $p$ and positive integer $n$, let $E(n,p)$ be the greatest $k$ such that $p^k \mid n$, and $E(n,p) = 0$ if $p \nmid n$. Let $E(n) = E(n, 2)$. A number of years back, I proved the ...
7
votes
1answer
439 views
Proof: If $n \in \mathbb{N}$ is not the sum of two squares, then $n$ is also not the sum of two rational squares
I have to prove the following: If $n \in \mathbb{N}$ is not representable by the sum of two squares, then $n$ is also not representable by the sum of two rational squares. How do I start here? Any ...
8
votes
1answer
177 views
Mean Value of a Multiplicative Function close to $n$ in Terms of the Zeta Function.
Let $f(n)$ be a multiplicative function defined by $f(p^a)=p^{a-1}(p+1)$, where $p$ is a prime number. How could I obtain a formula for $$\sum_{n\leq x} f(n)$$ with error term $O(x\log{x})$ and ...
0
votes
2answers
192 views
Heuristic Proof of Hardy-Littlewood Conjecture for 3-term Arithmetic Progressions
The Hardy-Littlewood Conjecture for 3-term arithmetic progressions is that $$ \# \{ x,d \in \{1,\ldots,N\} \, | \, x,x+d,x+2d \text{ are all prime} \} \sim \frac{3}{2} \prod_{p > 2} ...
2
votes
2answers
246 views
Tamagawa numbers and Genus class numbers
I was reading the paper of Prof.Franz Lemmermeyer titled "Pell-conics" which is here, in that the author writes in page 9 that one can define Tamagawa numbers as $$ c_p = \begin{cases} 2 & \text{ ...
3
votes
1answer
182 views
surjectivity of group homomorphisms
I don't know if the next thing is true, but I'm not able to find a counterexample: suppose you have a surjective group homomorphism of finite groups $f:G \rightarrow G'$ and normal subgroups $H ...
2
votes
1answer
193 views
How to go from Fermat’s little theorem to Euler’s theorem thought Ivory’s demonstration?
Ivory’s demonstration of Fermat’s theorem exploits the fact that, given a prime $p$, all the numbers from $1$ to $p-1$ are relatively prime to $p$ (obvious since $p$ is prime). Ivory multiplies them by x ...
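For context, a sketch of how Ivory's argument extends from Fermat's little theorem to Euler's theorem (the standard proof, added here as a note, not part of the original question): let $r_1, \dots, r_{\varphi(n)}$ be the residues in $\{1, \dots, n\}$ relatively prime to $n$, and let $\gcd(x, n) = 1$. The products $x r_1, \dots, x r_{\varphi(n)}$ are again pairwise incongruent and relatively prime to $n$, hence a permutation of the $r_i$ modulo $n$, so
$$x^{\varphi(n)} \prod_i r_i \equiv \prod_i (x r_i) \equiv \prod_i r_i \pmod{n},$$
and cancelling the invertible product $\prod_i r_i$ gives $x^{\varphi(n)} \equiv 1 \pmod{n}$; taking $n = p$ prime recovers Fermat's little theorem.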
6
votes
1answer
270 views
Does the inverse of this matrix of size $n \times n$ approach the zero-matrix in the limit as $\small n \to \infty$?
Fiddling with another (older) question here I constructed an example-matrix of the type $\small M_n: m_{n:r,c} = {1 \over (1+r)^c } \quad \text{ for } r,c=0 \ldots n-1 $ . I considered the inverse ...
1
vote
1answer
114 views
Quadratic form over the dyadic numbers
I would like to know whether $q=\langle 3,3,11\rangle$ (a diagonal ternary form) represents $2$ over $\mathbb{Q}_2$ (i.e. whether there exist $x,y,z\in\mathbb{Q}_2^\times$ such that $q(x,y,z)=2$). I ...
3
votes
1answer
191 views
On the asymptotic behavior of a function related to the number of distinct prime divisors
Let $\omega(n)$ be the number of distinct primes dividing $n$. For $x\in(0,1)$, let $\varphi(x,n)$ be the number of positive integers $m\leq xn$ which are prime to $n$. Show that ...
0
votes
1answer
69 views
Continued Fraction: Why do we get with $\gamma \in \mathbb{R}\setminus \mathbb{Q}$ the CF $\frac{1}{\gamma}=\langle0;a_0,a_1,\dotsc\rangle$
I have a question concerning continued fractions: If we have $\gamma \in \mathbb{R} \setminus \mathbb{Q}$ and $\gamma=\langle a_0;a_1,a_2,\dotsc\rangle$. Why do we get $$\frac1\gamma = \langle ...
1
vote
2answers
111 views
Every number $n^k$ can be written as a sum of $n$ distinct odd numbers
I wish to prove that for $n,k\in\mathbb{N} > 1$, we can always write $n^k$ as a sum of $n$ odd positive integers. I have an idea of how to approach this, but my method seems to cumbersome. I am ...
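One standard construction that does this, added as a worked note (it is not part of the original question): take the $n$ consecutive odd numbers centred on $n^{k-1}$,
$$n^k = \sum_{i=0}^{n-1} \left(n^{k-1} - n + 1 + 2i\right),$$
since the right side equals $n(n^{k-1} - n + 1) + n(n - 1) = n^k$; each summand is odd because $n^{k-1} - n$ is even for every $n$, and for $k \ge 2$ the summands are positive and distinct.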
1
vote
1answer
161 views
A Rough Estimation for the number of square free integers
Show by a sieve argument that the number of square free integers not exceeding $x$ is less than $$x\prod_p\left(1-\frac{1}{p^2}\right)+o(x),$$where the product extends over all primes. I happened ...
0
votes
1answer
289 views
Primitive Dirichlet Character
Let $\chi$ be the trivial Dirichlet character mod $N$. What is the primitive Dirichlet character associated to $\chi$? Is it just the character on $\mathbb{Z}$ that sends all integers to 1?
8
votes
1answer
1k views
Is there a way to find the first digits of a number?
Is there a way to find the first digits of a number? For example, the largest known prime is $2^{43,112,609}-1$, and a while back I did an induction to find the first digit of a prime like that. ...
3
votes
0answers
144 views
Champernowne-like squares, are there any?
I read about the Champernowne constant on Wikipedia a couple of days ago, and I got curious about something similar: is there some "Champernowne-like" number; that is, a concatenation of all numbers ...
0
votes
1answer
390 views
An attempt of catching the where-abouts of “ Mysterious group $Ш$ ”
This question is a bit concerned with the Tate-Shaferevich group, lets start defining $C$ as $$C: X^2- \Delta Y^2=4$$ which are generally called as Pell-conics, so all in this question $K$ refers to ...
7
votes
2answers
182 views
On a double sum involving prime numbers
$$\sum_{i,j=1}^{\infty}\left[\frac{x}{p_ip_j}\right]=x\sum_{p_ip_j\leq x}\frac{1}{p_ip_j}+O(x),p_i< p_j$$ where $p_i$ is the $i$th prime, and "[ ]" represents the largest integer not ...
18
votes
3answers
948 views
Prove $x = \sqrt[100]{\sqrt{3} + \sqrt{2}} + \sqrt[100]{\sqrt{3} - \sqrt{2}}$ is irrational
Prove $x = \sqrt[100]{\sqrt{3} + \sqrt{2}} + \sqrt[100]{\sqrt{3} - \sqrt{2}}$ is irrational. I can prove that $x$ is irrational by showing that it's a root of a polynomial with integer coefficients ...
1
vote
1answer
84 views
$\mathbb{Q}(t_1,…,t_n) \cap \overline{\mathbb{Q}}$
Let $\overline{\mathbb{Q}}$ the algebraic closure of $\mathbb{Q}$, and $K$ a field extension of $\mathbb{Q}$ (not necessarily algebraic) such that $[K:\mathbb{Q}]= \infty$. Let $t_1,...,t_n \in K$, ...
3
votes
3answers
486 views
Finding all positive integer solutions to $(x!)(y!) = x!+y!+z!$
The equation is $(x!)(y!) = x!+y!+z! $ where $x,y,z$ are natural numbers. How can we find them all?
0
votes
1answer
85 views
Representing complex numbers with nested exponentiation of rationals
Define $L_0=Q$ $L_1=\lbrace x \in C; e^{x} \in L_0 \rbrace$ $L_{-1}=\lbrace x \in C; \ln{x} \in L_0 \rbrace$ $L_{n+1}=\lbrace x \in C; e^{x} \in L_n \rbrace$ $0$ is in $L_1$ and $L_0$. Do any ...
5
votes
1answer
389 views
Universality of Tate-conjectures
We all know that Prof.John Tate proposed a set of conjectures(along with Prof.Emil Artin) formally spread under the name of "Tate conjectures", they have a wide range of influence on various fields of ...
4
votes
1answer
72 views
Generating Functions: how do I get my answers in terms of differential operators?
I'm reading and enjoying "generatingfunctionology". What a great fun book! But, I'm having some difficulty with the exercises. For example, take the series $a_n = n^2$ I'd like to find the Generating ...
7
votes
3answers
1k views
RSA: How Euler's Theorem is used?
I'm trying to understand the working of the RSA algorithm, and I am getting confused by the decryption part. I'm assuming $$n = pq$$ $$m = \phi(n) = (p - 1)(q - 1)$$ E is the encryption key $\gcd(n, E) = ...
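For orientation on where Euler's theorem enters the decryption step (a standard summary, using the usual notation rather than the question's): with private exponent $D \equiv E^{-1} \pmod{\varphi(n)}$ we have $ED = 1 + k\varphi(n)$ for some integer $k$, so for a message $M$ with $\gcd(M, n) = 1$,
$$\left(M^E\right)^D = M^{1 + k\varphi(n)} = M \left(M^{\varphi(n)}\right)^k \equiv M \cdot 1^k \equiv M \pmod{n},$$
which is exactly Euler's theorem at work (the case $\gcd(M, n) \neq 1$ is usually handled separately via the Chinese remainder theorem).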
6
votes
3answers
167 views
Showing $\pi(ax)/\pi(bx) \sim a/b$ as $x \to \infty$
I'm having a bit of a problem with exercise 4.12 in Apostol's "Introduction to Analytic Number Theory". I don't think it's supposed to be a very hard exercise, it's the first one in its section ...
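A one-line route via the prime number theorem, added as a sketch:
$$\frac{\pi(ax)}{\pi(bx)} \sim \frac{ax/\log(ax)}{bx/\log(bx)} = \frac{a}{b} \cdot \frac{\log x + \log b}{\log x + \log a} \longrightarrow \frac{a}{b} \qquad (x \to \infty).$$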
-3
votes
1answer
133 views
Right angle triangles with bases $c/2^{2^n}$
Let's have a right angle triangle where $a=5$, $b=4$, $c=3$. Is it possible to create an infinity of right angle triangles with rational sides from the above triplet, with bases equal to $3/2^{2^n}$ ...
9
votes
2answers
253 views
Erdős: Sum of rational function of positive integers is either rational or transcendental
I am trying to find a conjecture apparently made by Erdős and Straus. I say apparently because I have had so much trouble finding anything information about it that I'm beginning to doubt its ...
2
votes
0answers
102 views
Dedekind Spectra
Is there a class of ring spectra that corresponds to and/or extends the class of Dedekind rings from traditional algebra? Is there a notion of "ring of integers" of a ring spectrum which is somehow ...
-1
votes
1answer
273 views
Number theory conjecture [closed]
Let us observe the following pattern $N - p_1 = m_1, N - p_2 = m_2, \ldots , N - p_r = m_r$; take $p_1 = 3$ and $p_2 = 5,\ldots$ notice that $p_r$ is the larger prime less than or equal to square ...
2
votes
3answers
335 views
How to prove that if $a\equiv b \pmod{2n}$ then $a^2\equiv b^2 \pmod{2^2n}$
What I have done is this: $a\equiv b \pmod{2n}$, $a=b+c\times2n$, for some $c$, $a^2=b^2+2b\times c\times2n+c^2\times2^2n^2$, $a^2-b^2=(b\times c+c^2n)\times4n$, then $a^2\equiv b^2\pmod{2^2n}$. ...
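The computation quoted in the excerpt already closes the argument; spelled out:
$$a \equiv b \pmod{2n} \ \Rightarrow\ a = b + 2cn \ \Rightarrow\ a^2 - b^2 = 4bcn + 4c^2n^2 = 4n\left(bc + c^2 n\right),$$
so $4n \mid a^2 - b^2$, that is $a^2 \equiv b^2 \pmod{4n}$, which is the claimed modulus $2^2 n$.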
6
votes
1answer
285 views
Primes that ramify in a field
Consider the number field $L/\mathbb{Q}$. I know that the only primes $p$ that ramify over $L$ are the ones that divide $\Delta_{L}$, the discriminant of $L$. But what if I can't compute $\Delta_{L}$? ...
0
votes
1answer
71 views
why did they use 9 in step 3 as the 4-th root of unity
I'm doing a problem for an algorithms class to multiply 2 polynomials using FFT, and am confused as to why they picked 9 in step 3 in this document: ...
1
vote
0answers
157 views
Forcing and divisibility
I'm going to bring together a couple of seemingly unrelated questions that I've asked here. This may be silly. Or maybe not? Imagine that $n$ is some sort of infinitely large integer, and thus so ...
5
votes
3answers
250 views
Evaluate $d(n!)$
An exercise: Using the prime number theorem find an asymptotic expression for $d(n!)$ where $d$ is the number of divisors.
0
votes
2answers
215 views
computing primes
As per my knowledge, I have seen the only following functions which will produce primes for $n$: $n^2 - n + 41$ $n^2 + n + 41$ Of course both functions faile for $n = 41$ due to the polynomial ...
13
votes
5answers
3k views
What is the fastest growing total computable function you can describe in a few lines?
What is the fastest growing total computable function you can describe in a few lines? Well, not necessarily the fastest - I just would like to know how far an ingenious mathematician can go using ...
11
votes
2answers
339 views
How rare are the primes $p$ such that $p$ divides the sum of all primes less than $p$?
This is just for fun! The title pretty much says it all. It's probably a very difficult question. Up to the $40,000^{th}$ prime $(479909)$, I have found only $5$, $71$ and $369119$ with this ...
2
votes
0answers
105 views
Does there exist a number field with the following property
Let $\overline{\mathbf{Q}}\subset \mathbf{C}$ be the field of algebraic numbers. Does there exist a number field $K$ with the following property? There are embeddings $\sigma,\tau:K\to ...
3
votes
1answer
265 views
Inertia groups generate Galois Group
While reading a paper about the Kronecker-Weber Theorem, I noticed a theorem saying that for a Galois extension $K/\mathbb{Q}$, its Galois group is generated by $I_p$s, being the inertia groups of ...
2
votes
4answers
254 views
Form of rational solutions to $a^2+b^2=1$?
Is there a way to determine the form of all rational solutions to the equation $a^2+b^2=1$?
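For reference (a standard fact, not taken from the question): apart from $(-1, 0)$, the rational solutions are exactly
$$(a, b) = \left(\frac{1 - t^2}{1 + t^2}, \ \frac{2t}{1 + t^2}\right), \qquad t \in \mathbb{Q},$$
obtained by intersecting the unit circle with lines of rational slope $t$ through $(-1, 0)$; clearing denominators recovers the Pythagorean-triple parametrization.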
0
votes
2answers
99 views
Questions regarding congruences of multiples and relatively prime numbers
If we say $n=p_1^{\alpha_1}\times p_2^{\alpha2}\times \cdots \times p_k^{\alpha_k}$, where $p_i$ are prime numbers, $\alpha_i$ are natural numbers, can or can we not say that: Choose a $p_i$ such ...
0
votes
1answer
175 views
Vinogradov's equidistribution theorem
Is it true that $(\alpha p_k)$ is equidistributed on $[0,1)$ mod 1 (Vinogradov) $\Leftrightarrow$ $(p_k)$ is equidistributed on $[0,2\pi) $mod $2\pi$ ? $p_k$ is the kth prime and $\alpha$ is an ...
2
votes
1answer
148 views
Reciprocal of a continued fraction
I have to prove the following: Let $\alpha=[a_0;a_1,a_2,...,a_n]$ and $\alpha>0$, then $\dfrac1{\alpha}=[0;a_0,a_1,...,a_n]$ I started with ...
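A hedged sketch of the step usually intended here, assuming $a_0 \ge 1$ so that $\alpha > 1$:
$$\frac{1}{\alpha} = 0 + \cfrac{1}{a_0 + \cfrac{1}{[a_1; a_2, \dots, a_n]}} = [0; a_0, a_1, \dots, a_n],$$
since the partial quotients after the leading $0$ are unchanged, and the integer part of $1/\alpha$ is $0$ precisely because $\alpha > 1$.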
3
votes
1answer
129 views
$\pi$ and $e$ as irrational linear combinations
Let $S=\{m\cdot n^r\mid m,n\in\mathbb Z,r\in\mathbb Q\}$ Can $e$ or $\pi$ be written as a finite sum of elements of $S$? Can $\pi=xe$, with $x$ algebraic?
4
votes
1answer
177 views
2-dimensional $\ell$-adic representations [closed]
In an assignment, I have to give an example of a 2-dimensional $\ell$-adic representation of the absolute Galois group of $\mathbb{Q}$, bu I am faced with the problem that I do not a lot of these. Or ...
-1
votes
1answer
131 views
proof of theorem
How to prove the following theorem. Could you explain. Given the number $A = \langle a_n, a_{n-1}, \dots, a_0 \rangle_{10}$ and the modulus $m$ such that $(10, m) = 1$ from the sequence ...
0
votes
1answer
370 views
Resurrection of my Tamagawa numbers Question, to understand the Formulation of BSD
My previous question was closed very badly for asking the broad and deep things, so I now understand the consequences of asking such questions, so I refrain from asking such questions, so this is not ...
2
votes
3answers
230 views
Kronecker-Weber Theorem
I'm stuck with an article "A simple proof of Kronecker-Weber Theorem" on this website. On page 7, the author proofs that $\mathbb{Q}_p((-p)^{\frac{1}{p-1}}) = \mathbb{Q}_p(\zeta_p)$. While I ...
3
votes
1answer
345 views
Question about a proof in Apostol's “Introduction to Analytic Number Theory”
The question is about the proof of Theorem 2.17 (Page 36) of the book Introduction to Analytic Number Theory by Apostol: Theorem 2.17. Let $f$ be multiplicative. Then $f$ is completely ...
What DNSSEC cannot do
DNSSEC:
What DNSSEC can do
Domain Name System Security Extensions (DNSSEC) add a layer of security to your domain names by attaching digital signatures to their Domain Name System (DNS) information. The DNSSEC extensions are used to verify data by means of cryptographically secured signatures.
DNSSEC offers:
dot-ca-registry.ca is pleased to announce that it has successfully completed certification by CIRA and can now process your DNSSEC requirements for all .ca domains.
LLVM mainline
InstCombineCompares.cpp
00001 //===- InstCombineCompares.cpp --------------------------------------------===//
00002 //
00003 // The LLVM Compiler Infrastructure
00004 //
00005 // This file is distributed under the University of Illinois Open Source
00006 // License. See LICENSE.TXT for details.
00007 //
00008 //===----------------------------------------------------------------------===//
00009 //
00010 // This file implements the visitICmp and visitFCmp functions.
00011 //
00012 //===----------------------------------------------------------------------===//
00013
00014 #include "InstCombineInternal.h"
00015 #include "llvm/ADT/APSInt.h"
00016 #include "llvm/ADT/Statistic.h"
00017 #include "llvm/Analysis/ConstantFolding.h"
00018 #include "llvm/Analysis/InstructionSimplify.h"
00019 #include "llvm/Analysis/MemoryBuiltins.h"
00020 #include "llvm/IR/ConstantRange.h"
00021 #include "llvm/IR/DataLayout.h"
00022 #include "llvm/IR/GetElementPtrTypeIterator.h"
00023 #include "llvm/IR/IntrinsicInst.h"
00024 #include "llvm/IR/PatternMatch.h"
00025 #include "llvm/Support/CommandLine.h"
00026 #include "llvm/Support/Debug.h"
00027 #include "llvm/Analysis/TargetLibraryInfo.h"
00028
00029 using namespace llvm;
00030 using namespace PatternMatch;
00031
00032 #define DEBUG_TYPE "instcombine"
00033
00034 // How many times is a select replaced by one of its operands?
00035 STATISTIC(NumSel, "Number of select opts");
00036
00037 // Initialization Routines
00038
00039 static ConstantInt *getOne(Constant *C) {
00040 return ConstantInt::get(cast<IntegerType>(C->getType()), 1);
00041 }
00042
00043 static ConstantInt *ExtractElement(Constant *V, Constant *Idx) {
00044 return cast<ConstantInt>(ConstantExpr::getExtractElement(V, Idx));
00045 }
00046
00047 static bool HasAddOverflow(ConstantInt *Result,
00048 ConstantInt *In1, ConstantInt *In2,
00049 bool IsSigned) {
00050 if (!IsSigned)
00051 return Result->getValue().ult(In1->getValue());
00052
00053 if (In2->isNegative())
00054 return Result->getValue().sgt(In1->getValue());
00055 return Result->getValue().slt(In1->getValue());
00056 }
00057
00058 /// AddWithOverflow - Compute Result = In1+In2, returning true if the result
00059 /// overflowed for this type.
00060 static bool AddWithOverflow(Constant *&Result, Constant *In1,
00061 Constant *In2, bool IsSigned = false) {
00062 Result = ConstantExpr::getAdd(In1, In2);
00063
00064 if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
00065 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
00066 Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
00067 if (HasAddOverflow(ExtractElement(Result, Idx),
00068 ExtractElement(In1, Idx),
00069 ExtractElement(In2, Idx),
00070 IsSigned))
00071 return true;
00072 }
00073 return false;
00074 }
00075
00076 return HasAddOverflow(cast<ConstantInt>(Result),
00077 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
00078 IsSigned);
00079 }
00080
00081 static bool HasSubOverflow(ConstantInt *Result,
00082 ConstantInt *In1, ConstantInt *In2,
00083 bool IsSigned) {
00084 if (!IsSigned)
00085 return Result->getValue().ugt(In1->getValue());
00086
00087 if (In2->isNegative())
00088 return Result->getValue().slt(In1->getValue());
00089
00090 return Result->getValue().sgt(In1->getValue());
00091 }
00092
00093 /// SubWithOverflow - Compute Result = In1-In2, returning true if the result
00094 /// overflowed for this type.
00095 static bool SubWithOverflow(Constant *&Result, Constant *In1,
00096 Constant *In2, bool IsSigned = false) {
00097 Result = ConstantExpr::getSub(In1, In2);
00098
00099 if (VectorType *VTy = dyn_cast<VectorType>(In1->getType())) {
00100 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
00101 Constant *Idx = ConstantInt::get(Type::getInt32Ty(In1->getContext()), i);
00102 if (HasSubOverflow(ExtractElement(Result, Idx),
00103 ExtractElement(In1, Idx),
00104 ExtractElement(In2, Idx),
00105 IsSigned))
00106 return true;
00107 }
00108 return false;
00109 }
00110
00111 return HasSubOverflow(cast<ConstantInt>(Result),
00112 cast<ConstantInt>(In1), cast<ConstantInt>(In2),
00113 IsSigned);
00114 }
00115
00116 /// isSignBitCheck - Given an exploded icmp instruction, return true if the
00117 /// comparison only checks the sign bit. If it only checks the sign bit, set
00118 /// TrueIfSigned if the result of the comparison is true when the input value is
00119 /// signed.
00120 static bool isSignBitCheck(ICmpInst::Predicate pred, ConstantInt *RHS,
00121 bool &TrueIfSigned) {
00122 switch (pred) {
00123 case ICmpInst::ICMP_SLT: // True if LHS s< 0
00124 TrueIfSigned = true;
00125 return RHS->isZero();
00126 case ICmpInst::ICMP_SLE: // True if LHS s<= RHS and RHS == -1
00127 TrueIfSigned = true;
00128 return RHS->isAllOnesValue();
00129 case ICmpInst::ICMP_SGT: // True if LHS s> -1
00130 TrueIfSigned = false;
00131 return RHS->isAllOnesValue();
00132 case ICmpInst::ICMP_UGT:
00133 // True if LHS u> RHS and RHS == high-bit-mask - 1
00134 TrueIfSigned = true;
00135 return RHS->isMaxValue(true);
00136 case ICmpInst::ICMP_UGE:
00137 // True if LHS u>= RHS and RHS == high-bit-mask (2^7, 2^15, 2^31, etc)
00138 TrueIfSigned = true;
00139 return RHS->getValue().isSignBit();
00140 default:
00141 return false;
00142 }
00143 }
00144
00145 /// Returns true if the exploded icmp can be expressed as a signed comparison
00146 /// to zero and updates the predicate accordingly.
00147 /// The signedness of the comparison is preserved.
00148 static bool isSignTest(ICmpInst::Predicate &pred, const ConstantInt *RHS) {
00149 if (!ICmpInst::isSigned(pred))
00150 return false;
00151
00152 if (RHS->isZero())
00153 return ICmpInst::isRelational(pred);
00154
00155 if (RHS->isOne()) {
00156 if (pred == ICmpInst::ICMP_SLT) {
00157 pred = ICmpInst::ICMP_SLE;
00158 return true;
00159 }
00160 } else if (RHS->isAllOnesValue()) {
00161 if (pred == ICmpInst::ICMP_SGT) {
00162 pred = ICmpInst::ICMP_SGE;
00163 return true;
00164 }
00165 }
00166
00167 return false;
00168 }
00169
00170 // isHighOnes - Return true if the constant is of the form 1+0+.
00171 // This is the same as lowones(~X).
00172 static bool isHighOnes(const ConstantInt *CI) {
00173 return (~CI->getValue() + 1).isPowerOf2();
00174 }
00175
00176 /// ComputeSignedMinMaxValuesFromKnownBits - Given a signed integer type and a
00177 /// set of known zero and one bits, compute the maximum and minimum values that
00178 /// could have the specified known zero and known one bits, returning them in
00179 /// min/max.
00180 static void ComputeSignedMinMaxValuesFromKnownBits(const APInt& KnownZero,
00181 const APInt& KnownOne,
00182 APInt& Min, APInt& Max) {
00183 assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
00184 KnownZero.getBitWidth() == Min.getBitWidth() &&
00185 KnownZero.getBitWidth() == Max.getBitWidth() &&
00186 "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
00187 APInt UnknownBits = ~(KnownZero|KnownOne);
00188
00189 // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
00190 // bit if it is unknown.
00191 Min = KnownOne;
00192 Max = KnownOne|UnknownBits;
00193
00194 if (UnknownBits.isNegative()) { // Sign bit is unknown
00195 Min.setBit(Min.getBitWidth()-1);
00196 Max.clearBit(Max.getBitWidth()-1);
00197 }
00198 }
00199
00200 // ComputeUnsignedMinMaxValuesFromKnownBits - Given an unsigned integer type and
00201 // a set of known zero and one bits, compute the maximum and minimum values that
00202 // could have the specified known zero and known one bits, returning them in
00203 // min/max.
00204 static void ComputeUnsignedMinMaxValuesFromKnownBits(const APInt &KnownZero,
00205 const APInt &KnownOne,
00206 APInt &Min, APInt &Max) {
00207 assert(KnownZero.getBitWidth() == KnownOne.getBitWidth() &&
00208 KnownZero.getBitWidth() == Min.getBitWidth() &&
00209 KnownZero.getBitWidth() == Max.getBitWidth() &&
00210 "Ty, KnownZero, KnownOne and Min, Max must have equal bitwidth.");
00211 APInt UnknownBits = ~(KnownZero|KnownOne);
00212
00213 // The minimum value is when the unknown bits are all zeros.
00214 Min = KnownOne;
00215 // The maximum value is when the unknown bits are all ones.
00216 Max = KnownOne|UnknownBits;
00217 }
00218
00219
00220
00221 /// FoldCmpLoadFromIndexedGlobal - Called when we see this pattern:
00222 /// cmp pred (load (gep GV, ...)), cmpcst
00223 /// where GV is a global variable with a constant initializer. Try to simplify
00224 /// this into some simple computation that does not need the load. For example
00225 /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
00226 ///
00227 /// If AndCst is non-null, then the loaded value is masked with that constant
00228 /// before doing the comparison. This handles cases like "A[i]&4 == 0".
00229 Instruction *InstCombiner::
00230 FoldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP, GlobalVariable *GV,
00231 CmpInst &ICI, ConstantInt *AndCst) {
00232 Constant *Init = GV->getInitializer();
00233 if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
00234 return nullptr;
00235
00236 uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
00237 if (ArrayElementCount > 1024) return nullptr; // Don't blow up on huge arrays.
00238
00239 // There are many forms of this optimization we can handle, for now, just do
00240 // the simple index into a single-dimensional array.
00241 //
00242 // Require: GEP GV, 0, i {{, constant indices}}
00243 if (GEP->getNumOperands() < 3 ||
00244 !isa<ConstantInt>(GEP->getOperand(1)) ||
00245 !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
00246 isa<Constant>(GEP->getOperand(2)))
00247 return nullptr;
00248
00249 // Check that indices after the variable are constants and in-range for the
00250 // type they index. Collect the indices. This is typically for arrays of
00251 // structs.
00252 SmallVector<unsigned, 4> LaterIndices;
00253
00254 Type *EltTy = Init->getType()->getArrayElementType();
00255 for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
00256 ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
00257 if (!Idx) return nullptr; // Variable index.
00258
00259 uint64_t IdxVal = Idx->getZExtValue();
00260 if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.
00261
00262 if (StructType *STy = dyn_cast<StructType>(EltTy))
00263 EltTy = STy->getElementType(IdxVal);
00264 else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
00265 if (IdxVal >= ATy->getNumElements()) return nullptr;
00266 EltTy = ATy->getElementType();
00267 } else {
00268 return nullptr; // Unknown type.
00269 }
00270
00271 LaterIndices.push_back(IdxVal);
00272 }
00273
00274 enum { Overdefined = -3, Undefined = -2 };
00275
00276 // Variables for our state machines.
00277
00278 // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
00279 // "i == 47 | i == 87", where 47 is the first index the condition is true for,
00280 // and 87 is the second (and last) index. FirstTrueElement is -2 when
00281 // undefined, otherwise set to the first true element. SecondTrueElement is
00282 // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
00283 int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
00284
00285 // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
00286 // form "i != 47 & i != 87". Same state transitions as for true elements.
00287 int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
00288
00289 /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
00290 /// define a state machine that triggers for ranges of values that the index
00291 /// is true or false for. This triggers on things like "abbbbc"[i] == 'b'.
00292 /// This is -2 when undefined, -3 when overdefined, and otherwise the last
00293 /// index in the range (inclusive). We use -2 for undefined here because we
00294 /// use relative comparisons and don't want 0-1 to match -1.
00295 int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
00296
00297 // MagicBitvector - This is a magic bitvector where we set a bit if the
00298 // comparison is true for element 'i'. If there are 64 elements or less in
00299 // the array, this will fully represent all the comparison results.
00300 uint64_t MagicBitvector = 0;
00301
00302 // Scan the array and see if one of our patterns matches.
00303 Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
00304 for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
00305 Constant *Elt = Init->getAggregateElement(i);
00306 if (!Elt) return nullptr;
00307
00308 // If this is indexing an array of structures, get the structure element.
00309 if (!LaterIndices.empty())
00310 Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);
00311
00312 // If the element is masked, handle it.
00313 if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);
00314
00315 // Find out if the comparison would be true or false for the i'th element.
00316 Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
00317 CompareRHS, DL, TLI);
00318 // If the result is undef for this element, ignore it.
00319 if (isa<UndefValue>(C)) {
00320 // Extend range state machines to cover this element in case there is an
00321 // undef in the middle of the range.
00322 if (TrueRangeEnd == (int)i-1)
00323 TrueRangeEnd = i;
00324 if (FalseRangeEnd == (int)i-1)
00325 FalseRangeEnd = i;
00326 continue;
00327 }
00328
00329 // If we can't compute the result for any of the elements, we have to give
00330 // up evaluating the entire conditional.
00331 if (!isa<ConstantInt>(C)) return nullptr;
00332
00333 // Otherwise, we know if the comparison is true or false for this element,
00334 // update our state machines.
00335 bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
00336
00337 // State machine for single/double/range index comparison.
00338 if (IsTrueForElt) {
00339 // Update the TrueElement state machine.
00340 if (FirstTrueElement == Undefined)
00341 FirstTrueElement = TrueRangeEnd = i; // First true element.
00342 else {
00343 // Update double-compare state machine.
00344 if (SecondTrueElement == Undefined)
00345 SecondTrueElement = i;
00346 else
00347 SecondTrueElement = Overdefined;
00348
00349 // Update range state machine.
00350 if (TrueRangeEnd == (int)i-1)
00351 TrueRangeEnd = i;
00352 else
00353 TrueRangeEnd = Overdefined;
00354 }
00355 } else {
00356 // Update the FalseElement state machine.
00357 if (FirstFalseElement == Undefined)
00358 FirstFalseElement = FalseRangeEnd = i; // First false element.
00359 else {
00360 // Update double-compare state machine.
00361 if (SecondFalseElement == Undefined)
00362 SecondFalseElement = i;
00363 else
00364 SecondFalseElement = Overdefined;
00365
00366 // Update range state machine.
00367 if (FalseRangeEnd == (int)i-1)
00368 FalseRangeEnd = i;
00369 else
00370 FalseRangeEnd = Overdefined;
00371 }
00372 }
00373
00374
00375 // If this element is in range, update our magic bitvector.
00376 if (i < 64 && IsTrueForElt)
00377 MagicBitvector |= 1ULL << i;
00378
00379 // If all of our states become overdefined, bail out early. Since the
00380 // predicate is expensive, only check it every 8 elements. This is only
00381 // really useful for really huge arrays.
00382 if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
00383 SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
00384 FalseRangeEnd == Overdefined)
00385 return nullptr;
00386 }
00387
00388 // Now that we've scanned the entire array, emit our new comparison(s). We
00389 // order the state machines in complexity of the generated code.
00390 Value *Idx = GEP->getOperand(2);
00391
00392 // If the index is larger than the pointer size of the target, truncate the
00393 // index down like the GEP would do implicitly. We don't have to do this for
00394 // an inbounds GEP because the index can't be out of range.
00395 if (!GEP->isInBounds()) {
00396 Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
00397 unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
00398 if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
00399 Idx = Builder->CreateTrunc(Idx, IntPtrTy);
00400 }
00401
00402 // If the comparison is only true for one or two elements, emit direct
00403 // comparisons.
00404 if (SecondTrueElement != Overdefined) {
00405 // None true -> false.
00406 if (FirstTrueElement == Undefined)
00407 return ReplaceInstUsesWith(ICI, Builder->getFalse());
00408
00409 Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
00410
00411 // True for one element -> 'i == 47'.
00412 if (SecondTrueElement == Undefined)
00413 return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
00414
00415 // True for two elements -> 'i == 47 | i == 72'.
00416 Value *C1 = Builder->CreateICmpEQ(Idx, FirstTrueIdx);
00417 Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
00418 Value *C2 = Builder->CreateICmpEQ(Idx, SecondTrueIdx);
00419 return BinaryOperator::CreateOr(C1, C2);
00420 }
00421
00422 // If the comparison is only false for one or two elements, emit direct
00423 // comparisons.
00424 if (SecondFalseElement != Overdefined) {
00425 // None false -> true.
00426 if (FirstFalseElement == Undefined)
00427 return ReplaceInstUsesWith(ICI, Builder->getTrue());
00428
00429 Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
00430
00431 // False for one element -> 'i != 47'.
00432 if (SecondFalseElement == Undefined)
00433 return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
00434
00435 // False for two elements -> 'i != 47 & i != 72'.
00436 Value *C1 = Builder->CreateICmpNE(Idx, FirstFalseIdx);
00437 Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
00438 Value *C2 = Builder->CreateICmpNE(Idx, SecondFalseIdx);
00439 return BinaryOperator::CreateAnd(C1, C2);
00440 }
00441
00442 // If the comparison can be replaced with a range comparison for the elements
00443 // where it is true, emit the range check.
00444 if (TrueRangeEnd != Overdefined) {
00445 assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
00446
00447 // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
00448 if (FirstTrueElement) {
00449 Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
00450 Idx = Builder->CreateAdd(Idx, Offs);
00451 }
00452
00453 Value *End = ConstantInt::get(Idx->getType(),
00454 TrueRangeEnd-FirstTrueElement+1);
00455 return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
00456 }
00457
00458 // False range check.
00459 if (FalseRangeEnd != Overdefined) {
00460 assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
00461 // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
00462 if (FirstFalseElement) {
00463 Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
00464 Idx = Builder->CreateAdd(Idx, Offs);
00465 }
00466
00467 Value *End = ConstantInt::get(Idx->getType(),
00468 FalseRangeEnd-FirstFalseElement);
00469 return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
00470 }
00471
00472
00473 // If a magic bitvector captures the entire comparison state
00474 // of this load, replace it with computation that does:
00475 // ((magic_cst >> i) & 1) != 0
00476 {
00477 Type *Ty = nullptr;
00478
00479 // Look for an appropriate type:
00480 // - The type of Idx if the magic fits
00481 // - The smallest fitting legal type if we have a DataLayout
00482 // - Default to i32
00483 if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
00484 Ty = Idx->getType();
00485 else
00486 Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
00487
00488 if (Ty) {
00489 Value *V = Builder->CreateIntCast(Idx, Ty, false);
00490 V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
00491 V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V);
00492 return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
00493 }
00494 }
00495
00496 return nullptr;
00497 }
00498
00499
00500 /// EvaluateGEPOffsetExpression - Return a value that can be used to compare
00501 /// the *offset* implied by a GEP to zero. For example, if we have &A[i], we
00502 /// want to return 'i' for "icmp ne i, 0". Note that, in general, indices can
00503 /// be complex, and scales are involved. The above expression would also be
00504 /// legal to codegen as "icmp ne (i*4), 0" (assuming A is a pointer to i32).
00505 /// This later form is less amenable to optimization though, and we are allowed
00506 /// to generate the first by knowing that pointer arithmetic doesn't overflow.
00507 ///
00508 /// If we can't emit an optimized form for this expression, this returns null.
00509 ///
00510 static Value *EvaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
00511 const DataLayout &DL) {
00512 gep_type_iterator GTI = gep_type_begin(GEP);
00513
00514 // Check to see if this gep only has a single variable index. If so, and if
00515 // any constant indices are a multiple of its scale, then we can compute this
00516 // in terms of the scale of the variable index. For example, if the GEP
00517 // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
00518 // because the expression will cross zero at the same point.
00519 unsigned i, e = GEP->getNumOperands();
00520 int64_t Offset = 0;
00521 for (i = 1; i != e; ++i, ++GTI) {
00522 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
00523 // Compute the aggregate offset of constant indices.
00524 if (CI->isZero()) continue;
00525
00526 // Handle a struct index, which adds its field offset to the pointer.
00527 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
00528 Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
00529 } else {
00530 uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
00531 Offset += Size*CI->getSExtValue();
00532 }
00533 } else {
00534 // Found our variable index.
00535 break;
00536 }
00537 }
00538
00539 // If there are no variable indices, we must have a constant offset, just
00540 // evaluate it the general way.
00541 if (i == e) return nullptr;
00542
00543 Value *VariableIdx = GEP->getOperand(i);
00544 // Determine the scale factor of the variable element. For example, this is
00545 // 4 if the variable index is into an array of i32.
00546 uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());
00547
00548 // Verify that there are no other variable indices. If so, emit the hard way.
00549 for (++i, ++GTI; i != e; ++i, ++GTI) {
00550 ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
00551 if (!CI) return nullptr;
00552
00553 // Compute the aggregate offset of constant indices.
00554 if (CI->isZero()) continue;
00555
00556 // Handle a struct index, which adds its field offset to the pointer.
00557 if (StructType *STy = dyn_cast<StructType>(*GTI)) {
00558 Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
00559 } else {
00560 uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
00561 Offset += Size*CI->getSExtValue();
00562 }
00563 }
00564
00565
00566
00567 // Okay, we know we have a single variable index, which must be a
00568 // pointer/array/vector index. If there is no offset, life is simple, return
00569 // the index.
00570 Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
00571 unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
00572 if (Offset == 0) {
00573 // Cast to intptrty in case a truncation occurs. If an extension is needed,
00574 // we don't need to bother extending: the extension won't affect where the
00575 // computation crosses zero.
00576 if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
00577 VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy);
00578 }
00579 return VariableIdx;
00580 }
00581
00582 // Otherwise, there is an index. The computation we will do will be modulo
00583 // the pointer size, so get it.
00584 uint64_t PtrSizeMask = ~0ULL >> (64-IntPtrWidth);
00585
00586 Offset &= PtrSizeMask;
00587 VariableScale &= PtrSizeMask;
00588
00589 // To do this transformation, any constant index must be a multiple of the
00590 // variable scale factor. For example, we can evaluate "12 + 4*i" as "3 + i",
00591 // but we can't evaluate "10 + 3*i" in terms of i. Check that the offset is a
00592 // multiple of the variable scale.
00593 int64_t NewOffs = Offset / (int64_t)VariableScale;
00594 if (Offset != NewOffs*(int64_t)VariableScale)
00595 return nullptr;
00596
00597 // Okay, we can do this evaluation. Start by converting the index to intptr.
00598 if (VariableIdx->getType() != IntPtrTy)
00599 VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy,
00600 true /*Signed*/);
00601 Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
00602 return IC.Builder->CreateAdd(VariableIdx, OffsetVal, "offset");
00603 }
00604
00605 /// FoldGEPICmp - Fold comparisons between a GEP instruction and something
00606 /// else. At this point we know that the GEP is on the LHS of the comparison.
00607 Instruction *InstCombiner::FoldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
00608 ICmpInst::Predicate Cond,
00609 Instruction &I) {
00610 // Don't transform signed compares of GEPs into index compares. Even if the
00611 // GEP is inbounds, the final add of the base pointer can have signed overflow
00612 // and would change the result of the icmp.
00613 // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
00614 // the maximum signed value for the pointer type.
00615 if (ICmpInst::isSigned(Cond))
00616 return nullptr;
00617
00618 // Look through bitcasts and addrspacecasts. We do not however want to remove
00619 // 0 GEPs.
00620 if (!isa<GetElementPtrInst>(RHS))
00621 RHS = RHS->stripPointerCasts();
00622
00623 Value *PtrBase = GEPLHS->getOperand(0);
00624 if (PtrBase == RHS && GEPLHS->isInBounds()) {
00625 // ((gep Ptr, OFFSET) cmp Ptr) ---> (OFFSET cmp 0).
00626 // This transformation (ignoring the base and scales) is valid because we
00627 // know pointers can't overflow since the gep is inbounds. See if we can
00628 // output an optimized form.
00629 Value *Offset = EvaluateGEPOffsetExpression(GEPLHS, *this, DL);
00630
00631 // If not, synthesize the offset the hard way.
00632 if (!Offset)
00633 Offset = EmitGEPOffset(GEPLHS);
00634 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
00635 Constant::getNullValue(Offset->getType()));
00636 } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
00637 // If the base pointers are different, but the indices are the same, just
00638 // compare the base pointer.
00639 if (PtrBase != GEPRHS->getOperand(0)) {
00640 bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
00641 IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
00642 GEPRHS->getOperand(0)->getType();
00643 if (IndicesTheSame)
00644 for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
00645 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
00646 IndicesTheSame = false;
00647 break;
00648 }
00649
00650 // If all indices are the same, just compare the base pointers.
00651 if (IndicesTheSame)
00652 return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));
00653
00654 // If we're comparing GEPs with two base pointers that only differ in type
00655 // and both GEPs have only constant indices or just one use, then fold
00656 // the compare with the adjusted indices.
00657 if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
00658 (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
00659 (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
00660 PtrBase->stripPointerCasts() ==
00661 GEPRHS->getOperand(0)->stripPointerCasts()) {
00662 Value *LOffset = EmitGEPOffset(GEPLHS);
00663 Value *ROffset = EmitGEPOffset(GEPRHS);
00664
00665 // If we looked through an addrspacecast between different sized address
00666 // spaces, the LHS and RHS pointers are different sized
00667 // integers. Truncate to the smaller one.
00668 Type *LHSIndexTy = LOffset->getType();
00669 Type *RHSIndexTy = ROffset->getType();
00670 if (LHSIndexTy != RHSIndexTy) {
00671 if (LHSIndexTy->getPrimitiveSizeInBits() <
00672 RHSIndexTy->getPrimitiveSizeInBits()) {
00673 ROffset = Builder->CreateTrunc(ROffset, LHSIndexTy);
00674 } else
00675 LOffset = Builder->CreateTrunc(LOffset, RHSIndexTy);
00676 }
00677
00678 Value *Cmp = Builder->CreateICmp(ICmpInst::getSignedPredicate(Cond),
00679 LOffset, ROffset);
00680 return ReplaceInstUsesWith(I, Cmp);
00681 }
00682
00683 // Otherwise, the base pointers are different and the indices are
00684 // different, bail out.
00685 return nullptr;
00686 }
00687
00688 // If one of the GEPs has all zero indices, recurse.
00689 if (GEPLHS->hasAllZeroIndices())
00690 return FoldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
00691 ICmpInst::getSwappedPredicate(Cond), I);
00692
00693 // If the other GEP has all zero indices, recurse.
00694 if (GEPRHS->hasAllZeroIndices())
00695 return FoldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
00696
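// Several of the folds below compare offsets or individual indices instead
// of whole pointers; those are only safe when both GEPs are inbounds, since
// the pointer arithmetic is then known not to wrap.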
00697 bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
00698 if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
00699 // If the GEPs only differ by one index, compare it.
00700 unsigned NumDifferences = 0; // Keep track of # differences.
00701 unsigned DiffOperand = 0; // The operand that differs.
00702 for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
00703 if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
00704 if (GEPLHS->getOperand(i)->getType()->getPrimitiveSizeInBits() !=
00705 GEPRHS->getOperand(i)->getType()->getPrimitiveSizeInBits()) {
00706 // Irreconcilable differences.
00707 NumDifferences = 2;
00708 break;
00709 } else {
00710 if (NumDifferences++) break;
00711 DiffOperand = i;
00712 }
00713 }
00714
00715 if (NumDifferences == 0) // SAME GEP?
00716 return ReplaceInstUsesWith(I, // No comparison is needed here.
00717 Builder->getInt1(ICmpInst::isTrueWhenEqual(Cond)));
00718
00719 else if (NumDifferences == 1 && GEPsInBounds) {
00720 Value *LHSV = GEPLHS->getOperand(DiffOperand);
00721 Value *RHSV = GEPRHS->getOperand(DiffOperand);
00722 // Make sure we do a signed comparison here.
00723 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
00724 }
00725 }
00726
00727 // Only lower this if the icmp is the only user of the GEP or if we expect
00728 // the result to fold to a constant!
00729 if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
00730 (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
00731 // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)) ---> (OFFSET1 cmp OFFSET2)
00732 Value *L = EmitGEPOffset(GEPLHS);
00733 Value *R = EmitGEPOffset(GEPRHS);
00734 return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
00735 }
00736 }
00737 return nullptr;
00738 }
00739
00740 /// FoldICmpAddOpCst - Fold "icmp pred (X+CI), X".
00741 Instruction *InstCombiner::FoldICmpAddOpCst(Instruction &ICI,
00742 Value *X, ConstantInt *CI,
00743 ICmpInst::Predicate Pred) {
00744 // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
00745 // so the values can never be equal. Similarly for all other "or equals"
00746 // operators.
00747
00748 // (X+1) <u X --> X >u (MAXUINT-1) --> X == 255
00749 // (X+2) <u X --> X >u (MAXUINT-2) --> X >u 253
00750 // (X+MAXUINT) <u X --> X >u (MAXUINT-MAXUINT) --> X != 0
00751 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
00752 Value *R =
00753 ConstantExpr::getSub(ConstantInt::getAllOnesValue(CI->getType()), CI);
00754 return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
00755 }
00756
00757 // (X+1) >u X --> X <u (0-1) --> X != 255
00758 // (X+2) >u X --> X <u (0-2) --> X <u 254
00759 // (X+MAXUINT) >u X --> X <u (0-MAXUINT) --> X <u 1 --> X == 0
00760 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
00761 return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantExpr::getNeg(CI));
00762
00763 unsigned BitWidth = CI->getType()->getPrimitiveSizeInBits();
00764 ConstantInt *SMax = ConstantInt::get(X->getContext(),
00765 APInt::getSignedMaxValue(BitWidth));
00766
00767 // (X+ 1) <s X --> X >s (MAXSINT-1) --> X == 127
00768 // (X+ 2) <s X --> X >s (MAXSINT-2) --> X >s 125
00769 // (X+MAXSINT) <s X --> X >s (MAXSINT-MAXSINT) --> X >s 0
00770 // (X+MINSINT) <s X --> X >s (MAXSINT-MINSINT) --> X >s -1
00771 // (X+ -2) <s X --> X >s (MAXSINT- -2) --> X >s 126
00772 // (X+ -1) <s X --> X >s (MAXSINT- -1) --> X != 127
00773 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
00774 return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantExpr::getSub(SMax, CI));
00775
00776 // (X+ 1) >s X --> X <s (MAXSINT-(1-1)) --> X != 127
00777 // (X+ 2) >s X --> X <s (MAXSINT-(2-1)) --> X <s 126
00778 // (X+MAXSINT) >s X --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
00779 // (X+MINSINT) >s X --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
00780 // (X+ -2) >s X --> X <s (MAXSINT-(-2-1)) --> X <s -126
00781 // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
00782
00783 assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
00784 Constant *C = Builder->getInt(CI->getValue()-1);
00785 return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
00786 }
00787
00788 /// FoldICmpDivCst - Fold "icmp pred, ([su]div X, DivRHS), CmpRHS" where DivRHS
00789 /// and CmpRHS are both known to be integer constants.
00790 Instruction *InstCombiner::FoldICmpDivCst(ICmpInst &ICI, BinaryOperator *DivI,
00791 ConstantInt *DivRHS) {
00792 ConstantInt *CmpRHS = cast<ConstantInt>(ICI.getOperand(1));
00793 const APInt &CmpRHSV = CmpRHS->getValue();
00794
00795 // FIXME: If the operand types don't match the type of the divide
00796 // then don't attempt this transform. The code below doesn't have the
00797 // logic to deal with a signed divide and an unsigned compare (and
00798 // vice versa). This is because (x /s C1) <s C2 produces different
00799 // results than (x /s C1) <u C2 or (x /u C1) <s C2 or even
00800 // (x /u C1) <u C2. Simply casting the operands and result won't
00801 // work. :( The if statement below tests that condition and bails
00802 // if it finds it.
00803 bool DivIsSigned = DivI->getOpcode() == Instruction::SDiv;
00804 if (!ICI.isEquality() && DivIsSigned != ICI.isSigned())
00805 return nullptr;
00806 if (DivRHS->isZero())
00807 return nullptr; // The ProdOV computation fails on divide by zero.
00808 if (DivIsSigned && DivRHS->isAllOnesValue())
00809 return nullptr; // The overflow computation also screws up here
00810 if (DivRHS->isOne()) {
00811 // This eliminates some funny cases with INT_MIN.
00812 ICI.setOperand(0, DivI->getOperand(0)); // X/1 == X.
00813 return &ICI;
00814 }
00815
00816 // Compute Prod = CI * DivRHS. We are essentially solving an equation
00817 // of form X/C1=C2. We solve for X by multiplying C1 (DivRHS) and
00818 // C2 (CI). By solving for X we can turn this into a range check
00819 // instead of computing a divide.
00820 Constant *Prod = ConstantExpr::getMul(CmpRHS, DivRHS);
00821
00822 // Determine if the product overflows by seeing if the product is
00823 // not equal to the divide. Make sure we do the same kind of divide
00824 // as in the LHS instruction that we're folding.
00825 bool ProdOV = (DivIsSigned ? ConstantExpr::getSDiv(Prod, DivRHS) :
00826 ConstantExpr::getUDiv(Prod, DivRHS)) != CmpRHS;
00827
00828 // Get the ICmp opcode
00829 ICmpInst::Predicate Pred = ICI.getPredicate();
00830
00831 // If the division is known to be exact, then there is no remainder from the
00832 // divide, so the covered range size is 1; otherwise it is the divisor.
00833 ConstantInt *RangeSize = DivI->isExact() ? getOne(Prod) : DivRHS;
00834
00835 // Figure out the interval that is being checked. For example, a comparison
00836 // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
00837 // Compute this interval based on the constants involved and the signedness of
00838 // the compare/divide. This computes a half-open interval, keeping track of
00839 // whether either value in the interval overflows. After analysis each
00840 // overflow variable is set to 0 if its corresponding bound variable is valid,
00841 // -1 if it overflowed off the bottom end, or +1 if it overflowed off the top end.
00842 int LoOverflow = 0, HiOverflow = 0;
00843 Constant *LoBound = nullptr, *HiBound = nullptr;
00844
00845 if (!DivIsSigned) { // udiv
00846 // e.g. X/5 op 3 --> [15, 20)
00847 LoBound = Prod;
00848 HiOverflow = LoOverflow = ProdOV;
00849 if (!HiOverflow) {
00850 // If this is not an exact divide, then many values in the range collapse
00851 // to the same result value.
00852 HiOverflow = AddWithOverflow(HiBound, LoBound, RangeSize, false);
00853 }
00854
00855 } else if (DivRHS->getValue().isStrictlyPositive()) { // Divisor is > 0.
00856 if (CmpRHSV == 0) { // (X / pos) op 0
00857 // Can't overflow. e.g. X/2 op 0 --> [-1, 2)
00858 LoBound = ConstantExpr::getNeg(SubOne(RangeSize));
00859 HiBound = RangeSize;
00860 } else if (CmpRHSV.isStrictlyPositive()) { // (X / pos) op pos
00861 LoBound = Prod; // e.g. X/5 op 3 --> [15, 20)
00862 HiOverflow = LoOverflow = ProdOV;
00863 if (!HiOverflow)
00864 HiOverflow = AddWithOverflow(HiBound, Prod, RangeSize, true);
00865 } else { // (X / pos) op neg
00866 // e.g. X/5 op -3 --> [-15-4, -15+1) --> [-19, -14)
00867 HiBound = AddOne(Prod);
00868 LoOverflow = HiOverflow = ProdOV ? -1 : 0;
00869 if (!LoOverflow) {
00870 ConstantInt *DivNeg =cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
00871 LoOverflow = AddWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
00872 }
00873 }
00874 } else if (DivRHS->isNegative()) { // Divisor is < 0.
00875 if (DivI->isExact())
00876 RangeSize = cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
00877 if (CmpRHSV == 0) { // (X / neg) op 0
00878 // e.g. X/-5 op 0 --> [-4, 5)
00879 LoBound = AddOne(RangeSize);
00880 HiBound = cast<ConstantInt>(ConstantExpr::getNeg(RangeSize));
00881 if (HiBound == DivRHS) { // -INTMIN = INTMIN
00882 HiOverflow = 1; // [INTMIN+1, overflow)
00883 HiBound = nullptr; // e.g. X/INTMIN = 0 --> X > INTMIN
00884 }
00885 } else if (CmpRHSV.isStrictlyPositive()) { // (X / neg) op pos
00886 // e.g. X/-5 op 3 --> [-19, -14)
00887 HiBound = AddOne(Prod);
00888 HiOverflow = LoOverflow = ProdOV ? -1 : 0;
00889 if (!LoOverflow)
00890 LoOverflow = AddWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
00891 } else { // (X / neg) op neg
00892 LoBound = Prod; // e.g. X/-5 op -3 --> [15, 20)
00893 LoOverflow = HiOverflow = ProdOV;
00894 if (!HiOverflow)
00895 HiOverflow = SubWithOverflow(HiBound, Prod, RangeSize, true);
00896 }
00897
00898 // Dividing by a negative swaps the condition. LT <-> GT
00899 Pred = ICmpInst::getSwappedPredicate(Pred);
00900 }
00901
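// Translate the half-open interval [LoBound, HiBound) back into a comparison
// on the dividend X, using the recorded overflow directions for bounds that
// fell outside the range of the type.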
00902 Value *X = DivI->getOperand(0);
00903 switch (Pred) {
00904 default: llvm_unreachable("Unhandled icmp opcode!");
00905 case ICmpInst::ICMP_EQ:
00906 if (LoOverflow && HiOverflow)
00907 return ReplaceInstUsesWith(ICI, Builder->getFalse());
00908 if (HiOverflow)
00909 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
00910 ICmpInst::ICMP_UGE, X, LoBound);
00911 if (LoOverflow)
00912 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
00913 ICmpInst::ICMP_ULT, X, HiBound);
00914 return ReplaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
00915 DivIsSigned, true));
00916 case ICmpInst::ICMP_NE:
00917 if (LoOverflow && HiOverflow)
00918 return ReplaceInstUsesWith(ICI, Builder->getTrue());
00919 if (HiOverflow)
00920 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
00921 ICmpInst::ICMP_ULT, X, LoBound);
00922 if (LoOverflow)
00923 return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
00924 ICmpInst::ICMP_UGE, X, HiBound);
00925 return ReplaceInstUsesWith(ICI, InsertRangeTest(X, LoBound, HiBound,
00926 DivIsSigned, false));
00927 case ICmpInst::ICMP_ULT:
00928 case ICmpInst::ICMP_SLT:
00929 if (LoOverflow == +1) // Low bound is greater than input range.
00930 return ReplaceInstUsesWith(ICI, Builder->getTrue());
00931 if (LoOverflow == -1) // Low bound is less than input range.
00932 return ReplaceInstUsesWith(ICI, Builder->getFalse());
00933 return new ICmpInst(Pred, X, LoBound);
00934 case ICmpInst::ICMP_UGT:
00935 case ICmpInst::ICMP_SGT:
00936 if (HiOverflow == +1) // High bound greater than input range.
00937 return ReplaceInstUsesWith(ICI, Builder->getFalse());
00938 if (HiOverflow == -1) // High bound less than input range.
00939 return ReplaceInstUsesWith(ICI, Builder->getTrue());
00940 if (Pred == ICmpInst::ICMP_UGT)
00941 return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
00942 return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
00943 }
00944 }
00945
00946 /// FoldICmpShrCst - Handle "icmp(([al]shr X, cst1), cst2)".
00947 Instruction *InstCombiner::FoldICmpShrCst(ICmpInst &ICI, BinaryOperator *Shr,
00948 ConstantInt *ShAmt) {
00949 const APInt &CmpRHSV = cast<ConstantInt>(ICI.getOperand(1))->getValue();
00950
00951 // Check that the shift amount is in range. If not, don't perform
00952 // undefined shifts. When the shift is visited it will be
00953 // simplified.
00954 uint32_t TypeBits = CmpRHSV.getBitWidth();
00955 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
00956 if (ShAmtVal >= TypeBits || ShAmtVal == 0)
00957 return nullptr;
00958
00959 if (!ICI.isEquality()) {
00960 // If we have an unsigned comparison and an ashr, we can't simplify this.
00961 // Similarly for signed comparisons with lshr.
00962 if (ICI.isSigned() != (Shr->getOpcode() == Instruction::AShr))
00963 return nullptr;
00964
00965 // Otherwise, all lshr and most exact ashr's are equivalent to a udiv/sdiv
00966 // by a power of 2. Since we already have logic to simplify these,
00967 // transform to div and then simplify the resultant comparison.
00968 if (Shr->getOpcode() == Instruction::AShr &&
00969 (!Shr->isExact() || ShAmtVal == TypeBits - 1))
00970 return nullptr;
00971
00972 // Revisit the shift (to delete it).
00973 Worklist.Add(Shr);
00974
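// Build the power-of-two divisor (1 << ShAmtVal) so the shift can be
// rewritten as a division and folded by FoldICmpDivCst below.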
00975 Constant *DivCst =
00976 ConstantInt::get(Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal));
00977
00978 Value *Tmp =
00979 Shr->getOpcode() == Instruction::AShr ?
00980 Builder->CreateSDiv(Shr->getOperand(0), DivCst, "", Shr->isExact()) :
00981 Builder->CreateUDiv(Shr->getOperand(0), DivCst, "", Shr->isExact());
00982
00983 ICI.setOperand(0, Tmp);
00984
00985 // If the builder folded the binop, just return it.
00986 BinaryOperator *TheDiv = dyn_cast<BinaryOperator>(Tmp);
00987 if (!TheDiv)
00988 return &ICI;
00989
00990 // Otherwise, fold this div/compare.
00991 assert(TheDiv->getOpcode() == Instruction::SDiv ||
00992 TheDiv->getOpcode() == Instruction::UDiv);
00993
00994 Instruction *Res = FoldICmpDivCst(ICI, TheDiv, cast<ConstantInt>(DivCst));
00995 assert(Res && "This div/cst should have folded!");
00996 return Res;
00997 }
00998
00999
01000 // If we are comparing against bits always shifted out, the
01001 // comparison cannot succeed.
01002 APInt Comp = CmpRHSV << ShAmtVal;
01003 ConstantInt *ShiftedCmpRHS = Builder->getInt(Comp);
01004 if (Shr->getOpcode() == Instruction::LShr)
01005 Comp = Comp.lshr(ShAmtVal);
01006 else
01007 Comp = Comp.ashr(ShAmtVal);
01008
01009 if (Comp != CmpRHSV) { // Comparing against a bit that we know is zero.
01010 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
01011 Constant *Cst = Builder->getInt1(IsICMP_NE);
01012 return ReplaceInstUsesWith(ICI, Cst);
01013 }
01014
01015 // Otherwise, check to see if the bits shifted out are known to be zero.
01016 // If so, we can compare against the unshifted value:
01017 // (X & 4) >> 1 == 2 --> (X & 4) == 4.
01018 if (Shr->hasOneUse() && Shr->isExact())
01019 return new ICmpInst(ICI.getPredicate(), Shr->getOperand(0), ShiftedCmpRHS);
01020
01021 if (Shr->hasOneUse()) {
01022 // Otherwise strength reduce the shift into an and.
01023 APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
01024 Constant *Mask = Builder->getInt(Val);
01025
01026 Value *And = Builder->CreateAnd(Shr->getOperand(0),
01027 Mask, Shr->getName()+".mask");
01028 return new ICmpInst(ICI.getPredicate(), And, ShiftedCmpRHS);
01029 }
01030 return nullptr;
01031 }
01032
01033 /// FoldICmpCstShrCst - Handle "(icmp eq/ne (ashr/lshr const2, A), const1)" ->
01034 /// (icmp eq/ne A, Log2(const2/const1)) ->
01035 /// (icmp eq/ne A, Log2(const2) - Log2(const1)).
01036 Instruction *InstCombiner::FoldICmpCstShrCst(ICmpInst &I, Value *Op, Value *A,
01037 ConstantInt *CI1,
01038 ConstantInt *CI2) {
01039 assert(I.isEquality() && "Cannot fold icmp gt/lt");
01040
01041 auto getConstant = [&I, this](bool IsTrue) {
01042 if (I.getPredicate() == I.ICMP_NE)
01043 IsTrue = !IsTrue;
01044 return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), IsTrue));
01045 };
01046
01047 auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
01048 if (I.getPredicate() == I.ICMP_NE)
01049 Pred = CmpInst::getInversePredicate(Pred);
01050 return new ICmpInst(Pred, LHS, RHS);
01051 };
01052
01053 APInt AP1 = CI1->getValue();
01054 APInt AP2 = CI2->getValue();
01055
01056 // Don't bother doing any work for cases which InstSimplify handles.
01057 if (AP2 == 0)
01058 return nullptr;
01059 bool IsAShr = isa<AShrOperator>(Op);
01060 if (IsAShr) {
01061 if (AP2.isAllOnesValue())
01062 return nullptr;
01063 if (AP2.isNegative() != AP1.isNegative())
01064 return nullptr;
01065 if (AP2.sgt(AP1))
01066 return nullptr;
01067 }
01068
01069 if (!AP1)
01070 // 'A' must be large enough to shift out the highest set bit.
01071 return getICmp(I.ICMP_UGT, A,
01072 ConstantInt::get(A->getType(), AP2.logBase2()));
01073
01074 if (AP1 == AP2)
01075 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
01076
01077 // Get the distance between the highest bits that are set.
01078 int Shift;
01079 // When both constants are negative, use their one's complements to compute the logs.
01080 if (IsAShr && AP1.isNegative())
01081 // Get the ones' complement of AP2 and AP1 when computing the distance.
01082 Shift = (~AP2).logBase2() - (~AP1).logBase2();
01083 else
01084 Shift = AP2.logBase2() - AP1.logBase2();
01085
01086 if (Shift > 0) {
01087 if (IsAShr ? AP1 == AP2.ashr(Shift) : AP1 == AP2.lshr(Shift))
01088 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
01089 }
01090 // Shifting const2 will never be equal to const1.
01091 return getConstant(false);
01092 }
01093
01094 /// FoldICmpCstShlCst - Handle "(icmp eq/ne (shl const2, A), const1)" ->
01095 /// (icmp eq/ne A, TrailingZeros(const1) - TrailingZeros(const2)).
01096 Instruction *InstCombiner::FoldICmpCstShlCst(ICmpInst &I, Value *Op, Value *A,
01097 ConstantInt *CI1,
01098 ConstantInt *CI2) {
01099 assert(I.isEquality() && "Cannot fold icmp gt/lt");
01100
01101 auto getConstant = [&I, this](bool IsTrue) {
01102 if (I.getPredicate() == I.ICMP_NE)
01103 IsTrue = !IsTrue;
01104 return ReplaceInstUsesWith(I, ConstantInt::get(I.getType(), IsTrue));
01105 };
01106
01107 auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
01108 if (I.getPredicate() == I.ICMP_NE)
01109 Pred = CmpInst::getInversePredicate(Pred);
01110 return new ICmpInst(Pred, LHS, RHS);
01111 };
01112
01113 APInt AP1 = CI1->getValue();
01114 APInt AP2 = CI2->getValue();
01115
01116 // Don't bother doing any work for cases which InstSimplify handles.
01117 if (AP2 == 0)
01118 return nullptr;
01119
01120 unsigned AP2TrailingZeros = AP2.countTrailingZeros();
01121
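// If AP1 is zero, the shl must move every set bit of AP2 out of the type,
// which requires A >= BitWidth - trailing-zeros(AP2).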
01122 if (!AP1 && AP2TrailingZeros != 0)
01123 return getICmp(I.ICMP_UGE, A,
01124 ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));
01125
01126 if (AP1 == AP2)
01127 return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
01128
01129 // Get the distance between the lowest bits that are set.
01130 int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;
01131
01132 if (Shift > 0 && AP2.shl(Shift) == AP1)
01133 return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
01134
01135 // Shifting const2 will never be equal to const1.
01136 return getConstant(false);
01137 }
01138
01139 /// visitICmpInstWithInstAndIntCst - Handle "icmp (instr, intcst)".
01140 ///
01141 Instruction *InstCombiner::visitICmpInstWithInstAndIntCst(ICmpInst &ICI,
01142 Instruction *LHSI,
01143 ConstantInt *RHS) {
01144 const APInt &RHSV = RHS->getValue();
01145
01146 switch (LHSI->getOpcode()) {
01147 case Instruction::Trunc:
01148 if (ICI.isEquality() && LHSI->hasOneUse()) {
01149 // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
01150 // of the high bits truncated out of x are known.
01151 unsigned DstBits = LHSI->getType()->getPrimitiveSizeInBits(),
01152 SrcBits = LHSI->getOperand(0)->getType()->getPrimitiveSizeInBits();
01153 APInt KnownZero(SrcBits, 0), KnownOne(SrcBits, 0);
01154 computeKnownBits(LHSI->getOperand(0), KnownZero, KnownOne, 0, &ICI);
01155
01156 // If all the high bits are known, we can do this xform.
01157 if ((KnownZero|KnownOne).countLeadingOnes() >= SrcBits-DstBits) {
01158 // Pull in the high bits from known-ones set.
01159 APInt NewRHS = RHS->getValue().zext(SrcBits);
01160 NewRHS |= KnownOne & APInt::getHighBitsSet(SrcBits, SrcBits-DstBits);
01161 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
01162 Builder->getInt(NewRHS));
01163 }
01164 }
01165 break;
01166
01167 case Instruction::Xor: // (icmp pred (xor X, XorCst), CI)
01168 if (ConstantInt *XorCst = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
01169 // If this is a comparison that tests the signbit (X < 0) or (x > -1),
01170 // fold the xor.
01171 if ((ICI.getPredicate() == ICmpInst::ICMP_SLT && RHSV == 0) ||
01172 (ICI.getPredicate() == ICmpInst::ICMP_SGT && RHSV.isAllOnesValue())) {
01173 Value *CompareVal = LHSI->getOperand(0);
01174
01175 // If the sign bit of the XorCst is not set, there is no change to
01176 // the operation; just stop using the Xor.
01177 if (!XorCst->isNegative()) {
01178 ICI.setOperand(0, CompareVal);
01179 Worklist.Add(LHSI);
01180 return &ICI;
01181 }
01182
01183 // Was the old condition true if the operand is positive?
01184 bool isTrueIfPositive = ICI.getPredicate() == ICmpInst::ICMP_SGT;
01185
01186 // If so, the new one isn't.
01187 isTrueIfPositive ^= true;
01188
01189 if (isTrueIfPositive)
01190 return new ICmpInst(ICmpInst::ICMP_SGT, CompareVal,
01191 SubOne(RHS));
01192 else
01193 return new ICmpInst(ICmpInst::ICMP_SLT, CompareVal,
01194 AddOne(RHS));
01195 }
01196
01197 if (LHSI->hasOneUse()) {
01198 // (icmp u/s (xor A SignBit), C) -> (icmp s/u A, (xor C SignBit))
01199 if (!ICI.isEquality() && XorCst->getValue().isSignBit()) {
01200 const APInt &SignBit = XorCst->getValue();
01201 ICmpInst::Predicate Pred = ICI.isSigned()
01202 ? ICI.getUnsignedPredicate()
01203 : ICI.getSignedPredicate();
01204 return new ICmpInst(Pred, LHSI->getOperand(0),
01205 Builder->getInt(RHSV ^ SignBit));
01206 }
01207
01208 // (icmp u/s (xor A ~SignBit), C) -> (icmp s/u (xor C ~SignBit), A)
01209 if (!ICI.isEquality() && XorCst->isMaxValue(true)) {
01210 const APInt &NotSignBit = XorCst->getValue();
01211 ICmpInst::Predicate Pred = ICI.isSigned()
01212 ? ICI.getUnsignedPredicate()
01213 : ICI.getSignedPredicate();
01214 Pred = ICI.getSwappedPredicate(Pred);
01215 return new ICmpInst(Pred, LHSI->getOperand(0),
01216 Builder->getInt(RHSV ^ NotSignBit));
01217 }
01218 }
01219
01220 // (icmp ugt (xor X, C), ~C) -> (icmp ult X, C)
01221 // iff -C is a power of 2
01222 if (ICI.getPredicate() == ICmpInst::ICMP_UGT &&
01223 XorCst->getValue() == ~RHSV && (RHSV + 1).isPowerOf2())
01224 return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0), XorCst);
01225
01226 // (icmp ult (xor X, C), -C) -> (icmp uge X, C)
01227 // iff -C is a power of 2
01228 if (ICI.getPredicate() == ICmpInst::ICMP_ULT &&
01229 XorCst->getValue() == -RHSV && RHSV.isPowerOf2())
01230 return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0), XorCst);
01231 }
01232 break;
01233 case Instruction::And: // (icmp pred (and X, AndCst), RHS)
01234 if (LHSI->hasOneUse() && isa<ConstantInt>(LHSI->getOperand(1)) &&
01235 LHSI->getOperand(0)->hasOneUse()) {
01236 ConstantInt *AndCst = cast<ConstantInt>(LHSI->getOperand(1));
01237
01238 // If the LHS is an AND of a truncating cast, we can widen the
01239 // and/compare to be the input width without changing the value
01240 // produced, eliminating a cast.
01241 if (TruncInst *Cast = dyn_cast<TruncInst>(LHSI->getOperand(0))) {
01242 // We can do this transformation if either the AND constant does not
01243 // have its sign bit set or if it is an equality comparison.
01244 // Extending a relational comparison when we're checking the sign
01245 // bit would not work.
01246 if (ICI.isEquality() ||
01247 (!AndCst->isNegative() && RHSV.isNonNegative())) {
01248 Value *NewAnd =
01249 Builder->CreateAnd(Cast->getOperand(0),
01250 ConstantExpr::getZExt(AndCst, Cast->getSrcTy()));
01251 NewAnd->takeName(LHSI);
01252 return new ICmpInst(ICI.getPredicate(), NewAnd,
01253 ConstantExpr::getZExt(RHS, Cast->getSrcTy()));
01254 }
01255 }
01256
01257 // If the LHS is an AND of a zext, and we have an equality compare, we can
01258 // shrink the and/compare to the smaller type, eliminating the cast.
01259 if (ZExtInst *Cast = dyn_cast<ZExtInst>(LHSI->getOperand(0))) {
01260 IntegerType *Ty = cast<IntegerType>(Cast->getSrcTy());
01261 // Make sure we don't compare the upper bits, SimplifyDemandedBits
01262 // should fold the icmp to true/false in that case.
01263 if (ICI.isEquality() && RHSV.getActiveBits() <= Ty->getBitWidth()) {
01264 Value *NewAnd =
01265 Builder->CreateAnd(Cast->getOperand(0),
01266 ConstantExpr::getTrunc(AndCst, Ty));
01267 NewAnd->takeName(LHSI);
01268 return new ICmpInst(ICI.getPredicate(), NewAnd,
01269 ConstantExpr::getTrunc(RHS, Ty));
01270 }
01271 }
01272
01273 // If this is: (X >> C1) & C2 != C3 (where any shift and any compare
01274 // could exist), turn it into (X & (C2 << C1)) != (C3 << C1). This
01275 // happens a LOT in code produced by the C front-end, for bitfield
01276 // access.
01277 BinaryOperator *Shift = dyn_cast<BinaryOperator>(LHSI->getOperand(0));
01278 if (Shift && !Shift->isShift())
01279 Shift = nullptr;
01280
01281 ConstantInt *ShAmt;
01282 ShAmt = Shift ? dyn_cast<ConstantInt>(Shift->getOperand(1)) : nullptr;
01283
01284 // This seemingly simple opportunity to fold away a shift turns out to
01285 // be rather complicated. See PR17827
01286 // ( http://llvm.org/bugs/show_bug.cgi?id=17827 ) for details.
01287 if (ShAmt) {
01288 bool CanFold = false;
01289 unsigned ShiftOpcode = Shift->getOpcode();
01290 if (ShiftOpcode == Instruction::AShr) {
01291 // There may be some constraints that make this possible,
01292 // but nothing simple has been discovered yet.
01293 CanFold = false;
01294 } else if (ShiftOpcode == Instruction::Shl) {
01295 // For a left shift, we can fold if the comparison is not signed.
01296 // We can also fold a signed comparison if the mask value and
01297 // comparison value are not negative. These constraints may not be
01298 // obvious, but we can prove that they are correct using an SMT
01299 // solver.
01300 if (!ICI.isSigned() || (!AndCst->isNegative() && !RHS->isNegative()))
01301 CanFold = true;
01302 } else if (ShiftOpcode == Instruction::LShr) {
01303 // For a logical right shift, we can fold if the comparison is not
01304 // signed. We can also fold a signed comparison if the shifted mask
01305 // value and the shifted comparison value are not negative.
01306 // These constraints may not be obvious, but we can prove that they
01307 // are correct using an SMT solver.
01308 if (!ICI.isSigned())
01309 CanFold = true;
01310 else {
01311 ConstantInt *ShiftedAndCst =
01312 cast<ConstantInt>(ConstantExpr::getShl(AndCst, ShAmt));
01313 ConstantInt *ShiftedRHSCst =
01314 cast<ConstantInt>(ConstantExpr::getShl(RHS, ShAmt));
01315
01316 if (!ShiftedAndCst->isNegative() && !ShiftedRHSCst->isNegative())
01317 CanFold = true;
01318 }
01319 }
01320
01321 if (CanFold) {
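// Shift the comparison constant (and, below, the and-mask) in the opposite
// direction so the unshifted value can be compared directly.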
01322 Constant *NewCst;
01323 if (ShiftOpcode == Instruction::Shl)
01324 NewCst = ConstantExpr::getLShr(RHS, ShAmt);
01325 else
01326 NewCst = ConstantExpr::getShl(RHS, ShAmt);
01327
01328 // Check to see if we are shifting out any of the bits being
01329 // compared.
01330 if (ConstantExpr::get(ShiftOpcode, NewCst, ShAmt) != RHS) {
01331 // If we shifted bits out, the fold is not going to work out.
01332 // As a special case, check to see if this means that the
01333 // result is always true or false now.
01334 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
01335 return ReplaceInstUsesWith(ICI, Builder->getFalse());
01336 if (ICI.getPredicate() == ICmpInst::ICMP_NE)
01337 return ReplaceInstUsesWith(ICI, Builder->getTrue());
01338 } else {
01339 ICI.setOperand(1, NewCst);
01340 Constant *NewAndCst;
01341 if (ShiftOpcode == Instruction::Shl)
01342 NewAndCst = ConstantExpr::getLShr(AndCst, ShAmt);
01343 else
01344 NewAndCst = ConstantExpr::getShl(AndCst, ShAmt);
01345 LHSI->setOperand(1, NewAndCst);
01346 LHSI->setOperand(0, Shift->getOperand(0));
01347 Worklist.Add(Shift); // Shift is dead.
01348 return &ICI;
01349 }
01350 }
01351 }
01352
01353 // Turn ((X >> Y) & C) == 0 into (X & (C << Y)) == 0. The latter is
01354 // preferable because it allows the C<<Y expression to be hoisted out
01355 // of a loop if Y is invariant and X is not.
01356 if (Shift && Shift->hasOneUse() && RHSV == 0 &&
01357 ICI.isEquality() && !Shift->isArithmeticShift() &&
01358 !isa<Constant>(Shift->getOperand(0))) {
01359 // Compute C << Y.
01360 Value *NS;
01361 if (Shift->getOpcode() == Instruction::LShr) {
01362 NS = Builder->CreateShl(AndCst, Shift->getOperand(1));
01363 } else {
01364 // Insert a logical shift.
01365 NS = Builder->CreateLShr(AndCst, Shift->getOperand(1));
01366 }
01367
01368 // Compute X & (C << Y).
01369 Value *NewAnd =
01370 Builder->CreateAnd(Shift->getOperand(0), NS, LHSI->getName());
01371
01372 ICI.setOperand(0, NewAnd);
01373 return &ICI;
01374 }
01375
01376 // (icmp pred (and (or (lshr X, Y), X), 1), 0) -->
01377 // (icmp pred (and X, (or (shl 1, Y), 1)), 0)
01378 //
01379 // iff pred isn't signed
01380 {
01381 Value *X, *Y, *LShr;
01382 if (!ICI.isSigned() && RHSV == 0) {
01383 if (match(LHSI->getOperand(1), m_One())) {
01384 Constant *One = cast<Constant>(LHSI->getOperand(1));
01385 Value *Or = LHSI->getOperand(0);
01386 if (match(Or, m_Or(m_Value(LShr), m_Value(X))) &&
01387 match(LShr, m_LShr(m_Specific(X), m_Value(Y)))) {
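// Count how many of the old instructions would become dead, so the rewrite
// is only performed when it does not increase the instruction count.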
01388 unsigned UsesRemoved = 0;
01389 if (LHSI->hasOneUse())
01390 ++UsesRemoved;
01391 if (Or->hasOneUse())
01392 ++UsesRemoved;
01393 if (LShr->hasOneUse())
01394 ++UsesRemoved;
01395 Value *NewOr = nullptr;
01396 // Compute X & ((1 << Y) | 1)
01397 if (auto *C = dyn_cast<Constant>(Y)) {
01398 if (UsesRemoved >= 1)
01399 NewOr =
01400 ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
01401 } else {
01402 if (UsesRemoved >= 3)
01403 NewOr = Builder->CreateOr(Builder->CreateShl(One, Y,
01404 LShr->getName(),
01405 /*HasNUW=*/true),
01406 One, Or->getName());
01407 }
01408 if (NewOr) {
01409 Value *NewAnd = Builder->CreateAnd(X, NewOr, LHSI->getName());
01410 ICI.setOperand(0, NewAnd);
01411 return &ICI;
01412 }
01413 }
01414 }
01415 }
01416 }
01417
01418 // Replace ((X & AndCst) > RHSV) with ((X & AndCst) != 0), if any
01419 // bit set in (X & AndCst) will produce a result greater than RHSV.
01420 if (ICI.getPredicate() == ICmpInst::ICMP_UGT) {
01421 unsigned NTZ = AndCst->getValue().countTrailingZeros();
01422 if ((NTZ < AndCst->getBitWidth()) &&
01423 APInt::getOneBitSet(AndCst->getBitWidth(), NTZ).ugt(RHSV))
01424 return new ICmpInst(ICmpInst::ICMP_NE, LHSI,
01425 Constant::getNullValue(RHS->getType()));
01426 }
01427 }
01428
01429 // Try to optimize things like "A[i]&42 == 0" to index computations.
01430 if (LoadInst *LI = dyn_cast<LoadInst>(LHSI->getOperand(0))) {
01431 if (GetElementPtrInst *GEP =
01432 dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
01433 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
01434 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
01435 !LI->isVolatile() && isa<ConstantInt>(LHSI->getOperand(1))) {
01436 ConstantInt *C = cast<ConstantInt>(LHSI->getOperand(1));
01437 if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV,ICI, C))
01438 return Res;
01439 }
01440 }
01441
01442 // X & -C == -C -> X >u ~C
01443 // X & -C != -C -> X <=u ~C
01444 // iff C is a power of 2
01445 if (ICI.isEquality() && RHS == LHSI->getOperand(1) && (-RHSV).isPowerOf2())
01446 return new ICmpInst(
01447 ICI.getPredicate() == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_UGT
01448 : ICmpInst::ICMP_ULE,
01449 LHSI->getOperand(0), SubOne(RHS));
01450 break;
01451
01452 case Instruction::Or: {
01453 if (!ICI.isEquality() || !RHS->isNullValue() || !LHSI->hasOneUse())
01454 break;
01455 Value *P, *Q;
01456 if (match(LHSI, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
01457 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
01458 // -> and (icmp eq P, null), (icmp eq Q, null).
01459 Value *ICIP = Builder->CreateICmp(ICI.getPredicate(), P,
01460 Constant::getNullValue(P->getType()));
01461 Value *ICIQ = Builder->CreateICmp(ICI.getPredicate(), Q,
01462 Constant::getNullValue(Q->getType()));
01463 Instruction *Op;
01464 if (ICI.getPredicate() == ICmpInst::ICMP_EQ)
01465 Op = BinaryOperator::CreateAnd(ICIP, ICIQ);
01466 else
01467 Op = BinaryOperator::CreateOr(ICIP, ICIQ);
01468 return Op;
01469 }
01470 break;
01471 }
01472
01473 case Instruction::Mul: { // (icmp pred (mul X, Val), CI)
01474 ConstantInt *Val = dyn_cast<ConstantInt>(LHSI->getOperand(1));
01475 if (!Val) break;
01476
01477 // If this is a signed comparison to 0 and the mul is sign preserving,
01478 // use the mul LHS operand instead.
01479 ICmpInst::Predicate pred = ICI.getPredicate();
01480 if (isSignTest(pred, RHS) && !Val->isZero() &&
01481 cast<BinaryOperator>(LHSI)->hasNoSignedWrap())
01482 return new ICmpInst(Val->isNegative() ?
01483 ICmpInst::getSwappedPredicate(pred) : pred,
01484 LHSI->getOperand(0),
01485 Constant::getNullValue(RHS->getType()));
01486
01487 break;
01488 }
01489
01490 case Instruction::Shl: { // (icmp pred (shl X, ShAmt), CI)
01491 uint32_t TypeBits = RHSV.getBitWidth();
01492 ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1));
01493 if (!ShAmt) {
01494 Value *X;
01495 // (1 << X) pred P2 -> X pred Log2(P2)
01496 if (match(LHSI, m_Shl(m_One(), m_Value(X)))) {
01497 bool RHSVIsPowerOf2 = RHSV.isPowerOf2();
01498 ICmpInst::Predicate Pred = ICI.getPredicate();
01499 if (ICI.isUnsigned()) {
01500 if (!RHSVIsPowerOf2) {
01501 // (1 << X) < 30 -> X <= 4
01502 // (1 << X) <= 30 -> X <= 4
01503 // (1 << X) >= 30 -> X > 4
01504 // (1 << X) > 30 -> X > 4
01505 if (Pred == ICmpInst::ICMP_ULT)
01506 Pred = ICmpInst::ICMP_ULE;
01507 else if (Pred == ICmpInst::ICMP_UGE)
01508 Pred = ICmpInst::ICMP_UGT;
01509 }
01510 unsigned RHSLog2 = RHSV.logBase2();
01511
01512 // (1 << X) >= 2147483648 -> X >= 31 -> X == 31
01513 // (1 << X) < 2147483648 -> X < 31 -> X != 31
01514 if (RHSLog2 == TypeBits-1) {
01515 if (Pred == ICmpInst::ICMP_UGE)
01516 Pred = ICmpInst::ICMP_EQ;
01517 else if (Pred == ICmpInst::ICMP_ULT)
01518 Pred = ICmpInst::ICMP_NE;
01519 }
01520
01521 return new ICmpInst(Pred, X,
01522 ConstantInt::get(RHS->getType(), RHSLog2));
01523 } else if (ICI.isSigned()) {
01524 if (RHSV.isAllOnesValue()) {
01525 // (1 << X) <= -1 -> X == 31
01526 if (Pred == ICmpInst::ICMP_SLE)
01527 return new ICmpInst(ICmpInst::ICMP_EQ, X,
01528 ConstantInt::get(RHS->getType(), TypeBits-1));
01529
01530 // (1 << X) > -1 -> X != 31
01531 if (Pred == ICmpInst::ICMP_SGT)
01532 return new ICmpInst(ICmpInst::ICMP_NE, X,
01533 ConstantInt::get(RHS->getType(), TypeBits-1));
01534 } else if (!RHSV) {
01535 // (1 << X) < 0 -> X == 31
01536 // (1 << X) <= 0 -> X == 31
01537 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
01538 return new ICmpInst(ICmpInst::ICMP_EQ, X,
01539 ConstantInt::get(RHS->getType(), TypeBits-1));
01540
01541 // (1 << X) >= 0 -> X != 31
01542 // (1 << X) > 0 -> X != 31
01543 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
01544 return new ICmpInst(ICmpInst::ICMP_NE, X,
01545 ConstantInt::get(RHS->getType(), TypeBits-1));
01546 }
01547 } else if (ICI.isEquality()) {
01548 if (RHSVIsPowerOf2)
01549 return new ICmpInst(
01550 Pred, X, ConstantInt::get(RHS->getType(), RHSV.logBase2()));
01551 }
01552 }
01553 break;
01554 }
01555
01556 // Check that the shift amount is in range. If not, don't perform
01557 // undefined shifts. When the shift is visited it will be
01558 // simplified.
01559 if (ShAmt->uge(TypeBits))
01560 break;
01561
01562 if (ICI.isEquality()) {
01563 // If we are comparing against bits always shifted out, the
01564 // comparison cannot succeed.
01565 Constant *Comp =
01566 ConstantExpr::getShl(ConstantExpr::getLShr(RHS, ShAmt),
01567 ShAmt);
01568 if (Comp != RHS) {// Comparing against a bit that we know is zero.
01569 bool IsICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
01570 Constant *Cst = Builder->getInt1(IsICMP_NE);
01571 return ReplaceInstUsesWith(ICI, Cst);
01572 }
01573
01574 // If the shift is NUW, then it is just shifting out zeros, no need for an
01575 // AND.
01576 if (cast<BinaryOperator>(LHSI)->hasNoUnsignedWrap())
01577 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
01578 ConstantExpr::getLShr(RHS, ShAmt));
01579
01580 // If the shift is NSW and we compare to 0, then it is just shifting out
01581 // sign bits, no need for an AND either.
01582 if (cast<BinaryOperator>(LHSI)->hasNoSignedWrap() && RHSV == 0)
01583 return new ICmpInst(ICI.getPredicate(), LHSI->getOperand(0),
01584 ConstantExpr::getLShr(RHS, ShAmt));
01585
01586 if (LHSI->hasOneUse()) {
01587 // Otherwise strength reduce the shift into an and.
01588 uint32_t ShAmtVal = (uint32_t)ShAmt->getLimitedValue(TypeBits);
01589 Constant *Mask = Builder->getInt(APInt::getLowBitsSet(TypeBits,
01590 TypeBits - ShAmtVal));
01591
01592 Value *And =
01593 Builder->CreateAnd(LHSI->getOperand(0),Mask, LHSI->getName()+".mask");
01594 return new ICmpInst(ICI.getPredicate(), And,
01595 ConstantExpr::getLShr(RHS, ShAmt));
01596 }
01597 }
01598
01599 // If this is a signed comparison to 0 and the shift is sign preserving,
01600 // use the shift LHS operand instead.
01601 ICmpInst::Predicate pred = ICI.getPredicate();
01602 if (isSignTest(pred, RHS) &&
01603 cast<BinaryOperator>(LHSI)->hasNoSignedWrap())
01604 return new ICmpInst(pred,
01605 LHSI->getOperand(0),
01606 Constant::getNullValue(RHS->getType()));
01607
01608 // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
01609 bool TrueIfSigned = false;
01610 if (LHSI->hasOneUse() &&
01611 isSignBitCheck(ICI.getPredicate(), RHS, TrueIfSigned)) {
01612 // (X << 31) <s 0 --> (X&1) != 0
01613 Constant *Mask = ConstantInt::get(LHSI->getOperand(0)->getType(),
01614 APInt::getOneBitSet(TypeBits,
01615 TypeBits-ShAmt->getZExtValue()-1));
01616 Value *And =
01617 Builder->CreateAnd(LHSI->getOperand(0), Mask, LHSI->getName()+".mask");
01618 return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
01619 And, Constant::getNullValue(And->getType()));
01620 }
01621
01622 // Transform (icmp pred iM (shl iM %v, N), CI)
01623 // -> (icmp pred i(M-N) (trunc iM %v to i(M-N)), (trunc (CI>>N)))
01624 // when (trunc (CI>>N)) loses no bits, i.e. the low N bits of CI are zero.
01625 // This gets rid of the shift in favor of a trunc, which can be free on the
01626 // target. It has the additional benefit of comparing to a smaller constant,
01627 // which tends to be more target friendly.
01628 unsigned Amt = ShAmt->getLimitedValue(TypeBits-1);
01629 if (LHSI->hasOneUse() &&
01630 Amt != 0 && RHSV.countTrailingZeros() >= Amt) {
01631 Type *NTy = IntegerType::get(ICI.getContext(), TypeBits - Amt);
01632 Constant *NCI = ConstantExpr::getTrunc(
01633 ConstantExpr::getAShr(RHS,
01634 ConstantInt::get(RHS->getType(), Amt)),
01635 NTy);
01636 return new ICmpInst(ICI.getPredicate(),
01637 Builder->CreateTrunc(LHSI->getOperand(0), NTy),
01638 NCI);
01639 }
01640
01641 break;
01642 }
01643
01644 case Instruction::LShr: // (icmp pred (shr X, ShAmt), CI)
01645 case Instruction::AShr: {
01646 // Handle equality comparisons of shift-by-constant.
01647 BinaryOperator *BO = cast<BinaryOperator>(LHSI);
01648 if (ConstantInt *ShAmt = dyn_cast<ConstantInt>(LHSI->getOperand(1))) {
01649 if (Instruction *Res = FoldICmpShrCst(ICI, BO, ShAmt))
01650 return Res;
01651 }
01652
01653 // Handle exact shr's.
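// An exact shift drops only zero bits, so comparing the shift result against
// zero is equivalent to comparing the original operand against zero.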
01654 if (ICI.isEquality() && BO->isExact() && BO->hasOneUse()) {
01655 if (RHSV.isMinValue())
01656 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0), RHS);
01657 }
01658 break;
01659 }
01660
01661 case Instruction::SDiv:
01662 case Instruction::UDiv:
01663 // Fold: icmp pred ([us]div X, C1), C2 -> range test
01664 // Fold this div into the comparison, producing a range check.
01665 // Determine, based on the divide type, what the range is being
01666 // checked. If there is an overflow on the low or high side, remember
01667 // it, otherwise compute the range [low, hi) bounding the new value.
01668 // See: InsertRangeTest above for the kinds of replacements possible.
01669 if (ConstantInt *DivRHS = dyn_cast<ConstantInt>(LHSI->getOperand(1)))
01670 if (Instruction *R = FoldICmpDivCst(ICI, cast<BinaryOperator>(LHSI),
01671 DivRHS))
01672 return R;
01673 break;
01674
01675 case Instruction::Sub: {
01676 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(0));
01677 if (!LHSC) break;
01678 const APInt &LHSV = LHSC->getValue();
01679
01680 // C1-X <u C2 -> (X|(C2-1)) == C1
01681 // iff C1 & (C2-1) == C2-1
01682 // C2 is a power of 2
01683 if (ICI.getPredicate() == ICmpInst::ICMP_ULT && LHSI->hasOneUse() &&
01684 RHSV.isPowerOf2() && (LHSV & (RHSV - 1)) == (RHSV - 1))
01685 return new ICmpInst(ICmpInst::ICMP_EQ,
01686 Builder->CreateOr(LHSI->getOperand(1), RHSV - 1),
01687 LHSC);
01688
01689 // C1-X >u C2 -> (X|C2) != C1
01690 // iff C1 & C2 == C2
01691 // C2+1 is a power of 2
01692 if (ICI.getPredicate() == ICmpInst::ICMP_UGT && LHSI->hasOneUse() &&
01693 (RHSV + 1).isPowerOf2() && (LHSV & RHSV) == RHSV)
01694 return new ICmpInst(ICmpInst::ICMP_NE,
01695 Builder->CreateOr(LHSI->getOperand(1), RHSV), LHSC);
01696 break;
01697 }
01698
01699 case Instruction::Add:
01700 // Fold: icmp pred (add X, C1), C2
01701 if (!ICI.isEquality()) {
01702 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHSI->getOperand(1));
01703 if (!LHSC) break;
01704 const APInt &LHSV = LHSC->getValue();
01705
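// Map the range implied by the comparison back through the add by
// subtracting C1, then re-emit it as a single compare when one endpoint of
// the resulting range is the signed or unsigned minimum.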
01706 ConstantRange CR = ICI.makeConstantRange(ICI.getPredicate(), RHSV)
01707 .subtract(LHSV);
01708
01709 if (ICI.isSigned()) {
01710 if (CR.getLower().isSignBit()) {
01711 return new ICmpInst(ICmpInst::ICMP_SLT, LHSI->getOperand(0),
01712 Builder->getInt(CR.getUpper()));
01713 } else if (CR.getUpper().isSignBit()) {
01714 return new ICmpInst(ICmpInst::ICMP_SGE, LHSI->getOperand(0),
01715 Builder->getInt(CR.getLower()));
01716 }
01717 } else {
01718 if (CR.getLower().isMinValue()) {
01719 return new ICmpInst(ICmpInst::ICMP_ULT, LHSI->getOperand(0),
01720 Builder->getInt(CR.getUpper()));
01721 } else if (CR.getUpper().isMinValue()) {
01722 return new ICmpInst(ICmpInst::ICMP_UGE, LHSI->getOperand(0),
01723 Builder->getInt(CR.getLower()));
01724 }
01725 }
01726
01727 // X-C1 <u C2 -> (X & -C2) == C1
01728 // iff C1 & (C2-1) == 0
01729 // C2 is a power of 2
01730 if (ICI.getPredicate() == ICmpInst::ICMP_ULT && LHSI->hasOneUse() &&
01731 RHSV.isPowerOf2() && (LHSV & (RHSV - 1)) == 0)
01732 return new ICmpInst(ICmpInst::ICMP_EQ,
01733 Builder->CreateAnd(LHSI->getOperand(0), -RHSV),
01734 ConstantExpr::getNeg(LHSC));
01735
01736 // X-C1 >u C2 -> (X & ~C2) != C1
01737 // iff C1 & C2 == 0
01738 // C2+1 is a power of 2
01739 if (ICI.getPredicate() == ICmpInst::ICMP_UGT && LHSI->hasOneUse() &&
01740 (RHSV + 1).isPowerOf2() && (LHSV & RHSV) == 0)
01741 return new ICmpInst(ICmpInst::ICMP_NE,
01742 Builder->CreateAnd(LHSI->getOperand(0), ~RHSV),
01743 ConstantExpr::getNeg(LHSC));
01744 }
01745 break;
01746 }
01747
01748 // Simplify icmp_eq and icmp_ne instructions with integer constant RHS.
01749 if (ICI.isEquality()) {
01750 bool isICMP_NE = ICI.getPredicate() == ICmpInst::ICMP_NE;
01751
01752 // If the first operand is (add|sub|and|or|xor|rem) with a constant, and
01753 // the second operand is a constant, simplify a bit.
01754 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(LHSI)) {
01755 switch (BO->getOpcode()) {
01756 case Instruction::SRem:
01757 // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
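// Only the low c bits of X determine whether X srem 2^c is zero, so an
// unsigned remainder computes the same predicate and is easier to fold.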
01758 if (RHSV == 0 && isa<ConstantInt>(BO->getOperand(1)) &&BO->hasOneUse()){
01759 const APInt &V = cast<ConstantInt>(BO->getOperand(1))->getValue();
01760 if (V.sgt(1) && V.isPowerOf2()) {
01761 Value *NewRem =
01762 Builder->CreateURem(BO->getOperand(0), BO->getOperand(1),
01763 BO->getName());
01764 return new ICmpInst(ICI.getPredicate(), NewRem,
01765 Constant::getNullValue(BO->getType()));
01766 }
01767 }
01768 break;
01769 case Instruction::Add:
01770 // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
01771 if (ConstantInt *BOp1C = dyn_cast<ConstantInt>(BO->getOperand(1))) {
01772 if (BO->hasOneUse())
01773 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
01774 ConstantExpr::getSub(RHS, BOp1C));
01775 } else if (RHSV == 0) {
01776 // Replace ((add A, B) != 0) with (A != -B) if A or B is
01777 // efficiently invertible, or if the add has just this one use.
01778 Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
01779
01780 if (Value *NegVal = dyn_castNegVal(BOp1))
01781 return new ICmpInst(ICI.getPredicate(), BOp0, NegVal);
01782 if (Value *NegVal = dyn_castNegVal(BOp0))
01783 return new ICmpInst(ICI.getPredicate(), NegVal, BOp1);
01784 if (BO->hasOneUse()) {
01785 Value *Neg = Builder->CreateNeg(BOp1);
01786 Neg->takeName(BO);
01787 return new ICmpInst(ICI.getPredicate(), BOp0, Neg);
01788 }
01789 }
01790 break;
01791 case Instruction::Xor:
01792 // For the xor case, we can xor two constants together, eliminating
01793 // the explicit xor.
01794 if (Constant *BOC = dyn_cast<Constant>(BO->getOperand(1))) {
01795 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
01796 ConstantExpr::getXor(RHS, BOC));
01797 } else if (RHSV == 0) {
01798 // Replace ((xor A, B) != 0) with (A != B)
01799 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
01800 BO->getOperand(1));
01801 }
01802 break;
01803 case Instruction::Sub:
01804 // Replace ((sub A, B) != C) with (B != A-C) if A & C are constants.
01805 if (ConstantInt *BOp0C = dyn_cast<ConstantInt>(BO->getOperand(0))) {
01806 if (BO->hasOneUse())
01807 return new ICmpInst(ICI.getPredicate(), BO->getOperand(1),
01808 ConstantExpr::getSub(BOp0C, RHS));
01809 } else if (RHSV == 0) {
01810 // Replace ((sub A, B) != 0) with (A != B)
01811 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
01812 BO->getOperand(1));
01813 }
01814 break;
01815 case Instruction::Or:
01816 // If bits are being or'd in that are not present in the constant we
01817 // are comparing against, then the comparison could never succeed!
01818 if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
01819 Constant *NotCI = ConstantExpr::getNot(RHS);
01820 if (!ConstantExpr::getAnd(BOC, NotCI)->isNullValue())
01821 return ReplaceInstUsesWith(ICI, Builder->getInt1(isICMP_NE));
01822 }
01823 break;
01824
01825 case Instruction::And:
01826 if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
01827 // If bits are being compared against that are and'd out, then the
01828 // comparison can never succeed!
01829 if ((RHSV & ~BOC->getValue()) != 0)
01830 return ReplaceInstUsesWith(ICI, Builder->getInt1(isICMP_NE));
01831
01832 // If we have ((X & C) == C), turn it into ((X & C) != 0).
01833 if (RHS == BOC && RHSV.isPowerOf2())
01834 return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ :
01835 ICmpInst::ICMP_NE, LHSI,
01836 Constant::getNullValue(RHS->getType()));
01837
01838 // Don't perform the following transforms if the AND has multiple uses
01839 if (!BO->hasOneUse())
01840 break;
01841
01842 // Replace ((and X, (1 << size(X)-1)) != 0) with (X s< 0)
01843 if (BOC->getValue().isSignBit()) {
01844 Value *X = BO->getOperand(0);
01845 Constant *Zero = Constant::getNullValue(X->getType());
01846 ICmpInst::Predicate pred = isICMP_NE ?
01847 ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
01848 return new ICmpInst(pred, X, Zero);
01849 }
01850
01851 // ((X & ~7) == 0) --> X < 8
01852 if (RHSV == 0 && isHighOnes(BOC)) {
01853 Value *X = BO->getOperand(0);
01854 Constant *NegX = ConstantExpr::getNeg(BOC);
01855 ICmpInst::Predicate pred = isICMP_NE ?
01856 ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
01857 return new ICmpInst(pred, X, NegX);
01858 }
01859 }
01860 break;
01861 case Instruction::Mul:
01862 if (RHSV == 0 && BO->hasNoSignedWrap()) {
01863 if (ConstantInt *BOC = dyn_cast<ConstantInt>(BO->getOperand(1))) {
01864 // The trivial case (mul X, 0) is handled by InstSimplify
01865 // General case : (mul X, C) != 0 iff X != 0
01866 // (mul X, C) == 0 iff X == 0
01867 if (!BOC->isZero())
01868 return new ICmpInst(ICI.getPredicate(), BO->getOperand(0),
01869 Constant::getNullValue(RHS->getType()));
01870 }
01871 }
01872 break;
01873 default: break;
01874 }
01875 } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(LHSI)) {
01876 // Handle icmp {eq|ne} <intrinsic>, intcst.
01877 switch (II->getIntrinsicID()) {
01878 case Intrinsic::bswap:
01879 Worklist.Add(II);
01880 ICI.setOperand(0, II->getArgOperand(0));
01881 ICI.setOperand(1, Builder->getInt(RHSV.byteSwap()));
01882 return &ICI;
01883 case Intrinsic::ctlz:
01884 case Intrinsic::cttz:
01885 // ctz(A) == bitwidth(A) -> A == 0, and likewise for !=
01886 if (RHSV == RHS->getType()->getBitWidth()) {
01887 Worklist.Add(II);
01888 ICI.setOperand(0, II->getArgOperand(0));
01889 ICI.setOperand(1, ConstantInt::get(RHS->getType(), 0));
01890 return &ICI;
01891 }
01892 break;
01893 case Intrinsic::ctpop:
01894 // popcount(A) == 0 -> A == 0 and likewise for !=
01895 if (RHS->isZero()) {
01896 Worklist.Add(II);
01897 ICI.setOperand(0, II->getArgOperand(0));
01898 ICI.setOperand(1, RHS);
01899 return &ICI;
01900 }
01901 break;
01902 default:
01903 break;
01904 }
01905 }
01906 }
01907 return nullptr;
01908 }
01909
01910 /// visitICmpInstWithCastAndCast - Handle icmp (cast x to y), (cast/cst).
01911 /// We only handle extending casts so far.
01912 ///
01913 Instruction *InstCombiner::visitICmpInstWithCastAndCast(ICmpInst &ICI) {
01914 const CastInst *LHSCI = cast<CastInst>(ICI.getOperand(0));
01915 Value *LHSCIOp = LHSCI->getOperand(0);
01916 Type *SrcTy = LHSCIOp->getType();
01917 Type *DestTy = LHSCI->getType();
01918 Value *RHSCIOp;
01919
01920 // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
01921 // integer type is the same size as the pointer type.
01922 if (LHSCI->getOpcode() == Instruction::PtrToInt &&
01923 DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth()) {
01924 Value *RHSOp = nullptr;
01925 if (PtrToIntOperator *RHSC = dyn_cast<PtrToIntOperator>(ICI.getOperand(1))) {
01926 Value *RHSCIOp = RHSC->getOperand(0);
01927 if (RHSCIOp->getType()->getPointerAddressSpace() ==
01928 LHSCIOp->getType()->getPointerAddressSpace()) {
01929 RHSOp = RHSC->getOperand(0);
01930 // If the pointer types don't match, insert a bitcast.
01931 if (LHSCIOp->getType() != RHSOp->getType())
01932 RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType());
01933 }
01934 } else if (Constant *RHSC = dyn_cast<Constant>(ICI.getOperand(1)))
01935 RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
01936
01937 if (RHSOp)
01938 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSOp);
01939 }
01940
01941 // The code below only handles extension cast instructions, so far.
01942 // Enforce this.
01943 if (LHSCI->getOpcode() != Instruction::ZExt &&
01944 LHSCI->getOpcode() != Instruction::SExt)
01945 return nullptr;
01946
01947 bool isSignedExt = LHSCI->getOpcode() == Instruction::SExt;
01948 bool isSignedCmp = ICI.isSigned();
01949
01950 if (CastInst *CI = dyn_cast<CastInst>(ICI.getOperand(1))) {
01951 // Not an extension from the same type?
01952 RHSCIOp = CI->getOperand(0);
01953 if (RHSCIOp->getType() != LHSCIOp->getType())
01954 return nullptr;
01955
01956 // If the signedness of the two casts doesn't agree (i.e. one is a sext
01957 // and the other is a zext), then we can't handle this.
01958 if (CI->getOpcode() != LHSCI->getOpcode())
01959 return nullptr;
01960
01961 // Deal with equality cases early.
01962 if (ICI.isEquality())
01963 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
01964
01965 // A signed comparison of sign extended values simplifies into a
01966 // signed comparison.
01967 if (isSignedCmp && isSignedExt)
01968 return new ICmpInst(ICI.getPredicate(), LHSCIOp, RHSCIOp);
01969
01970 // The other three cases all fold into an unsigned comparison.
01971 return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, RHSCIOp);
01972 }
01973
01974 // If we aren't dealing with a constant on the RHS, exit early
01975 ConstantInt *CI = dyn_cast<ConstantInt>(ICI.getOperand(1));
01976 if (!CI)
01977 return nullptr;
01978
01979 // Compute the constant that would happen if we truncated to SrcTy then
01980 // reextended to DestTy.
01981 Constant *Res1 = ConstantExpr::getTrunc(CI, SrcTy);
01982 Constant *Res2 = ConstantExpr::getCast(LHSCI->getOpcode(),
01983 Res1, DestTy);
01984
01985 // If the re-extended constant didn't change...
01986 if (Res2 == CI) {
01987 // Deal with equality cases early.
01988 if (ICI.isEquality())
01989 return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
01990
01991 // A signed comparison of sign extended values simplifies into a
01992 // signed comparison.
01993 if (isSignedExt && isSignedCmp)
01994 return new ICmpInst(ICI.getPredicate(), LHSCIOp, Res1);
01995
01996 // The other three cases all fold into an unsigned comparison.
01997 return new ICmpInst(ICI.getUnsignedPredicate(), LHSCIOp, Res1);
01998 }
01999
02000 // The re-extended constant changed so the constant cannot be represented
02001 // in the shorter type. Consequently, we cannot emit a simple comparison.
02002 // All the cases that fold to true or false will have already been handled
02003 // by SimplifyICmpInst, so only deal with the tricky case.
02004
02005 if (isSignedCmp || !isSignedExt)
02006 return nullptr;
02007
02008 // Evaluate the comparison for LT (we invert for GT below). LE and GE cases
02009 // should have been folded away previously and should not reach this point.
02010
02011 // We're performing an unsigned comp with a sign extended value.
02012 // This is true if the input is >= 0. [aka >s -1]
02013 Constant *NegOne = Constant::getAllOnesValue(SrcTy);
02014 Value *Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICI.getName());
02015
02016 // Finally, return the value computed.
02017 if (ICI.getPredicate() == ICmpInst::ICMP_ULT)
02018 return ReplaceInstUsesWith(ICI, Result);
02019
02020 assert(ICI.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
02021 return BinaryOperator::CreateNot(Result);
02022 }
02023
02024 /// ProcessUGT_ADDCST_ADD - The caller has matched a pattern of the form:
02025 /// I = icmp ugt (add (add A, B), CI2), CI1
02026 /// If this is of the form:
02027 /// sum = a + b
02028 /// if (sum+128 >u 255)
02029 /// Then replace it with llvm.sadd.with.overflow.i8.
02030 ///
02031 static Instruction *ProcessUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
02032 ConstantInt *CI2, ConstantInt *CI1,
02033 InstCombiner &IC) {
02034 // The transformation we're trying to do here is to transform this into an
02035 // llvm.sadd.with.overflow. To do this, we have to replace the original add
02036 // with a narrower add, and discard the add-with-constant that is part of the
02037 // range check (if we can't eliminate it, this isn't profitable).
02038
02039 // In order to eliminate the add-with-constant, the compare must be its only
02040 // use.
02041 Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
02042 if (!AddWithCst->hasOneUse()) return nullptr;
02043
02044 // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
02045 if (!CI2->getValue().isPowerOf2()) return nullptr;
02046 unsigned NewWidth = CI2->getValue().countTrailingZeros();
02047 if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31) return nullptr;
02048
02049 // The width of the new add formed is 1 more than the bias.
02050 ++NewWidth;
02051
02052 // Check to see that CI1 is an all-ones value with NewWidth bits.
02053 if (CI1->getBitWidth() == NewWidth ||
02054 CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
02055 return nullptr;
02056
02057 // This is only really a signed overflow check if the inputs have been
02058 // sign-extended; check for that condition. For example, if CI2 is 2^31 and
02059 // the operands of the add are 64 bits wide, we need at least 33 sign bits.
02060 unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
02061 if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
02062 IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
02063 return nullptr;
02064
02065 // In order to replace the original add with a narrower
02066 // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
02067 // and truncates that discard the high bits of the add. Verify that this is
02068 // the case.
02069 Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
02070 for (User *U : OrigAdd->users()) {
02071 if (U == AddWithCst) continue;
02072
02073 // Only accept truncates for now. We would really like a nice recursive
02074 // predicate like SimplifyDemandedBits, but one that goes down the use-def
02075 // chain to see which bits of a value are actually demanded. If the
02076 // original add had another add which was then immediately truncated, we
02077 // could still do the transformation.
02078 TruncInst *TI = dyn_cast<TruncInst>(U);
02079 if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
02080 return nullptr;
02081 }
02082
02083 // If the pattern matches, truncate the inputs to the narrower type and
02084 // use the sadd_with_overflow intrinsic to efficiently compute both the
02085 // result and the overflow bit.
02086 Module *M = I.getParent()->getParent()->getParent();
02087
02088 Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
02089 Value *F = Intrinsic::getDeclaration(M, Intrinsic::sadd_with_overflow,
02090 NewType);
02091
02092 InstCombiner::BuilderTy *Builder = IC.Builder;
02093
02094 // Put the new code above the original add, in case there are any uses of the
02095 // add between the add and the compare.
02096 Builder->SetInsertPoint(OrigAdd);
02097
02098 Value *TruncA = Builder->CreateTrunc(A, NewType, A->getName()+".trunc");
02099 Value *TruncB = Builder->CreateTrunc(B, NewType, B->getName()+".trunc");
02100 CallInst *Call = Builder->CreateCall2(F, TruncA, TruncB, "sadd");
02101 Value *Add = Builder->CreateExtractValue(Call, 0, "sadd.result");
02102 Value *ZExt = Builder->CreateZExt(Add, OrigAdd->getType());
02103
02104 // The inner add was the result of the narrow add, zero extended to the
02105 // wider type. Replace it with the result computed by the intrinsic.
02106 IC.ReplaceInstUsesWith(*OrigAdd, ZExt);
02107
02108 // The original icmp gets replaced with the overflow value.
02109 return ExtractValueInst::Create(Call, 1, "sadd.overflow");
02110 }
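// A sketch of the rewrite performed above, assuming the operands are i8 values
// that were sign-extended to i32 (hypothetical SSA names):
//   %sa  = sext i8 %a to i32
//   %sb  = sext i8 %b to i32
//   %sum = add i32 %sa, %sb
//   %off = add i32 %sum, 128
//   %cmp = icmp ugt i32 %off, 255
// becomes:
//   %ta  = trunc i32 %sa to i8
//   %tb  = trunc i32 %sb to i8
//   %s   = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %ta, i8 %tb)
//   %v   = extractvalue { i8, i1 } %s, 0
//   %z   = zext i8 %v to i32          ; replaces %sum
//   %cmp = extractvalue { i8, i1 } %s, 1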
02111
02112 bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
02113 Value *RHS, Instruction &OrigI,
02114 Value *&Result, Constant *&Overflow) {
02115 assert((!OrigI.isCommutative() ||
02116 !(isa<Constant>(LHS) && !isa<Constant>(RHS))) &&
02117 "call with a constant RHS if possible!");
02118
02119 auto SetResult = [&](Value *OpResult, Constant *OverflowVal, bool ReuseName) {
02120 Result = OpResult;
02121 Overflow = OverflowVal;
02122 if (ReuseName)
02123 Result->takeName(&OrigI);
02124 return true;
02125 };
02126
02127 switch (OCF) {
02128 case OCF_INVALID:
02129 llvm_unreachable("bad overflow check kind!");
02130
02131 case OCF_UNSIGNED_ADD: {
02132 OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI);
02133 if (OR == OverflowResult::NeverOverflows)
02134 return SetResult(Builder->CreateNUWAdd(LHS, RHS), Builder->getFalse(),
02135 true);
02136
02137 if (OR == OverflowResult::AlwaysOverflows)
02138 return SetResult(Builder->CreateAdd(LHS, RHS), Builder->getTrue(), true);
02139 }
02140 // FALL THROUGH uadd into sadd
02141 case OCF_SIGNED_ADD: {
02142 // X + undef -> undef
02143 if (isa<UndefValue>(RHS))
02144 return SetResult(UndefValue::get(RHS->getType()),
02145 UndefValue::get(Builder->getInt1Ty()), false);
02146
02147 if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS))
02148 // X + 0 -> {X, false}
02149 if (ConstRHS->isZero())
02150 return SetResult(LHS, Builder->getFalse(), false);
02151
02152 // We can strength reduce this signed add into a regular add if we can prove
02153 // that it will never overflow.
02154 if (OCF == OCF_SIGNED_ADD)
02155 if (WillNotOverflowSignedAdd(LHS, RHS, OrigI))
02156 return SetResult(Builder->CreateNSWAdd(LHS, RHS), Builder->getFalse(),
02157 true);
02158 break;
}
02159
02160 case OCF_UNSIGNED_SUB:
02161 case OCF_SIGNED_SUB: {
02162 // undef - X -> undef
02163 // X - undef -> undef
02164 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
02165 return SetResult(UndefValue::get(LHS->getType()),
02166 UndefValue::get(Builder->getInt1Ty()), false);
02167
02168 if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS))
02169 // X - 0 -> {X, false}
02170 if (ConstRHS->isZero())
02171 return SetResult(UndefValue::get(LHS->getType()), Builder->getFalse(),
02172 false);
02173
02174 if (OCF == OCF_SIGNED_SUB) {
02175 if (WillNotOverflowSignedSub(LHS, RHS, OrigI))
02176 return SetResult(Builder->CreateNSWSub(LHS, RHS), Builder->getFalse(),
02177 true);
02178 } else {
02179 if (WillNotOverflowUnsignedSub(LHS, RHS, OrigI))
02180 return SetResult(Builder->CreateNUWSub(LHS, RHS), Builder->getFalse(),
02181 true);
02182 }
02183 break;
02184 }
02185
02186 case OCF_UNSIGNED_MUL: {
02187 OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI);
02188 if (OR == OverflowResult::NeverOverflows)
02189 return SetResult(Builder->CreateNUWMul(LHS, RHS), Builder->getFalse(),
02190 true);
02191 if (OR == OverflowResult::AlwaysOverflows)
02192 return SetResult(Builder->CreateMul(LHS, RHS), Builder->getTrue(), true);
02193 } // FALL THROUGH
02194 case OCF_SIGNED_MUL:
02195 // X * undef -> undef
02196 if (isa<UndefValue>(RHS))
02197 return SetResult(UndefValue::get(LHS->getType()),
02198 UndefValue::get(Builder->getInt1Ty()), false);
02199
02200 if (ConstantInt *RHSI = dyn_cast<ConstantInt>(RHS)) {
02201 // X * 0 -> {0, false}
02202 if (RHSI->isZero())
02203 return SetResult(Constant::getNullValue(RHS->getType()),
02204 Builder->getFalse(), false);
02205
02206 // X * 1 -> {X, false}
02207 if (RHSI->equalsInt(1))
02208 return SetResult(LHS, Builder->getFalse(), false);
02209 }
02210
02211 if (OCF == OCF_SIGNED_MUL)
02212 if (WillNotOverflowSignedMul(LHS, RHS, OrigI))
02213 return SetResult(Builder->CreateNSWMul(LHS, RHS), Builder->getFalse(),
02214 true);
02215 }
02216
02217 return false;
02218 }
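// For example, in the OCF_UNSIGNED_ADD case, if both operands are zero-extended
// from i16 then computeOverflowForUnsignedAdd can prove the i32 addition never
// wraps, and the check collapses to a plain "add nuw" plus a constant-false
// overflow flag (hypothetical SSA names):
//   %x = zext i16 %p to i32
//   %y = zext i16 %q to i32
//   Result   = add nuw i32 %x, %y
//   Overflow = i1 false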
02219
02220 /// \brief Recognize and process idiom involving test for multiplication
02221 /// overflow.
02222 ///
02223 /// The caller has matched a pattern of the form:
02224 /// I = cmp u (mul(zext A, zext B), V
02225 /// The function checks if this is a test for overflow and if so replaces
02226 /// multiplication with call to 'mul.with.overflow' intrinsic.
02227 ///
02228 /// \param I Compare instruction.
02229 /// \param MulVal Result of the 'mul' instruction. It is one of the arguments
02230 /// of the compare instruction. Must be of integer type.
02231 /// \param OtherVal The other argument of the compare instruction.
02232 /// \returns Instruction which must replace the compare instruction, or null if
02233 /// no replacement is required.
02234 static Instruction *ProcessUMulZExtIdiom(ICmpInst &I, Value *MulVal,
02235 Value *OtherVal, InstCombiner &IC) {
02236 // Don't bother doing this transformation for pointers, and don't do it for
02237 // vectors.
02238 if (!isa<IntegerType>(MulVal->getType()))
02239 return nullptr;
02240
02241 assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
02242 assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
02243 Instruction *MulInstr = cast<Instruction>(MulVal);
02244 assert(MulInstr->getOpcode() == Instruction::Mul);
02245
02246 auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
02247 *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
02248 assert(LHS->getOpcode() == Instruction::ZExt);
02249 assert(RHS->getOpcode() == Instruction::ZExt);
02250 Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
02251
02252 // Calculate type and width of the result produced by mul.with.overflow.
02253 Type *TyA = A->getType(), *TyB = B->getType();
02254 unsigned WidthA = TyA->getPrimitiveSizeInBits(),
02255 WidthB = TyB->getPrimitiveSizeInBits();
02256 unsigned MulWidth;
02257 Type *MulType;
02258 if (WidthB > WidthA) {
02259 MulWidth = WidthB;
02260 MulType = TyB;
02261 } else {
02262 MulWidth = WidthA;
02263 MulType = TyA;
02264 }
02265
02266 // In order to replace the original mul with a narrower mul.with.overflow,
02267 // all uses must ignore upper bits of the product. The number of used low
02268 // bits must not be greater than the width of mul.with.overflow.
02269 if (MulVal->hasNUsesOrMore(2))
02270 for (User *U : MulVal->users()) {
02271 if (U == &I)
02272 continue;
02273 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
02274 // Check if truncation ignores bits above MulWidth.
02275 unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
02276 if (TruncWidth > MulWidth)
02277 return nullptr;
02278 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
02279 // Check if AND ignores bits above MulWidth.
02280 if (BO->getOpcode() != Instruction::And)
02281 return nullptr;
02282 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
02283 const APInt &CVal = CI->getValue();
02284 if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
02285 return nullptr;
02286 }
02287 } else {
02288 // Other uses prohibit this transformation.
02289 return nullptr;
02290 }
02291 }
02292
02293 // Recognize patterns
02294 switch (I.getPredicate()) {
02295 case ICmpInst::ICMP_EQ:
02296 case ICmpInst::ICMP_NE:
02297 // Recognize pattern:
02298 // mulval = mul(zext A, zext B)
02299 // cmp eq/neq mulval, zext trunc mulval
02300 if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
02301 if (Zext->hasOneUse()) {
02302 Value *ZextArg = Zext->getOperand(0);
02303 if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
02304 if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
02305 break; //Recognized
02306 }
02307
02308 // Recognize pattern:
02309 // mulval = mul(zext A, zext B)
02310 // cmp eq/ne mulval, and(mulval, mask), where mask selects the low MulWidth bits.
02311 ConstantInt *CI;
02312 Value *ValToMask;
02313 if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
02314 if (ValToMask != MulVal)
02315 return nullptr;
02316 const APInt &CVal = CI->getValue() + 1;
02317 if (CVal.isPowerOf2()) {
02318 unsigned MaskWidth = CVal.logBase2();
02319 if (MaskWidth == MulWidth)
02320 break; // Recognized
02321 }
02322 }
02323 return nullptr;
02324
02325 case ICmpInst::ICMP_UGT:
02326 // Recognize pattern:
02327 // mulval = mul(zext A, zext B)
02328 // cmp ugt mulval, max
02329 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
02330 APInt MaxVal = APInt::getMaxValue(MulWidth);
02331 MaxVal = MaxVal.zext(CI->getBitWidth());
02332 if (MaxVal.eq(CI->getValue()))
02333 break; // Recognized
02334 }
02335 return nullptr;
02336
02337 case ICmpInst::ICMP_UGE:
02338 // Recognize pattern:
02339 // mulval = mul(zext A, zext B)
02340 // cmp uge mulval, max+1
02341 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
02342 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
02343 if (MaxVal.eq(CI->getValue()))
02344 break; // Recognized
02345 }
02346 return nullptr;
02347
02348 case ICmpInst::ICMP_ULE:
02349 // Recognize pattern:
02350 // mulval = mul(zext A, zext B)
02351 // cmp ule mulval, max
02352 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
02353 APInt MaxVal = APInt::getMaxValue(MulWidth);
02354 MaxVal = MaxVal.zext(CI->getBitWidth());
02355 if (MaxVal.eq(CI->getValue()))
02356 break; // Recognized
02357 }
02358 return nullptr;
02359
02360 case ICmpInst::ICMP_ULT:
02361 // Recognize pattern:
02362 // mulval = mul(zext A, zext B)
02363 // cmp ult mulval, max + 1
02364 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
02365 APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
02366 if (MaxVal.eq(CI->getValue()))
02367 break; // Recognized
02368 }
02369 return nullptr;
02370
02371 default:
02372 return nullptr;
02373 }
02374
02375 InstCombiner::BuilderTy *Builder = IC.Builder;
02376 Builder->SetInsertPoint(MulInstr);
02377 Module *M = I.getParent()->getParent()->getParent();
02378
02379 // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
02380 Value *MulA = A, *MulB = B;
02381 if (WidthA < MulWidth)
02382 MulA = Builder->CreateZExt(A, MulType);
02383 if (WidthB < MulWidth)
02384 MulB = Builder->CreateZExt(B, MulType);
02385 Value *F =
02386 Intrinsic::getDeclaration(M, Intrinsic::umul_with_overflow, MulType);
02387 CallInst *Call = Builder->CreateCall2(F, MulA, MulB, "umul");
02388 IC.Worklist.Add(MulInstr);
02389
02390 // If there are uses of the mul result other than the comparison, we know
02391 // that they are truncation or binary AND. Change them to use the result of
02392 // mul.with.overflow and adjust the mask/size accordingly.
02393 if (MulVal->hasNUsesOrMore(2)) {
02394 Value *Mul = Builder->CreateExtractValue(Call, 0, "umul.value");
02395 for (User *U : MulVal->users()) {
02396 if (U == &I || U == OtherVal)
02397 continue;
02398 if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
02399 if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
02400 IC.ReplaceInstUsesWith(*TI, Mul);
02401 else
02402 TI->setOperand(0, Mul);
02403 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
02404 assert(BO->getOpcode() == Instruction::And);
02405 // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
02406 ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
02407 APInt ShortMask = CI->getValue().trunc(MulWidth);
02408 Value *ShortAnd = Builder->CreateAnd(Mul, ShortMask);
02409 Instruction *Zext =
02410 cast<Instruction>(Builder->CreateZExt(ShortAnd, BO->getType()));
02411 IC.Worklist.Add(Zext);
02412 IC.ReplaceInstUsesWith(*BO, Zext);
02413 } else {
02414 llvm_unreachable("Unexpected Binary operation");
02415 }
02416 IC.Worklist.Add(cast<Instruction>(U));
02417 }
02418 }
02419 if (isa<Instruction>(OtherVal))
02420 IC.Worklist.Add(cast<Instruction>(OtherVal));
02421
02422 // The original icmp gets replaced with the overflow value, maybe inverted
02423 // depending on predicate.
02424 bool Inverse = false;
02425 switch (I.getPredicate()) {
02426 case ICmpInst::ICMP_NE:
02427 break;
02428 case ICmpInst::ICMP_EQ:
02429 Inverse = true;
02430 break;
02431 case ICmpInst::ICMP_UGT:
02432 case ICmpInst::ICMP_UGE:
02433 if (I.getOperand(0) == MulVal)
02434 break;
02435 Inverse = true;
02436 break;
02437 case ICmpInst::ICMP_ULT:
02438 case ICmpInst::ICMP_ULE:
02439 if (I.getOperand(1) == MulVal)
02440 break;
02441 Inverse = true;
02442 break;
02443 default:
02444 llvm_unreachable("Unexpected predicate");
02445 }
02446 if (Inverse) {
02447 Value *Res = Builder->CreateExtractValue(Call, 1);
02448 return BinaryOperator::CreateNot(Res);
02449 }
02450
02451 return ExtractValueInst::Create(Call, 1);
02452 }
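// A sketch of the idiom handled above, with 32-bit inputs widened to 64 bits
// (hypothetical SSA names):
//   %za  = zext i32 %a to i64
//   %zb  = zext i32 %b to i64
//   %m   = mul i64 %za, %zb
//   %cmp = icmp ugt i64 %m, 4294967295
// becomes:
//   %u   = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
//   %cmp = extractvalue { i32, i1 } %u, 1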
02453
02454 // DemandedBitsLHSMask - When performing a comparison against a constant,
02455 // it is possible that not all the bits in the LHS are demanded. This helper
02456 // method computes the mask that IS demanded.
02457 static APInt DemandedBitsLHSMask(ICmpInst &I,
02458 unsigned BitWidth, bool isSignCheck) {
02459 if (isSignCheck)
02460 return APInt::getSignBit(BitWidth);
02461
02462 ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(1));
02463 if (!CI) return APInt::getAllOnesValue(BitWidth);
02464 const APInt &RHS = CI->getValue();
02465
02466 switch (I.getPredicate()) {
02467 // For a UGT comparison, we don't care about any bits that
02468 // correspond to the trailing ones of the comparand. The value of these
02469 // bits doesn't impact the outcome of the comparison, because any value
02470 // greater than the RHS must differ in a bit higher than these due to carry.
02471 case ICmpInst::ICMP_UGT: {
02472 unsigned trailingOnes = RHS.countTrailingOnes();
02473 APInt lowBitsSet = APInt::getLowBitsSet(BitWidth, trailingOnes);
02474 return ~lowBitsSet;
02475 }
02476
02477 // Similarly, for a ULT comparison, we don't care about the trailing zeros.
02478 // Any value less than the RHS must differ in a higher bit because of carries.
02479 case ICmpInst::ICMP_ULT: {
02480 unsigned trailingZeros = RHS.countTrailingZeros();
02481 APInt lowBitsSet = APInt::getLowBitsSet(BitWidth, trailingZeros);
02482 return ~lowBitsSet;
02483 }
02484
02485 default:
02486 return APInt::getAllOnesValue(BitWidth);
02487 }
02488
02489 }
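// For example, for "icmp ugt i8 %x, 7" the comparand 7 (0b00000111) has three
// trailing ones, so only the bits under the mask 0b11111000 of %x are demanded:
// the low three bits cannot change whether %x is greater than 7.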
02490
02491 /// \brief Check if the order of \p Op0 and \p Op1 as operand in an ICmpInst
02492 /// should be swapped.
02493 /// The decision is based on how many times these two operands are reused
02494 /// as subtract operands and their positions in those instructions.
02495 /// The rationale is that several architectures use the same instruction for
02496 /// both subtract and cmp, so it is better if the order of those operands
02497 /// matches.
02498 /// \return true if Op0 and Op1 should be swapped.
02499 static bool swapMayExposeCSEOpportunities(const Value * Op0,
02500 const Value * Op1) {
02501 // Filter out pointer values as those cannot appear directly in a subtract.
02502 // FIXME: we may want to go through inttoptrs or bitcasts.
02503 if (Op0->getType()->isPointerTy())
02504 return false;
02505 // Count every use of both Op0 and Op1 in a subtract.
02506 // Each time Op0 is the first operand, count -1: swapping is bad, the
02507 // subtract has already the same layout as the compare.
02508 // Each time Op0 is the second operand, count +1: swapping is good, the
02509 // subtract has a different layout than the compare.
02510 // At the end, if the benefit is greater than 0, Op0 should come second to
02511 // expose more CSE opportunities.
02512 int GlobalSwapBenefits = 0;
02513 for (const User *U : Op0->users()) {
02514 const BinaryOperator *BinOp = dyn_cast<BinaryOperator>(U);
02515 if (!BinOp || BinOp->getOpcode() != Instruction::Sub)
02516 continue;
02517 // If Op0 is the first argument, this is not beneficial to swap the
02518 // arguments.
02519 int LocalSwapBenefits = -1;
02520 unsigned Op1Idx = 1;
02521 if (BinOp->getOperand(Op1Idx) == Op0) {
02522 Op1Idx = 0;
02523 LocalSwapBenefits = 1;
02524 }
02525 if (BinOp->getOperand(Op1Idx) != Op1)
02526 continue;
02527 GlobalSwapBenefits += LocalSwapBenefits;
02528 }
02529 return GlobalSwapBenefits > 0;
02530 }
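// For example, given "%d = sub i32 %y, %x" elsewhere in the function, this
// heuristic prefers writing "icmp eq i32 %x, %y" as "icmp eq i32 %y, %x" so
// that the compare and the subtract share the same operand order (hypothetical
// SSA names; useful on targets where one instruction serves both purposes).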
02531
02532 /// \brief Check that one use is in the same block as the definition and all
02533 /// other uses are in blocks dominated by a given block
02534 ///
02535 /// \param DI Definition
02536 /// \param UI Use
02537 /// \param DB Block that must dominate all uses of \p DI outside
02538 /// the parent block
02539 /// \return true when \p UI is the only use of \p DI in the parent block
02540 /// and all other uses of \p DI are in blocks dominated by \p DB.
02541 ///
02542 bool InstCombiner::dominatesAllUses(const Instruction *DI,
02543 const Instruction *UI,
02544 const BasicBlock *DB) const {
02545 assert(DI && UI && "Instruction not defined\n");
02546 // ignore incomplete definitions
02547 if (!DI->getParent())
02548 return false;
02549 // DI and UI must be in the same block
02550 if (DI->getParent() != UI->getParent())
02551 return false;
02552 // Protect from self-referencing blocks
02553 if (DI->getParent() == DB)
02554 return false;
02555 // DominatorTree available?
02556 if (!DT)
02557 return false;
02558 for (const User *U : DI->users()) {
02559 auto *Usr = cast<Instruction>(U);
02560 if (Usr != UI && !DT->dominates(DB, Usr->getParent()))
02561 return false;
02562 }
02563 return true;
02564 }
02565
02566 ///
02567 /// Returns true when the instruction sequence within a block is select-cmp-br.
02568 ///
02569 static bool isChainSelectCmpBranch(const SelectInst *SI) {
02570 const BasicBlock *BB = SI->getParent();
02571 if (!BB)
02572 return false;
02573 auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
02574 if (!BI || BI->getNumSuccessors() != 2)
02575 return false;
02576 auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
02577 if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
02578 return false;
02579 return true;
02580 }
02581
02582 ///
02583 /// \brief True when a select result is replaced by one of its operands
02584 /// in select-icmp sequence. This will eventually result in the elimination
02585 /// of the select.
02586 ///
02587 /// \param SI Select instruction
02588 /// \param Icmp Compare instruction
02589 /// \param SIOpd Operand that replaces the select
02590 ///
02591 /// Notes:
02592 /// - The replacement is global and requires dominator information
02593 /// - The caller is responsible for the actual replacement
02594 ///
02595 /// Example:
02596 ///
02597 /// entry:
02598 /// %4 = select i1 %3, %C* %0, %C* null
02599 /// %5 = icmp eq %C* %4, null
02600 /// br i1 %5, label %9, label %7
02601 /// ...
02602 /// ; <label>:7 ; preds = %entry
02603 /// %8 = getelementptr inbounds %C* %4, i64 0, i32 0
02604 /// ...
02605 ///
02606 /// can be transformed to
02607 ///
02608 /// %5 = icmp eq %C* %0, null
02609 /// %6 = select i1 %3, i1 %5, i1 true
02610 /// br i1 %6, label %9, label %7
02611 /// ...
02612 /// ; <label>:7 ; preds = %entry
02613 /// %8 = getelementptr inbounds %C* %0, i64 0, i32 0 // replace by %0!
02614 ///
02615 /// Similar when the first operand of the select is a constant or/and
02616 /// the compare is for not equal rather than equal.
02617 ///
02618 /// NOTE: The function is only called when the select and compare constants
02619 /// are equal, so the optimization can work only for EQ predicates. This is not
02620 /// a major restriction since an NE compare should be 'normalized' to an equal
02621 /// compare, which usually happens in the combiner; the test case
02622 /// select-cmp-br.ll
02623 /// checks for it.
02624 bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
02625 const ICmpInst *Icmp,
02626 const unsigned SIOpd) {
02627 assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
02628 if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
02629 BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
02630 // The check for the unique predecessor is not the best that can be
02631 // done. But it protects efficiently against cases like when SI's
02632 // home block has two successors, Succ and Succ1, and Succ1 is a predecessor
02633 // of Succ. Then SI can't be replaced by SIOpd because the use that gets
02634 // replaced can be reached on either path. So the uniqueness check
02635 // guarantees that the path all uses of SI (outside SI's parent) are on
02636 // is disjoint from all other paths out of SI. But that information
02637 // is more expensive to compute, and the trade-off here is in favor
02638 // of compile-time.
02639 if (Succ->getUniquePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
02640 NumSel++;
02641 SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
02642 return true;
02643 }
02644 }
02645 return false;
02646 }
02647
02648 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
02649 bool Changed = false;
02650 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
02651 unsigned Op0Cplxity = getComplexity(Op0);
02652 unsigned Op1Cplxity = getComplexity(Op1);
02653
02654 /// Orders the operands of the compare so that they are listed from most
02655 /// complex to least complex. This puts binary operators before unary
02656 /// operators, which in turn come before constants.
02657 if (Op0Cplxity < Op1Cplxity ||
02658 (Op0Cplxity == Op1Cplxity &&
02659 swapMayExposeCSEOpportunities(Op0, Op1))) {
02660 I.swapOperands();
02661 std::swap(Op0, Op1);
02662 Changed = true;
02663 }
02664
02665 if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, DL, TLI, DT, AC))
02666 return ReplaceInstUsesWith(I, V);
02667
02668 // Comparing -val or val against zero for inequality is the same as just
02669 // comparing val, i.e., abs(val) != 0 -> val != 0.
02670 if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero()))
02671 {
02672 Value *Cond, *SelectTrue, *SelectFalse;
02673 if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
02674 m_Value(SelectFalse)))) {
02675 if (Value *V = dyn_castNegVal(SelectTrue)) {
02676 if (V == SelectFalse)
02677 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
02678 }
02679 else if (Value *V = dyn_castNegVal(SelectFalse)) {
02680 if (V == SelectTrue)
02681 return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
02682 }
02683 }
02684 }
02685
02686 Type *Ty = Op0->getType();
02687
02688 // icmp's with boolean values can always be turned into bitwise operations
02689 if (Ty->isIntegerTy(1)) {
02690 switch (I.getPredicate()) {
02691 default: llvm_unreachable("Invalid icmp instruction!");
02692 case ICmpInst::ICMP_EQ: { // icmp eq i1 A, B -> ~(A^B)
02693 Value *Xor = Builder->CreateXor(Op0, Op1, I.getName()+"tmp");
02694 return BinaryOperator::CreateNot(Xor);
02695 }
02696 case ICmpInst::ICMP_NE: // icmp ne i1 A, B -> A^B
02697 return BinaryOperator::CreateXor(Op0, Op1);
02698
02699 case ICmpInst::ICMP_UGT:
02700 std::swap(Op0, Op1); // Change icmp ugt -> icmp ult
02701 // FALL THROUGH
02702 case ICmpInst::ICMP_ULT:{ // icmp ult i1 A, B -> ~A & B
02703 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
02704 return BinaryOperator::CreateAnd(Not, Op1);
02705 }
02706 case ICmpInst::ICMP_SGT:
02707 std::swap(Op0, Op1); // Change icmp sgt -> icmp slt
02708 // FALL THROUGH
02709 case ICmpInst::ICMP_SLT: { // icmp slt i1 A, B -> A & ~B
02710 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
02711 return BinaryOperator::CreateAnd(Not, Op0);
02712 }
02713 case ICmpInst::ICMP_UGE:
02714 std::swap(Op0, Op1); // Change icmp uge -> icmp ule
02715 // FALL THROUGH
02716 case ICmpInst::ICMP_ULE: { // icmp ule i1 A, B -> ~A | B
02717 Value *Not = Builder->CreateNot(Op0, I.getName()+"tmp");
02718 return BinaryOperator::CreateOr(Not, Op1);
02719 }
02720 case ICmpInst::ICMP_SGE:
02721 std::swap(Op0, Op1); // Change icmp sge -> icmp sle
02722 // FALL THROUGH
02723 case ICmpInst::ICMP_SLE: { // icmp sle i1 A, B -> A | ~B
02724 Value *Not = Builder->CreateNot(Op1, I.getName()+"tmp");
02725 return BinaryOperator::CreateOr(Not, Op0);
02726 }
02727 }
02728 }
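// For instance, the i1 ULT case above turns "icmp ult i1 %a, %b" into
// "and (xor i1 %a, true), %b" (hypothetical SSA names).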
02729
02730 unsigned BitWidth = 0;
02731 if (Ty->isIntOrIntVectorTy())
02732 BitWidth = Ty->getScalarSizeInBits();
02733 else // Get pointer size.
02734 BitWidth = DL.getTypeSizeInBits(Ty->getScalarType());
02735
02736 bool isSignBit = false;
02737
02738 // See if we are doing a comparison with a constant.
02739 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
02740 Value *A = nullptr, *B = nullptr;
02741
02742 // Match the following pattern, which is a common idiom when writing
02743 // overflow-safe integer arithmetic functions. The source performs an
02744 // addition in a wider type, and explicitly checks for overflow using
02745 // comparisons against INT_MIN and INT_MAX. Simplify this by using the
02746 // sadd_with_overflow intrinsic.
02747 //
02748 // TODO: This could probably be generalized to handle other overflow-safe
02749 // operations if we worked out the formulas to compute the appropriate
02750 // magic constants.
02751 //
02752 // sum = a + b
02753 // if (sum+128 >u 255) ... -> llvm.sadd.with.overflow.i8
02754 {
02755 ConstantInt *CI2; // I = icmp ugt (add (add A, B), CI2), CI
02756 if (I.getPredicate() == ICmpInst::ICMP_UGT &&
02757 match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
02758 if (Instruction *Res = ProcessUGT_ADDCST_ADD(I, A, B, CI2, CI, *this))
02759 return Res;
02760 }
02761
02762 // The following transforms are only 'worth it' if the only user of the
02763 // subtraction is the icmp.
02764 if (Op0->hasOneUse()) {
02765 // (icmp ne/eq (sub A B) 0) -> (icmp ne/eq A, B)
02766 if (I.isEquality() && CI->isZero() &&
02767 match(Op0, m_Sub(m_Value(A), m_Value(B))))
02768 return new ICmpInst(I.getPredicate(), A, B);
02769
02770 // (icmp sgt (sub nsw A B), -1) -> (icmp sge A, B)
02771 if (I.getPredicate() == ICmpInst::ICMP_SGT && CI->isAllOnesValue() &&
02772 match(Op0, m_NSWSub(m_Value(A), m_Value(B))))
02773 return new ICmpInst(ICmpInst::ICMP_SGE, A, B);
02774
02775 // (icmp sgt (sub nsw A B), 0) -> (icmp sgt A, B)
02776 if (I.getPredicate() == ICmpInst::ICMP_SGT && CI->isZero() &&
02777 match(Op0, m_NSWSub(m_Value(A), m_Value(B))))
02778 return new ICmpInst(ICmpInst::ICMP_SGT, A, B);
02779
02780 // (icmp slt (sub nsw A B), 0) -> (icmp slt A, B)
02781 if (I.getPredicate() == ICmpInst::ICMP_SLT && CI->isZero() &&
02782 match(Op0, m_NSWSub(m_Value(A), m_Value(B))))
02783 return new ICmpInst(ICmpInst::ICMP_SLT, A, B);
02784
02785 // (icmp slt (sub nsw A B), 1) -> (icmp sle A, B)
02786 if (I.getPredicate() == ICmpInst::ICMP_SLT && CI->isOne() &&
02787 match(Op0, m_NSWSub(m_Value(A), m_Value(B))))
02788 return new ICmpInst(ICmpInst::ICMP_SLE, A, B);
02789 }
02790
02791 // If we have an icmp le or icmp ge instruction, turn it into the
02792 // appropriate icmp lt or icmp gt instruction. This allows us to rely on
02793 // them being folded in the code below. The SimplifyICmpInst code has
02794 // already handled the edge cases for us, so we just assert on them.
02795 switch (I.getPredicate()) {
02796 default: break;
02797 case ICmpInst::ICMP_ULE:
02798 assert(!CI->isMaxValue(false)); // A <=u MAX -> TRUE
02799 return new ICmpInst(ICmpInst::ICMP_ULT, Op0,
02800 Builder->getInt(CI->getValue()+1));
02801 case ICmpInst::ICMP_SLE:
02802 assert(!CI->isMaxValue(true)); // A <=s MAX -> TRUE
02803 return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
02804 Builder->getInt(CI->getValue()+1));
02805 case ICmpInst::ICMP_UGE:
02806 assert(!CI->isMinValue(false)); // A >=u MIN -> TRUE
02807 return new ICmpInst(ICmpInst::ICMP_UGT, Op0,
02808 Builder->getInt(CI->getValue()-1));
02809 case ICmpInst::ICMP_SGE:
02810 assert(!CI->isMinValue(true)); // A >=s MIN -> TRUE
02811 return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
02812 Builder->getInt(CI->getValue()-1));
02813 }
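// For example, this canonicalization rewrites "icmp ule i32 %x, 7" as
// "icmp ult i32 %x, 8" and "icmp sge i32 %x, 3" as "icmp sgt i32 %x, 2"; the
// asserts hold because the MAX/MIN edge cases were already simplified away.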
02814
02815 if (I.isEquality()) {
02816 ConstantInt *CI2;
02817 if (match(Op0, m_AShr(m_ConstantInt(CI2), m_Value(A))) ||
02818 match(Op0, m_LShr(m_ConstantInt(CI2), m_Value(A)))) {
02819 // (icmp eq/ne (ashr/lshr const2, A), const1)
02820 if (Instruction *Inst = FoldICmpCstShrCst(I, Op0, A, CI, CI2))
02821 return Inst;
02822 }
02823 if (match(Op0, m_Shl(m_ConstantInt(CI2), m_Value(A)))) {
02824 // (icmp eq/ne (shl const2, A), const1)
02825 if (Instruction *Inst = FoldICmpCstShlCst(I, Op0, A, CI, CI2))
02826 return Inst;
02827 }
02828 }
02829
02830 // If this comparison is a normal comparison, it demands all
02832 // bits; if it is a sign-bit comparison, it only demands the sign bit.
02832 bool UnusedBit;
02833 isSignBit = isSignBitCheck(I.getPredicate(), CI, UnusedBit);
02834 }
02835
02836 // See if we can fold the comparison based on range information we can get
02837 // by checking whether bits are known to be zero or one in the input.
02838 if (BitWidth != 0) {
02839 APInt Op0KnownZero(BitWidth, 0), Op0KnownOne(BitWidth, 0);
02840 APInt Op1KnownZero(BitWidth, 0), Op1KnownOne(BitWidth, 0);
02841
02842 if (SimplifyDemandedBits(I.getOperandUse(0),
02843 DemandedBitsLHSMask(I, BitWidth, isSignBit),
02844 Op0KnownZero, Op0KnownOne, 0))
02845 return &I;
02846 if (SimplifyDemandedBits(I.getOperandUse(1),
02847 APInt::getAllOnesValue(BitWidth), Op1KnownZero,
02848 Op1KnownOne, 0))
02849 return &I;
02850
02851 // Given the known and unknown bits, compute a range that the LHS could be
02852 // in. Compute the Min, Max and RHS values based on the known bits. For the
02853 // EQ and NE we use unsigned values.
02854 APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
02855 APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
02856 if (I.isSigned()) {
02857 ComputeSignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
02858 Op0Min, Op0Max);
02859 ComputeSignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
02860 Op1Min, Op1Max);
02861 } else {
02862 ComputeUnsignedMinMaxValuesFromKnownBits(Op0KnownZero, Op0KnownOne,
02863 Op0Min, Op0Max);
02864 ComputeUnsignedMinMaxValuesFromKnownBits(Op1KnownZero, Op1KnownOne,
02865 Op1Min, Op1Max);
02866 }
02867
02868 // If Min and Max are known to be the same, then SimplifyDemandedBits
02869 // figured out that the LHS is a constant. Just constant fold this now so
02870 // that code below can assume that Min != Max.
02871 if (!isa<Constant>(Op0) && Op0Min == Op0Max)
02872 return new ICmpInst(I.getPredicate(),
02873 ConstantInt::get(Op0->getType(), Op0Min), Op1);
02874 if (!isa<Constant>(Op1) && Op1Min == Op1Max)
02875 return new ICmpInst(I.getPredicate(), Op0,
02876 ConstantInt::get(Op1->getType(), Op1Min));
02877
02878 // Based on the range information we know about the LHS, see if we can
02879 // simplify this comparison. For example, (x&4) < 8 is always true.
02880 switch (I.getPredicate()) {
02881 default: llvm_unreachable("Unknown icmp opcode!");
02882 case ICmpInst::ICMP_EQ: {
02883 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
02884 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
02885
02886 // If all bits are known zero except for one, then we know at most one
02887 // bit is set. If the comparison is against zero, then this is a check
02888 // to see if *that* bit is set.
02889 APInt Op0KnownZeroInverted = ~Op0KnownZero;
02890 if (~Op1KnownZero == 0) {
02891 // If the LHS is an AND with the same constant, look through it.
02892 Value *LHS = nullptr;
02893 ConstantInt *LHSC = nullptr;
02894 if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) ||
02895 LHSC->getValue() != Op0KnownZeroInverted)
02896 LHS = Op0;
02897
02898 // If the LHS is 1 << x, and we know the result is a power of 2 like 8,
02899 // then turn "((1 << x)&8) == 0" into "x != 3".
02900 // or turn "((1 << x)&7) == 0" into "x > 2".
02901 Value *X = nullptr;
02902 if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
02903 APInt ValToCheck = Op0KnownZeroInverted;
02904 if (ValToCheck.isPowerOf2()) {
02905 unsigned CmpVal = ValToCheck.countTrailingZeros();
02906 return new ICmpInst(ICmpInst::ICMP_NE, X,
02907 ConstantInt::get(X->getType(), CmpVal));
02908 } else if ((++ValToCheck).isPowerOf2()) {
02909 unsigned CmpVal = ValToCheck.countTrailingZeros() - 1;
02910 return new ICmpInst(ICmpInst::ICMP_UGT, X,
02911 ConstantInt::get(X->getType(), CmpVal));
02912 }
02913 }
02914
02915 // If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
02916 // then turn "((8 >>u x)&1) == 0" into "x != 3".
02917 const APInt *CI;
02918 if (Op0KnownZeroInverted == 1 &&
02919 match(LHS, m_LShr(m_Power2(CI), m_Value(X))))
02920 return new ICmpInst(ICmpInst::ICMP_NE, X,
02921 ConstantInt::get(X->getType(),
02922 CI->countTrailingZeros()));
02923 }
02924
02925 break;
02926 }
02927 case ICmpInst::ICMP_NE: {
02928 if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
02929 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
02930
02931 // If all bits are known zero except for one, then we know at most one
02932 // bit is set. If the comparison is against zero, then this is a check
02933 // to see if *that* bit is set.
02934 APInt Op0KnownZeroInverted = ~Op0KnownZero;
02935 if (~Op1KnownZero == 0) {
02936 // If the LHS is an AND with the same constant, look through it.
02937 Value *LHS = nullptr;
02938 ConstantInt *LHSC = nullptr;
02939 if (!match(Op0, m_And(m_Value(LHS), m_ConstantInt(LHSC))) ||
02940 LHSC->getValue() != Op0KnownZeroInverted)
02941 LHS = Op0;
02942
02943 // If the LHS is 1 << x, and we know the result is a power of 2 like 8,
02944 // then turn "((1 << x)&8) != 0" into "x == 3".
02945 // or turn "((1 << x)&7) != 0" into "x < 3".
02946 Value *X = nullptr;
02947 if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
02948 APInt ValToCheck = Op0KnownZeroInverted;
02949 if (ValToCheck.isPowerOf2()) {
02950 unsigned CmpVal = ValToCheck.countTrailingZeros();
02951 return new ICmpInst(ICmpInst::ICMP_EQ, X,
02952 ConstantInt::get(X->getType(), CmpVal));
02953 } else if ((++ValToCheck).isPowerOf2()) {
02954 unsigned CmpVal = ValToCheck.countTrailingZeros();
02955 return new ICmpInst(ICmpInst::ICMP_ULT, X,
02956 ConstantInt::get(X->getType(), CmpVal));
02957 }
02958 }
02959
02960 // If the LHS is 8 >>u x, and we know the result is a power of 2 like 1,
02961 // then turn "((8 >>u x)&1) != 0" into "x == 3".
02962 const APInt *CI;
02963 if (Op0KnownZeroInverted == 1 &&
02964 match(LHS, m_LShr(m_Power2(CI), m_Value(X))))
02965 return new ICmpInst(ICmpInst::ICMP_EQ, X,
02966 ConstantInt::get(X->getType(),
02967 CI->countTrailingZeros()));
02968 }
02969
02970 break;
02971 }
02972 case ICmpInst::ICMP_ULT:
02973 if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
02974 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
02975 if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
02976 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
02977 if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
02978 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
02979 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
02980 if (Op1Max == Op0Min+1) // A <u C -> A == C-1 if min(A)+1 == C
02981 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
02982 Builder->getInt(CI->getValue()-1));
02983
02984 // (x <u 2147483648) -> (x >s -1) -> true if sign bit clear
02985 if (CI->isMinValue(true))
02986 return new ICmpInst(ICmpInst::ICMP_SGT, Op0,
02987 Constant::getAllOnesValue(Op0->getType()));
02988 }
02989 break;
02990 case ICmpInst::ICMP_UGT:
02991 if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
02992 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
02993 if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
02994 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
02995
02996 if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
02997 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
02998 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
02999 if (Op1Min == Op0Max-1) // A >u C -> A == C+1 if max(a)-1 == C
03000 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
03001 Builder->getInt(CI->getValue()+1));
03002
03003 // (x >u 2147483647) -> (x <s 0) -> true if sign bit set
03004 if (CI->isMaxValue(true))
03005 return new ICmpInst(ICmpInst::ICMP_SLT, Op0,
03006 Constant::getNullValue(Op0->getType()));
03007 }
03008 break;
03009 case ICmpInst::ICMP_SLT:
03010 if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
03011 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
03012 if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
03013 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
03014 if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
03015 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
03016 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
03017 if (Op1Max == Op0Min+1) // A <s C -> A == C-1 if min(A)+1 == C
03018 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
03019 Builder->getInt(CI->getValue()-1));
03020 }
03021 break;
03022 case ICmpInst::ICMP_SGT:
03023 if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
03024 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
03025 if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
03026 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
03027
03028 if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
03029 return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
03030 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
03031 if (Op1Min == Op0Max-1) // A >s C -> A == C+1 if max(A)-1 == C
03032 return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
03033 Builder->getInt(CI->getValue()+1));
03034 }
03035 break;
03036 case ICmpInst::ICMP_SGE:
03037 assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
03038 if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
03039 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
03040 if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
03041 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
03042 break;
03043 case ICmpInst::ICMP_SLE:
03044 assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
03045 if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
03046 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
03047 if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
03048 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
03049 break;
03050 case ICmpInst::ICMP_UGE:
03051 assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
03052 if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
03053 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
03054 if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
03055 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
03056 break;
03057 case ICmpInst::ICMP_ULE:
03058 assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
03059 if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
03060 return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
03061 if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
03062 return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
03063 break;
03064 }
03065
03066 // Turn a signed comparison into an unsigned one if both operands
03067 // are known to have the same sign.
03068 if (I.isSigned() &&
03069 ((Op0KnownZero.isNegative() && Op1KnownZero.isNegative()) ||
03070 (Op0KnownOne.isNegative() && Op1KnownOne.isNegative())))
03071 return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
03072 }
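// As an example of the last fold: when the sign bit of both operands is known
// to be zero (both values are known non-negative), "icmp slt %a, %b" is
// equivalent to "icmp ult %a, %b", and likewise when both sign bits are known
// to be one.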
03073
03074 // Test if the ICmpInst instruction is used exclusively by a select as
03075 // part of a minimum or maximum operation. If so, refrain from doing
03076 // any other folding. This helps out other analyses which understand
03077 // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
03078 // and CodeGen. And in this case, at least one of the comparison
03079 // operands has at least one user besides the compare (the select),
03080 // which would often largely negate the benefit of folding anyway.
03081 if (I.hasOneUse())
03082 if (SelectInst *SI = dyn_cast<SelectInst>(*I.user_begin()))
03083 if ((SI->getOperand(1) == Op0 && SI->getOperand(2) == Op1) ||
03084 (SI->getOperand(2) == Op0 && SI->getOperand(1) == Op1))
03085 return nullptr;
03086
03087 // See if we are doing a comparison between a constant and an instruction that
03088 // can be folded into the comparison.
03089 if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
03090 // Since the RHS is a ConstantInt (CI), if the left hand side is an
03091 // instruction, see if that instruction also has constants so that the
03092 // instruction can be folded into the icmp
03093 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
03094 if (Instruction *Res = visitICmpInstWithInstAndIntCst(I, LHSI, CI))
03095 return Res;
03096 }
03097
03098 // Handle icmp with constant (but not simple integer constant) RHS
03099 if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
03100 if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
03101 switch (LHSI->getOpcode()) {
03102 case Instruction::GetElementPtr:
03103 // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
03104 if (RHSC->isNullValue() &&
03105 cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
03106 return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
03107 Constant::getNullValue(LHSI->getOperand(0)->getType()));
03108 break;
03109 case Instruction::PHI:
03110 // Only fold icmp into the PHI if the phi and icmp are in the same
03111 // block. If in the same block, we're encouraging jump threading. If
03112 // not, we are just pessimizing the code by making an i1 phi.
03113 if (LHSI->getParent() == I.getParent())
03114 if (Instruction *NV = FoldOpIntoPhi(I))
03115 return NV;
03116 break;
03117 case Instruction::Select: {
03118 // If either operand of the select is a constant, we can fold the
03119 // comparison into the select arms, which will cause one to be
03120 // constant folded and the select turned into a bitwise or.
03121 Value *Op1 = nullptr, *Op2 = nullptr;
03122 ConstantInt *CI = nullptr;
03123 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
03124 Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
03125 CI = dyn_cast<ConstantInt>(Op1);
03126 }
03127 if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
03128 Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
03129 CI = dyn_cast<ConstantInt>(Op2);
03130 }
03131
03132 // We only want to perform this transformation if it will not lead to
03133 // additional code. This is true if either both sides of the select
03134 // fold to a constant (in which case the icmp is replaced with a select
03135 // which will usually simplify) or this is the only user of the
03136 // select (in which case we are trading a select+icmp for a simpler
03137 // select+icmp) or all uses of the select can be replaced based on
03138 // dominance information ("Global cases").
03139 bool Transform = false;
03140 if (Op1 && Op2)
03141 Transform = true;
03142 else if (Op1 || Op2) {
03143 // Local case
03144 if (LHSI->hasOneUse())
03145 Transform = true;
03146 // Global cases
03147 else if (CI && !CI->isZero())
03148 // When Op1 is constant, try replacing the select with the second operand.
03149 // Otherwise Op2 is constant, so try replacing the select with the first
03150 // operand.
03151 Transform = replacedSelectWithOperand(cast<SelectInst>(LHSI), &I,
03152 Op1 ? 2 : 1);
03153 }
03154 if (Transform) {
03155 if (!Op1)
03156 Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1),
03157 RHSC, I.getName());
03158 if (!Op2)
03159 Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2),
03160 RHSC, I.getName());
03161 return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
03162 }
03163 break;
03164 }
03165 case Instruction::IntToPtr:
03166 // icmp pred inttoptr(X), null -> icmp pred X, 0
03167 if (RHSC->isNullValue() &&
03168 DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
03169 return new ICmpInst(I.getPredicate(), LHSI->getOperand(0),
03170 Constant::getNullValue(LHSI->getOperand(0)->getType()));
03171 break;
03172
03173 case Instruction::Load:
03174 // Try to optimize things like "A[i] > 4" to index computations.
03175 if (GetElementPtrInst *GEP =
03176 dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
03177 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
03178 if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
03179 !cast<LoadInst>(LHSI)->isVolatile())
03180 if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
03181 return Res;
03182 }
03183 break;
03184 }
03185 }
03186
03187 // If we can optimize a 'icmp GEP, P' or 'icmp P, GEP', do so now.
03188 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
03189 if (Instruction *NI = FoldGEPICmp(GEP, Op1, I.getPredicate(), I))
03190 return NI;
03191 if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
03192 if (Instruction *NI = FoldGEPICmp(GEP, Op0,
03193 ICmpInst::getSwappedPredicate(I.getPredicate()), I))
03194 return NI;
03195
03196 // Test to see if the operands of the icmp are casted versions of other
03197 // values. If the ptr->ptr cast can be stripped off both arguments, we do so
03198 // now.
03199 if (BitCastInst *CI = dyn_cast<BitCastInst>(Op0)) {
03200 if (Op0->getType()->isPointerTy() &&
03201 (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
03202 // We keep moving the cast from the left operand over to the right
03203 // operand, where it can often be eliminated completely.
03204 Op0 = CI->getOperand(0);
03205
03206 // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
03207 // so eliminate it as well.
03208 if (BitCastInst *CI2 = dyn_cast<BitCastInst>(Op1))
03209 Op1 = CI2->getOperand(0);
03210
03211 // If Op1 is a constant, we can fold the cast into the constant.
03212 if (Op0->getType() != Op1->getType()) {
03213 if (Constant *Op1C = dyn_cast<Constant>(Op1)) {
03214 Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
03215 } else {
03216 // Otherwise, cast the RHS right before the icmp
03217 Op1 = Builder->CreateBitCast(Op1, Op0->getType());
03218 }
03219 }
03220 return new ICmpInst(I.getPredicate(), Op0, Op1);
03221 }
03222 }
03223
03224 if (isa<CastInst>(Op0)) {
03225 // Handle the special case of: icmp (cast bool to X), <cst>
03226 // This comes up when you have code like
03227 // int X = A < B;
03228 // if (X) ...
03229 // For generality, we handle any zero-extension of any operand comparison
03230 // with a constant or another cast from the same type.
03231 if (isa<Constant>(Op1) || isa<CastInst>(Op1))
03232 if (Instruction *R = visitICmpInstWithCastAndCast(I))
03233 return R;
03234 }
03235
03236 // Special logic for binary operators.
03237 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
03238 BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
03239 if (BO0 || BO1) {
03240 CmpInst::Predicate Pred = I.getPredicate();
03241 bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
03242 if (BO0 && isa<OverflowingBinaryOperator>(BO0))
03243 NoOp0WrapProblem = ICmpInst::isEquality(Pred) ||
03244 (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
03245 (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
03246 if (BO1 && isa<OverflowingBinaryOperator>(BO1))
03247 NoOp1WrapProblem = ICmpInst::isEquality(Pred) ||
03248 (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
03249 (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
03250
03251 // Analyze the case when either Op0 or Op1 is an add instruction.
03252 // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
03253 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
03254 if (BO0 && BO0->getOpcode() == Instruction::Add)
03255 A = BO0->getOperand(0), B = BO0->getOperand(1);
03256 if (BO1 && BO1->getOpcode() == Instruction::Add)
03257 C = BO1->getOperand(0), D = BO1->getOperand(1);
03258
03259 // icmp (X+cst) < 0 --> X < -cst
03260 if (NoOp0WrapProblem && ICmpInst::isSigned(Pred) && match(Op1, m_Zero()))
03261 if (ConstantInt *RHSC = dyn_cast_or_null<ConstantInt>(B))
03262 if (!RHSC->isMinValue(/*isSigned=*/true))
03263 return new ICmpInst(Pred, A, ConstantExpr::getNeg(RHSC));
03264
03265 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
03266 if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
03267 return new ICmpInst(Pred, A == Op1 ? B : A,
03268 Constant::getNullValue(Op1->getType()));
03269
03270 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
03271 if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
03272 return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
03273 C == Op0 ? D : C);
03274
03275 // icmp (X+Y), (X+Z) -> icmp Y, Z for equalities or if there is no overflow.
03276 if (A && C && (A == C || A == D || B == C || B == D) &&
03277 NoOp0WrapProblem && NoOp1WrapProblem &&
03278 // Try not to increase register pressure.
03279 BO0->hasOneUse() && BO1->hasOneUse()) {
03280 // Determine Y and Z in the form icmp (X+Y), (X+Z).
03281 Value *Y, *Z;
03282 if (A == C) {
03283 // C + B == C + D -> B == D
03284 Y = B;
03285 Z = D;
03286 } else if (A == D) {
03287 // D + B == C + D -> B == C
03288 Y = B;
03289 Z = C;
03290 } else if (B == C) {
03291 // A + C == C + D -> A == D
03292 Y = A;
03293 Z = D;
03294 } else {
03295 assert(B == D);
03296 // A + D == C + D -> A == C
03297 Y = A;
03298 Z = C;
03299 }
03300 return new ICmpInst(Pred, Y, Z);
03301 }
03302
03303 // icmp slt (X + -1), Y -> icmp sle X, Y
03304 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
03305 match(B, m_AllOnes()))
03306 return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
03307
03308 // icmp sge (X + -1), Y -> icmp sgt X, Y
03309 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
03310 match(B, m_AllOnes()))
03311 return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
03312
03313 // icmp sle (X + 1), Y -> icmp slt X, Y
03314 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE &&
03315 match(B, m_One()))
03316 return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
03317
03318 // icmp sgt (X + 1), Y -> icmp sge X, Y
03319 if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT &&
03320 match(B, m_One()))
03321 return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
03322
03323 // if C1 has greater magnitude than C2:
03324 // icmp (X + C1), (Y + C2) -> icmp (X + C3), Y
03325 // s.t. C3 = C1 - C2
03326 //
03327 // if C2 has greater magnitude than C1:
03328 // icmp (X + C1), (Y + C2) -> icmp X, (Y + C3)
03329 // s.t. C3 = C2 - C1
03330 if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
03331 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned())
03332 if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
03333 if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) {
03334 const APInt &AP1 = C1->getValue();
03335 const APInt &AP2 = C2->getValue();
03336 if (AP1.isNegative() == AP2.isNegative()) {
03337 APInt AP1Abs = C1->getValue().abs();
03338 APInt AP2Abs = C2->getValue().abs();
03339 if (AP1Abs.uge(AP2Abs)) {
03340 ConstantInt *C3 = Builder->getInt(AP1 - AP2);
03341 Value *NewAdd = Builder->CreateNSWAdd(A, C3);
03342 return new ICmpInst(Pred, NewAdd, C);
03343 } else {
03344 ConstantInt *C3 = Builder->getInt(AP2 - AP1);
03345 Value *NewAdd = Builder->CreateNSWAdd(C, C3);
03346 return new ICmpInst(Pred, A, NewAdd);
03347 }
03348 }
03349 }
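// A small example of the constant-magnitude fold above (hypothetical names):
//   icmp slt (add nsw i32 %x, 5), (add nsw i32 %y, 3)
// becomes
//   %x2 = add nsw i32 %x, 2
//   icmp slt i32 %x2, %y
// since C1 = 5 has the larger magnitude and C3 = C1 - C2 = 2.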
03350
03351
// Analyze the case when either Op0 or Op1 is a sub instruction.
// Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
A = nullptr; B = nullptr; C = nullptr; D = nullptr;
if (BO0 && BO0->getOpcode() == Instruction::Sub)
A = BO0->getOperand(0), B = BO0->getOperand(1);
if (BO1 && BO1->getOpcode() == Instruction::Sub)
C = BO1->getOperand(0), D = BO1->getOperand(1);

// icmp (X-Y), X -> icmp 0, Y for equalities or if there is no overflow.
if (A == Op1 && NoOp0WrapProblem)
return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);

// icmp X, (X-Y) -> icmp Y, 0 for equalities or if there is no overflow.
if (C == Op0 && NoOp1WrapProblem)
return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));

// icmp (Y-X), (Z-X) -> icmp Y, Z for equalities or if there is no overflow.
if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem &&
// Try not to increase register pressure.
BO0->hasOneUse() && BO1->hasOneUse())
return new ICmpInst(Pred, A, C);

// icmp (X-Y), (X-Z) -> icmp Z, Y for equalities or if there is no overflow.
if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem &&
// Try not to increase register pressure.
BO0->hasOneUse() && BO1->hasOneUse())
return new ICmpInst(Pred, D, B);

// icmp (0-X) < cst --> x > -cst
if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
Value *X;
if (match(BO0, m_Neg(m_Value(X))))
if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
if (!RHSC->isMinValue(/*isSigned=*/true))
return new ICmpInst(I.getSwappedPredicate(), X,
ConstantExpr::getNeg(RHSC));
}

BinaryOperator *SRem = nullptr;
// icmp (srem X, Y), Y
if (BO0 && BO0->getOpcode() == Instruction::SRem &&
Op1 == BO0->getOperand(1))
SRem = BO0;
// icmp Y, (srem X, Y)
else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
Op0 == BO1->getOperand(1))
SRem = BO1;
if (SRem) {
// We don't check hasOneUse to avoid increasing register pressure because
// the value we use is the same value this instruction was already using.
switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
default: break;
case ICmpInst::ICMP_EQ:
return ReplaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
case ICmpInst::ICMP_NE:
return ReplaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
Constant::getAllOnesValue(SRem->getType()));
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
Constant::getNullValue(SRem->getType()));
}
}

if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() &&
BO0->hasOneUse() && BO1->hasOneUse() &&
BO0->getOperand(1) == BO1->getOperand(1)) {
switch (BO0->getOpcode()) {
default: break;
case Instruction::Add:
case Instruction::Sub:
case Instruction::Xor:
if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
return new ICmpInst(I.getPredicate(), BO0->getOperand(0),
BO1->getOperand(0));
// icmp u/s (a ^ signbit), (b ^ signbit) --> icmp s/u a, b
if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) {
if (CI->getValue().isSignBit()) {
ICmpInst::Predicate Pred = I.isSigned()
? I.getUnsignedPredicate()
: I.getSignedPredicate();
return new ICmpInst(Pred, BO0->getOperand(0),
BO1->getOperand(0));
}

if (CI->isMaxValue(true)) {
ICmpInst::Predicate Pred = I.isSigned()
? I.getUnsignedPredicate()
: I.getSignedPredicate();
Pred = I.getSwappedPredicate(Pred);
return new ICmpInst(Pred, BO0->getOperand(0),
BO1->getOperand(0));
}
}
break;
case Instruction::Mul:
if (!I.isEquality())
break;

if (ConstantInt *CI = dyn_cast<ConstantInt>(BO0->getOperand(1))) {
// a * Cst icmp eq/ne b * Cst --> a & Mask icmp b & Mask
// Mask = -1 >> count-trailing-zeros(Cst).
if (!CI->isZero() && !CI->isOne()) {
const APInt &AP = CI->getValue();
ConstantInt *Mask = ConstantInt::get(I.getContext(),
APInt::getLowBitsSet(AP.getBitWidth(),
AP.getBitWidth() -
AP.countTrailingZeros()));
Value *And1 = Builder->CreateAnd(BO0->getOperand(0), Mask);
Value *And2 = Builder->CreateAnd(BO1->getOperand(0), Mask);
return new ICmpInst(I.getPredicate(), And1, And2);
}
}
break;
case Instruction::UDiv:
case Instruction::LShr:
if (I.isSigned())
break;
// fall-through
case Instruction::SDiv:
case Instruction::AShr:
if (!BO0->isExact() || !BO1->isExact())
break;
return new ICmpInst(I.getPredicate(), BO0->getOperand(0),
BO1->getOperand(0));
case Instruction::Shl: {
bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
if (!NUW && !NSW)
break;
if (!NSW && I.isSigned())
break;
return new ICmpInst(I.getPredicate(), BO0->getOperand(0),
BO1->getOperand(0));
}
}
}

{ Value *A, *B;
// Transform (A & ~B) == 0 --> (A & B) != 0
// and (A & ~B) != 0 --> (A & B) == 0
// if A is a power of 2.
if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
match(Op1, m_Zero()) &&
isKnownToBeAPowerOfTwo(A, DL, false, 0, AC, &I, DT) && I.isEquality())
return new ICmpInst(I.getInversePredicate(),
Builder->CreateAnd(A, B),
Op1);

// ~x < ~y --> y < x
// ~x < cst --> ~cst < x
if (match(Op0, m_Not(m_Value(A)))) {
if (match(Op1, m_Not(m_Value(B))))
return new ICmpInst(I.getPredicate(), B, A);
if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1))
return new ICmpInst(I.getPredicate(), ConstantExpr::getNot(RHSC), A);
}

Instruction *AddI = nullptr;
if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
m_Instruction(AddI))) &&
isa<IntegerType>(A->getType())) {
Value *Result;
Constant *Overflow;
if (OptimizeOverflowCheck(OCF_UNSIGNED_ADD, A, B, *AddI, Result,
Overflow)) {
ReplaceInstUsesWith(*AddI, Result);
return ReplaceInstUsesWith(I, Overflow);
}
}

// (zext a) * (zext b) --> llvm.umul.with.overflow.
if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
if (Instruction *R = ProcessUMulZExtIdiom(I, Op0, Op1, *this))
return R;
}
if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
if (Instruction *R = ProcessUMulZExtIdiom(I, Op1, Op0, *this))
return R;
}
}

if (I.isEquality()) {
Value *A, *B, *C, *D;

if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
if (A == Op1 || B == Op1) { // (A^B) == A -> B == 0
Value *OtherVal = A == Op1 ? B : A;
return new ICmpInst(I.getPredicate(), OtherVal,
Constant::getNullValue(A->getType()));
}

if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
// A^c1 == C^c2 --> A == C^(c1^c2)
ConstantInt *C1, *C2;
if (match(B, m_ConstantInt(C1)) &&
match(D, m_ConstantInt(C2)) && Op1->hasOneUse()) {
Constant *NC = Builder->getInt(C1->getValue() ^ C2->getValue());
Value *Xor = Builder->CreateXor(C, NC);
return new ICmpInst(I.getPredicate(), A, Xor);
}

// A^B == A^D -> B == D
if (A == C) return new ICmpInst(I.getPredicate(), B, D);
if (A == D) return new ICmpInst(I.getPredicate(), B, C);
if (B == C) return new ICmpInst(I.getPredicate(), A, D);
if (B == D) return new ICmpInst(I.getPredicate(), A, C);
}
}

if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
(A == Op0 || B == Op0)) {
// A == (A^B) -> B == 0
Value *OtherVal = A == Op0 ? B : A;
return new ICmpInst(I.getPredicate(), OtherVal,
Constant::getNullValue(A->getType()));
}

// (X&Z) == (Y&Z) -> (X^Y) & Z == 0
if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
Value *X = nullptr, *Y = nullptr, *Z = nullptr;

if (A == C) {
X = B; Y = D; Z = A;
} else if (A == D) {
X = B; Y = C; Z = A;
} else if (B == C) {
X = A; Y = D; Z = B;
} else if (B == D) {
X = A; Y = C; Z = B;
}

if (X) { // Build (X^Y) & Z
Op1 = Builder->CreateXor(X, Y);
Op1 = Builder->CreateAnd(Op1, Z);
I.setOperand(0, Op1);
I.setOperand(1, Constant::getNullValue(Op1->getType()));
return &I;
}
}

// Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
// and (B & (1<<X)-1) == (zext A) --> A == (trunc B)
ConstantInt *Cst1;
if ((Op0->hasOneUse() &&
match(Op0, m_ZExt(m_Value(A))) &&
match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
(Op1->hasOneUse() &&
match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
match(Op1, m_ZExt(m_Value(A))))) {
APInt Pow2 = Cst1->getValue() + 1;
if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
return new ICmpInst(I.getPredicate(), A,
Builder->CreateTrunc(B, A->getType()));
}

// (A >> C) == (B >> C) --> (A^B) u< (1 << C)
// For lshr and ashr pairs.
if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) &&
match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) ||
(match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) &&
match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) {
unsigned TypeBits = Cst1->getBitWidth();
unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
if (ShAmt < TypeBits && ShAmt != 0) {
ICmpInst::Predicate Pred = I.getPredicate() == ICmpInst::ICMP_NE
? ICmpInst::ICMP_UGE
: ICmpInst::ICMP_ULT;
Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted");
APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
return new ICmpInst(Pred, Xor, Builder->getInt(CmpVal));
}
}

// (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
unsigned TypeBits = Cst1->getBitWidth();
unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
if (ShAmt < TypeBits && ShAmt != 0) {
Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted");
APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
Value *And = Builder->CreateAnd(Xor, Builder->getInt(AndVal),
I.getName() + ".mask");
return new ICmpInst(I.getPredicate(), And,
Constant::getNullValue(Cst1->getType()));
}
}

// Transform "icmp eq (trunc (lshr(X, cst1)), cst" to
// "icmp (and X, mask), cst"
uint64_t ShAmt = 0;
if (Op0->hasOneUse() &&
match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A),
m_ConstantInt(ShAmt))))) &&
match(Op1, m_ConstantInt(Cst1)) &&
// Only do this when A has multiple uses. This is most important to do
// when it exposes other optimizations.
!A->hasOneUse()) {
unsigned ASize =cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();

if (ShAmt < ASize) {
APInt MaskV =
APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
MaskV <<= ShAmt;

APInt CmpV = Cst1->getValue().zext(ASize);
CmpV <<= ShAmt;

Value *Mask = Builder->CreateAnd(A, Builder->getInt(MaskV));
return new ICmpInst(I.getPredicate(), Mask, Builder->getInt(CmpV));
}
}
}

// The 'cmpxchg' instruction returns an aggregate containing the old value and
// an i1 which indicates whether or not we successfully did the swap.
//
// Replace comparisons between the old value and the expected value with the
// indicator that 'cmpxchg' returns.
//
// N.B. This transform is only valid when the 'cmpxchg' is not permitted to
// spuriously fail. In those cases, the old value may equal the expected
// value but it is possible for the swap to not occur.
if (I.getPredicate() == ICmpInst::ICMP_EQ)
if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
!ACXI->isWeak())
return ExtractValueInst::Create(ACXI, 1);

{
Value *X; ConstantInt *Cst;
// icmp X+Cst, X
if (match(Op0, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op1 == X)
return FoldICmpAddOpCst(I, X, Cst, I.getPredicate());

// icmp X, X+Cst
if (match(Op1, m_Add(m_Value(X), m_ConstantInt(Cst))) && Op0 == X)
return FoldICmpAddOpCst(I, X, Cst, I.getSwappedPredicate());
}
return Changed ? &I : nullptr;
}

/// FoldFCmp_IntToFP_Cst - Fold fcmp ([us]itofp x, cst) if possible.
Instruction *InstCombiner::FoldFCmp_IntToFP_Cst(FCmpInst &I,
Instruction *LHSI,
Constant *RHSC) {
if (!isa<ConstantFP>(RHSC)) return nullptr;
const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();

// Get the width of the mantissa. We don't want to hack on conversions that
// might lose information from the integer, e.g. "i64 -> float"
int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
if (MantissaWidth == -1) return nullptr; // Unknown.

IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());

// Check to see that the input is converted from an integer type that is small
// enough that preserves all bits. TODO: check here for "known" sign bits.
// This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
unsigned InputSize = IntTy->getScalarSizeInBits();

// If this is a uitofp instruction, we need an extra bit to hold the sign.
bool LHSUnsigned = isa<UIToFPInst>(LHSI);
if (LHSUnsigned)
++InputSize;

if (I.isEquality()) {
FCmpInst::Predicate P = I.getPredicate();
bool IsExact = false;
APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);

// If the floating point constant isn't an integer value, we know if we will
// ever compare equal / not equal to it.
if (!IsExact) {
// TODO: Can never be -0.0 and other non-representable values
APFloat RHSRoundInt(RHS);
RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
return ReplaceInstUsesWith(I, Builder->getFalse());

assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
return ReplaceInstUsesWith(I, Builder->getTrue());
}
}

// TODO: If the constant is exactly representable, is it always OK to do
// equality compares as integer?
}

// Comparisons with zero are a special case where we know we won't lose
// information.
bool IsCmpZero = RHS.isPosZero();

// If the conversion would lose info, don't hack on this.
if ((int)InputSize > MantissaWidth && !IsCmpZero)
return nullptr;

// Otherwise, we can potentially simplify the comparison. We know that it
// will always come through as an integer value and we know the constant is
// not a NAN (it would have been previously simplified).
assert(!RHS.isNaN() && "NaN comparison not already folded!");

ICmpInst::Predicate Pred;
switch (I.getPredicate()) {
default: llvm_unreachable("Unexpected predicate!");
case FCmpInst::FCMP_UEQ:
case FCmpInst::FCMP_OEQ:
Pred = ICmpInst::ICMP_EQ;
break;
case FCmpInst::FCMP_UGT:
case FCmpInst::FCMP_OGT:
Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
break;
case FCmpInst::FCMP_UGE:
case FCmpInst::FCMP_OGE:
Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
break;
case FCmpInst::FCMP_ULT:
case FCmpInst::FCMP_OLT:
Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
break;
case FCmpInst::FCMP_ULE:
case FCmpInst::FCMP_OLE:
Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
break;
case FCmpInst::FCMP_UNE:
case FCmpInst::FCMP_ONE:
Pred = ICmpInst::ICMP_NE;
break;
case FCmpInst::FCMP_ORD:
return ReplaceInstUsesWith(I, Builder->getTrue());
case FCmpInst::FCMP_UNO:
return ReplaceInstUsesWith(I, Builder->getFalse());
}

// Now we know that the APFloat is a normal number, zero or inf.

// See if the FP constant is too large for the integer. For example,
// comparing an i8 to 300.0.
unsigned IntWidth = IntTy->getScalarSizeInBits();

if (!LHSUnsigned) {
// If the RHS value is > SignedMax, fold the comparison. This handles +INF
// and large values.
APFloat SMax(RHS.getSemantics());
SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
APFloat::rmNearestTiesToEven);
if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
Pred == ICmpInst::ICMP_SLE)
return ReplaceInstUsesWith(I, Builder->getTrue());
return ReplaceInstUsesWith(I, Builder->getFalse());
}
} else {
// If the RHS value is > UnsignedMax, fold the comparison. This handles
// +INF and large values.
APFloat UMax(RHS.getSemantics());
UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
APFloat::rmNearestTiesToEven);
if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
Pred == ICmpInst::ICMP_ULE)
return ReplaceInstUsesWith(I, Builder->getTrue());
return ReplaceInstUsesWith(I, Builder->getFalse());
}
}

if (!LHSUnsigned) {
// See if the RHS value is < SignedMin.
APFloat SMin(RHS.getSemantics());
SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
APFloat::rmNearestTiesToEven);
if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
Pred == ICmpInst::ICMP_SGE)
return ReplaceInstUsesWith(I, Builder->getTrue());
return ReplaceInstUsesWith(I, Builder->getFalse());
}
} else {
// See if the RHS value is < UnsignedMin.
APFloat SMin(RHS.getSemantics());
SMin.convertFromAPInt(APInt::getMinValue(IntWidth), true,
APFloat::rmNearestTiesToEven);
if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
Pred == ICmpInst::ICMP_UGE)
return ReplaceInstUsesWith(I, Builder->getTrue());
return ReplaceInstUsesWith(I, Builder->getFalse());
}
}

// Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
// [0, UMAX], but it may still be fractional. See if it is fractional by
// casting the FP value to the integer value and back, checking for equality.
// Don't do this for zero, because -0.0 is not fractional.
Constant *RHSInt = LHSUnsigned
? ConstantExpr::getFPToUI(RHSC, IntTy)
: ConstantExpr::getFPToSI(RHSC, IntTy);
if (!RHS.isZero()) {
bool Equal = LHSUnsigned
? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
: ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
if (!Equal) {
// If we had a comparison against a fractional value, we have to adjust
// the compare predicate and sometimes the value. RHSC is rounded towards
// zero at this point.
switch (Pred) {
default: llvm_unreachable("Unexpected integer comparison!");
case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
return ReplaceInstUsesWith(I, Builder->getTrue());
case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
return ReplaceInstUsesWith(I, Builder->getFalse());
case ICmpInst::ICMP_ULE:
// (float)int <= 4.4 --> int <= 4
// (float)int <= -4.4 --> false
if (RHS.isNegative())
return ReplaceInstUsesWith(I, Builder->getFalse());
break;
case ICmpInst::ICMP_SLE:
// (float)int <= 4.4 --> int <= 4
// (float)int <= -4.4 --> int < -4
if (RHS.isNegative())
Pred = ICmpInst::ICMP_SLT;
break;
case ICmpInst::ICMP_ULT:
// (float)int < -4.4 --> false
// (float)int < 4.4 --> int <= 4
if (RHS.isNegative())
return ReplaceInstUsesWith(I, Builder->getFalse());
Pred = ICmpInst::ICMP_ULE;
break;
case ICmpInst::ICMP_SLT:
// (float)int < -4.4 --> int < -4
// (float)int < 4.4 --> int <= 4
if (!RHS.isNegative())
Pred = ICmpInst::ICMP_SLE;
break;
case ICmpInst::ICMP_UGT:
// (float)int > 4.4 --> int > 4
// (float)int > -4.4 --> true
if (RHS.isNegative())
return ReplaceInstUsesWith(I, Builder->getTrue());
break;
case ICmpInst::ICMP_SGT:
// (float)int > 4.4 --> int > 4
// (float)int > -4.4 --> int >= -4
if (RHS.isNegative())
Pred = ICmpInst::ICMP_SGE;
break;
case ICmpInst::ICMP_UGE:
// (float)int >= -4.4 --> true
// (float)int >= 4.4 --> int > 4
if (RHS.isNegative())
return ReplaceInstUsesWith(I, Builder->getTrue());
Pred = ICmpInst::ICMP_UGT;
break;
case ICmpInst::ICMP_SGE:
// (float)int >= -4.4 --> int >= -4
// (float)int >= 4.4 --> int > 4
if (!RHS.isNegative())
Pred = ICmpInst::ICMP_SGT;
break;
}
}
}

// Lower this FP comparison into an appropriate integer version of the
// comparison.
return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
}

Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
bool Changed = false;

/// Orders the operands of the compare so that they are listed from most
/// complex to least complex. This puts constants before unary operators,
/// before binary operators.
if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
I.swapOperands();
Changed = true;
}

Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

if (Value *V = SimplifyFCmpInst(I.getPredicate(), Op0, Op1, DL, TLI, DT, AC))
return ReplaceInstUsesWith(I, V);

// Simplify 'fcmp pred X, X'
if (Op0 == Op1) {
switch (I.getPredicate()) {
default: llvm_unreachable("Unknown predicate!");
case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
case FCmpInst::FCMP_ULT: // True if unordered or less than
case FCmpInst::FCMP_UGT: // True if unordered or greater than
case FCmpInst::FCMP_UNE: // True if unordered or not equal
// Canonicalize these to be 'fcmp uno %X, 0.0'.
I.setPredicate(FCmpInst::FCMP_UNO);
I.setOperand(1, Constant::getNullValue(Op0->getType()));
return &I;

case FCmpInst::FCMP_ORD: // True if ordered (no nans)
case FCmpInst::FCMP_OEQ: // True if ordered and equal
case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
// Canonicalize these to be 'fcmp ord %X, 0.0'.
I.setPredicate(FCmpInst::FCMP_ORD);
I.setOperand(1, Constant::getNullValue(Op0->getType()));
return &I;
}
}

// Handle fcmp with constant RHS
if (Constant *RHSC = dyn_cast<Constant>(Op1)) {
if (Instruction *LHSI = dyn_cast<Instruction>(Op0))
switch (LHSI->getOpcode()) {
case Instruction::FPExt: {
// fcmp (fpext x), C -> fcmp x, (fptrunc C) if fptrunc is lossless
FPExtInst *LHSExt = cast<FPExtInst>(LHSI);
ConstantFP *RHSF = dyn_cast<ConstantFP>(RHSC);
if (!RHSF)
break;

const fltSemantics *Sem;
// FIXME: This shouldn't be here.
if (LHSExt->getSrcTy()->isHalfTy())
Sem = &APFloat::IEEEhalf;
else if (LHSExt->getSrcTy()->isFloatTy())
Sem = &APFloat::IEEEsingle;
else if (LHSExt->getSrcTy()->isDoubleTy())
Sem = &APFloat::IEEEdouble;
else if (LHSExt->getSrcTy()->isFP128Ty())
Sem = &APFloat::IEEEquad;
else if (LHSExt->getSrcTy()->isX86_FP80Ty())
Sem = &APFloat::x87DoubleExtended;
else if (LHSExt->getSrcTy()->isPPC_FP128Ty())
Sem = &APFloat::PPCDoubleDouble;
else
break;

bool Lossy;
APFloat F = RHSF->getValueAPF();
F.convert(*Sem, APFloat::rmNearestTiesToEven, &Lossy);

// Avoid lossy conversions and denormals. Zero is a special case
// that's OK to convert.
APFloat Fabs = F;
Fabs.clearSign();
if (!Lossy &&
((Fabs.compare(APFloat::getSmallestNormalized(*Sem)) !=
APFloat::cmpLessThan) || Fabs.isZero()))

return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
ConstantFP::get(RHSC->getContext(), F));
break;
}
case Instruction::PHI:
// Only fold fcmp into the PHI if the phi and fcmp are in the same
// block. If in the same block, we're encouraging jump threading. If
// not, we are just pessimizing the code by making an i1 phi.
if (LHSI->getParent() == I.getParent())
if (Instruction *NV = FoldOpIntoPhi(I))
return NV;
break;
case Instruction::SIToFP:
case Instruction::UIToFP:
if (Instruction *NV = FoldFCmp_IntToFP_Cst(I, LHSI, RHSC))
return NV;
break;
case Instruction::FSub: {
// fcmp pred (fneg x), C -> fcmp swap(pred) x, -C
Value *Op;
if (match(LHSI, m_FNeg(m_Value(Op))))
return new FCmpInst(I.getSwappedPredicate(), Op,
ConstantExpr::getFNeg(RHSC));
break;
}
case Instruction::Load:
if (GetElementPtrInst *GEP =
dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
!cast<LoadInst>(LHSI)->isVolatile())
if (Instruction *Res = FoldCmpLoadFromIndexedGlobal(GEP, GV, I))
return Res;
}
break;
case Instruction::Call: {
if (!RHSC->isNullValue())
break;

CallInst *CI = cast<CallInst>(LHSI);
const Function *F = CI->getCalledFunction();
if (!F)
break;

// Various optimization for fabs compared with zero.
LibFunc::Func Func;
if (F->getIntrinsicID() == Intrinsic::fabs ||
(TLI->getLibFunc(F->getName(), Func) && TLI->has(Func) &&
(Func == LibFunc::fabs || Func == LibFunc::fabsf ||
Func == LibFunc::fabsl))) {
switch (I.getPredicate()) {
default:
break;
// fabs(x) < 0 --> false
case FCmpInst::FCMP_OLT:
return ReplaceInstUsesWith(I, Builder->getFalse());
// fabs(x) > 0 --> x != 0
case FCmpInst::FCMP_OGT:
return new FCmpInst(FCmpInst::FCMP_ONE, CI->getArgOperand(0), RHSC);
// fabs(x) <= 0 --> x == 0
case FCmpInst::FCMP_OLE:
return new FCmpInst(FCmpInst::FCMP_OEQ, CI->getArgOperand(0), RHSC);
// fabs(x) >= 0 --> !isnan(x)
case FCmpInst::FCMP_OGE:
return new FCmpInst(FCmpInst::FCMP_ORD, CI->getArgOperand(0), RHSC);
// fabs(x) == 0 --> x == 0
// fabs(x) != 0 --> x != 0
case FCmpInst::FCMP_OEQ:
case FCmpInst::FCMP_UEQ:
case FCmpInst::FCMP_ONE:
case FCmpInst::FCMP_UNE:
return new FCmpInst(I.getPredicate(), CI->getArgOperand(0), RHSC);
}
}
}
}
}

// fcmp pred (fneg x), (fneg y) -> fcmp swap(pred) x, y
Value *X, *Y;
if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
return new FCmpInst(I.getSwappedPredicate(), X, Y);

// fcmp (fpext x), (fpext y) -> fcmp x, y
if (FPExtInst *LHSExt = dyn_cast<FPExtInst>(Op0))
if (FPExtInst *RHSExt = dyn_cast<FPExtInst>(Op1))
if (LHSExt->getSrcTy() == RHSExt->getSrcTy())
return new FCmpInst(I.getPredicate(), LHSExt->getOperand(0),
RHSExt->getOperand(0));

return Changed ? &I : nullptr;
}
|
__label__pos
| 0.563696 |
My situation is roughly similar to this guy except that I don't need change notifications right now
I have a WPF App displaying a hierarchy. The children for each node are retrieved using a LinqToSql query. The app works perfectly when there is one thread.
Now I'd like to make things a bit faster by loading children asynchronously: fire off a thread to do the DB fetching and, on completion, create the corresponding tree nodes for the children.
<HierarchicalDataTemplate DataType="{x:Type viewmodels:NodeDM}" ItemsSource="{Binding Path=Children}">
After some thrashing around yesterday, I found that WPF Data Binding allows this via an IsAsync property on the Binding. So I made the following change
<HierarchicalDataTemplate .. ItemsSource="{Binding Path=Children, IsAsync=True}">
Now it's mayhem; an initial bunch of nodes pass through the fire before exceptions run riot. Pasting the first one here...
System.Windows.Data Error: 16 : Cannot get 'Children' value (type 'ObservableCollection`1') from '' (type 'NodeDM'). BindingExpression:Path=Children; DataItem='NodeDM' (HashCode=29677729); target element is 'TreeViewItem' (Name=''); target property is 'ItemsSource' (type 'IEnumerable') TargetInvocationException:'System.Reflection.TargetInvocationException: Exception has been thrown by the target of an invocation. ---> System.InvalidCastException: Specified cast is not valid.
at System.Data.SqlClient.SqlBuffer.get_Int32()
at System.Data.SqlClient.SqlBuffer.get_Value()
at System.Data.SqlClient.SqlDataReader.GetValueInternal(Int32 i)
<snipped around 20-30 lines>
at System.Data.Linq.Table`1.GetEnumerator()
at System.Data.Linq.Table`1.System.Collections.Generic.IEnumerable<TEntity>.GetEnumerator()
at System.Linq.Lookup`2.CreateForJoin(IEnumerable`1 source, Func`2 keySelector, IEqualityComparer`1 comparer)
at System.Linq.Enumerable.<JoinIterator>d__61`4.MoveNext()
at System.Linq.Enumerable.WhereSelectEnumerableIterator`2.MoveNext()
at ICTemplates.Models.NodeDM.Load_Children()
at ICTemplates.Models.NodeDM.get_Children()
Others include
System.InvalidOperationException: There is already an open DataReader associated with this Command which must be closed first.
I have a deformed tree where some nodes have failed to load. I have a singleton instance of the almighty LinqToSql DataContext class, which talks to the DB. So I tried putting a lock so that multiple worker threads do not access it simultaneously... but no luck.
partial class Node
{
    public IEnumerable<Segment> Children
    {
        get
        {
            lock (MyDatabaseDataContext.m_lockObject)
            {
                return // LINQ query to join 2 tables and return a result
            }
        }
    }
}
Reverting the IsAsync change makes things all good again. Why is the IsAsync property messing up LinqToSql? The WPF TreeView is enough to make normal people pull their hair out.
1 Answer
Your problem seems to come from using the same connection object for the reads. When you have synchronous data access, one connection is usually enough if you remember to open/close between reads. Try to change your GetChildren code to create new connections (and commands, data readers etc.) on each call so multiple threads do not step on each other's toes. Also, if the results are cached in memory in some structure, try to synchronize access to that structure using locks. You don't want one thread to try to enumerate over your elements while another is trying to add new ones.
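For what it's worth, here is a minimal sketch of that suggestion applied to the code in the question. The table and column names are hypothetical (the real schema isn't shown), and it assumes the designer-generated MyDatabaseDataContext has the usual parameterless constructor; the two important details are that each call gets its own DataContext, so concurrent IsAsync bindings never share a connection or DataReader, and that ToList() forces the query to execute before the context is disposed instead of leaving a lazy query to run later on some other thread.

// Hedged sketch only: db.Segments, s.Id, c.SegmentId, c.ParentNodeId are made-up names.
using System.Collections.Generic;
using System.Linq;

partial class Node
{
    public IEnumerable<Segment> Children
    {
        get
        {
            // A fresh context (and therefore a fresh connection) per call:
            // nothing is shared between the worker threads the IsAsync binding uses.
            using (var db = new MyDatabaseDataContext())
            {
                var query = from s in db.Segments                 // hypothetical table
                            join c in db.NodeSegments              // hypothetical table
                              on s.Id equals c.SegmentId
                            where c.ParentNodeId == this.Id        // hypothetical keys
                            select s;

                return query.ToList();  // materialize now, before 'db' is disposed
            }
        }
    }
}

Whether per-call contexts are fast enough is a separate question (the follow-up comments suggest they were not in this case), but this at least removes the shared-DataReader race. If the single shared context has to stay, every query against it must be serialized behind one lock including the enumeration; a lock around only the query expression likely isn't enough, because a LINQ to SQL query doesn't hit the database until it is enumerated, which can happen after the lock has been released.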
Creating new DataContexts for each node grinds my machine to a halt (stackoverflow.com/questions/424016/…). Also there is no resource contention, the GetChildren is pure read. Each node is loading its own children once. – Gishu Jan 14 '09 at 11:18
then synchronize at a level above the data access to ensure that no reads are done at the same time; e.g.: public Nodes GetChildren(Node parent) { lock(someLockObject) { var result = run your Linq query; return result; } } – AZ. Jan 14 '09 at 14:59
I've tried this.. as mentioned in the code snippet at the end of the question. Same result - exceptions galore. – Gishu Jan 14 '09 at 15:23
|
__label__pos
| 0.552282 |
In 2014, I hear a lot of programming languages touted for their concurrency features. Concurrency is said to be crucial for performance gains.
In making this statement, many people point back to a 2005 article called The Free Lunch Is Over: A Fundamental Turn Toward Concurrency in Software. The basic argument is that it's getting harder to increase the clock speed of processors, but we can still put more cores on a chip, and that to get performance gains, software will need to be written to take advantage of multiple cores.
Some key quotes:
We're used to seeing 500MHz CPUs give way to 1GHz CPUs give way to 2GHz CPUs, and so on. Today we're in the 3GHz range on mainstream computers.
The key question is: When will it end? After all, Moore's Law predicts exponential growth, and clearly exponential growth can't continue forever before we reach hard physical limits; light isn't getting any faster. The growth must eventually slow down and even end.
... It has become harder and harder to exploit higher clock speeds due to not just one but several physical issues, notably heat (too much of it and too hard to dissipate), power consumption (too high), and current leakage problems.
... Intel's and most processor vendors' future lies elsewhere as chip companies aggressively pursue the same new multicore directions.
...Multicore is about running two or more actual CPUs on one chip.
This article's predictions seem to have held up, but I don't understand why. I have only very vague ideas about how hardware works.
My oversimplified view is "it's getting harder to pack more processing power into the same space" (because of issues with heat, power consumption, etc). I would expect the conclusion to be "therefore, we'll have to have bigger computers or run our programs on multiple computers." (And indeed, distributed cloud computing is a thing we're hearing more about.)
But part of the solution seems to be multi-core architectures. Unless computers grow in size (which they haven't), this just seems to be another way of saying "pack more processing power into the same space".
Why doesn't "add more cores" face the same physical limitations as "make the CPU faster"?
Please explain in the simplest terms you can. :)
en.wikipedia.org/wiki/Moore%27s_law worth a read - since we're talking about two different things. Moore's law isn't about clockspeed - its about transistor counts – Journeyman Geek Aug 16 '14 at 2:10
Let's pretend it was 30 years ago and CPUs ran at 4.77MHz. Why could you put 1000 computers with 4MHz CPUs in the same room even though it was impossible to get a 4GHz CPU? – user20574 Aug 16 '14 at 4:02
@NathanLong but it's still much easier to get more space (for more computers) than to get a faster computer. – user20574 Aug 17 '14 at 0:39
Analogy: a car engine can only be made to so many rpm, but you can easily bolt two together. – Ollie Ford Aug 17 '14 at 13:54
13 Answers
Summary
• Economics. It's cheaper and easier to design a CPU that has more cores than a higher clock speed, because:
• Significant increase in power usage. CPU power consumption increases rapidly as you increase the clock speed - you can double the number of cores operating at a lower speed in the thermal space it takes to increase the clock speed by 25%. Quadruple for 50%.
• There's other ways to increase sequential processing speed, and CPU manufacturers make good use of those.
I'm going to be drawing heavily on the excellent answers at this question on one of our sister SE sites. So go upvote them!
Clock speed limitations
There are a few known physical limitations to clock speed:
• Transmission time
The time it takes for an electrical signal to traverse a circuit is limited by the speed of light. This is a hard limit, and there is no known way around it1. At gigahertz-clocks, we are approaching this limit.
However, we are not there yet. 1 GHz means one nanosecond per clock tick. In that time, light can travel 30cm. At 10 GHz, light can travel 3cm. A single CPU core is about 5mm wide, so we will run into these issues somewhere past 10 GHz.2
• Switching delay
It's not enough to merely consider the time it takes for a signal to travel from one end to another. We also need to consider the time it takes for a logic gate within the CPU to switch from one state to another! As we increase clock speed, this can become an issue.
Unfortunately, I'm not sure about the specifics, and cannot provide any numbers.
Apparently, pumping more power into it can speed up switching, but this leads to both power consumption and heat dissipation issues. Also, more power means you need bulkier conduits capable of handling it without damage.
• Heat dissipation/power consumption
This is the big one. Quoting from fuzzyhair2's answer:
Recent processors are manufactured using CMOS technology. Every time there is a clock cycle, power is dissipated. Therefore, higher processor speeds means more heat dissipation.
There's some lovely measurements at this AnandTech forum thread, and they even derived a formula for the power consumption (which goes hand in hand with heat generated):
[Image: the power-consumption formula derived in that thread (credit: Idontcare)]
We can visualise this in the following graph:
[Image: graph of power consumption rising steeply with clock speed (credit: Idontcare)]
As you can see, power consumption (and heat generated) rises extremely rapidly as the clock speed is increased past a certain point. This makes it impractical to boundlessly increase clock speed.
The reason for the rapid increase in power usage is probably related to the switching delay - it's not enough to simply increase power proportional to the clock rate; the voltage must also be increased to maintain stability at higher clocks. This may not be completely correct; feel free to point out corrections in a comment, or make an edit to this answer.
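For reference, the usual first-order model for CMOS dynamic power (a textbook approximation, not taken from that thread) says much the same thing: P ≈ C · V^2 · f, where C is the switched capacitance, V the core voltage and f the clock frequency. Since V has to rise roughly in step with f to keep the transistors switching reliably, power ends up scaling roughly with f^3, so a 25% clock bump costs about 1.25^3 ≈ 1.95 times the power, which is where the "double the cores for the thermal cost of a 25% clock increase" trade-off above comes from.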
More cores?
So why more cores? Well, I can't answer that definitively. You'd have to ask the folks at Intel and AMD. But you can see above that, with modern CPUs, at some point it becomes impractical to increase clock speed.
Yes, multicore also increases power required, and heat dissipation. But it neatly avoids the transmission time and switching delay issues. And, as you can see from the graph, you can easily double the number of cores in a modern CPU with the same thermal overhead as a 25% increase in clock speed.
Some people have done it - the current overclocking world record is just shy of 9 GHz. But it is a significant engineering challenge to do so while keeping power consumption within acceptable bounds. The designers at some point decided that adding more cores to perform more work in parallel would provide a more effective boost to performance in most cases.
That's where the economics come in - it was likely cheaper (less design time, less complicated to manufacture) to go the multicore route. And it's easy to market - who doesn't love the brand new octa-core chip? (Of course, we know that multicore is pretty useless when the software doesn't make use of it...)
There is a downside to multicore: you need more physical space to put the extra core. However, CPU process sizes constantly shrink a lot, so there's plenty of space to put two copies of a previous design - the real tradeoff is not being able to create larger, more-complex, single cores. Then again, increasing core complexity is a bad thing from a design standpoint - more complexity = more mistakes/bugs and manufacturing errors. We seem to have found a happy medium with efficient cores that are simple enough to not take too much space.
We've already hit a limit with the number of cores we can fit on a single die at current process sizes. We might hit a limit of how far we can shrink things soon. So, what's next? Do we need more? That's difficult to answer, unfortunately. Anyone here a clairvoyant?
Other ways to improve performance
So, we can't increase the clock speed. And more cores have an additional disadvantage - namely, they only help when the software running on them can make use of them.
So, what else can we do? How are modern CPUs so much faster than older ones at the same clock speed?
Clock speed is really only a very rough approximation of the internal workings of a CPU. Not all components of a CPU work at that speed - some might operate once every two ticks, etc.
What's more significant is the number of instructions you can execute per unit of time. This is a far better measure of just how much a single CPU core can accomplish. Not all instructions are equal; some will take one clock cycle, some will take three. Division, for example, is considerably slower than addition.
So, we could make a CPU perform better by increasing the number of instructions it can execute per second. How? Well, you could make an instruction more efficient - maybe division now takes only two cycles. Then there's instruction pipelining. By breaking each instruction into multiple stages, it's possible to execute instructions "in parallel" - but each instruction still has a well-defined, sequential, order respective to the instructions before and after it, so it doesn't require software support like multicore does.
There is another way: more specialised instructions. We've seen things like SSE, which provide instructions to process large amounts of data at one time. There are new instruction sets constantly being introduced with similar goals. These, again, require software support and increase complexity of the hardware, but they provide a nice performance boost. Recently, there was AES-NI, which provides hardware-accelerated AES encryption and decryption, far faster than a bunch of arithmetic implemented in software.
1 Not without getting quite deep into theoretical quantum physics, anyway.
2 It might actually be lower, since electrical field propagation isn't quite as fast as the speed of light in a vacuum. Also, that's just for straight-line distance - it's likely that there's at least one path that's considerably longer than a straight line.
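To make the "specialised instructions" point concrete, here is a small illustration of my own (a hedged sketch, not from the answer): most mainstream runtimes now expose SIMD to ordinary code. In .NET, for example, System.Numerics.Vector<T> is JIT-compiled down to SSE/AVX instructions when the hardware supports them, so a single add processes several floats at once:

using System.Numerics;

static class SimdAdd
{
    // Adds two equally sized float arrays using whatever SIMD width the
    // hardware offers (4 floats with SSE, 8 with AVX).
    static void Add(float[] a, float[] b, float[] result)
    {
        int width = Vector<float>.Count;   // lanes per hardware register
        int i = 0;
        for (; i <= a.Length - width; i += width)
        {
            var va = new Vector<float>(a, i);
            var vb = new Vector<float>(b, i);
            (va + vb).CopyTo(result, i);   // one vector add handles 'width' floats
        }
        for (; i < a.Length; i++)          // scalar tail for the leftovers
            result[i] = a[i] + b[i];
    }
}

The same data-parallel idea is what SSE and its successors expose to C and C++ through compiler intrinsics.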
Also, in many applications the bottleneck is not the computation time, but the stall time to fetch data from the RAM (or, god forbid it, from disk); so, another major speedup comes from bigger, faster processor caches. – Matteo Italia Aug 15 '14 at 16:59
@MatteoItalia Yup. And there's also branch prediction improvements, and probably far more I don't know about. Outside the processor, we also have faster buses, faster memory, faster disks and associated protocols, etc.. – Bob Aug 15 '14 at 17:01
You mentioned that issues related to the "hard limit" of the speed of light will occur at "somewhere past 20 GHz". Your calculations aren't correct; electrical signals travel at speeds lower than the speed of light, depending on the geometry of the wire. – Giulio Muscarello Aug 15 '14 at 17:45
Please don't use "exponential" when there's entirely more correct words for this relationship (quadratic, cubic, etc.)... – Oliver Charlesworth Aug 15 '14 at 21:54
@OliCharlesworth Please read the footnote. This is precisely why the footnote is there, and why I have references to it everywhere exponential is used. This is a perfectly valid use of the word, and it would be tangential to the point of this answer to get bogged down in mathematical details. If you really want to try to "correct" it, feel free to suggest an edit. It won't be up to me if it gets accepted or not, as long as you don't significantly change the meaning. – Bob Aug 16 '14 at 2:03
Physics is physics. We can't keep packing more transistors into ever smaller spaces forever. At some point it gets so small that you deal with weird quantum crap. At some point we can't pack twice as many transistors in a year as we used to (which is what Moore's law is about).
Raw clockspeeds mean nothing. My old Pentium M was about half the clock speed of a contemporary desktop CPU (and yet in many respects faster) – and modern systems are barely approaching the speeds of systems 10 years ago (and are clearly faster). Basically 'just' bumping up the clockspeed does not give real performance gains in many cases. It may help in some singlethreaded operations, but you're better off spending the design budget on better efficiency in terms of everything else.
Multiple cores let you do two or more things at once, so you don't need to wait for one thing to finish for the next one. On the shorter term, you can simply pop two existing cores into the same package(for example with the Pentium Ds, and their MCM, which was a transitional design) and you have a system that's twice as fast. Most modern implementations do share things like a memory controller of course.
You can also build smarter in different ways. ARM does big.LITTLE, having 4 'weak' low-power cores working alongside 4 more powerful cores so you have the best of both worlds. Intel lets you down-throttle (for better power efficiency) or overclock specific cores (for better single-thread performance). I remember AMD does something with modules.
You can also move things like memory controllers (so you have lower latency) and IO related functions (the modern CPU has no north bridge) as well as video (which is more important with laptops and AIW design). It makes more sense to do these things than 'just' keep ramping up the clockspeed.
At some point 'more' cores may not work – though GPUs have hundreds of cores.
Multicore, as such, lets computers work smarter in all these ways.
Should be noted, that GPU cores are designed for a very specific purpose, as opposed to CPU cores which are more of a general purpose thing. Also should be noted, that video card boards are significantly larger than CPU's are (meaning even if the cores aren't using all of the room on the board, they still have MUCH more room to dissipate the heat). – user2366842 Aug 15 '14 at 14:41
true, but that's an example where you do scale up cores by an extreme amount. I'll likely revisit this answer in the morning – Journeyman Geek Aug 15 '14 at 14:44
"you can simply pop two existing cores into the same package". But how they achieve it without using more space for the extra cores How?! Magic? Unicorns? Puppies? (By your avatar, I suspect the latter) – That Brazilian Guy Aug 15 '14 at 15:31
Pentium Ds were that en.wikipedia.org/wiki/Pentium_D basically – Journeyman Geek Aug 15 '14 at 15:33
weird quantum crap: +1 for that alone! – Dave Aug 16 '14 at 7:07
Simple answer
The simplest answer to the question
Why doesn't "add more cores" face the same physical limitations as "make the CPU faster"?
is actually found within another part of your question:
I would expect the conclusion to be "therefore, we'll have to have bigger computers or run our programs on multiple computers."
In essence, multiple cores is like having multiple "computers" on the same device.
Complex answer
A "core" is the part of the computer that actually processes instructions (adding, multiplying, "and"ing, etc). A core can only execute a single instruction at one time. If you want your computer to be "more powerful" there are two basic things you can do:
1. Increase throughput (increase clock rate, decrease physical size, etc)
2. Use more cores in the same computer
The physical limitations to #1 are primarily the need to dump heat caused by the processing and the speed of an electron in the circuit. Once you split off some of those transistors to a separate core, you alleviate the heat issue to a large degree.
There's an important limitation to #2: you have to be able to split your problem up into multiple independent problems, and then combine the answer. On a modern personal computer, this isn't really a problem, as there are loads of independent problems all vying for computational time with the core(s) anyway. But when doing intensive computational problems, multiple cores only really help if the problem is amenable to concurrency.
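One standard way to quantify that limitation (not in the original answer, but the usual reference) is Amdahl's law: if a fraction p of a job can be spread across n cores and the rest is inherently serial, the best possible speedup is 1 / ((1 - p) + p/n). A job that is 90% parallelisable therefore tops out at about 1 / (0.1 + 0.9/4) ≈ 3.1x on four cores, and can never exceed 10x no matter how many cores are added.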
'multiple cores is like having multiple "computers" on the same device.' Right, but my confusion was, how do you fit them all in there? I thought "we can't go faster" was a symptom of "we can't shrink things much more." – Nathan Long Aug 15 '14 at 18:27
Multiple cores DO take up more space, chips are getting bigger. The burden has been shifted from the CPU to the software engineer... to actually run faster on one of these giant multi-core cpus the software has to be written such that you can cut its work in half and do both halves independently. – James Aug 16 '14 at 4:57
A short answer is that power consumption is proportional to the cube of clock speed. With signals travelling longer distances, the single thread illusion gets harder to maintain. The detailed answer: amazon.com/… – Rob Aug 18 '14 at 14:49
Why doesn't "add more cores" face the same physical limitations as "make the CPU faster"?
They do face the same physical limitations, but switching to multicore design gives us some breathing space before we hit some of them. At the same time other problems caused by those limitations arise, but they are easier to overcome.
Fact 1: Power consumption and emitted heat grow faster than computational power. Pushing a CPU from 1 GHz to 2 GHz will push power consumption from 20 W to 80 W, same with dissipated heat. (I just made up these numbers, but that's roughly how it works.)
Fact 2: Buying second CPU and running both at 1 GHz would double your computational power. Two CPUs running at 1 GHz could process same amount of data as one 2 GHz CPU, but each one would consume only 20 W of energy, that's 40 W in total.
Profit: Doubling CPU number instead of clock frequency saves us some energy and we're not as close to the "frequency barrier" as before.
Problem: You have to split the work between two CPUs and combine results later.
If you can solve this problem in acceptable time and using less energy than you have just saved, then you've just profited thanks to using multiple CPUs.
Now you just have to merge two CPUs into one dual-core CPU and you're home. This is beneficial because cores can share some parts of the CPU, for example the cache (related answer).
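As a rough sketch of that split-and-combine step in software (my own hedged example; any language with threads works the same way): each half of the work runs as its own task, which the OS can schedule on its own core, and the partial results are combined at the end.

using System;
using System.Threading.Tasks;

static class SplitAndCombine
{
    static long PartialSum(int[] data, int start, int end)
    {
        long sum = 0;
        for (int i = start; i < end; i++)
            sum += data[i];
        return sum;
    }

    static void Main()
    {
        var data = new int[10000000];
        for (int i = 0; i < data.Length; i++) data[i] = 1;

        int mid = data.Length / 2;
        // Each half runs on its own core (scheduler permitting)...
        var left  = Task.Run(() => PartialSum(data, 0, mid));
        var right = Task.Run(() => PartialSum(data, mid, data.Length));
        // ...and the partial results are combined afterwards.
        long total = left.Result + right.Result;

        Console.WriteLine(total);
    }
}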
We keep hitting physical limitations in 1000 different ways, size of the die (for multi-cores which leads to smaller manufacturing process), size of the mobo (for multi cpu), amps drawn thru the traces (both). its not like you could put 2 cpus on an m-atx or 4 and memory on an atx board and those designs take DECADES to change. I agree with the rest. – Rostol Aug 16 '14 at 3:04
@Rostol That's a good point, I have edited the answer. – gronostaj Aug 16 '14 at 10:46
Long story short: Speeding up single cores has reached its limits, so we keep shrinking them and adding more of them, until this reaches its limits or we can change to better materials (or achieve a fundamental breakthrough that overthrows the established tech, something like home-sized, actually working quantum computing).
I think this problem is multi dimensional and it will take some writing to paint the more complete picture:
1. Physical limitations (imposed by actual physics): Like speed of light, quantum mechanics, all that.
2. Manufacturing problems: How do we manufacture ever smaller structures with the needed precision? Problems related to raw materials, the materials used to build circuits, etc., durability
3. Architectural problems: heat, inference, power consumption etc.
4. Economical problems: What's the cheapest way to get more performance to the user
5. Usecases and user perception of performance
There may be many more. A multipurpose CPU is trying to find a solution that balances all these factors (and more) in one mass-producible chip that fits 93% of the subjects on the market. As you see, the last point is the most crucial one: customer perception, which is directly derived from the way the customer uses the CPU.
Ask yourself: what is your usual workload? Maybe 25 Firefox tabs, each playing some ads in the background, while you are listening to music, all while waiting for the build job you started some 2 hours ago to finish. That is a lot of work to be done, and still you want a smooth experience. But your CPU can handle ONE task at a time! One single thing. So what you do is split things up, make a looong queue, and everyone gets their own share, and all are happy. Except for you, because everything becomes laggy and not smooth at all.
So you speed your CPU up, in order to do more operations in the same amount of time. But as you said: heat and power consumption. And that's where we come to the raw-material part. Silicon becomes more conductive as it gets hotter, meaning more current flows through the material as you heat it up. Transistors have a higher power consumption as you switch them faster. Also, high frequencies make crosstalk between short wires worse. So you see, the speed-things-up approach will lead to a "meltdown". As long as we do not have better raw materials than silicon, or much better transistors, we are stuck where we are with single-core speed.
This gets us back to where we started: getting stuff done in parallel. Let's add another core. Now we can actually do two things at one time. So let's cool things down a bit and just write software that can split its work over two less powerful but more functional cores. This approach has two main problems (besides needing time for the software world to adapt to it): 1. make the chip larger, or make the individual cores smaller. 2. Some tasks simply cannot be split into two parts that run simultaneously. Keep adding cores as long as you can shrink them, or make the chip larger and keep the heat problem at bay. Oh, and let's not forget the customer. If we change our use cases, the industries have to adapt. See all the shiny "new" things the mobile sector has come up with. That is why the mobile sector is considered so crucial and everyone wants to get their hands on it.
Yes, this strategy WILL reach its limitations! And Intel knows this; that's why they say the future lies somewhere else. But they will keep doing it as long as it is cheap, effective and doable.
Last but not least: physics. Quantum mechanics will limit chip shrinking. The speed of light is not a limit yet, since electrons cannot travel at the speed of light in silicon; they are actually much slower than that. Also, it is the signal propagation speed that puts the hard cap on the speed offered by a material. Just as sound travels faster in water than in air, electric impulses travel faster in, for example, graphene than in silicon. This leads back to raw materials. Graphene is great as far as its electrical properties go. It would make a much better material to build CPUs from; unfortunately it is very hard to produce in large quantities.
Say (as an unrealistic example, but it should still get the point across) you have a CPU that's running at 100F. The way multicore usually works is by taking the clock frequency of that 100F CPU and lowering it, thus lowering the speed some. Because it's no longer running as hot, they can plop a second, third, or even fourth one right next to it without significantly affecting the overall temperature of the CPU, and gain the benefits of multicore. This obviously comes with some overhead, as the cores still have to be controlled from one place, by one core. The more cores you add, the more overhead there is. As for single cores, the more you crank up the speed on them, the more heat they generate. This obviously has physical limitations (i.e. after a certain point it starts becoming detrimental to performance, and even dangerous if it runs too hot).
As time goes by, they have found ways to decrease the physical size of CPUs, which is why we haven't really run into the requirement of needing more space yet. However, 16-core CPUs, for example, don't really exist (at the time of writing) outside of server-grade equipment because of the physical size limitations, as server-grade CPUs are significantly larger physically than standard consumer-grade ones.
CPU = car engine: it's easier to make a more powerful car with 16 valves, e.g. a Lamborghini, than a high-rpm car that would have one giant valve per cylinder running at 100,000 rpm.
The reasons are physical and chemical, silicon needs to be replaced with a computational rocket fuel to change the balance between number of cores and core speed.
I would say the primary restrictions on computational power are related to the limit of how fast we can move an electron through a circuit (the speed of light, electron drift). There are many more factors, like you mentioned.
Adding additional cores would not make the processor faster, although it would allow it to process more in the same amount of time.
Moore's Law is a very interesting and informative read. This quote in particular is relevant here:
One could also limit the theoretical performance of a rather practical "ultimate laptop" with a mass of one kilogram and a volume of one litre. This is done by considering the speed of light, the quantum scale, the gravitational constant and the Boltzmann constant, giving a performance of 5.4258 ⋅ 10^50 logical operations per second on approximately 10^31 bits.
Incidentally, electrons actually move very slowly ("drift velocity"), in the order of millimetres per second IIRC. You're more referring to the speed at which the electric field propagates. – Bob Aug 15 '14 at 14:43
I knew I ought not dare act like I understand physics in the slightest regard. :) Thank you for correcting/informing me on this though. – jRadd Aug 15 '14 at 14:44
Also, the actual clock speeds where propagation speed will have a significant effect are more around 10+ GHz. There are other reasons modern processors prefer multiple cores over higher clocks. – Bob Aug 15 '14 at 14:51
Long story even shorter:
We really don't need faster CPUs. Outside of a few highly specialized uses*, the CPU hasn't been the bottleneck for years - all the peripheral parts like memory, storage and network usually make the CPU wait for thousands or even millions of clock cycles, during which it can do other things (some rough figures follow this answer). A second core can do more "other things", producing a perception of higher performance for the user.
Many mobile devices, laptops, etc. will underclock the CPU for better battery life and cooler temperatures. There is not much incentive to develop a 3.5 GHz ARM core if your main customer runs it at 1.3 GHz.
* Those specialized uses don't buy enough to justify the development of a 5 GHz core. They also don't care about heat or power - they buy the fastest part available, overclock it, and bolt on a water-cooled heatsink the size of a toaster.
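To put rough numbers on that waiting (order-of-magnitude figures I am adding for illustration; they are not from the answer): a 3 GHz core runs about 3 cycles per nanosecond, so a ~100 ns main-memory access costs on the order of 300 cycles, a ~100 µs SSD read costs on the order of 300,000 cycles, and a ~1 ms network round trip costs on the order of 3,000,000 cycles. During any of those waits a second core can keep making progress on other work.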
It's not "perception", but "better latency". I think this is one of the main problems we have; a strong single-threaded throughput at all costs mentality. Consumer operating systems are not yet real-time oriented, nor fundamentally oriented to concurrency or parallelism. – Rob Aug 18 '14 at 15:26
@peter You make a very good point, and thanks for explaining that. It's something I need to remember as a programmer. :) It's still a bit of a side issue for this question's purposes, though. My question was about why we can't get faster clock speeds; your answer is about why we don't currently need to. – Nathan Long Aug 19 '14 at 13:40
@NathanLong "can't" and "don't need" are linked. You can't get faster processors because no one is willing to invest the billion or five needed to develop it (5GHz is probably pushing physical limits anyway). No one is willing to invest because there is no realistic market when overall processor speeds are trending down - some current manufacture desktops are in the 1.5GHz class (top of the line 10 years ago) because it's cheaper, cooler and fast enough for that market segment. – peter Aug 20 '14 at 2:05
I think another factor is temperature. If you increase the clock frequency, the core temperature goes up. If you add more cores, the total power consumption goes up, but it is distributed across the cores, so the temperature stays roughly the same (much as combining two liquids that are at the same temperature leaves the temperature unchanged).
Another reason is that increasing the clock frequency tends to increase power consumption roughly with the square of the factor by which you raise the frequency (depending on which other barriers you are hitting at a given point). Hence, raising the clock frequency increases power consumption quadratically, whereas adding more cores only increases it linearly.
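For reference, the standard first-order model for dynamic (switching) power in CMOS, which this answer alludes to but does not state, is P_dyn ≈ C × V^2 × f, where C is the switched capacitance, V the supply voltage and f the clock frequency. Raising f alone scales power linearly, but higher frequencies generally require a higher supply voltage as well, so in practice power grows roughly with the square (or worse) of the frequency increase, while doubling the number of cores at a fixed frequency roughly doubles C and therefore roughly doubles power.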
Short and simple answer:
Why doesn't going from 1 truck hauling goods to 100 trucks hauling 100 times as much face the same physical limitations as going from 1 truck driving at 100 mph to 1 truck driving at 10,000 mph?
Answer that question, and your question will also be answered. The concept is roughly the same.
The answer to the question of why "add more cores" doesn't face the same physical limitations as "make the CPU faster" is that a multicore system faces exactly the same limitations as a single-core CPU. We have reached a point where we don't really have the option of making a single core go faster, so we made it possible to do more things at once. The challenges of size and coordination are currently easier to solve than going faster. The downside is that if a task can't be broken down into pieces, we may not end up going much faster than a single-core system.
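The usual way to quantify that last point, although the answer does not name it, is Amdahl's law: speedup = 1 / ((1 - p) + p/n), where p is the fraction of the work that can be parallelised and n is the number of cores. For example, with p = 0.8 and n = 4 the speedup is 1 / (0.2 + 0.8/4) = 2.5, and even with infinitely many cores it can never exceed 1 / (1 - p) = 5.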
You can't make a CPU faster by simply cramming in more gates. Ultimately, instructions must be executed, and each instruction requires several "switching" operations. There are fundamental physical limits (quantum mechanics -- the Uncertainty Principle) that make it very difficult to increase the "clock speed" of a CPU beyond the speed of current top-end systems.
Remove-CMDriverPackage
Updated: February 7, 2014
Applies To: System Center 2012 R2 Configuration Manager
Remove-CMDriverPackage
Removes a driver package.
Syntax
Parameter Set: SearchByIdMandatory
Remove-CMDriverPackage -Id <String[]> [-Force] [-SecuredScopeNames <String> ] [-Confirm] [-WhatIf] [ <CommonParameters>]
Parameter Set: SearchByNameMandatory
Remove-CMDriverPackage -Name <String> [-Force] [-SecuredScopeNames <String> ] [-Confirm] [-WhatIf] [ <CommonParameters>]
Parameter Set: SearchByValueMandatory
Remove-CMDriverPackage -InputObject <IResultObject> [-Force] [-Confirm] [-WhatIf] [ <CommonParameters>]
Detailed Description
The Remove-CMDriverPackage cmdlet removes a driver package from Microsoft System Center 2012 Configuration Manager. After the driver package is removed, it cannot be used by any task sequence.
Parameters
-Force
Performs the action without a confirmation message.
Aliases: none
Required? false
Position? named
Default Value: none
Accept Pipeline Input? True (ByPropertyName)
Accept Wildcard Characters? false
-Id<String[]>
Specifies an array of identifiers for a driver package.
Aliases: PackageId
Required? true
Position? named
Default Value: none
Accept Pipeline Input? True (ByPropertyName)
Accept Wildcard Characters? false
-InputObject<IResultObject>
Specifies a driver package object. To obtain a driver package object, use the Get-CMDriverPackage cmdlet.
Aliases: none
Required? true
Position? named
Default Value: none
Accept Pipeline Input? True (ByPropertyName)
Accept Wildcard Characters? false
-Name<String>
Specifies the name of a driver package.
Aliases: none
Required? true
Position? named
Default Value: none
Accept Pipeline Input? True (ByPropertyName)
Accept Wildcard Characters? false
-SecuredScopeNames<String>
Specifies an array of secured scope names.
Aliases: none
Required? false
Position? named
Default Value: none
Accept Pipeline Input? True (ByPropertyName)
Accept Wildcard Characters? false
-Confirm
Prompts you for confirmation before executing the command.
Required? false
Position? named
Default Value: false
Accept Pipeline Input? false
Accept Wildcard Characters? false
-WhatIf
Describes what would happen if you executed the command without actually executing the command.
Required? false
Position? named
Default Value: false
Accept Pipeline Input? false
Accept Wildcard Characters? false
<CommonParameters>
This cmdlet supports the common parameters: -Verbose, -Debug, -ErrorAction, -ErrorVariable, -OutBuffer, and -OutVariable. For more information, see about_CommonParameters.
Inputs
The input type is the type of the objects that you can pipe to the cmdlet.
Outputs
The output type is the type of the objects that the cmdlet emits.
Examples
Example 1: Remove a driver package that is specified by its identifier
This command removes a driver package that is specified by its identifier.
PS C:\> Remove-CMDriverPackage -Id "ST100062"
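The following additional examples are illustrative sketches based on the other parameter sets shown in the Syntax section; the package name "Contoso Driver Package 01" is hypothetical, and Example 3 assumes that Get-CMDriverPackage accepts the same style of package identifier.
Example 2: Remove a driver package that is specified by its name
This command removes the driver package named "Contoso Driver Package 01" and uses the Force parameter to suppress the confirmation prompt.
PS C:\> Remove-CMDriverPackage -Name "Contoso Driver Package 01" -Force  # hypothetical package name
Example 3: Remove a driver package by using a driver package object
This command gets a driver package object and passes it to Remove-CMDriverPackage; the WhatIf parameter previews the removal without performing it.
PS C:\> $Package = Get-CMDriverPackage -Id "ST100062"  # assumes Get-CMDriverPackage supports lookup by this identifier
PS C:\> Remove-CMDriverPackage -InputObject $Package -WhatIf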