diff --git a/ckpts/universal/global_step40/zero/14.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/14.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..c2eb6ffb9dd74c008362adc9fd3c96f928f49907 --- /dev/null +++ b/ckpts/universal/global_step40/zero/14.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ec34c6a75f69252093c0b2a4049d5cbb72ce2dfdb246e68b2bfe89177266903 +size 9387 diff --git a/ckpts/universal/global_step40/zero/23.input_layernorm.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/23.input_layernorm.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..f0718860cf1d31afc6526839f8caf982e185e586 --- /dev/null +++ b/ckpts/universal/global_step40/zero/23.input_layernorm.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b653b78234e0782e985e3c7c00239a8dd9eaaf5f0e83b24d4850d227fc0e585 +size 9387 diff --git a/ckpts/universal/global_step40/zero/23.input_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/23.input_layernorm.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..fa197ea95d36e2acda3d2a5e3e90ef389fbeb5dc --- /dev/null +++ b/ckpts/universal/global_step40/zero/23.input_layernorm.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77ad8ab6e7a60fd6e6024c6f6ac80f55a84b923f82bd1a2f34a6b65066b1bf2b +size 9293 diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/__init__.py b/venv/lib/python3.10/site-packages/nltk/ccg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..43c5876b74dcf07ea70c9d90c1dcd41971e515a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/ccg/__init__.py @@ -0,0 +1,34 @@ +# Natural Language Toolkit: Combinatory Categorial Grammar +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Graeme Gange +# URL: +# For license information, see LICENSE.TXT + +""" +Combinatory Categorial Grammar. 
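A lexicon is built from a string and handed to the chart parser defined in
``nltk.ccg.chart``. A minimal sketch (the tiny lexicon here is illustrative
only; see the demo in ``nltk.ccg.chart`` for a fuller grammar):

    >>> from nltk.ccg import lexicon
    >>> lex = lexicon.fromstring('''
    ...     :- S, NP
    ...     I => NP
    ...     sleep => S\\NP
    ... ''')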
+ +For more information see nltk/doc/contrib/ccg/ccg.pdf +""" + +from nltk.ccg.chart import CCGChart, CCGChartParser, CCGEdge, CCGLeafEdge +from nltk.ccg.combinator import ( + BackwardApplication, + BackwardBx, + BackwardCombinator, + BackwardComposition, + BackwardSx, + BackwardT, + DirectedBinaryCombinator, + ForwardApplication, + ForwardCombinator, + ForwardComposition, + ForwardSubstitution, + ForwardT, + UndirectedBinaryCombinator, + UndirectedComposition, + UndirectedFunctionApplication, + UndirectedSubstitution, + UndirectedTypeRaise, +) +from nltk.ccg.lexicon import CCGLexicon diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef14d22930534a0c8fca21e70b44295fbaac0b0c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..462d6557e210c579776e4a169e960bf3935f1f0d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e3b6a74f2bdb39503a0c563975ac1d92c3c9940 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/chart.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb0a51cd19fedaae20e5ff1d55602402d3f9cd61 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/combinator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c12d2b4014ce6a2d7d02b89915af48fa9c542477 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/lexicon.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d7a16ae75e7593bc8ce5a77decf4b95951e0743 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/ccg/__pycache__/logic.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/api.py b/venv/lib/python3.10/site-packages/nltk/ccg/api.py new file mode 100644 index 0000000000000000000000000000000000000000..f0d1355cfadca031cf0017584d819fe794ffaea3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/ccg/api.py @@ -0,0 +1,358 @@ +# Natural Language Toolkit: CCG Categories +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Graeme Gange +# URL: +# For license information, see LICENSE.TXT + +from abc import ABCMeta, abstractmethod +from functools import total_ordering + +from nltk.internals import 
raise_unorderable_types + + +@total_ordering +class AbstractCCGCategory(metaclass=ABCMeta): + """ + Interface for categories in combinatory grammars. + """ + + @abstractmethod + def is_primitive(self): + """ + Returns true if the category is primitive. + """ + + @abstractmethod + def is_function(self): + """ + Returns true if the category is a function application. + """ + + @abstractmethod + def is_var(self): + """ + Returns true if the category is a variable. + """ + + @abstractmethod + def substitute(self, substitutions): + """ + Takes a set of (var, category) substitutions, and replaces every + occurrence of the variable with the corresponding category. + """ + + @abstractmethod + def can_unify(self, other): + """ + Determines whether two categories can be unified. + - Returns None if they cannot be unified + - Returns a list of necessary substitutions if they can. + """ + + # Utility functions: comparison, strings and hashing. + @abstractmethod + def __str__(self): + pass + + def __eq__(self, other): + return ( + self.__class__ is other.__class__ + and self._comparison_key == other._comparison_key + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, AbstractCCGCategory): + raise_unorderable_types("<", self, other) + if self.__class__ is other.__class__: + return self._comparison_key < other._comparison_key + else: + return self.__class__.__name__ < other.__class__.__name__ + + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self._comparison_key) + return self._hash + + +class CCGVar(AbstractCCGCategory): + """ + Class representing a variable CCG category. + Used for conjunctions (and possibly type-raising, if implemented as a + unary rule). + """ + + _maxID = 0 + + def __init__(self, prim_only=False): + """Initialize a variable (selects a new identifier) + + :param prim_only: a boolean that determines whether the variable is + restricted to primitives + :type prim_only: bool + """ + self._id = self.new_id() + self._prim_only = prim_only + self._comparison_key = self._id + + @classmethod + def new_id(cls): + """ + A class method allowing generation of unique variable identifiers. + """ + cls._maxID = cls._maxID + 1 + return cls._maxID - 1 + + @classmethod + def reset_id(cls): + cls._maxID = 0 + + def is_primitive(self): + return False + + def is_function(self): + return False + + def is_var(self): + return True + + def substitute(self, substitutions): + """If there is a substitution corresponding to this variable, + return the substituted category. + """ + for (var, cat) in substitutions: + if var == self: + return cat + return self + + def can_unify(self, other): + """If the variable can be replaced with other + a substitution is returned. + """ + if other.is_primitive() or not self._prim_only: + return [(self, other)] + return None + + def id(self): + return self._id + + def __str__(self): + return "_var" + str(self._id) + + +@total_ordering +class Direction: + """ + Class representing the direction of a function application. + Also contains maintains information as to which combinators + may be used with the category. 
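    A small sketch of the intended behaviour, based on the accessors defined
    below:

        >>> Direction('/', []).is_forward()
        True
        >>> Direction('/', ['.']).can_cross()    # '.' forbids the crossed (permuting) combinators
        False
        >>> Direction('/', [',']).can_compose()  # ',' forbids composition
        False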
+ """ + + def __init__(self, dir, restrictions): + self._dir = dir + self._restrs = restrictions + self._comparison_key = (dir, tuple(restrictions)) + + # Testing the application direction + def is_forward(self): + return self._dir == "/" + + def is_backward(self): + return self._dir == "\\" + + def dir(self): + return self._dir + + def restrs(self): + """A list of restrictions on the combinators. + '.' denotes that permuting operations are disallowed + ',' denotes that function composition is disallowed + '_' denotes that the direction has variable restrictions. + (This is redundant in the current implementation of type-raising) + """ + return self._restrs + + def is_variable(self): + return self._restrs == "_" + + # Unification and substitution of variable directions. + # Used only if type-raising is implemented as a unary rule, as it + # must inherit restrictions from the argument category. + def can_unify(self, other): + if other.is_variable(): + return [("_", self.restrs())] + elif self.is_variable(): + return [("_", other.restrs())] + else: + if self.restrs() == other.restrs(): + return [] + return None + + def substitute(self, subs): + if not self.is_variable(): + return self + + for (var, restrs) in subs: + if var == "_": + return Direction(self._dir, restrs) + return self + + # Testing permitted combinators + def can_compose(self): + return "," not in self._restrs + + def can_cross(self): + return "." not in self._restrs + + def __eq__(self, other): + return ( + self.__class__ is other.__class__ + and self._comparison_key == other._comparison_key + ) + + def __ne__(self, other): + return not self == other + + def __lt__(self, other): + if not isinstance(other, Direction): + raise_unorderable_types("<", self, other) + if self.__class__ is other.__class__: + return self._comparison_key < other._comparison_key + else: + return self.__class__.__name__ < other.__class__.__name__ + + def __hash__(self): + try: + return self._hash + except AttributeError: + self._hash = hash(self._comparison_key) + return self._hash + + def __str__(self): + r_str = "" + for r in self._restrs: + r_str = r_str + "%s" % r + return f"{self._dir}{r_str}" + + # The negation operator reverses the direction of the application + def __neg__(self): + if self._dir == "/": + return Direction("\\", self._restrs) + else: + return Direction("/", self._restrs) + + +class PrimitiveCategory(AbstractCCGCategory): + """ + Class representing primitive categories. + Takes a string representation of the category, and a + list of strings specifying the morphological subcategories. + """ + + def __init__(self, categ, restrictions=[]): + self._categ = categ + self._restrs = restrictions + self._comparison_key = (categ, tuple(restrictions)) + + def is_primitive(self): + return True + + def is_function(self): + return False + + def is_var(self): + return False + + def restrs(self): + return self._restrs + + def categ(self): + return self._categ + + # Substitution does nothing to a primitive category + def substitute(self, subs): + return self + + # A primitive can be unified with a class of the same + # base category, given that the other category shares all + # of its subclasses, or with a variable. 
+ def can_unify(self, other): + if not other.is_primitive(): + return None + if other.is_var(): + return [(other, self)] + if other.categ() == self.categ(): + for restr in self._restrs: + if restr not in other.restrs(): + return None + return [] + return None + + def __str__(self): + if self._restrs == []: + return "%s" % self._categ + restrictions = "[%s]" % ",".join(repr(r) for r in self._restrs) + return f"{self._categ}{restrictions}" + + +class FunctionalCategory(AbstractCCGCategory): + """ + Class that represents a function application category. + Consists of argument and result categories, together with + an application direction. + """ + + def __init__(self, res, arg, dir): + self._res = res + self._arg = arg + self._dir = dir + self._comparison_key = (arg, dir, res) + + def is_primitive(self): + return False + + def is_function(self): + return True + + def is_var(self): + return False + + # Substitution returns the category consisting of the + # substitution applied to each of its constituents. + def substitute(self, subs): + sub_res = self._res.substitute(subs) + sub_dir = self._dir.substitute(subs) + sub_arg = self._arg.substitute(subs) + return FunctionalCategory(sub_res, sub_arg, self._dir) + + # A function can unify with another function, so long as its + # constituents can unify, or with an unrestricted variable. + def can_unify(self, other): + if other.is_var(): + return [(other, self)] + if other.is_function(): + sa = self._res.can_unify(other.res()) + sd = self._dir.can_unify(other.dir()) + if sa is not None and sd is not None: + sb = self._arg.substitute(sa).can_unify(other.arg().substitute(sa)) + if sb is not None: + return sa + sb + return None + + # Constituent accessors + def arg(self): + return self._arg + + def res(self): + return self._res + + def dir(self): + return self._dir + + def __str__(self): + return f"({self._res}{self._dir}{self._arg})" diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/chart.py b/venv/lib/python3.10/site-packages/nltk/ccg/chart.py new file mode 100644 index 0000000000000000000000000000000000000000..bf9e61036199016f89e89a8b0980d38d856ac4dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/ccg/chart.py @@ -0,0 +1,480 @@ +# Natural Language Toolkit: Combinatory Categorial Grammar +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Graeme Gange +# URL: +# For license information, see LICENSE.TXT + +""" +The lexicon is constructed by calling +``lexicon.fromstring()``. + +In order to construct a parser, you also need a rule set. +The standard English rules are provided in chart as +``chart.DefaultRuleSet``. + +The parser can then be constructed by calling, for example: +``parser = chart.CCGChartParser(, )`` + +Parsing is then performed by running +``parser.parse(.split())``. + +While this returns a list of trees, the default representation +of the produced trees is not very enlightening, particularly +given that it uses the same tree class as the CFG parsers. +It is probably better to call: +``chart.printCCGDerivation()`` +which should print a nice representation of the derivation. 
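A rough usage sketch, mirroring the ``demo()`` function at the bottom of this
module (``lex`` is the demo lexicon built there with ``lexicon.fromstring()``):

    >>> from nltk.ccg.chart import CCGChartParser, DefaultRuleSet, printCCGDerivation, lex
    >>> parser = CCGChartParser(lex, DefaultRuleSet)
    >>> for parse in parser.parse("I might cook and eat the bacon".split()):  # doctest: +SKIP
    ...     printCCGDerivation(parse)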
+ +This entire process is shown far more clearly in the demonstration: +python chart.py +""" + +import itertools + +from nltk.ccg.combinator import * +from nltk.ccg.combinator import ( + BackwardApplication, + BackwardBx, + BackwardComposition, + BackwardSx, + BackwardT, + ForwardApplication, + ForwardComposition, + ForwardSubstitution, + ForwardT, +) +from nltk.ccg.lexicon import Token, fromstring +from nltk.ccg.logic import * +from nltk.parse import ParserI +from nltk.parse.chart import AbstractChartRule, Chart, EdgeI +from nltk.sem.logic import * +from nltk.tree import Tree + + +# Based on the EdgeI class from NLTK. +# A number of the properties of the EdgeI interface don't +# transfer well to CCGs, however. +class CCGEdge(EdgeI): + def __init__(self, span, categ, rule): + self._span = span + self._categ = categ + self._rule = rule + self._comparison_key = (span, categ, rule) + + # Accessors + def lhs(self): + return self._categ + + def span(self): + return self._span + + def start(self): + return self._span[0] + + def end(self): + return self._span[1] + + def length(self): + return self._span[1] - self.span[0] + + def rhs(self): + return () + + def dot(self): + return 0 + + def is_complete(self): + return True + + def is_incomplete(self): + return False + + def nextsym(self): + return None + + def categ(self): + return self._categ + + def rule(self): + return self._rule + + +class CCGLeafEdge(EdgeI): + """ + Class representing leaf edges in a CCG derivation. + """ + + def __init__(self, pos, token, leaf): + self._pos = pos + self._token = token + self._leaf = leaf + self._comparison_key = (pos, token.categ(), leaf) + + # Accessors + def lhs(self): + return self._token.categ() + + def span(self): + return (self._pos, self._pos + 1) + + def start(self): + return self._pos + + def end(self): + return self._pos + 1 + + def length(self): + return 1 + + def rhs(self): + return self._leaf + + def dot(self): + return 0 + + def is_complete(self): + return True + + def is_incomplete(self): + return False + + def nextsym(self): + return None + + def token(self): + return self._token + + def categ(self): + return self._token.categ() + + def leaf(self): + return self._leaf + + +class BinaryCombinatorRule(AbstractChartRule): + """ + Class implementing application of a binary combinator to a chart. + Takes the directed combinator to apply. + """ + + NUMEDGES = 2 + + def __init__(self, combinator): + self._combinator = combinator + + # Apply a combinator + def apply(self, chart, grammar, left_edge, right_edge): + # The left & right edges must be touching. + if not (left_edge.end() == right_edge.start()): + return + + # Check if the two edges are permitted to combine. + # If so, generate the corresponding edge. + if self._combinator.can_combine(left_edge.categ(), right_edge.categ()): + for res in self._combinator.combine(left_edge.categ(), right_edge.categ()): + new_edge = CCGEdge( + span=(left_edge.start(), right_edge.end()), + categ=res, + rule=self._combinator, + ) + if chart.insert(new_edge, (left_edge, right_edge)): + yield new_edge + + # The representation of the combinator (for printing derivations) + def __str__(self): + return "%s" % self._combinator + + +# Type-raising must be handled slightly differently to the other rules, as the +# resulting rules only span a single edge, rather than both edges. 
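# For example (illustrative only), with forward type raising the left edge is
# rewritten in place while the right edge merely licenses the rule:
#
#   NP    (S\NP)/NP
#   ---------->T
#   S/(S\NP)              <- new edge, spanning only the original NP edge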
+ + +class ForwardTypeRaiseRule(AbstractChartRule): + """ + Class for applying forward type raising + """ + + NUMEDGES = 2 + + def __init__(self): + self._combinator = ForwardT + + def apply(self, chart, grammar, left_edge, right_edge): + if not (left_edge.end() == right_edge.start()): + return + + for res in self._combinator.combine(left_edge.categ(), right_edge.categ()): + new_edge = CCGEdge(span=left_edge.span(), categ=res, rule=self._combinator) + if chart.insert(new_edge, (left_edge,)): + yield new_edge + + def __str__(self): + return "%s" % self._combinator + + +class BackwardTypeRaiseRule(AbstractChartRule): + """ + Class for applying backward type raising. + """ + + NUMEDGES = 2 + + def __init__(self): + self._combinator = BackwardT + + def apply(self, chart, grammar, left_edge, right_edge): + if not (left_edge.end() == right_edge.start()): + return + + for res in self._combinator.combine(left_edge.categ(), right_edge.categ()): + new_edge = CCGEdge(span=right_edge.span(), categ=res, rule=self._combinator) + if chart.insert(new_edge, (right_edge,)): + yield new_edge + + def __str__(self): + return "%s" % self._combinator + + +# Common sets of combinators used for English derivations. +ApplicationRuleSet = [ + BinaryCombinatorRule(ForwardApplication), + BinaryCombinatorRule(BackwardApplication), +] +CompositionRuleSet = [ + BinaryCombinatorRule(ForwardComposition), + BinaryCombinatorRule(BackwardComposition), + BinaryCombinatorRule(BackwardBx), +] +SubstitutionRuleSet = [ + BinaryCombinatorRule(ForwardSubstitution), + BinaryCombinatorRule(BackwardSx), +] +TypeRaiseRuleSet = [ForwardTypeRaiseRule(), BackwardTypeRaiseRule()] + +# The standard English rule set. +DefaultRuleSet = ( + ApplicationRuleSet + CompositionRuleSet + SubstitutionRuleSet + TypeRaiseRuleSet +) + + +class CCGChartParser(ParserI): + """ + Chart parser for CCGs. + Based largely on the ChartParser class from NLTK. + """ + + def __init__(self, lexicon, rules, trace=0): + self._lexicon = lexicon + self._rules = rules + self._trace = trace + + def lexicon(self): + return self._lexicon + + # Implements the CYK algorithm + def parse(self, tokens): + tokens = list(tokens) + chart = CCGChart(list(tokens)) + lex = self._lexicon + + # Initialize leaf edges. + for index in range(chart.num_leaves()): + for token in lex.categories(chart.leaf(index)): + new_edge = CCGLeafEdge(index, token, chart.leaf(index)) + chart.insert(new_edge, ()) + + # Select a span for the new edges + for span in range(2, chart.num_leaves() + 1): + for start in range(0, chart.num_leaves() - span + 1): + # Try all possible pairs of edges that could generate + # an edge for that span + for part in range(1, span): + lstart = start + mid = start + part + rend = start + span + + for left in chart.select(span=(lstart, mid)): + for right in chart.select(span=(mid, rend)): + # Generate all possible combinations of the two edges + for rule in self._rules: + edges_added_by_rule = 0 + for newedge in rule.apply(chart, lex, left, right): + edges_added_by_rule += 1 + + # Output the resulting parses + return chart.parses(lex.start()) + + +class CCGChart(Chart): + def __init__(self, tokens): + Chart.__init__(self, tokens) + + # Constructs the trees for a given parse. 
Unfortnunately, the parse trees need to be + # constructed slightly differently to those in the default Chart class, so it has to + # be reimplemented + def _trees(self, edge, complete, memo, tree_class): + assert complete, "CCGChart cannot build incomplete trees" + + if edge in memo: + return memo[edge] + + if isinstance(edge, CCGLeafEdge): + word = tree_class(edge.token(), [self._tokens[edge.start()]]) + leaf = tree_class((edge.token(), "Leaf"), [word]) + memo[edge] = [leaf] + return [leaf] + + memo[edge] = [] + trees = [] + + for cpl in self.child_pointer_lists(edge): + child_choices = [self._trees(cp, complete, memo, tree_class) for cp in cpl] + for children in itertools.product(*child_choices): + lhs = ( + Token( + self._tokens[edge.start() : edge.end()], + edge.lhs(), + compute_semantics(children, edge), + ), + str(edge.rule()), + ) + trees.append(tree_class(lhs, children)) + + memo[edge] = trees + return trees + + +def compute_semantics(children, edge): + if children[0].label()[0].semantics() is None: + return None + + if len(children) == 2: + if isinstance(edge.rule(), BackwardCombinator): + children = [children[1], children[0]] + + combinator = edge.rule()._combinator + function = children[0].label()[0].semantics() + argument = children[1].label()[0].semantics() + + if isinstance(combinator, UndirectedFunctionApplication): + return compute_function_semantics(function, argument) + elif isinstance(combinator, UndirectedComposition): + return compute_composition_semantics(function, argument) + elif isinstance(combinator, UndirectedSubstitution): + return compute_substitution_semantics(function, argument) + else: + raise AssertionError("Unsupported combinator '" + combinator + "'") + else: + return compute_type_raised_semantics(children[0].label()[0].semantics()) + + +# -------- +# Displaying derivations +# -------- +def printCCGDerivation(tree): + # Get the leaves and initial categories + leafcats = tree.pos() + leafstr = "" + catstr = "" + + # Construct a string with both the leaf word and corresponding + # category aligned. + for (leaf, cat) in leafcats: + str_cat = "%s" % cat + nextlen = 2 + max(len(leaf), len(str_cat)) + lcatlen = (nextlen - len(str_cat)) // 2 + rcatlen = lcatlen + (nextlen - len(str_cat)) % 2 + catstr += " " * lcatlen + str_cat + " " * rcatlen + lleaflen = (nextlen - len(leaf)) // 2 + rleaflen = lleaflen + (nextlen - len(leaf)) % 2 + leafstr += " " * lleaflen + leaf + " " * rleaflen + print(leafstr.rstrip()) + print(catstr.rstrip()) + + # Display the derivation steps + printCCGTree(0, tree) + + +# Prints the sequence of derivation steps. +def printCCGTree(lwidth, tree): + rwidth = lwidth + + # Is a leaf (word). + # Increment the span by the space occupied by the leaf. + if not isinstance(tree, Tree): + return 2 + lwidth + len(tree) + + # Find the width of the current derivation step + for child in tree: + rwidth = max(rwidth, printCCGTree(rwidth, child)) + + # Is a leaf node. + # Don't print anything, but account for the space occupied. + if not isinstance(tree.label(), tuple): + return max( + rwidth, 2 + lwidth + len("%s" % tree.label()), 2 + lwidth + len(tree[0]) + ) + + (token, op) = tree.label() + + if op == "Leaf": + return rwidth + + # Pad to the left with spaces, followed by a sequence of '-' + # and the derivation rule. + print(lwidth * " " + (rwidth - lwidth) * "-" + "%s" % op) + # Print the resulting category on a new line. 
+ str_res = "%s" % (token.categ()) + if token.semantics() is not None: + str_res += " {" + str(token.semantics()) + "}" + respadlen = (rwidth - lwidth - len(str_res)) // 2 + lwidth + print(respadlen * " " + str_res) + return rwidth + + +### Demonstration code + +# Construct the lexicon +lex = fromstring( + """ + :- S, NP, N, VP # Primitive categories, S is the target primitive + + Det :: NP/N # Family of words + Pro :: NP + TV :: VP/NP + Modal :: (S\\NP)/VP # Backslashes need to be escaped + + I => Pro # Word -> Category mapping + you => Pro + + the => Det + + # Variables have the special keyword 'var' + # '.' prevents permutation + # ',' prevents composition + and => var\\.,var/.,var + + which => (N\\N)/(S/NP) + + will => Modal # Categories can be either explicit, or families. + might => Modal + + cook => TV + eat => TV + + mushrooms => N + parsnips => N + bacon => N + """ +) + + +def demo(): + parser = CCGChartParser(lex, DefaultRuleSet) + for parse in parser.parse("I might cook and eat the bacon".split()): + printCCGDerivation(parse) + + +if __name__ == "__main__": + demo() diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/combinator.py b/venv/lib/python3.10/site-packages/nltk/ccg/combinator.py new file mode 100644 index 0000000000000000000000000000000000000000..6efe6adf40d1aea7c98df1aceccdf9cf5c7b5c31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/ccg/combinator.py @@ -0,0 +1,339 @@ +# Natural Language Toolkit: Combinatory Categorial Grammar +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Graeme Gange +# URL: +# For license information, see LICENSE.TXT +""" +CCG Combinators +""" + +from abc import ABCMeta, abstractmethod + +from nltk.ccg.api import FunctionalCategory + + +class UndirectedBinaryCombinator(metaclass=ABCMeta): + """ + Abstract class for representing a binary combinator. + Merely defines functions for checking if the function and argument + are able to be combined, and what the resulting category is. + + Note that as no assumptions are made as to direction, the unrestricted + combinators can perform all backward, forward and crossed variations + of the combinators; these restrictions must be added in the rule + class. + """ + + @abstractmethod + def can_combine(self, function, argument): + pass + + @abstractmethod + def combine(self, function, argument): + pass + + +class DirectedBinaryCombinator(metaclass=ABCMeta): + """ + Wrapper for the undirected binary combinator. + It takes left and right categories, and decides which is to be + the function, and which the argument. + It then decides whether or not they can be combined. + """ + + @abstractmethod + def can_combine(self, left, right): + pass + + @abstractmethod + def combine(self, left, right): + pass + + +class ForwardCombinator(DirectedBinaryCombinator): + """ + Class representing combinators where the primary functor is on the left. + + Takes an undirected combinator, and a predicate which adds constraints + restricting the cases in which it may apply. 
+ """ + + def __init__(self, combinator, predicate, suffix=""): + self._combinator = combinator + self._predicate = predicate + self._suffix = suffix + + def can_combine(self, left, right): + return self._combinator.can_combine(left, right) and self._predicate( + left, right + ) + + def combine(self, left, right): + yield from self._combinator.combine(left, right) + + def __str__(self): + return f">{self._combinator}{self._suffix}" + + +class BackwardCombinator(DirectedBinaryCombinator): + """ + The backward equivalent of the ForwardCombinator class. + """ + + def __init__(self, combinator, predicate, suffix=""): + self._combinator = combinator + self._predicate = predicate + self._suffix = suffix + + def can_combine(self, left, right): + return self._combinator.can_combine(right, left) and self._predicate( + left, right + ) + + def combine(self, left, right): + yield from self._combinator.combine(right, left) + + def __str__(self): + return f"<{self._combinator}{self._suffix}" + + +class UndirectedFunctionApplication(UndirectedBinaryCombinator): + """ + Class representing function application. + Implements rules of the form: + X/Y Y -> X (>) + And the corresponding backwards application rule + """ + + def can_combine(self, function, argument): + if not function.is_function(): + return False + + return not function.arg().can_unify(argument) is None + + def combine(self, function, argument): + if not function.is_function(): + return + + subs = function.arg().can_unify(argument) + if subs is None: + return + + yield function.res().substitute(subs) + + def __str__(self): + return "" + + +# Predicates for function application. + +# Ensures the left functor takes an argument on the right +def forwardOnly(left, right): + return left.dir().is_forward() + + +# Ensures the right functor takes an argument on the left +def backwardOnly(left, right): + return right.dir().is_backward() + + +# Application combinator instances +ForwardApplication = ForwardCombinator(UndirectedFunctionApplication(), forwardOnly) +BackwardApplication = BackwardCombinator(UndirectedFunctionApplication(), backwardOnly) + + +class UndirectedComposition(UndirectedBinaryCombinator): + """ + Functional composition (harmonic) combinator. + Implements rules of the form + X/Y Y/Z -> X/Z (B>) + And the corresponding backwards and crossed variations. + """ + + def can_combine(self, function, argument): + # Can only combine two functions, and both functions must + # allow composition. + if not (function.is_function() and argument.is_function()): + return False + if function.dir().can_compose() and argument.dir().can_compose(): + return not function.arg().can_unify(argument.res()) is None + return False + + def combine(self, function, argument): + if not (function.is_function() and argument.is_function()): + return + if function.dir().can_compose() and argument.dir().can_compose(): + subs = function.arg().can_unify(argument.res()) + if subs is not None: + yield FunctionalCategory( + function.res().substitute(subs), + argument.arg().substitute(subs), + argument.dir(), + ) + + def __str__(self): + return "B" + + +# Predicates for restricting application of straight composition. 
+def bothForward(left, right): + return left.dir().is_forward() and right.dir().is_forward() + + +def bothBackward(left, right): + return left.dir().is_backward() and right.dir().is_backward() + + +# Predicates for crossed composition +def crossedDirs(left, right): + return left.dir().is_forward() and right.dir().is_backward() + + +def backwardBxConstraint(left, right): + # The functors must be crossed inwards + if not crossedDirs(left, right): + return False + # Permuting combinators must be allowed + if not left.dir().can_cross() and right.dir().can_cross(): + return False + # The resulting argument category is restricted to be primitive + return left.arg().is_primitive() + + +# Straight composition combinators +ForwardComposition = ForwardCombinator(UndirectedComposition(), forwardOnly) +BackwardComposition = BackwardCombinator(UndirectedComposition(), backwardOnly) + +# Backward crossed composition +BackwardBx = BackwardCombinator( + UndirectedComposition(), backwardBxConstraint, suffix="x" +) + + +class UndirectedSubstitution(UndirectedBinaryCombinator): + r""" + Substitution (permutation) combinator. + Implements rules of the form + Y/Z (X\Y)/Z -> X/Z ( N\N +def innermostFunction(categ): + while categ.res().is_function(): + categ = categ.res() + return categ + + +class UndirectedTypeRaise(UndirectedBinaryCombinator): + """ + Undirected combinator for type raising. + """ + + def can_combine(self, function, arg): + # The argument must be a function. + # The restriction that arg.res() must be a function + # merely reduces redundant type-raising; if arg.res() is + # primitive, we have: + # X Y\X =>((>) Y + # which is equivalent to + # X Y\X =>(<) Y + if not (arg.is_function() and arg.res().is_function()): + return False + + arg = innermostFunction(arg) + + # left, arg_categ are undefined! + subs = left.can_unify(arg_categ.arg()) + if subs is not None: + return True + return False + + def combine(self, function, arg): + if not ( + function.is_primitive() and arg.is_function() and arg.res().is_function() + ): + return + + # Type-raising matches only the innermost application. + arg = innermostFunction(arg) + + subs = function.can_unify(arg.arg()) + if subs is not None: + xcat = arg.res().substitute(subs) + yield FunctionalCategory( + xcat, FunctionalCategory(xcat, function, arg.dir()), -(arg.dir()) + ) + + def __str__(self): + return "T" + + +# Predicates for type-raising +# The direction of the innermost category must be towards +# the primary functor. +# The restriction that the variable must be primitive is not +# common to all versions of CCGs; some authors have other restrictions. 
+def forwardTConstraint(left, right): + arg = innermostFunction(right) + return arg.dir().is_backward() and arg.res().is_primitive() + + +def backwardTConstraint(left, right): + arg = innermostFunction(left) + return arg.dir().is_forward() and arg.res().is_primitive() + + +# Instances of type-raising combinators +ForwardT = ForwardCombinator(UndirectedTypeRaise(), forwardTConstraint) +BackwardT = BackwardCombinator(UndirectedTypeRaise(), backwardTConstraint) diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/lexicon.py b/venv/lib/python3.10/site-packages/nltk/ccg/lexicon.py new file mode 100644 index 0000000000000000000000000000000000000000..da7d00ab6bcdfa190f49fe7c141a23542426ff20 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/ccg/lexicon.py @@ -0,0 +1,338 @@ +# Natural Language Toolkit: Combinatory Categorial Grammar +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Graeme Gange +# URL: +# For license information, see LICENSE.TXT +""" +CCG Lexicons +""" + +import re +from collections import defaultdict + +from nltk.ccg.api import CCGVar, Direction, FunctionalCategory, PrimitiveCategory +from nltk.internals import deprecated +from nltk.sem.logic import Expression + +# ------------ +# Regular expressions used for parsing components of the lexicon +# ------------ + +# Parses a primitive category and subscripts +PRIM_RE = re.compile(r"""([A-Za-z]+)(\[[A-Za-z,]+\])?""") + +# Separates the next primitive category from the remainder of the +# string +NEXTPRIM_RE = re.compile(r"""([A-Za-z]+(?:\[[A-Za-z,]+\])?)(.*)""") + +# Separates the next application operator from the remainder +APP_RE = re.compile(r"""([\\/])([.,]?)([.,]?)(.*)""") + +# Parses the definition of the right-hand side (rhs) of either a word or a family +LEX_RE = re.compile(r"""([\S_]+)\s*(::|[-=]+>)\s*(.+)""", re.UNICODE) + +# Parses the right hand side that contains category and maybe semantic predicate +RHS_RE = re.compile(r"""([^{}]*[^ {}])\s*(\{[^}]+\})?""", re.UNICODE) + +# Parses the semantic predicate +SEMANTICS_RE = re.compile(r"""\{([^}]+)\}""", re.UNICODE) + +# Strips comments from a line +COMMENTS_RE = re.compile("""([^#]*)(?:#.*)?""") + + +class Token: + """ + Class representing a token. + + token => category {semantics} + e.g. eat => S\\var[pl]/var {\\x y.eat(x,y)} + + * `token` (string) + * `categ` (string) + * `semantics` (Expression) + """ + + def __init__(self, token, categ, semantics=None): + self._token = token + self._categ = categ + self._semantics = semantics + + def categ(self): + return self._categ + + def semantics(self): + return self._semantics + + def __str__(self): + semantics_str = "" + if self._semantics is not None: + semantics_str = " {" + str(self._semantics) + "}" + return "" + str(self._categ) + semantics_str + + def __cmp__(self, other): + if not isinstance(other, Token): + return -1 + return cmp((self._categ, self._semantics), other.categ(), other.semantics()) + + +class CCGLexicon: + """ + Class representing a lexicon for CCG grammars. 
+ + * `primitives`: The list of primitive categories for the lexicon + * `families`: Families of categories + * `entries`: A mapping of words to possible categories + """ + + def __init__(self, start, primitives, families, entries): + self._start = PrimitiveCategory(start) + self._primitives = primitives + self._families = families + self._entries = entries + + def categories(self, word): + """ + Returns all the possible categories for a word + """ + return self._entries[word] + + def start(self): + """ + Return the target category for the parser + """ + return self._start + + def __str__(self): + """ + String representation of the lexicon. Used for debugging. + """ + string = "" + first = True + for ident in sorted(self._entries): + if not first: + string = string + "\n" + string = string + ident + " => " + + first = True + for cat in self._entries[ident]: + if not first: + string = string + " | " + else: + first = False + string = string + "%s" % cat + return string + + +# ----------- +# Parsing lexicons +# ----------- + + +def matchBrackets(string): + """ + Separate the contents matching the first set of brackets from the rest of + the input. + """ + rest = string[1:] + inside = "(" + + while rest != "" and not rest.startswith(")"): + if rest.startswith("("): + (part, rest) = matchBrackets(rest) + inside = inside + part + else: + inside = inside + rest[0] + rest = rest[1:] + if rest.startswith(")"): + return (inside + ")", rest[1:]) + raise AssertionError("Unmatched bracket in string '" + string + "'") + + +def nextCategory(string): + """ + Separate the string for the next portion of the category from the rest + of the string + """ + if string.startswith("("): + return matchBrackets(string) + return NEXTPRIM_RE.match(string).groups() + + +def parseApplication(app): + """ + Parse an application operator + """ + return Direction(app[0], app[1:]) + + +def parseSubscripts(subscr): + """ + Parse the subscripts for a primitive category + """ + if subscr: + return subscr[1:-1].split(",") + return [] + + +def parsePrimitiveCategory(chunks, primitives, families, var): + """ + Parse a primitive category + + If the primitive is the special category 'var', replace it with the + correct `CCGVar`. + """ + if chunks[0] == "var": + if chunks[1] is None: + if var is None: + var = CCGVar() + return (var, var) + + catstr = chunks[0] + if catstr in families: + (cat, cvar) = families[catstr] + if var is None: + var = cvar + else: + cat = cat.substitute([(cvar, var)]) + return (cat, var) + + if catstr in primitives: + subscrs = parseSubscripts(chunks[1]) + return (PrimitiveCategory(catstr, subscrs), var) + raise AssertionError( + "String '" + catstr + "' is neither a family nor primitive category." 
+ ) + + +def augParseCategory(line, primitives, families, var=None): + """ + Parse a string representing a category, and returns a tuple with + (possibly) the CCG variable for the category + """ + (cat_string, rest) = nextCategory(line) + + if cat_string.startswith("("): + (res, var) = augParseCategory(cat_string[1:-1], primitives, families, var) + + else: + (res, var) = parsePrimitiveCategory( + PRIM_RE.match(cat_string).groups(), primitives, families, var + ) + + while rest != "": + app = APP_RE.match(rest).groups() + direction = parseApplication(app[0:3]) + rest = app[3] + + (cat_string, rest) = nextCategory(rest) + if cat_string.startswith("("): + (arg, var) = augParseCategory(cat_string[1:-1], primitives, families, var) + else: + (arg, var) = parsePrimitiveCategory( + PRIM_RE.match(cat_string).groups(), primitives, families, var + ) + res = FunctionalCategory(res, arg, direction) + + return (res, var) + + +def fromstring(lex_str, include_semantics=False): + """ + Convert string representation into a lexicon for CCGs. + """ + CCGVar.reset_id() + primitives = [] + families = {} + entries = defaultdict(list) + for line in lex_str.splitlines(): + # Strip comments and leading/trailing whitespace. + line = COMMENTS_RE.match(line).groups()[0].strip() + if line == "": + continue + + if line.startswith(":-"): + # A line of primitive categories. + # The first one is the target category + # ie, :- S, N, NP, VP + primitives = primitives + [ + prim.strip() for prim in line[2:].strip().split(",") + ] + else: + # Either a family definition, or a word definition + (ident, sep, rhs) = LEX_RE.match(line).groups() + (catstr, semantics_str) = RHS_RE.match(rhs).groups() + (cat, var) = augParseCategory(catstr, primitives, families) + + if sep == "::": + # Family definition + # ie, Det :: NP/N + families[ident] = (cat, var) + else: + semantics = None + if include_semantics is True: + if semantics_str is None: + raise AssertionError( + line + + " must contain semantics because include_semantics is set to True" + ) + else: + semantics = Expression.fromstring( + SEMANTICS_RE.match(semantics_str).groups()[0] + ) + # Word definition + # ie, which => (N\N)/(S/NP) + entries[ident].append(Token(ident, cat, semantics)) + return CCGLexicon(primitives[0], primitives, families, entries) + + +@deprecated("Use fromstring() instead.") +def parseLexicon(lex_str): + return fromstring(lex_str) + + +openccg_tinytiny = fromstring( + """ + # Rather minimal lexicon based on the openccg `tinytiny' grammar. + # Only incorporates a subset of the morphological subcategories, however. 
+ :- S,NP,N # Primitive categories + Det :: NP/N # Determiners + Pro :: NP + IntransVsg :: S\\NP[sg] # Tensed intransitive verbs (singular) + IntransVpl :: S\\NP[pl] # Plural + TransVsg :: S\\NP[sg]/NP # Tensed transitive verbs (singular) + TransVpl :: S\\NP[pl]/NP # Plural + + the => NP[sg]/N[sg] + the => NP[pl]/N[pl] + + I => Pro + me => Pro + we => Pro + us => Pro + + book => N[sg] + books => N[pl] + + peach => N[sg] + peaches => N[pl] + + policeman => N[sg] + policemen => N[pl] + + boy => N[sg] + boys => N[pl] + + sleep => IntransVsg + sleep => IntransVpl + + eat => IntransVpl + eat => TransVpl + eats => IntransVsg + eats => TransVsg + + see => TransVpl + sees => TransVsg + """ +) diff --git a/venv/lib/python3.10/site-packages/nltk/ccg/logic.py b/venv/lib/python3.10/site-packages/nltk/ccg/logic.py new file mode 100644 index 0000000000000000000000000000000000000000..2e347b7531f723b3d8fe0caa84c22e8fcb659a6c --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/ccg/logic.py @@ -0,0 +1,60 @@ +# Natural Language Toolkit: Combinatory Categorial Grammar +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Tanin Na Nakorn (@tanin) +# URL: +# For license information, see LICENSE.TXT +""" +Helper functions for CCG semantics computation +""" + +from nltk.sem.logic import * + + +def compute_type_raised_semantics(semantics): + core = semantics + parent = None + while isinstance(core, LambdaExpression): + parent = core + core = core.term + + var = Variable("F") + while var in core.free(): + var = unique_variable(pattern=var) + core = ApplicationExpression(FunctionVariableExpression(var), core) + + if parent is not None: + parent.term = core + else: + semantics = core + + return LambdaExpression(var, semantics) + + +def compute_function_semantics(function, argument): + return ApplicationExpression(function, argument).simplify() + + +def compute_composition_semantics(function, argument): + assert isinstance(argument, LambdaExpression), ( + "`" + str(argument) + "` must be a lambda expression" + ) + return LambdaExpression( + argument.variable, ApplicationExpression(function, argument.term).simplify() + ) + + +def compute_substitution_semantics(function, argument): + assert isinstance(function, LambdaExpression) and isinstance( + function.term, LambdaExpression + ), ("`" + str(function) + "` must be a lambda expression with 2 arguments") + assert isinstance(argument, LambdaExpression), ( + "`" + str(argument) + "` must be a lambda expression" + ) + + new_argument = ApplicationExpression( + argument, VariableExpression(function.variable) + ).simplify() + new_term = ApplicationExpression(function.term, new_argument).simplify() + + return LambdaExpression(function.variable, new_term) diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__init__.py b/venv/lib/python3.10/site-packages/nltk/tag/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3f537dd6c7a9badc43313a8d2b4c5efed9b1b6ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/__init__.py @@ -0,0 +1,184 @@ +# Natural Language Toolkit: Taggers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# URL: +# For license information, see LICENSE.TXT +""" +NLTK Taggers + +This package contains classes and interfaces for part-of-speech +tagging, or simply "tagging". + +A "tag" is a case-sensitive string that specifies some property of a token, +such as its part of speech. Tagged tokens are encoded as tuples +``(tag, token)``. 
For example, the following tagged token combines +the word ``'fly'`` with a noun part of speech tag (``'NN'``): + + >>> tagged_tok = ('fly', 'NN') + +An off-the-shelf tagger is available for English. It uses the Penn Treebank tagset: + + >>> from nltk import pos_tag, word_tokenize + >>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE + [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'), + ("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')] + +A Russian tagger is also available if you specify lang="rus". It uses +the Russian National Corpus tagset: + + >>> pos_tag(word_tokenize("Илья оторопел и дважды перечитал бумажку."), lang='rus') # doctest: +SKIP + [('Илья', 'S'), ('оторопел', 'V'), ('и', 'CONJ'), ('дважды', 'ADV'), ('перечитал', 'V'), + ('бумажку', 'S'), ('.', 'NONLEX')] + +This package defines several taggers, which take a list of tokens, +assign a tag to each one, and return the resulting list of tagged tokens. +Most of the taggers are built automatically based on a training corpus. +For example, the unigram tagger tags each word *w* by checking what +the most frequent tag for *w* was in a training corpus: + + >>> from nltk.corpus import brown + >>> from nltk.tag import UnigramTagger + >>> tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500]) + >>> sent = ['Mitchell', 'decried', 'the', 'high', 'rate', 'of', 'unemployment'] + >>> for word, tag in tagger.tag(sent): + ... print(word, '->', tag) + Mitchell -> NP + decried -> None + the -> AT + high -> JJ + rate -> NN + of -> IN + unemployment -> None + +Note that words that the tagger has not seen during training receive a tag +of ``None``. + +We evaluate a tagger on data that was not seen during training: + + >>> round(tagger.accuracy(brown.tagged_sents(categories='news')[500:600]), 3) + 0.735 + +For more information, please consult chapter 5 of the NLTK Book. + +isort:skip_file +""" + +from nltk.tag.api import TaggerI +from nltk.tag.util import str2tuple, tuple2str, untag +from nltk.tag.sequential import ( + SequentialBackoffTagger, + ContextTagger, + DefaultTagger, + NgramTagger, + UnigramTagger, + BigramTagger, + TrigramTagger, + AffixTagger, + RegexpTagger, + ClassifierBasedTagger, + ClassifierBasedPOSTagger, +) +from nltk.tag.brill import BrillTagger +from nltk.tag.brill_trainer import BrillTaggerTrainer +from nltk.tag.tnt import TnT +from nltk.tag.hunpos import HunposTagger +from nltk.tag.stanford import StanfordTagger, StanfordPOSTagger, StanfordNERTagger +from nltk.tag.hmm import HiddenMarkovModelTagger, HiddenMarkovModelTrainer +from nltk.tag.senna import SennaTagger, SennaChunkTagger, SennaNERTagger +from nltk.tag.mapping import tagset_mapping, map_tag +from nltk.tag.crf import CRFTagger +from nltk.tag.perceptron import PerceptronTagger + +from nltk.data import load, find + +RUS_PICKLE = ( + "taggers/averaged_perceptron_tagger_ru/averaged_perceptron_tagger_ru.pickle" +) + + +def _get_tagger(lang=None): + if lang == "rus": + tagger = PerceptronTagger(False) + ap_russian_model_loc = "file:" + str(find(RUS_PICKLE)) + tagger.load(ap_russian_model_loc) + else: + tagger = PerceptronTagger() + return tagger + + +def _pos_tag(tokens, tagset=None, tagger=None, lang=None): + # Currently only supports English and Russian. + if lang not in ["eng", "rus"]: + raise NotImplementedError( + "Currently, NLTK pos_tag only supports English and Russian " + "(i.e. 
lang='eng' or lang='rus')" + ) + # Throws Error if tokens is of string type + elif isinstance(tokens, str): + raise TypeError("tokens: expected a list of strings, got a string") + + else: + tagged_tokens = tagger.tag(tokens) + if tagset: # Maps to the specified tagset. + if lang == "eng": + tagged_tokens = [ + (token, map_tag("en-ptb", tagset, tag)) + for (token, tag) in tagged_tokens + ] + elif lang == "rus": + # Note that the new Russian pos tags from the model contains suffixes, + # see https://github.com/nltk/nltk/issues/2151#issuecomment-430709018 + tagged_tokens = [ + (token, map_tag("ru-rnc-new", tagset, tag.partition("=")[0])) + for (token, tag) in tagged_tokens + ] + return tagged_tokens + + +def pos_tag(tokens, tagset=None, lang="eng"): + """ + Use NLTK's currently recommended part of speech tagger to + tag the given list of tokens. + + >>> from nltk.tag import pos_tag + >>> from nltk.tokenize import word_tokenize + >>> pos_tag(word_tokenize("John's big idea isn't all that bad.")) # doctest: +NORMALIZE_WHITESPACE + [('John', 'NNP'), ("'s", 'POS'), ('big', 'JJ'), ('idea', 'NN'), ('is', 'VBZ'), + ("n't", 'RB'), ('all', 'PDT'), ('that', 'DT'), ('bad', 'JJ'), ('.', '.')] + >>> pos_tag(word_tokenize("John's big idea isn't all that bad."), tagset='universal') # doctest: +NORMALIZE_WHITESPACE + [('John', 'NOUN'), ("'s", 'PRT'), ('big', 'ADJ'), ('idea', 'NOUN'), ('is', 'VERB'), + ("n't", 'ADV'), ('all', 'DET'), ('that', 'DET'), ('bad', 'ADJ'), ('.', '.')] + + NB. Use `pos_tag_sents()` for efficient tagging of more than one sentence. + + :param tokens: Sequence of tokens to be tagged + :type tokens: list(str) + :param tagset: the tagset to be used, e.g. universal, wsj, brown + :type tagset: str + :param lang: the ISO 639 code of the language, e.g. 'eng' for English, 'rus' for Russian + :type lang: str + :return: The tagged tokens + :rtype: list(tuple(str, str)) + """ + tagger = _get_tagger(lang) + return _pos_tag(tokens, tagset, tagger, lang) + + +def pos_tag_sents(sentences, tagset=None, lang="eng"): + """ + Use NLTK's currently recommended part of speech tagger to tag the + given list of sentences, each consisting of a list of tokens. + + :param sentences: List of sentences to be tagged + :type sentences: list(list(str)) + :param tagset: the tagset to be used, e.g. universal, wsj, brown + :type tagset: str + :param lang: the ISO 639 code of the language, e.g. 
'eng' for English, 'rus' for Russian + :type lang: str + :return: The list of tagged sentences + :rtype: list(list(tuple(str, str))) + """ + tagger = _get_tagger(lang) + return [_pos_tag(sent, tagset, tagger, lang) for sent in sentences] diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8578ba4a35f704461a04062bd7ecae38c54cb745 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e0514bacba0e0073bce1299340805a739fd2568 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6f850942aa09a6c816b8a271512c9efe90d55be Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/brill.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7da42ef98caf20f4f390494a6efe298b3339ca7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/brill_trainer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b7aa70f9f62cef95e0ab500a11f36e30699f376 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/crf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..509ec5731e0c8a6352d683e54d14c6e2c4c907e2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/hmm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9016cccefb003ecad1a788bdaf3536ca3fefa9d Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/hunpos.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5a411b705f51c3decab8eaf563462d390ac68dd Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/mapping.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dde774e1f75edfeee15cb5355f2bd39e8cef027 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/perceptron.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b9918f40a58339e80ac74f2c30a83aaeda0e252e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/senna.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e4ae5028417c919ed528a64bdde15b7532e6e83 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/sequential.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04c93f25eb35db74a51a589e2d04d9f3c40c5011 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/stanford.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37a15246034d0e563cace43d4df8f0de8acebbab Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/tnt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2968f48e44b1c3f603194cbfdef609d67148e44e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/tag/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/api.py b/venv/lib/python3.10/site-packages/nltk/tag/api.py new file mode 100644 index 0000000000000000000000000000000000000000..27e45026cabe6d747f4b4a7dc108b7c3cec1c6f9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/api.py @@ -0,0 +1,296 @@ +# Natural Language Toolkit: Tagger Interface +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# Tom Aarsen <> +# URL: +# For license information, see LICENSE.TXT + +""" +Interface for tagging each token in a sentence with supplementary +information, such as its part of speech. +""" +from abc import ABCMeta, abstractmethod +from functools import lru_cache +from itertools import chain +from typing import Dict + +from nltk.internals import deprecated, overridden +from nltk.metrics import ConfusionMatrix, accuracy +from nltk.tag.util import untag + + +class TaggerI(metaclass=ABCMeta): + """ + A processing interface for assigning a tag to each token in a list. + Tags are case sensitive strings that identify some property of each + token, such as its part of speech or its sense. + + Some taggers require specific types for their tokens. 
This is + generally indicated by the use of a sub-interface to ``TaggerI``. + For example, featureset taggers, which are subclassed from + ``FeaturesetTagger``, require that each token be a ``featureset``. + + Subclasses must define: + - either ``tag()`` or ``tag_sents()`` (or both) + """ + + @abstractmethod + def tag(self, tokens): + """ + Determine the most appropriate tag sequence for the given + token sequence, and return a corresponding list of tagged + tokens. A tagged token is encoded as a tuple ``(token, tag)``. + + :rtype: list(tuple(str, str)) + """ + if overridden(self.tag_sents): + return self.tag_sents([tokens])[0] + + def tag_sents(self, sentences): + """ + Apply ``self.tag()`` to each element of *sentences*. I.e.:: + + return [self.tag(sent) for sent in sentences] + """ + return [self.tag(sent) for sent in sentences] + + @deprecated("Use accuracy(gold) instead.") + def evaluate(self, gold): + return self.accuracy(gold) + + def accuracy(self, gold): + """ + Score the accuracy of the tagger against the gold standard. + Strip the tags from the gold standard text, retag it using + the tagger, then compute the accuracy score. + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :rtype: float + """ + + tagged_sents = self.tag_sents(untag(sent) for sent in gold) + gold_tokens = list(chain.from_iterable(gold)) + test_tokens = list(chain.from_iterable(tagged_sents)) + return accuracy(gold_tokens, test_tokens) + + @lru_cache(maxsize=1) + def _confusion_cached(self, gold): + """ + Inner function used after ``gold`` is converted to a + ``tuple(tuple(tuple(str, str)))``. That way, we can use caching on + creating a ConfusionMatrix. + + :param gold: The list of tagged sentences to run the tagger with, + also used as the reference values in the generated confusion matrix. + :type gold: tuple(tuple(tuple(str, str))) + :rtype: ConfusionMatrix + """ + + tagged_sents = self.tag_sents(untag(sent) for sent in gold) + gold_tokens = [token for _word, token in chain.from_iterable(gold)] + test_tokens = [token for _word, token in chain.from_iterable(tagged_sents)] + return ConfusionMatrix(gold_tokens, test_tokens) + + def confusion(self, gold): + """ + Return a ConfusionMatrix with the tags from ``gold`` as the reference + values, with the predictions from ``tag_sents`` as the predicted values. + + >>> from nltk.tag import PerceptronTagger + >>> from nltk.corpus import treebank + >>> tagger = PerceptronTagger() + >>> gold_data = treebank.tagged_sents()[:10] + >>> print(tagger.confusion(gold_data)) + | - | + | N | + | O P | + | N J J N N P P R R V V V V V W | + | ' E C C D E I J J J M N N N O R P R B R T V B B B B B D ` | + | ' , - . C D T X N J R S D N P S S P $ B R P O B D G N P Z T ` | + -------+----------------------------------------------------------------------------------------------+ + '' | <1> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | + , | .<15> . . . . . . . . . . . . . . . . . . . . . . . . . . . . . | + -NONE- | . . <.> . . 2 . . . 2 . . . 5 1 . . . . 2 . . . . . . . . . . . | + . | . . .<10> . . . . . . . . . . . . . . . . . . . . . . . . . . . | + CC | . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . . . . | + CD | . . . . . <5> . . . . . . . . . . . . . . . . . . . . . . . . . | + DT | . . . . . .<20> . . . . . . . . . . . . . . . . . . . . . . . . | + EX | . . . . . . . <1> . . . . . . . . . . . . . . . . . . . . . . . | + IN | . . . . . . . .<22> . . . . . . . . . . 3 . . . . . . . . . . . 
| + JJ | . . . . . . . . .<16> . . . . 1 . . . . 1 . . . . . . . . . . . | + JJR | . . . . . . . . . . <.> . . . . . . . . . . . . . . . . . . . . | + JJS | . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . . | + MD | . . . . . . . . . . . . <1> . . . . . . . . . . . . . . . . . . | + NN | . . . . . . . . . . . . .<28> 1 1 . . . . . . . . . . . . . . . | + NNP | . . . . . . . . . . . . . .<25> . . . . . . . . . . . . . . . . | + NNS | . . . . . . . . . . . . . . .<19> . . . . . . . . . . . . . . . | + POS | . . . . . . . . . . . . . . . . <1> . . . . . . . . . . . . . . | + PRP | . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . . . | + PRP$ | . . . . . . . . . . . . . . . . . . <2> . . . . . . . . . . . . | + RB | . . . . . . . . . . . . . . . . . . . <4> . . . . . . . . . . . | + RBR | . . . . . . . . . . 1 . . . . . . . . . <1> . . . . . . . . . . | + RP | . . . . . . . . . . . . . . . . . . . . . <1> . . . . . . . . . | + TO | . . . . . . . . . . . . . . . . . . . . . . <5> . . . . . . . . | + VB | . . . . . . . . . . . . . . . . . . . . . . . <3> . . . . . . . | + VBD | . . . . . . . . . . . . . 1 . . . . . . . . . . <6> . . . . . . | + VBG | . . . . . . . . . . . . . 1 . . . . . . . . . . . <4> . . . . . | + VBN | . . . . . . . . . . . . . . . . . . . . . . . . 1 . <4> . . . . | + VBP | . . . . . . . . . . . . . . . . . . . . . . . . . . . <3> . . . | + VBZ | . . . . . . . . . . . . . . . . . . . . . . . . . . . . <7> . . | + WDT | . . . . . . . . 2 . . . . . . . . . . . . . . . . . . . . <.> . | + `` | . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . <1>| + -------+----------------------------------------------------------------------------------------------+ + (row = reference; col = test) + + + :param gold: The list of tagged sentences to run the tagger with, + also used as the reference values in the generated confusion matrix. + :type gold: list(list(tuple(str, str))) + :rtype: ConfusionMatrix + """ + + return self._confusion_cached(tuple(tuple(sent) for sent in gold)) + + def recall(self, gold) -> Dict[str, float]: + """ + Compute the recall for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to recall. The recall is defined as: + + - *r* = true positive / (true positive + false positive) + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :return: A mapping from tags to recall + :rtype: Dict[str, float] + """ + + cm = self.confusion(gold) + return {tag: cm.recall(tag) for tag in cm._values} + + def precision(self, gold): + """ + Compute the precision for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to precision. The precision is defined as: + + - *p* = true positive / (true positive + false negative) + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :return: A mapping from tags to precision + :rtype: Dict[str, float] + """ + + cm = self.confusion(gold) + return {tag: cm.precision(tag) for tag in cm._values} + + def f_measure(self, gold, alpha=0.5): + """ + Compute the f-measure for each tag from ``gold`` or from running ``tag`` + on the tokenized sentences from ``gold``. Then, return the dictionary + with mappings from tag to f-measure. 
The f-measure is the harmonic mean + of the ``precision`` and ``recall``, weighted by ``alpha``. + In particular, given the precision *p* and recall *r* defined by: + + - *p* = true positive / (true positive + false negative) + - *r* = true positive / (true positive + false positive) + + The f-measure is: + + - *1/(alpha/p + (1-alpha)/r)* + + With ``alpha = 0.5``, this reduces to: + + - *2pr / (p + r)* + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :param alpha: Ratio of the cost of false negative compared to false + positives. Defaults to 0.5, where the costs are equal. + :type alpha: float + :return: A mapping from tags to precision + :rtype: Dict[str, float] + """ + cm = self.confusion(gold) + return {tag: cm.f_measure(tag, alpha) for tag in cm._values} + + def evaluate_per_tag(self, gold, alpha=0.5, truncate=None, sort_by_count=False): + """Tabulate the **recall**, **precision** and **f-measure** + for each tag from ``gold`` or from running ``tag`` on the tokenized + sentences from ``gold``. + + >>> from nltk.tag import PerceptronTagger + >>> from nltk.corpus import treebank + >>> tagger = PerceptronTagger() + >>> gold_data = treebank.tagged_sents()[:10] + >>> print(tagger.evaluate_per_tag(gold_data)) + Tag | Prec. | Recall | F-measure + -------+--------+--------+----------- + '' | 1.0000 | 1.0000 | 1.0000 + , | 1.0000 | 1.0000 | 1.0000 + -NONE- | 0.0000 | 0.0000 | 0.0000 + . | 1.0000 | 1.0000 | 1.0000 + CC | 1.0000 | 1.0000 | 1.0000 + CD | 0.7143 | 1.0000 | 0.8333 + DT | 1.0000 | 1.0000 | 1.0000 + EX | 1.0000 | 1.0000 | 1.0000 + IN | 0.9167 | 0.8800 | 0.8980 + JJ | 0.8889 | 0.8889 | 0.8889 + JJR | 0.0000 | 0.0000 | 0.0000 + JJS | 1.0000 | 1.0000 | 1.0000 + MD | 1.0000 | 1.0000 | 1.0000 + NN | 0.8000 | 0.9333 | 0.8615 + NNP | 0.8929 | 1.0000 | 0.9434 + NNS | 0.9500 | 1.0000 | 0.9744 + POS | 1.0000 | 1.0000 | 1.0000 + PRP | 1.0000 | 1.0000 | 1.0000 + PRP$ | 1.0000 | 1.0000 | 1.0000 + RB | 0.4000 | 1.0000 | 0.5714 + RBR | 1.0000 | 0.5000 | 0.6667 + RP | 1.0000 | 1.0000 | 1.0000 + TO | 1.0000 | 1.0000 | 1.0000 + VB | 1.0000 | 1.0000 | 1.0000 + VBD | 0.8571 | 0.8571 | 0.8571 + VBG | 1.0000 | 0.8000 | 0.8889 + VBN | 1.0000 | 0.8000 | 0.8889 + VBP | 1.0000 | 1.0000 | 1.0000 + VBZ | 1.0000 | 1.0000 | 1.0000 + WDT | 0.0000 | 0.0000 | 0.0000 + `` | 1.0000 | 1.0000 | 1.0000 + + + :param gold: The list of tagged sentences to score the tagger on. + :type gold: list(list(tuple(str, str))) + :param alpha: Ratio of the cost of false negative compared to false + positives, as used in the f-measure computation. Defaults to 0.5, + where the costs are equal. + :type alpha: float + :param truncate: If specified, then only show the specified + number of values. Any sorting (e.g., sort_by_count) + will be performed before truncation. Defaults to None + :type truncate: int, optional + :param sort_by_count: Whether to sort the outputs on number of + occurrences of that tag in the ``gold`` data, defaults to False + :type sort_by_count: bool, optional + :return: A tabulated recall, precision and f-measure string + :rtype: str + """ + cm = self.confusion(gold) + return cm.evaluate(alpha=alpha, truncate=truncate, sort_by_count=sort_by_count) + + def _check_params(self, train, model): + if (train and model) or (not train and not model): + raise ValueError("Must specify either training data or trained model.") + + +class FeaturesetTaggerI(TaggerI): + """ + A tagger that requires tokens to be ``featuresets``. 
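# A small self-contained sketch of the f-measure formula given in the
# f_measure() docstring above: the alpha-weighted harmonic mean of precision
# and recall, which reduces to 2pr/(p + r) when alpha = 0.5. The numbers below
# are arbitrary examples, not taken from any tagger run.
# (Here precision = TP / (TP + FP) and recall = TP / (TP + FN).)
def f_measure(precision, recall, alpha=0.5):
    # 1 / (alpha/p + (1 - alpha)/r)
    if precision == 0 or recall == 0:
        return 0.0
    return 1.0 / (alpha / precision + (1.0 - alpha) / recall)


p, r = 0.8, 0.5
assert abs(f_measure(p, r) - (2 * p * r) / (p + r)) < 1e-12
print(f_measure(p, r))             # 0.6153846...
print(f_measure(p, r, alpha=0.8))  # higher alpha pulls the result toward precision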
A featureset + is a dictionary that maps from feature names to feature + values. See ``nltk.classify`` for more information about features + and featuresets. + """ diff --git a/venv/lib/python3.10/site-packages/nltk/tag/brill.py b/venv/lib/python3.10/site-packages/nltk/tag/brill.py new file mode 100644 index 0000000000000000000000000000000000000000..d3bd1cd3b6cb10c4b62b7d23910e2a8ba9568cd2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/brill.py @@ -0,0 +1,449 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +from collections import Counter, defaultdict + +from nltk import jsontags +from nltk.tag import TaggerI +from nltk.tbl import Feature, Template + +###################################################################### +# Brill Templates +###################################################################### + + +@jsontags.register_tag +class Word(Feature): + """ + Feature which examines the text (word) of nearby tokens. + """ + + json_tag = "nltk.tag.brill.Word" + + @staticmethod + def extract_property(tokens, index): + """@return: The given token's text.""" + return tokens[index][0] + + +@jsontags.register_tag +class Pos(Feature): + """ + Feature which examines the tags of nearby tokens. + """ + + json_tag = "nltk.tag.brill.Pos" + + @staticmethod + def extract_property(tokens, index): + """@return: The given token's tag.""" + return tokens[index][1] + + +def nltkdemo18(): + """ + Return 18 templates, from the original nltk demo, in multi-feature syntax + """ + return [ + Template(Pos([-1])), + Template(Pos([1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([-2, -1])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([1, 2, 3])), + Template(Pos([-1]), Pos([1])), + Template(Word([-1])), + Template(Word([1])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([-2, -1])), + Template(Word([1, 2])), + Template(Word([-3, -2, -1])), + Template(Word([1, 2, 3])), + Template(Word([-1]), Word([1])), + ] + + +def nltkdemo18plus(): + """ + Return 18 templates, from the original nltk demo, and additionally a few + multi-feature ones (the motivation is easy comparison with nltkdemo18) + """ + return nltkdemo18() + [ + Template(Word([-1]), Pos([1])), + Template(Pos([-1]), Word([1])), + Template(Word([-1]), Word([0]), Pos([1])), + Template(Pos([-1]), Word([0]), Word([1])), + Template(Pos([-1]), Word([0]), Pos([1])), + ] + + +def fntbl37(): + """ + Return 37 templates taken from the postagging task of the + fntbl distribution https://www.cs.jhu.edu/~rflorian/fntbl/ + (37 is after excluding a handful which do not condition on Pos[0]; + fntbl can do that but the current nltk implementation cannot.) 
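# A minimal sketch of how the feature/template machinery defined above is
# typically combined: Pos and Word features are wrapped in nltk.tbl Templates,
# either hand-built or taken from one of the ready-made sets (nltkdemo18,
# fntbl37, brill24). Nothing here is trained; it only constructs templates.
from nltk.tbl.template import Template
from nltk.tag.brill import Pos, Word, brill24, nltkdemo18

Template._cleartemplates()  # avoid accumulating globally registered templates

# A hand-built template: condition on the previous tag and the current word.
custom = Template(Pos([-1]), Word([0]))
print(custom)

# The pre-packaged template sets are plain lists of Template objects.
print(len(nltkdemo18()), len(brill24()))  # 18 24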
+ """ + return [ + Template(Word([0]), Word([1]), Word([2])), + Template(Word([-1]), Word([0]), Word([1])), + Template(Word([0]), Word([-1])), + Template(Word([0]), Word([1])), + Template(Word([0]), Word([2])), + Template(Word([0]), Word([-2])), + Template(Word([1, 2])), + Template(Word([-2, -1])), + Template(Word([1, 2, 3])), + Template(Word([-3, -2, -1])), + Template(Word([0]), Pos([2])), + Template(Word([0]), Pos([-2])), + Template(Word([0]), Pos([1])), + Template(Word([0]), Pos([-1])), + Template(Word([0])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([1])), + Template(Word([-1])), + Template(Pos([-1]), Pos([1])), + Template(Pos([1]), Pos([2])), + Template(Pos([-1]), Pos([-2])), + Template(Pos([1])), + Template(Pos([-1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([1, 2, 3])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([-2, -1])), + Template(Pos([1]), Word([0]), Word([1])), + Template(Pos([1]), Word([0]), Word([-1])), + Template(Pos([-1]), Word([-1]), Word([0])), + Template(Pos([-1]), Word([0]), Word([1])), + Template(Pos([-2]), Pos([-1])), + Template(Pos([1]), Pos([2])), + Template(Pos([1]), Pos([2]), Word([1])), + ] + + +def brill24(): + """ + Return 24 templates of the seminal TBL paper, Brill (1995) + """ + return [ + Template(Pos([-1])), + Template(Pos([1])), + Template(Pos([-2])), + Template(Pos([2])), + Template(Pos([-2, -1])), + Template(Pos([1, 2])), + Template(Pos([-3, -2, -1])), + Template(Pos([1, 2, 3])), + Template(Pos([-1]), Pos([1])), + Template(Pos([-2]), Pos([-1])), + Template(Pos([1]), Pos([2])), + Template(Word([-1])), + Template(Word([1])), + Template(Word([-2])), + Template(Word([2])), + Template(Word([-2, -1])), + Template(Word([1, 2])), + Template(Word([-1, 0])), + Template(Word([0, 1])), + Template(Word([0])), + Template(Word([-1]), Pos([-1])), + Template(Word([1]), Pos([1])), + Template(Word([0]), Word([-1]), Pos([-1])), + Template(Word([0]), Word([1]), Pos([1])), + ] + + +def describe_template_sets(): + """ + Print the available template sets in this demo, with a short description" + """ + import inspect + import sys + + # a bit of magic to get all functions in this module + templatesets = inspect.getmembers(sys.modules[__name__], inspect.isfunction) + for (name, obj) in templatesets: + if name == "describe_template_sets": + continue + print(name, obj.__doc__, "\n") + + +###################################################################### +# The Brill Tagger +###################################################################### + + +@jsontags.register_tag +class BrillTagger(TaggerI): + """ + Brill's transformational rule-based tagger. Brill taggers use an + initial tagger (such as ``tag.DefaultTagger``) to assign an initial + tag sequence to a text; and then apply an ordered list of + transformational rules to correct the tags of individual tokens. + These transformation rules are specified by the ``TagRule`` + interface. + + Brill taggers can be created directly, from an initial tagger and + a list of transformational rules; but more often, Brill taggers + are created by learning rules from a training corpus, using one + of the TaggerTrainers available. + """ + + json_tag = "nltk.tag.BrillTagger" + + def __init__(self, initial_tagger, rules, training_stats=None): + """ + :param initial_tagger: The initial tagger + :type initial_tagger: TaggerI + + :param rules: An ordered list of transformation rules that + should be used to correct the initial tagging. 
+ :type rules: list(TagRule) + + :param training_stats: A dictionary of statistics collected + during training, for possible later use + :type training_stats: dict + + """ + self._initial_tagger = initial_tagger + self._rules = tuple(rules) + self._training_stats = training_stats + + def encode_json_obj(self): + return self._initial_tagger, self._rules, self._training_stats + + @classmethod + def decode_json_obj(cls, obj): + _initial_tagger, _rules, _training_stats = obj + return cls(_initial_tagger, _rules, _training_stats) + + def rules(self): + """ + Return the ordered list of transformation rules that this tagger has learnt + + :return: the ordered list of transformation rules that correct the initial tagging + :rtype: list of Rules + """ + return self._rules + + def train_stats(self, statistic=None): + """ + Return a named statistic collected during training, or a dictionary of all + available statistics if no name given + + :param statistic: name of statistic + :type statistic: str + :return: some statistic collected during training of this tagger + :rtype: any (but usually a number) + """ + if statistic is None: + return self._training_stats + else: + return self._training_stats.get(statistic) + + def tag(self, tokens): + # Inherit documentation from TaggerI + + # Run the initial tagger. + tagged_tokens = self._initial_tagger.tag(tokens) + + # Create a dictionary that maps each tag to a list of the + # indices of tokens that have that tag. + tag_to_positions = defaultdict(set) + for i, (token, tag) in enumerate(tagged_tokens): + tag_to_positions[tag].add(i) + + # Apply each rule, in order. Only try to apply rules at + # positions that have the desired original tag. + for rule in self._rules: + # Find the positions where it might apply + positions = tag_to_positions.get(rule.original_tag, []) + # Apply the rule at those positions. + changed = rule.apply(tagged_tokens, positions) + # Update tag_to_positions with the positions of tags that + # were modified. + for i in changed: + tag_to_positions[rule.original_tag].remove(i) + tag_to_positions[rule.replacement_tag].add(i) + + return tagged_tokens + + def print_template_statistics(self, test_stats=None, printunused=True): + """ + Print a list of all templates, ranked according to efficiency. + + If test_stats is available, the templates are ranked according to their + relative contribution (summed for all rules created from a given template, + weighted by score) to the performance on the test set. If no test_stats, then + statistics collected during training are used instead. There is also + an unweighted measure (just counting the rules). This is less informative, + though, as many low-score rules will appear towards end of training. 
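# A simplified, self-contained sketch of the indexing strategy used by
# BrillTagger.tag() above: positions are grouped by their current tag so that
# each transformation rule only inspects tokens that still carry its original
# tag. The toy "rules" below are plain (original_tag, replacement_tag,
# condition) triples, not real nltk.tbl Rule objects.
from collections import defaultdict

tagged = [("the", "NN"), ("dog", "NN"), ("runs", "NN")]
rules = [("NN", "DT", lambda toks, i: toks[i][0] == "the"),
         ("NN", "VBZ", lambda toks, i: toks[i][0].endswith("s"))]

tag_to_positions = defaultdict(set)
for i, (_word, tag) in enumerate(tagged):
    tag_to_positions[tag].add(i)

for original, replacement, applies in rules:
    changed = [i for i in tag_to_positions[original] if applies(tagged, i)]
    for i in changed:
        tagged[i] = (tagged[i][0], replacement)
        tag_to_positions[original].remove(i)
        tag_to_positions[replacement].add(i)

print(tagged)  # [('the', 'DT'), ('dog', 'NN'), ('runs', 'VBZ')]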
+ + :param test_stats: dictionary of statistics collected during testing + :type test_stats: dict of str -> any (but usually numbers) + :param printunused: if True, print a list of all unused templates + :type printunused: bool + :return: None + :rtype: None + """ + tids = [r.templateid for r in self._rules] + train_stats = self.train_stats() + + trainscores = train_stats["rulescores"] + assert len(trainscores) == len( + tids + ), "corrupt statistics: " "{} train scores for {} rules".format( + trainscores, tids + ) + template_counts = Counter(tids) + weighted_traincounts = Counter() + for (tid, score) in zip(tids, trainscores): + weighted_traincounts[tid] += score + tottrainscores = sum(trainscores) + + # det_tplsort() is for deterministic sorting; + # the otherwise convenient Counter.most_common() unfortunately + # does not break ties deterministically + # between python versions and will break cross-version tests + def det_tplsort(tpl_value): + return (tpl_value[1], repr(tpl_value[0])) + + def print_train_stats(): + print( + "TEMPLATE STATISTICS (TRAIN) {} templates, {} rules)".format( + len(template_counts), len(tids) + ) + ) + print( + "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} " + "final: {finalerrors:5d} {finalacc:.4f}".format(**train_stats) + ) + head = "#ID | Score (train) | #Rules | Template" + print(head, "\n", "-" * len(head), sep="") + train_tplscores = sorted( + weighted_traincounts.items(), key=det_tplsort, reverse=True + ) + for (tid, trainscore) in train_tplscores: + s = "{} | {:5d} {:5.3f} |{:4d} {:.3f} | {}".format( + tid, + trainscore, + trainscore / tottrainscores, + template_counts[tid], + template_counts[tid] / len(tids), + Template.ALLTEMPLATES[int(tid)], + ) + print(s) + + def print_testtrain_stats(): + testscores = test_stats["rulescores"] + print( + "TEMPLATE STATISTICS (TEST AND TRAIN) ({} templates, {} rules)".format( + len(template_counts), len(tids) + ) + ) + print( + "TEST ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} " + "final: {finalerrors:5d} {finalacc:.4f} ".format(**test_stats) + ) + print( + "TRAIN ({tokencount:7d} tokens) initial {initialerrors:5d} {initialacc:.4f} " + "final: {finalerrors:5d} {finalacc:.4f} ".format(**train_stats) + ) + weighted_testcounts = Counter() + for (tid, score) in zip(tids, testscores): + weighted_testcounts[tid] += score + tottestscores = sum(testscores) + head = "#ID | Score (test) | Score (train) | #Rules | Template" + print(head, "\n", "-" * len(head), sep="") + test_tplscores = sorted( + weighted_testcounts.items(), key=det_tplsort, reverse=True + ) + for (tid, testscore) in test_tplscores: + s = "{:s} |{:5d} {:6.3f} | {:4d} {:.3f} |{:4d} {:.3f} | {:s}".format( + tid, + testscore, + testscore / tottestscores, + weighted_traincounts[tid], + weighted_traincounts[tid] / tottrainscores, + template_counts[tid], + template_counts[tid] / len(tids), + Template.ALLTEMPLATES[int(tid)], + ) + print(s) + + def print_unused_templates(): + usedtpls = {int(tid) for tid in tids} + unused = [ + (tid, tpl) + for (tid, tpl) in enumerate(Template.ALLTEMPLATES) + if tid not in usedtpls + ] + print(f"UNUSED TEMPLATES ({len(unused)})") + + for (tid, tpl) in unused: + print(f"{tid:03d} {str(tpl):s}") + + if test_stats is None: + print_train_stats() + else: + print_testtrain_stats() + print() + if printunused: + print_unused_templates() + print() + + def batch_tag_incremental(self, sequences, gold): + """ + Tags by applying each rule to the entire corpus (rather than all rules to a + 
single sequence). The point is to collect statistics on the test set for + individual rules. + + NOTE: This is inefficient (does not build any index, so will traverse the entire + corpus N times for N rules) -- usually you would not care about statistics for + individual rules and thus use batch_tag() instead + + :param sequences: lists of token sequences (sentences, in some applications) to be tagged + :type sequences: list of list of strings + :param gold: the gold standard + :type gold: list of list of strings + :returns: tuple of (tagged_sequences, ordered list of rule scores (one for each rule)) + """ + + def counterrors(xs): + return sum(t[1] != g[1] for pair in zip(xs, gold) for (t, g) in zip(*pair)) + + testing_stats = {} + testing_stats["tokencount"] = sum(len(t) for t in sequences) + testing_stats["sequencecount"] = len(sequences) + tagged_tokenses = [self._initial_tagger.tag(tokens) for tokens in sequences] + testing_stats["initialerrors"] = counterrors(tagged_tokenses) + testing_stats["initialacc"] = ( + 1 - testing_stats["initialerrors"] / testing_stats["tokencount"] + ) + # Apply each rule to the entire corpus, in order + errors = [testing_stats["initialerrors"]] + for rule in self._rules: + for tagged_tokens in tagged_tokenses: + rule.apply(tagged_tokens) + errors.append(counterrors(tagged_tokenses)) + testing_stats["rulescores"] = [ + err0 - err1 for (err0, err1) in zip(errors, errors[1:]) + ] + testing_stats["finalerrors"] = errors[-1] + testing_stats["finalacc"] = ( + 1 - testing_stats["finalerrors"] / testing_stats["tokencount"] + ) + return (tagged_tokenses, testing_stats) diff --git a/venv/lib/python3.10/site-packages/nltk/tag/brill_trainer.py b/venv/lib/python3.10/site-packages/nltk/tag/brill_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..236fd9858e755b501f3a8f384b68a383b6902f99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/brill_trainer.py @@ -0,0 +1,629 @@ +# Natural Language Toolkit: Transformation-based learning +# +# Copyright (C) 2001-2013 NLTK Project +# Author: Marcus Uneson +# based on previous (nltk2) version by +# Christopher Maloof, Edward Loper, Steven Bird +# URL: +# For license information, see LICENSE.TXT + +import bisect +import textwrap +from collections import defaultdict + +from nltk.tag import BrillTagger, untag + +###################################################################### +# Brill Tagger Trainer +###################################################################### + + +class BrillTaggerTrainer: + """ + A trainer for tbl taggers. 
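# A small sketch of the bookkeeping in batch_tag_incremental() above: errors
# against the gold standard are counted after the initial tagging and after
# each rule, and the per-rule scores are the successive differences. The error
# counts and sentences below are invented purely to show the arithmetic.
def count_errors(tagged_corpus, gold_corpus):
    # Number of tokens whose predicted tag differs from the gold tag.
    return sum(t[1] != g[1]
               for tagged_sent, gold_sent in zip(tagged_corpus, gold_corpus)
               for t, g in zip(tagged_sent, gold_sent))


predicted = [[("dog", "NN"), ("runs", "NN")]]
gold = [[("dog", "NN"), ("runs", "VBZ")]]
print(count_errors(predicted, gold))  # 1

# errors[0] is the initial error count, errors[i] the count after rule i.
errors = [1776, 1644, 1559, 1490]
rulescores = [e0 - e1 for e0, e1 in zip(errors, errors[1:])]
print(rulescores)  # [132, 85, 69] -- net errors removed by each rule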
+ """ + + def __init__( + self, initial_tagger, templates, trace=0, deterministic=None, ruleformat="str" + ): + """ + Construct a Brill tagger from a baseline tagger and a + set of templates + + :param initial_tagger: the baseline tagger + :type initial_tagger: Tagger + :param templates: templates to be used in training + :type templates: list of Templates + :param trace: verbosity level + :type trace: int + :param deterministic: if True, adjudicate ties deterministically + :type deterministic: bool + :param ruleformat: format of reported Rules + :type ruleformat: str + :return: An untrained BrillTagger + :rtype: BrillTagger + """ + + if deterministic is None: + deterministic = trace > 0 + self._initial_tagger = initial_tagger + self._templates = templates + self._trace = trace + self._deterministic = deterministic + self._ruleformat = ruleformat + + self._tag_positions = None + """Mapping from tags to lists of positions that use that tag.""" + + self._rules_by_position = None + """Mapping from positions to the set of rules that are known + to occur at that position. Position is (sentnum, wordnum). + Initially, this will only contain positions where each rule + applies in a helpful way; but when we examine a rule, we'll + extend this list to also include positions where each rule + applies in a harmful or neutral way.""" + + self._positions_by_rule = None + """Mapping from rule to position to effect, specifying the + effect that each rule has on the overall score, at each + position. Position is (sentnum, wordnum); and effect is + -1, 0, or 1. As with _rules_by_position, this mapping starts + out only containing rules with positive effects; but when + we examine a rule, we'll extend this mapping to include + the positions where the rule is harmful or neutral.""" + + self._rules_by_score = None + """Mapping from scores to the set of rules whose effect on the + overall score is upper bounded by that score. Invariant: + rulesByScore[s] will contain r iff the sum of + _positions_by_rule[r] is s.""" + + self._rule_scores = None + """Mapping from rules to upper bounds on their effects on the + overall score. This is the inverse mapping to _rules_by_score. + Invariant: ruleScores[r] = sum(_positions_by_rule[r])""" + + self._first_unknown_position = None + """Mapping from rules to the first position where we're unsure + if the rule applies. This records the next position we + need to check to see if the rule messed anything up.""" + + # Training + + def train(self, train_sents, max_rules=200, min_score=2, min_acc=None): + r""" + Trains the Brill tagger on the corpus *train_sents*, + producing at most *max_rules* transformations, each of which + reduces the net number of errors in the corpus by at least + *min_score*, and each of which has accuracy not lower than + *min_acc*. + + >>> # Relevant imports + >>> from nltk.tbl.template import Template + >>> from nltk.tag.brill import Pos, Word + >>> from nltk.tag import untag, RegexpTagger, BrillTaggerTrainer + + >>> # Load some data + >>> from nltk.corpus import treebank + >>> training_data = treebank.tagged_sents()[:100] + >>> baseline_data = treebank.tagged_sents()[100:200] + >>> gold_data = treebank.tagged_sents()[200:300] + >>> testing_data = [untag(s) for s in gold_data] + + >>> backoff = RegexpTagger([ + ... (r'^-?[0-9]+(\.[0-9]+)?$', 'CD'), # cardinal numbers + ... (r'(The|the|A|a|An|an)$', 'AT'), # articles + ... (r'.*able$', 'JJ'), # adjectives + ... (r'.*ness$', 'NN'), # nouns formed from adjectives + ... (r'.*ly$', 'RB'), # adverbs + ... 
(r'.*s$', 'NNS'), # plural nouns + ... (r'.*ing$', 'VBG'), # gerunds + ... (r'.*ed$', 'VBD'), # past tense verbs + ... (r'.*', 'NN') # nouns (default) + ... ]) + + >>> baseline = backoff #see NOTE1 + >>> baseline.accuracy(gold_data) #doctest: +ELLIPSIS + 0.243... + + >>> # Set up templates + >>> Template._cleartemplates() #clear any templates created in earlier tests + >>> templates = [Template(Pos([-1])), Template(Pos([-1]), Word([0]))] + + >>> # Construct a BrillTaggerTrainer + >>> tt = BrillTaggerTrainer(baseline, templates, trace=3) + + >>> tagger1 = tt.train(training_data, max_rules=10) + TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: None) + Finding initial useful rules... + Found 847 useful rules. + + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e + ------------------+------------------------------------------------------- + 132 132 0 0 | AT->DT if Pos:NN@[-1] + 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0] + 69 69 0 0 | NN->. if Pos:NN@[-1] & Word:.@[0] + 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0] + 47 63 16 162 | NN->IN if Pos:NNS@[-1] + 33 33 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0] + 26 26 0 0 | IN->. if Pos:NNS@[-1] & Word:.@[0] + 24 24 0 0 | IN->, if Pos:NNS@[-1] & Word:,@[0] + 22 27 5 24 | NN->-NONE- if Pos:VBD@[-1] + 17 17 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0] + + >>> tagger1.rules()[1:3] + (Rule('001', 'NN', ',', [(Pos([-1]),'NN'), (Word([0]),',')]), Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')])) + + >>> train_stats = tagger1.train_stats() + >>> [train_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']] + [1776, 1270, [132, 85, 69, 51, 47, 33, 26, 24, 22, 17]] + + >>> tagger1.print_template_statistics(printunused=False) + TEMPLATE STATISTICS (TRAIN) 2 templates, 10 rules) + TRAIN ( 2417 tokens) initial 1776 0.2652 final: 1270 0.4746 + #ID | Score (train) | #Rules | Template + -------------------------------------------- + 001 | 305 0.603 | 7 0.700 | Template(Pos([-1]),Word([0])) + 000 | 201 0.397 | 3 0.300 | Template(Pos([-1])) + + + + >>> round(tagger1.accuracy(gold_data),5) + 0.43834 + + >>> tagged, test_stats = tagger1.batch_tag_incremental(testing_data, gold_data) + + >>> tagged[33][12:] == [('foreign', 'IN'), ('debt', 'NN'), ('of', 'IN'), ('$', 'NN'), ('64', 'CD'), + ... ('billion', 'NN'), ('*U*', 'NN'), ('--', 'NN'), ('the', 'DT'), ('third-highest', 'NN'), ('in', 'NN'), + ... ('the', 'DT'), ('developing', 'VBG'), ('world', 'NN'), ('.', '.')] + True + + >>> [test_stats[stat] for stat in ['initialerrors', 'finalerrors', 'rulescores']] + [1859, 1380, [100, 85, 67, 58, 27, 36, 27, 16, 31, 32]] + + >>> # A high-accuracy tagger + >>> tagger2 = tt.train(training_data, max_rules=10, min_acc=0.99) + TBL train (fast) (seqs: 100; tokens: 2417; tpls: 2; min score: 2; min acc: 0.99) + Finding initial useful rules... + Found 847 useful rules. + + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e + ------------------+------------------------------------------------------- + 132 132 0 0 | AT->DT if Pos:NN@[-1] + 85 85 0 0 | NN->, if Pos:NN@[-1] & Word:,@[0] + 69 69 0 0 | NN->. 
if Pos:NN@[-1] & Word:.@[0] + 51 51 0 0 | NN->IN if Pos:NN@[-1] & Word:of@[0] + 36 36 0 0 | NN->TO if Pos:NN@[-1] & Word:to@[0] + 26 26 0 0 | NN->. if Pos:NNS@[-1] & Word:.@[0] + 24 24 0 0 | NN->, if Pos:NNS@[-1] & Word:,@[0] + 19 19 0 6 | NN->VB if Pos:TO@[-1] + 18 18 0 0 | CD->-NONE- if Pos:NN@[-1] & Word:0@[0] + 18 18 0 0 | NN->CC if Pos:NN@[-1] & Word:and@[0] + + >>> round(tagger2.accuracy(gold_data), 8) + 0.43996744 + + >>> tagger2.rules()[2:4] + (Rule('001', 'NN', '.', [(Pos([-1]),'NN'), (Word([0]),'.')]), Rule('001', 'NN', 'IN', [(Pos([-1]),'NN'), (Word([0]),'of')])) + + # NOTE1: (!!FIXME) A far better baseline uses nltk.tag.UnigramTagger, + # with a RegexpTagger only as backoff. For instance, + # >>> baseline = UnigramTagger(baseline_data, backoff=backoff) + # However, as of Nov 2013, nltk.tag.UnigramTagger does not yield consistent results + # between python versions. The simplistic backoff above is a workaround to make doctests + # get consistent input. + + :param train_sents: training data + :type train_sents: list(list(tuple)) + :param max_rules: output at most max_rules rules + :type max_rules: int + :param min_score: stop training when no rules better than min_score can be found + :type min_score: int + :param min_acc: discard any rule with lower accuracy than min_acc + :type min_acc: float or None + :return: the learned tagger + :rtype: BrillTagger + """ + # FIXME: several tests are a bit too dependent on tracing format + # FIXME: tests in trainer.fast and trainer.brillorig are exact duplicates + + # Basic idea: Keep track of the rules that apply at each position. + # And keep track of the positions to which each rule applies. + + # Create a new copy of the training corpus, and run the + # initial tagger on it. We will progressively update this + # test corpus to look more like the training corpus. + test_sents = [ + list(self._initial_tagger.tag(untag(sent))) for sent in train_sents + ] + + # Collect some statistics on the training process + trainstats = {} + trainstats["min_acc"] = min_acc + trainstats["min_score"] = min_score + trainstats["tokencount"] = sum(len(t) for t in test_sents) + trainstats["sequencecount"] = len(test_sents) + trainstats["templatecount"] = len(self._templates) + trainstats["rulescores"] = [] + trainstats["initialerrors"] = sum( + tag[1] != truth[1] + for paired in zip(test_sents, train_sents) + for (tag, truth) in zip(*paired) + ) + trainstats["initialacc"] = ( + 1 - trainstats["initialerrors"] / trainstats["tokencount"] + ) + if self._trace > 0: + print( + "TBL train (fast) (seqs: {sequencecount}; tokens: {tokencount}; " + "tpls: {templatecount}; min score: {min_score}; min acc: {min_acc})".format( + **trainstats + ) + ) + + # Initialize our mappings. This will find any errors made + # by the initial tagger, and use those to generate repair + # rules, which are added to the rule mappings. + if self._trace: + print("Finding initial useful rules...") + self._init_mappings(test_sents, train_sents) + if self._trace: + print(f" Found {len(self._rule_scores)} useful rules.") + + # Let the user know what we're up to. + if self._trace > 2: + self._trace_header() + elif self._trace == 1: + print("Selecting rules...") + + # Repeatedly select the best rule, and add it to `rules`. + rules = [] + try: + while len(rules) < max_rules: + # Find the best rule, and add it to our rule list. 
+ rule = self._best_rule(train_sents, test_sents, min_score, min_acc) + if rule: + rules.append(rule) + score = self._rule_scores[rule] + trainstats["rulescores"].append(score) + else: + break # No more good rules left! + + # Report the rule that we found. + if self._trace > 1: + self._trace_rule(rule) + + # Apply the new rule at the relevant sites + self._apply_rule(rule, test_sents) + + # Update _tag_positions[rule.original_tag] and + # _tag_positions[rule.replacement_tag] for the affected + # positions (i.e., self._positions_by_rule[rule]). + self._update_tag_positions(rule) + + # Update rules that were affected by the change. + self._update_rules(rule, train_sents, test_sents) + + # The user can cancel training manually: + except KeyboardInterrupt: + print(f"Training stopped manually -- {len(rules)} rules found") + + # Discard our tag position mapping & rule mappings. + self._clean() + trainstats["finalerrors"] = trainstats["initialerrors"] - sum( + trainstats["rulescores"] + ) + trainstats["finalacc"] = ( + 1 - trainstats["finalerrors"] / trainstats["tokencount"] + ) + # Create and return a tagger from the rules we found. + return BrillTagger(self._initial_tagger, rules, trainstats) + + def _init_mappings(self, test_sents, train_sents): + """ + Initialize the tag position mapping & the rule related + mappings. For each error in test_sents, find new rules that + would correct them, and add them to the rule mappings. + """ + self._tag_positions = defaultdict(list) + self._rules_by_position = defaultdict(set) + self._positions_by_rule = defaultdict(dict) + self._rules_by_score = defaultdict(set) + self._rule_scores = defaultdict(int) + self._first_unknown_position = defaultdict(int) + # Scan through the corpus, initializing the tag_positions + # mapping and all the rule-related mappings. + for sentnum, sent in enumerate(test_sents): + for wordnum, (word, tag) in enumerate(sent): + + # Initialize tag_positions + self._tag_positions[tag].append((sentnum, wordnum)) + + # If it's an error token, update the rule-related mappings. + correct_tag = train_sents[sentnum][wordnum][1] + if tag != correct_tag: + for rule in self._find_rules(sent, wordnum, correct_tag): + self._update_rule_applies(rule, sentnum, wordnum, train_sents) + + def _clean(self): + self._tag_positions = None + self._rules_by_position = None + self._positions_by_rule = None + self._rules_by_score = None + self._rule_scores = None + self._first_unknown_position = None + + def _find_rules(self, sent, wordnum, new_tag): + """ + Use the templates to find rules that apply at index *wordnum* + in the sentence *sent* and generate the tag *new_tag*. + """ + for template in self._templates: + yield from template.applicable_rules(sent, wordnum, new_tag) + + def _update_rule_applies(self, rule, sentnum, wordnum, train_sents): + """ + Update the rule data tables to reflect the fact that + *rule* applies at the position *(sentnum, wordnum)*. + """ + pos = sentnum, wordnum + + # If the rule is already known to apply here, ignore. + # (This only happens if the position's tag hasn't changed.) + if pos in self._positions_by_rule[rule]: + return + + # Update self._positions_by_rule. 
+ correct_tag = train_sents[sentnum][wordnum][1] + if rule.replacement_tag == correct_tag: + self._positions_by_rule[rule][pos] = 1 + elif rule.original_tag == correct_tag: + self._positions_by_rule[rule][pos] = -1 + else: # was wrong, remains wrong + self._positions_by_rule[rule][pos] = 0 + + # Update _rules_by_position + self._rules_by_position[pos].add(rule) + + # Update _rule_scores. + old_score = self._rule_scores[rule] + self._rule_scores[rule] += self._positions_by_rule[rule][pos] + + # Update _rules_by_score. + self._rules_by_score[old_score].discard(rule) + self._rules_by_score[self._rule_scores[rule]].add(rule) + + def _update_rule_not_applies(self, rule, sentnum, wordnum): + """ + Update the rule data tables to reflect the fact that *rule* + does not apply at the position *(sentnum, wordnum)*. + """ + pos = sentnum, wordnum + + # Update _rule_scores. + old_score = self._rule_scores[rule] + self._rule_scores[rule] -= self._positions_by_rule[rule][pos] + + # Update _rules_by_score. + self._rules_by_score[old_score].discard(rule) + self._rules_by_score[self._rule_scores[rule]].add(rule) + + # Update _positions_by_rule + del self._positions_by_rule[rule][pos] + self._rules_by_position[pos].remove(rule) + + # Optional addition: if the rule now applies nowhere, delete + # all its dictionary entries. + + def _best_rule(self, train_sents, test_sents, min_score, min_acc): + """ + Find the next best rule. This is done by repeatedly taking a + rule with the highest score and stepping through the corpus to + see where it applies. When it makes an error (decreasing its + score) it's bumped down, and we try a new rule with the + highest score. When we find a rule which has the highest + score *and* which has been tested against the entire corpus, we + can conclude that it's the next best rule. + """ + for max_score in sorted(self._rules_by_score.keys(), reverse=True): + if len(self._rules_by_score) == 0: + return None + if max_score < min_score or max_score <= 0: + return None + best_rules = list(self._rules_by_score[max_score]) + if self._deterministic: + best_rules.sort(key=repr) + for rule in best_rules: + positions = self._tag_positions[rule.original_tag] + + unk = self._first_unknown_position.get(rule, (0, -1)) + start = bisect.bisect_left(positions, unk) + + for i in range(start, len(positions)): + sentnum, wordnum = positions[i] + if rule.applies(test_sents[sentnum], wordnum): + self._update_rule_applies(rule, sentnum, wordnum, train_sents) + if self._rule_scores[rule] < max_score: + self._first_unknown_position[rule] = (sentnum, wordnum + 1) + break # The update demoted the rule. + + if self._rule_scores[rule] == max_score: + self._first_unknown_position[rule] = (len(train_sents) + 1, 0) + # optimization: if no min_acc threshold given, don't bother computing accuracy + if min_acc is None: + return rule + else: + changes = self._positions_by_rule[rule].values() + num_fixed = len([c for c in changes if c == 1]) + num_broken = len([c for c in changes if c == -1]) + # acc here is fixed/(fixed+broken); could also be + # fixed/(fixed+broken+other) == num_fixed/len(changes) + acc = num_fixed / (num_fixed + num_broken) + if acc >= min_acc: + return rule + # else: rule too inaccurate, discard and try next + + # We demoted (or skipped due to < min_acc, if that was given) + # all the rules with score==max_score. 
+ + assert min_acc is not None or not self._rules_by_score[max_score] + if not self._rules_by_score[max_score]: + del self._rules_by_score[max_score] + + def _apply_rule(self, rule, test_sents): + """ + Update *test_sents* by applying *rule* everywhere where its + conditions are met. + """ + update_positions = set(self._positions_by_rule[rule]) + new_tag = rule.replacement_tag + + if self._trace > 3: + self._trace_apply(len(update_positions)) + + # Update test_sents. + for (sentnum, wordnum) in update_positions: + text = test_sents[sentnum][wordnum][0] + test_sents[sentnum][wordnum] = (text, new_tag) + + def _update_tag_positions(self, rule): + """ + Update _tag_positions to reflect the changes to tags that are + made by *rule*. + """ + # Update the tag index. + for pos in self._positions_by_rule[rule]: + # Delete the old tag. + old_tag_positions = self._tag_positions[rule.original_tag] + old_index = bisect.bisect_left(old_tag_positions, pos) + del old_tag_positions[old_index] + # Insert the new tag. + new_tag_positions = self._tag_positions[rule.replacement_tag] + bisect.insort_left(new_tag_positions, pos) + + def _update_rules(self, rule, train_sents, test_sents): + """ + Check if we should add or remove any rules from consideration, + given the changes made by *rule*. + """ + # Collect a list of all positions that might be affected. + neighbors = set() + for sentnum, wordnum in self._positions_by_rule[rule]: + for template in self._templates: + n = template.get_neighborhood(test_sents[sentnum], wordnum) + neighbors.update([(sentnum, i) for i in n]) + + # Update the rules at each position. + num_obsolete = num_new = num_unseen = 0 + for sentnum, wordnum in neighbors: + test_sent = test_sents[sentnum] + correct_tag = train_sents[sentnum][wordnum][1] + + # Check if the change causes any rule at this position to + # stop matching; if so, then update our rule mappings + # accordingly. + old_rules = set(self._rules_by_position[sentnum, wordnum]) + for old_rule in old_rules: + if not old_rule.applies(test_sent, wordnum): + num_obsolete += 1 + self._update_rule_not_applies(old_rule, sentnum, wordnum) + + # Check if the change causes our templates to propose any + # new rules for this position. + for template in self._templates: + for new_rule in template.applicable_rules( + test_sent, wordnum, correct_tag + ): + if new_rule not in old_rules: + num_new += 1 + if new_rule not in self._rule_scores: + num_unseen += 1 + old_rules.add(new_rule) + self._update_rule_applies( + new_rule, sentnum, wordnum, train_sents + ) + + # We may have caused other rules to match here, that are + # not proposed by our templates -- in particular, rules + # that are harmful or neutral. We therefore need to + # update any rule whose first_unknown_position is past + # this rule. 
+ for new_rule, pos in self._first_unknown_position.items(): + if pos > (sentnum, wordnum): + if new_rule not in old_rules: + num_new += 1 + if new_rule.applies(test_sent, wordnum): + self._update_rule_applies( + new_rule, sentnum, wordnum, train_sents + ) + + if self._trace > 3: + self._trace_update_rules(num_obsolete, num_new, num_unseen) + + # Tracing + + def _trace_header(self): + print( + """ + B | + S F r O | Score = Fixed - Broken + c i o t | R Fixed = num tags changed incorrect -> correct + o x k h | u Broken = num tags changed correct -> incorrect + r e e e | l Other = num tags changed incorrect -> incorrect + e d n r | e +------------------+------------------------------------------------------- + """.rstrip() + ) + + def _trace_rule(self, rule): + assert self._rule_scores[rule] == sum(self._positions_by_rule[rule].values()) + + changes = self._positions_by_rule[rule].values() + num_fixed = len([c for c in changes if c == 1]) + num_broken = len([c for c in changes if c == -1]) + num_other = len([c for c in changes if c == 0]) + score = self._rule_scores[rule] + + rulestr = rule.format(self._ruleformat) + if self._trace > 2: + print( + "{:4d}{:4d}{:4d}{:4d} |".format( + score, num_fixed, num_broken, num_other + ), + end=" ", + ) + print( + textwrap.fill( + rulestr, + initial_indent=" " * 20, + width=79, + subsequent_indent=" " * 18 + "| ", + ).strip() + ) + else: + print(rulestr) + + def _trace_apply(self, num_updates): + prefix = " " * 18 + "|" + print(prefix) + print(prefix, f"Applying rule to {num_updates} positions.") + + def _trace_update_rules(self, num_obsolete, num_new, num_unseen): + prefix = " " * 18 + "|" + print(prefix, "Updated rule tables:") + print(prefix, (f" - {num_obsolete} rule applications removed")) + print( + prefix, + (f" - {num_new} rule applications added ({num_unseen} novel)"), + ) + print(prefix) diff --git a/venv/lib/python3.10/site-packages/nltk/tag/crf.py b/venv/lib/python3.10/site-packages/nltk/tag/crf.py new file mode 100644 index 0000000000000000000000000000000000000000..dfc728c8d55c5eecadd7dc214f756f5224b7f017 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/crf.py @@ -0,0 +1,207 @@ +# Natural Language Toolkit: Interface to the CRFSuite Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Long Duong +# URL: +# For license information, see LICENSE.TXT + +""" +A module for POS tagging using CRFSuite +""" + +import re +import unicodedata + +from nltk.tag.api import TaggerI + +try: + import pycrfsuite +except ImportError: + pass + + +class CRFTagger(TaggerI): + """ + A module for POS tagging using CRFSuite https://pypi.python.org/pypi/python-crfsuite + + >>> from nltk.tag import CRFTagger + >>> ct = CRFTagger() # doctest: +SKIP + + >>> train_data = [[('University','Noun'), ('is','Verb'), ('a','Det'), ('good','Adj'), ('place','Noun')], + ... 
[('dog','Noun'),('eat','Verb'),('meat','Noun')]] + + >>> ct.train(train_data,'model.crf.tagger') # doctest: +SKIP + >>> ct.tag_sents([['dog','is','good'], ['Cat','eat','meat']]) # doctest: +SKIP + [[('dog', 'Noun'), ('is', 'Verb'), ('good', 'Adj')], [('Cat', 'Noun'), ('eat', 'Verb'), ('meat', 'Noun')]] + + >>> gold_sentences = [[('dog','Noun'),('is','Verb'),('good','Adj')] , [('Cat','Noun'),('eat','Verb'), ('meat','Noun')]] + >>> ct.accuracy(gold_sentences) # doctest: +SKIP + 1.0 + + Setting learned model file + >>> ct = CRFTagger() # doctest: +SKIP + >>> ct.set_model_file('model.crf.tagger') # doctest: +SKIP + >>> ct.accuracy(gold_sentences) # doctest: +SKIP + 1.0 + """ + + def __init__(self, feature_func=None, verbose=False, training_opt={}): + """ + Initialize the CRFSuite tagger + + :param feature_func: The function that extracts features for each token of a sentence. This function should take + 2 parameters: tokens and index which extract features at index position from tokens list. See the build in + _get_features function for more detail. + :param verbose: output the debugging messages during training. + :type verbose: boolean + :param training_opt: python-crfsuite training options + :type training_opt: dictionary + + Set of possible training options (using LBFGS training algorithm). + :'feature.minfreq': The minimum frequency of features. + :'feature.possible_states': Force to generate possible state features. + :'feature.possible_transitions': Force to generate possible transition features. + :'c1': Coefficient for L1 regularization. + :'c2': Coefficient for L2 regularization. + :'max_iterations': The maximum number of iterations for L-BFGS optimization. + :'num_memories': The number of limited memories for approximating the inverse hessian matrix. + :'epsilon': Epsilon for testing the convergence of the objective. + :'period': The duration of iterations to test the stopping criterion. + :'delta': The threshold for the stopping criterion; an L-BFGS iteration stops when the + improvement of the log likelihood over the last ${period} iterations is no greater than this threshold. + :'linesearch': The line search algorithm used in L-BFGS updates: + + - 'MoreThuente': More and Thuente's method, + - 'Backtracking': Backtracking method with regular Wolfe condition, + - 'StrongBacktracking': Backtracking method with strong Wolfe condition + :'max_linesearch': The maximum number of trials for the line search algorithm. + """ + + self._model_file = "" + self._tagger = pycrfsuite.Tagger() + + if feature_func is None: + self._feature_func = self._get_features + else: + self._feature_func = feature_func + + self._verbose = verbose + self._training_options = training_opt + self._pattern = re.compile(r"\d") + + def set_model_file(self, model_file): + self._model_file = model_file + self._tagger.open(self._model_file) + + def _get_features(self, tokens, idx): + """ + Extract basic features about this word including + - Current word + - is it capitalized? + - Does it have punctuation? + - Does it have a number? + - Suffixes up to length 3 + + Note that : we might include feature over previous word, next word etc. 
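# A standalone sketch of the per-token feature extraction described in the
# _get_features docstring above: capitalization, digits, punctuation-only
# tokens, short suffixes and the word form itself. It mirrors the shape of
# those features, but the exact strings are illustrative rather than guaranteed
# to match CRFTagger's internal output.
import re
import unicodedata

_DIGITS = re.compile(r"\d")
_PUNC_CATS = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"}


def token_features(tokens, idx):
    token = tokens[idx]
    features = []
    if not token:
        return features
    if token[0].isupper():
        features.append("CAPITALIZATION")
    if _DIGITS.search(token):
        features.append("HAS_NUM")
    if all(unicodedata.category(ch) in _PUNC_CATS for ch in token):
        features.append("PUNCTUATION")
    for n in (1, 2, 3):
        if len(token) > n:
            features.append("SUF_" + token[-n:])
    features.append("WORD_" + token)
    return features


print(token_features(["University", "is", "nice", "!"], 0))
# ['CAPITALIZATION', 'SUF_y', 'SUF_ty', 'SUF_ity', 'WORD_University']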
+ + :return: a list which contains the features + :rtype: list(str) + """ + token = tokens[idx] + + feature_list = [] + + if not token: + return feature_list + + # Capitalization + if token[0].isupper(): + feature_list.append("CAPITALIZATION") + + # Number + if re.search(self._pattern, token) is not None: + feature_list.append("HAS_NUM") + + # Punctuation + punc_cat = {"Pc", "Pd", "Ps", "Pe", "Pi", "Pf", "Po"} + if all(unicodedata.category(x) in punc_cat for x in token): + feature_list.append("PUNCTUATION") + + # Suffix up to length 3 + if len(token) > 1: + feature_list.append("SUF_" + token[-1:]) + if len(token) > 2: + feature_list.append("SUF_" + token[-2:]) + if len(token) > 3: + feature_list.append("SUF_" + token[-3:]) + + feature_list.append("WORD_" + token) + + return feature_list + + def tag_sents(self, sents): + """ + Tag a list of sentences. NB before using this function, user should specify the mode_file either by + + - Train a new model using ``train`` function + - Use the pre-trained model which is set via ``set_model_file`` function + + :params sentences: list of sentences needed to tag. + :type sentences: list(list(str)) + :return: list of tagged sentences. + :rtype: list(list(tuple(str,str))) + """ + if self._model_file == "": + raise Exception( + " No model file is found !! Please use train or set_model_file function" + ) + + # We need the list of sentences instead of the list generator for matching the input and output + result = [] + for tokens in sents: + features = [self._feature_func(tokens, i) for i in range(len(tokens))] + labels = self._tagger.tag(features) + + if len(labels) != len(tokens): + raise Exception(" Predicted Length Not Matched, Expect Errors !") + + tagged_sent = list(zip(tokens, labels)) + result.append(tagged_sent) + + return result + + def train(self, train_data, model_file): + """ + Train the CRF tagger using CRFSuite + :params train_data : is the list of annotated sentences. + :type train_data : list (list(tuple(str,str))) + :params model_file : the model will be saved to this file. + + """ + trainer = pycrfsuite.Trainer(verbose=self._verbose) + trainer.set_params(self._training_options) + + for sent in train_data: + tokens, labels = zip(*sent) + features = [self._feature_func(tokens, i) for i in range(len(tokens))] + trainer.append(features, labels) + + # Now train the model, the output should be model_file + trainer.train(model_file) + # Save the model file + self.set_model_file(model_file) + + def tag(self, tokens): + """ + Tag a sentence using Python CRFSuite Tagger. NB before using this function, user should specify the mode_file either by + + - Train a new model using ``train`` function + - Use the pre-trained model which is set via ``set_model_file`` function + + :params tokens: list of tokens needed to tag. + :type tokens: list(str) + :return: list of tagged tokens. 
+ :rtype: list(tuple(str,str)) + """ + + return self.tag_sents([tokens])[0] diff --git a/venv/lib/python3.10/site-packages/nltk/tag/hmm.py b/venv/lib/python3.10/site-packages/nltk/tag/hmm.py new file mode 100644 index 0000000000000000000000000000000000000000..6577789b883828ce01e84c0864de57eead81f12b --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/hmm.py @@ -0,0 +1,1329 @@ +# Natural Language Toolkit: Hidden Markov Model +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Trevor Cohn +# Philip Blunsom +# Tiago Tresoldi (fixes) +# Steven Bird (fixes) +# Joseph Frazee (fixes) +# Steven Xu (fixes) +# URL: +# For license information, see LICENSE.TXT + +""" +Hidden Markov Models (HMMs) largely used to assign the correct label sequence +to sequential data or assess the probability of a given label and data +sequence. These models are finite state machines characterised by a number of +states, transitions between these states, and output symbols emitted while in +each state. The HMM is an extension to the Markov chain, where each state +corresponds deterministically to a given event. In the HMM the observation is +a probabilistic function of the state. HMMs share the Markov chain's +assumption, being that the probability of transition from one state to another +only depends on the current state - i.e. the series of states that led to the +current state are not used. They are also time invariant. + +The HMM is a directed graph, with probability weighted edges (representing the +probability of a transition between the source and sink states) where each +vertex emits an output symbol when entered. The symbol (or observation) is +non-deterministically generated. For this reason, knowing that a sequence of +output observations was generated by a given HMM does not mean that the +corresponding sequence of states (and what the current state is) is known. +This is the 'hidden' in the hidden markov model. + +Formally, a HMM can be characterised by: + +- the output observation alphabet. This is the set of symbols which may be + observed as output of the system. +- the set of states. +- the transition probabilities *a_{ij} = P(s_t = j | s_{t-1} = i)*. These + represent the probability of transition to each state from a given state. +- the output probability matrix *b_i(k) = P(X_t = o_k | s_t = i)*. These + represent the probability of observing each symbol in a given state. +- the initial state distribution. This gives the probability of starting + in each state. + +To ground this discussion, take a common NLP application, part-of-speech (POS) +tagging. An HMM is desirable for this task as the highest probability tag +sequence can be calculated for a given sequence of word forms. This differs +from other tagging techniques which often tag each word individually, seeking +to optimise each individual tagging greedily without regard to the optimal +combination of tags for a larger unit, such as a sentence. The HMM does this +with the Viterbi algorithm, which efficiently computes the optimal path +through the graph given the sequence of words forms. + +In POS tagging the states usually have a 1:1 correspondence with the tag +alphabet - i.e. each state represents a single tag. The output observation +alphabet is the set of word forms (the lexicon), and the remaining three +parameters are derived by a training regime. With this information the +probability of a given sentence can be easily derived, by simply summing the +probability of each distinct path through the model. 
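# A tiny numeric sketch of the HMM parameterisation described above: an initial
# state distribution, transition probabilities a_ij and emission probabilities
# b_i(k), with the joint probability of one labelled path computed as
# prior * product of transitions * product of emissions. The two-state model
# and its numbers are invented for illustration only.
priors = {"N": 0.6, "V": 0.4}
transitions = {"N": {"N": 0.3, "V": 0.7}, "V": {"N": 0.8, "V": 0.2}}
emissions = {"N": {"dog": 0.5, "meat": 0.5}, "V": {"eat": 1.0}}


def joint_probability(states, symbols):
    p = priors[states[0]] * emissions[states[0]][symbols[0]]
    for prev, cur, sym in zip(states, states[1:], symbols[1:]):
        p *= transitions[prev][cur] * emissions[cur][sym]
    return p


print(joint_probability(["N", "V", "N"], ["dog", "eat", "meat"]))
# 0.6 * 0.5 * 0.7 * 1.0 * 0.8 * 0.5 = 0.084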
Similarly, the highest +probability tagging sequence can be derived with the Viterbi algorithm, +yielding a state sequence which can be mapped into a tag sequence. + +This discussion assumes that the HMM has been trained. This is probably the +most difficult task with the model, and requires either MLE estimates of the +parameters or unsupervised learning using the Baum-Welch algorithm, a variant +of EM. + +For more information, please consult the source code for this module, +which includes extensive demonstration code. +""" + +import itertools +import re + +try: + import numpy as np +except ImportError: + pass + +from nltk.metrics import accuracy +from nltk.probability import ( + ConditionalFreqDist, + ConditionalProbDist, + DictionaryConditionalProbDist, + DictionaryProbDist, + FreqDist, + LidstoneProbDist, + MLEProbDist, + MutableProbDist, + RandomProbDist, +) +from nltk.tag.api import TaggerI +from nltk.util import LazyMap, unique_list + +_TEXT = 0 # index of text in a tuple +_TAG = 1 # index of tag in a tuple + + +def _identity(labeled_symbols): + return labeled_symbols + + +class HiddenMarkovModelTagger(TaggerI): + """ + Hidden Markov model class, a generative model for labelling sequence data. + These models define the joint probability of a sequence of symbols and + their labels (state transitions) as the product of the starting state + probability, the probability of each state transition, and the probability + of each observation being generated from each state. This is described in + more detail in the module documentation. + + This implementation is based on the HMM description in Chapter 8, Huang, + Acero and Hon, Spoken Language Processing and includes an extension for + training shallow HMM parsers or specialized HMMs as in Molina et. + al, 2002. A specialized HMM modifies training data by applying a + specialization function to create a new training set that is more + appropriate for sequential tagging with an HMM. A typical use case is + chunking. + + :param symbols: the set of output symbols (alphabet) + :type symbols: seq of any + :param states: a set of states representing state space + :type states: seq of any + :param transitions: transition probabilities; Pr(s_i | s_j) is the + probability of transition from state i given the model is in + state_j + :type transitions: ConditionalProbDistI + :param outputs: output probabilities; Pr(o_k | s_i) is the probability + of emitting symbol k when entering state i + :type outputs: ConditionalProbDistI + :param priors: initial state distribution; Pr(s_i) is the probability + of starting in state i + :type priors: ProbDistI + :param transform: an optional function for transforming training + instances, defaults to the identity function. 
+ :type transform: callable + """ + + def __init__( + self, symbols, states, transitions, outputs, priors, transform=_identity + ): + self._symbols = unique_list(symbols) + self._states = unique_list(states) + self._transitions = transitions + self._outputs = outputs + self._priors = priors + self._cache = None + self._transform = transform + + @classmethod + def _train( + cls, + labeled_sequence, + test_sequence=None, + unlabeled_sequence=None, + transform=_identity, + estimator=None, + **kwargs, + ): + + if estimator is None: + + def estimator(fd, bins): + return LidstoneProbDist(fd, 0.1, bins) + + labeled_sequence = LazyMap(transform, labeled_sequence) + symbols = unique_list(word for sent in labeled_sequence for word, tag in sent) + tag_set = unique_list(tag for sent in labeled_sequence for word, tag in sent) + + trainer = HiddenMarkovModelTrainer(tag_set, symbols) + hmm = trainer.train_supervised(labeled_sequence, estimator=estimator) + hmm = cls( + hmm._symbols, + hmm._states, + hmm._transitions, + hmm._outputs, + hmm._priors, + transform=transform, + ) + + if test_sequence: + hmm.test(test_sequence, verbose=kwargs.get("verbose", False)) + + if unlabeled_sequence: + max_iterations = kwargs.get("max_iterations", 5) + hmm = trainer.train_unsupervised( + unlabeled_sequence, model=hmm, max_iterations=max_iterations + ) + if test_sequence: + hmm.test(test_sequence, verbose=kwargs.get("verbose", False)) + + return hmm + + @classmethod + def train( + cls, labeled_sequence, test_sequence=None, unlabeled_sequence=None, **kwargs + ): + """ + Train a new HiddenMarkovModelTagger using the given labeled and + unlabeled training instances. Testing will be performed if test + instances are provided. + + :return: a hidden markov model tagger + :rtype: HiddenMarkovModelTagger + :param labeled_sequence: a sequence of labeled training instances, + i.e. a list of sentences represented as tuples + :type labeled_sequence: list(list) + :param test_sequence: a sequence of labeled test instances + :type test_sequence: list(list) + :param unlabeled_sequence: a sequence of unlabeled training instances, + i.e. a list of sentences represented as words + :type unlabeled_sequence: list(list) + :param transform: an optional function for transforming training + instances, defaults to the identity function, see ``transform()`` + :type transform: function + :param estimator: an optional function or class that maps a + condition's frequency distribution to its probability + distribution, defaults to a Lidstone distribution with gamma = 0.1 + :type estimator: class or function + :param verbose: boolean flag indicating whether training should be + verbose or include printed output + :type verbose: bool + :param max_iterations: number of Baum-Welch iterations to perform + :type max_iterations: int + """ + return cls._train(labeled_sequence, test_sequence, unlabeled_sequence, **kwargs) + + def probability(self, sequence): + """ + Returns the probability of the given symbol sequence. If the sequence + is labelled, then returns the joint probability of the symbol, state + sequence. Otherwise, uses the forward algorithm to find the + probability over all label sequences. 
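A sketch of the two cases, again using the toy market model from ``_market_hmm_example()`` below: a labelled sequence gives the joint probability of symbols and states, while ``None`` tags trigger the forward-algorithm marginal.

    model, states, symbols = _market_hmm_example()
    p_joint = model.probability([("up", "bull"), ("down", "bear")])
    p_marginal = model.probability([("up", None), ("down", None)])
    # p_marginal >= p_joint, since the joint is just one term in the marginal sum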
+ + :return: the probability of the sequence + :rtype: float + :param sequence: the sequence of symbols which must contain the TEXT + property, and optionally the TAG property + :type sequence: Token + """ + return 2 ** (self.log_probability(self._transform(sequence))) + + def log_probability(self, sequence): + """ + Returns the log-probability of the given symbol sequence. If the + sequence is labelled, then returns the joint log-probability of the + symbol, state sequence. Otherwise, uses the forward algorithm to find + the log-probability over all label sequences. + + :return: the log-probability of the sequence + :rtype: float + :param sequence: the sequence of symbols which must contain the TEXT + property, and optionally the TAG property + :type sequence: Token + """ + sequence = self._transform(sequence) + + T = len(sequence) + + if T > 0 and sequence[0][_TAG]: + last_state = sequence[0][_TAG] + p = self._priors.logprob(last_state) + self._output_logprob( + last_state, sequence[0][_TEXT] + ) + for t in range(1, T): + state = sequence[t][_TAG] + p += self._transitions[last_state].logprob( + state + ) + self._output_logprob(state, sequence[t][_TEXT]) + last_state = state + return p + else: + alpha = self._forward_probability(sequence) + p = logsumexp2(alpha[T - 1]) + return p + + def tag(self, unlabeled_sequence): + """ + Tags the sequence with the highest probability state sequence. This + uses the best_path method to find the Viterbi path. + + :return: a labelled sequence of symbols + :rtype: list + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._tag(unlabeled_sequence) + + def _tag(self, unlabeled_sequence): + path = self._best_path(unlabeled_sequence) + return list(zip(unlabeled_sequence, path)) + + def _output_logprob(self, state, symbol): + """ + :return: the log probability of the symbol being observed in the given + state + :rtype: float + """ + return self._outputs[state].logprob(symbol) + + def _create_cache(self): + """ + The cache is a tuple (P, O, X, S) where: + + - S maps symbols to integers. 
I.e., it is the inverse + mapping from self._symbols; for each symbol s in + self._symbols, the following is true:: + + self._symbols[S[s]] == s + + - O is the log output probabilities:: + + O[i,k] = log( P(token[t]=sym[k]|tag[t]=state[i]) ) + + - X is the log transition probabilities:: + + X[i,j] = log( P(tag[t]=state[j]|tag[t-1]=state[i]) ) + + - P is the log prior probabilities:: + + P[i] = log( P(tag[0]=state[i]) ) + """ + if not self._cache: + N = len(self._states) + M = len(self._symbols) + P = np.zeros(N, np.float32) + X = np.zeros((N, N), np.float32) + O = np.zeros((N, M), np.float32) + for i in range(N): + si = self._states[i] + P[i] = self._priors.logprob(si) + for j in range(N): + X[i, j] = self._transitions[si].logprob(self._states[j]) + for k in range(M): + O[i, k] = self._output_logprob(si, self._symbols[k]) + S = {} + for k in range(M): + S[self._symbols[k]] = k + self._cache = (P, O, X, S) + + def _update_cache(self, symbols): + # add new symbols to the symbol table and repopulate the output + # probabilities and symbol table mapping + if symbols: + self._create_cache() + P, O, X, S = self._cache + for symbol in symbols: + if symbol not in self._symbols: + self._cache = None + self._symbols.append(symbol) + # don't bother with the work if there aren't any new symbols + if not self._cache: + N = len(self._states) + M = len(self._symbols) + Q = O.shape[1] + # add new columns to the output probability table without + # destroying the old probabilities + O = np.hstack([O, np.zeros((N, M - Q), np.float32)]) + for i in range(N): + si = self._states[i] + # only calculate probabilities for new symbols + for k in range(Q, M): + O[i, k] = self._output_logprob(si, self._symbols[k]) + # only create symbol mappings for new symbols + for k in range(Q, M): + S[self._symbols[k]] = k + self._cache = (P, O, X, S) + + def reset_cache(self): + self._cache = None + + def best_path(self, unlabeled_sequence): + """ + Returns the state sequence of the optimal (most probable) path through + the HMM. Uses the Viterbi algorithm to calculate this part by dynamic + programming. + + :return: the state sequence + :rtype: sequence of any + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._best_path(unlabeled_sequence) + + def _best_path(self, unlabeled_sequence): + T = len(unlabeled_sequence) + N = len(self._states) + self._create_cache() + self._update_cache(unlabeled_sequence) + P, O, X, S = self._cache + + V = np.zeros((T, N), np.float32) + B = -np.ones((T, N), int) + + V[0] = P + O[:, S[unlabeled_sequence[0]]] + for t in range(1, T): + for j in range(N): + vs = V[t - 1, :] + X[:, j] + best = np.argmax(vs) + V[t, j] = vs[best] + O[j, S[unlabeled_sequence[t]]] + B[t, j] = best + + current = np.argmax(V[T - 1, :]) + sequence = [current] + for t in range(T - 1, 0, -1): + last = B[t, current] + sequence.append(last) + current = last + + sequence.reverse() + return list(map(self._states.__getitem__, sequence)) + + def best_path_simple(self, unlabeled_sequence): + """ + Returns the state sequence of the optimal (most probable) path through + the HMM. Uses the Viterbi algorithm to calculate this part by dynamic + programming. This uses a simple, direct method, and is included for + teaching purposes. 
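In the notation used elsewhere in this module, both this method and ``best_path`` compute the log-space Viterbi recurrence (all logarithms here are base 2)::

    V[0, i] = log Pr(s_i) + log Pr(o_0 | s_i)
    V[t, j] = max_i ( V[t-1, i] + log Pr(s_j | s_i) ) + log Pr(o_t | s_j)
    B[t, j] = argmax_i ( V[t-1, i] + log Pr(s_j | s_i) )

The best state sequence is read off by taking ``argmax_j V[T-1, j]`` and following the back-pointers ``B`` from ``t = T-1`` down to ``t = 1``.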
+ + :return: the state sequence + :rtype: sequence of any + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + return self._best_path_simple(unlabeled_sequence) + + def _best_path_simple(self, unlabeled_sequence): + T = len(unlabeled_sequence) + N = len(self._states) + V = np.zeros((T, N), np.float64) + B = {} + + # find the starting log probabilities for each state + symbol = unlabeled_sequence[0] + for i, state in enumerate(self._states): + V[0, i] = self._priors.logprob(state) + self._output_logprob(state, symbol) + B[0, state] = None + + # find the maximum log probabilities for reaching each state at time t + for t in range(1, T): + symbol = unlabeled_sequence[t] + for j in range(N): + sj = self._states[j] + best = None + for i in range(N): + si = self._states[i] + va = V[t - 1, i] + self._transitions[si].logprob(sj) + if not best or va > best[0]: + best = (va, si) + V[t, j] = best[0] + self._output_logprob(sj, symbol) + B[t, sj] = best[1] + + # find the highest probability final state + best = None + for i in range(N): + val = V[T - 1, i] + if not best or val > best[0]: + best = (val, self._states[i]) + + # traverse the back-pointers B to find the state sequence + current = best[1] + sequence = [current] + for t in range(T - 1, 0, -1): + last = B[t, current] + sequence.append(last) + current = last + + sequence.reverse() + return sequence + + def random_sample(self, rng, length): + """ + Randomly sample the HMM to generate a sentence of a given length. This + samples the prior distribution then the observation distribution and + transition distribution for each subsequent observation and state. + This will mostly generate unintelligible garbage, but can provide some + amusement. + + :return: the randomly created state/observation sequence, + generated according to the HMM's probability + distributions. The SUBTOKENS have TEXT and TAG + properties containing the observation and state + respectively. + :rtype: list + :param rng: random number generator + :type rng: Random (or any object with a random() method) + :param length: desired output length + :type length: int + """ + + # sample the starting state and symbol prob dists + tokens = [] + state = self._sample_probdist(self._priors, rng.random(), self._states) + symbol = self._sample_probdist( + self._outputs[state], rng.random(), self._symbols + ) + tokens.append((symbol, state)) + + for i in range(1, length): + # sample the state transition and symbol prob dists + state = self._sample_probdist( + self._transitions[state], rng.random(), self._states + ) + symbol = self._sample_probdist( + self._outputs[state], rng.random(), self._symbols + ) + tokens.append((symbol, state)) + + return tokens + + def _sample_probdist(self, probdist, p, samples): + cum_p = 0 + for sample in samples: + add_p = probdist.prob(sample) + if cum_p <= p <= cum_p + add_p: + return sample + cum_p += add_p + raise Exception("Invalid probability distribution - " "does not sum to one") + + def entropy(self, unlabeled_sequence): + """ + Returns the entropy over labellings of the given sequence. This is + given by:: + + H(O) = - sum_S Pr(S | O) log Pr(S | O) + + where the summation ranges over all state sequences, S. Let + *Z = Pr(O) = sum_S Pr(S, O)}* where the summation ranges over all state + sequences and O is the observation sequence. 
As such the entropy can + be re-expressed as:: + + H = - sum_S Pr(S | O) log [ Pr(S, O) / Z ] + = log Z - sum_S Pr(S | O) log Pr(S, 0) + = log Z - sum_S Pr(S | O) [ log Pr(S_0) + sum_t Pr(S_t | S_{t-1}) + sum_t Pr(O_t | S_t) ] + + The order of summation for the log terms can be flipped, allowing + dynamic programming to be used to calculate the entropy. Specifically, + we use the forward and backward probabilities (alpha, beta) giving:: + + H = log Z - sum_s0 alpha_0(s0) beta_0(s0) / Z * log Pr(s0) + + sum_t,si,sj alpha_t(si) Pr(sj | si) Pr(O_t+1 | sj) beta_t(sj) / Z * log Pr(sj | si) + + sum_t,st alpha_t(st) beta_t(st) / Z * log Pr(O_t | st) + + This simply uses alpha and beta to find the probabilities of partial + sequences, constrained to include the given state(s) at some point in + time. + """ + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + alpha = self._forward_probability(unlabeled_sequence) + beta = self._backward_probability(unlabeled_sequence) + normalisation = logsumexp2(alpha[T - 1]) + + entropy = normalisation + + # starting state, t = 0 + for i, state in enumerate(self._states): + p = 2 ** (alpha[0, i] + beta[0, i] - normalisation) + entropy -= p * self._priors.logprob(state) + # print('p(s_0 = %s) =' % state, p) + + # state transitions + for t0 in range(T - 1): + t1 = t0 + 1 + for i0, s0 in enumerate(self._states): + for i1, s1 in enumerate(self._states): + p = 2 ** ( + alpha[t0, i0] + + self._transitions[s0].logprob(s1) + + self._outputs[s1].logprob(unlabeled_sequence[t1][_TEXT]) + + beta[t1, i1] + - normalisation + ) + entropy -= p * self._transitions[s0].logprob(s1) + # print('p(s_%d = %s, s_%d = %s) =' % (t0, s0, t1, s1), p) + + # symbol emissions + for t in range(T): + for i, state in enumerate(self._states): + p = 2 ** (alpha[t, i] + beta[t, i] - normalisation) + entropy -= p * self._outputs[state].logprob( + unlabeled_sequence[t][_TEXT] + ) + # print('p(s_%d = %s) =' % (t, state), p) + + return entropy + + def point_entropy(self, unlabeled_sequence): + """ + Returns the pointwise entropy over the possible states at each + position in the chain, given the observation sequence. 
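A sketch of how this differs from ``entropy`` above, using the toy market model from ``_market_hmm_example()``:

    model, states, symbols = _market_hmm_example()
    sequence = [(symbol, None) for symbol in ["up", "down", "up"]]

    model.entropy(sequence)        # one float: entropy over complete labellings
    model.point_entropy(sequence)  # length-3 array: entropy of the marginal state
                                   # distribution at each position, given the observations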
+ """ + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + alpha = self._forward_probability(unlabeled_sequence) + beta = self._backward_probability(unlabeled_sequence) + normalisation = logsumexp2(alpha[T - 1]) + + entropies = np.zeros(T, np.float64) + probs = np.zeros(N, np.float64) + for t in range(T): + for s in range(N): + probs[s] = alpha[t, s] + beta[t, s] - normalisation + + for s in range(N): + entropies[t] -= 2 ** (probs[s]) * probs[s] + + return entropies + + def _exhaustive_entropy(self, unlabeled_sequence): + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + labellings = [[state] for state in self._states] + for t in range(T - 1): + current = labellings + labellings = [] + for labelling in current: + for state in self._states: + labellings.append(labelling + [state]) + + log_probs = [] + for labelling in labellings: + labeled_sequence = unlabeled_sequence[:] + for t, label in enumerate(labelling): + labeled_sequence[t] = (labeled_sequence[t][_TEXT], label) + lp = self.log_probability(labeled_sequence) + log_probs.append(lp) + normalisation = _log_add(*log_probs) + + entropy = 0 + for lp in log_probs: + lp -= normalisation + entropy -= 2 ** (lp) * lp + + return entropy + + def _exhaustive_point_entropy(self, unlabeled_sequence): + unlabeled_sequence = self._transform(unlabeled_sequence) + + T = len(unlabeled_sequence) + N = len(self._states) + + labellings = [[state] for state in self._states] + for t in range(T - 1): + current = labellings + labellings = [] + for labelling in current: + for state in self._states: + labellings.append(labelling + [state]) + + log_probs = [] + for labelling in labellings: + labelled_sequence = unlabeled_sequence[:] + for t, label in enumerate(labelling): + labelled_sequence[t] = (labelled_sequence[t][_TEXT], label) + lp = self.log_probability(labelled_sequence) + log_probs.append(lp) + + normalisation = _log_add(*log_probs) + + probabilities = _ninf_array((T, N)) + + for labelling, lp in zip(labellings, log_probs): + lp -= normalisation + for t, label in enumerate(labelling): + index = self._states.index(label) + probabilities[t, index] = _log_add(probabilities[t, index], lp) + + entropies = np.zeros(T, np.float64) + for t in range(T): + for s in range(N): + entropies[t] -= 2 ** (probabilities[t, s]) * probabilities[t, s] + + return entropies + + def _transitions_matrix(self): + """Return a matrix of transition log probabilities.""" + trans_iter = ( + self._transitions[sj].logprob(si) + for sj in self._states + for si in self._states + ) + + transitions_logprob = np.fromiter(trans_iter, dtype=np.float64) + N = len(self._states) + return transitions_logprob.reshape((N, N)).T + + def _outputs_vector(self, symbol): + """ + Return a vector with log probabilities of emitting a symbol + when entering states. + """ + out_iter = (self._output_logprob(sj, symbol) for sj in self._states) + return np.fromiter(out_iter, dtype=np.float64) + + def _forward_probability(self, unlabeled_sequence): + """ + Return the forward probability matrix, a T by N array of + log-probabilities, where T is the length of the sequence and N is the + number of states. Each entry (t, s) gives the probability of being in + state s at time t after observing the partial symbol sequence up to + and including t. 
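Concretely, the implementation fills the matrix with the standard log-space forward recurrence (logarithms are base 2 and are combined with ``logsumexp2``)::

    alpha[0, i] = log Pr(s_i) + log Pr(o_0 | s_i)
    alpha[t, i] = logsumexp_j( alpha[t-1, j] + log Pr(s_i | s_j) ) + log Pr(o_t | s_i)

so that ``logsumexp2(alpha[T-1])`` is the log-probability of the whole observation sequence.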
+ + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + :return: the forward log probability matrix + :rtype: array + """ + T = len(unlabeled_sequence) + N = len(self._states) + alpha = _ninf_array((T, N)) + + transitions_logprob = self._transitions_matrix() + + # Initialization + symbol = unlabeled_sequence[0][_TEXT] + for i, state in enumerate(self._states): + alpha[0, i] = self._priors.logprob(state) + self._output_logprob( + state, symbol + ) + + # Induction + for t in range(1, T): + symbol = unlabeled_sequence[t][_TEXT] + output_logprob = self._outputs_vector(symbol) + + for i in range(N): + summand = alpha[t - 1] + transitions_logprob[i] + alpha[t, i] = logsumexp2(summand) + output_logprob[i] + + return alpha + + def _backward_probability(self, unlabeled_sequence): + """ + Return the backward probability matrix, a T by N array of + log-probabilities, where T is the length of the sequence and N is the + number of states. Each entry (t, s) gives the probability of being in + state s at time t after observing the partial symbol sequence from t + .. T. + + :return: the backward log probability matrix + :rtype: array + :param unlabeled_sequence: the sequence of unlabeled symbols + :type unlabeled_sequence: list + """ + T = len(unlabeled_sequence) + N = len(self._states) + beta = _ninf_array((T, N)) + + transitions_logprob = self._transitions_matrix().T + + # initialise the backward values; + # "1" is an arbitrarily chosen value from Rabiner tutorial + beta[T - 1, :] = np.log2(1) + + # inductively calculate remaining backward values + for t in range(T - 2, -1, -1): + symbol = unlabeled_sequence[t + 1][_TEXT] + outputs = self._outputs_vector(symbol) + + for i in range(N): + summand = transitions_logprob[i] + beta[t + 1] + outputs + beta[t, i] = logsumexp2(summand) + + return beta + + def test(self, test_sequence, verbose=False, **kwargs): + """ + Tests the HiddenMarkovModelTagger instance. + + :param test_sequence: a sequence of labeled test instances + :type test_sequence: list(list) + :param verbose: boolean flag indicating whether training should be + verbose or include printed output + :type verbose: bool + """ + + def words(sent): + return [word for (word, tag) in sent] + + def tags(sent): + return [tag for (word, tag) in sent] + + def flatten(seq): + return list(itertools.chain(*seq)) + + test_sequence = self._transform(test_sequence) + predicted_sequence = list(map(self._tag, map(words, test_sequence))) + + if verbose: + for test_sent, predicted_sent in zip(test_sequence, predicted_sequence): + print( + "Test:", + " ".join(f"{token}/{tag}" for (token, tag) in test_sent), + ) + print() + print("Untagged:", " ".join("%s" % token for (token, tag) in test_sent)) + print() + print( + "HMM-tagged:", + " ".join(f"{token}/{tag}" for (token, tag) in predicted_sent), + ) + print() + print( + "Entropy:", + self.entropy([(token, None) for (token, tag) in predicted_sent]), + ) + print() + print("-" * 60) + + test_tags = flatten(map(tags, test_sequence)) + predicted_tags = flatten(map(tags, predicted_sequence)) + + acc = accuracy(test_tags, predicted_tags) + count = sum(len(sent) for sent in test_sequence) + print("accuracy over %d tokens: %.2f" % (count, acc * 100)) + + def __repr__(self): + return "" % ( + len(self._states), + len(self._symbols), + ) + + +class HiddenMarkovModelTrainer: + """ + Algorithms for learning HMM parameters from training data. These include + both supervised learning (MLE) and unsupervised learning (Baum-Welch). 
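A minimal supervised-training sketch, mirroring the demo code at the bottom of this module (the tiny corpus and the Lidstone estimator are purely illustrative):

    tagged_sents = [
        [("the", "DT"), ("dog", "NN"), ("barks", "VBZ")],
        [("a", "DT"), ("cat", "NN"), ("sleeps", "VBZ")],
    ]
    trainer = HiddenMarkovModelTrainer()
    tagger = trainer.train_supervised(
        tagged_sents,
        estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins),
    )
    print(tagger.tag(["the", "cat", "barks"]))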
+ + Creates an HMM trainer to induce an HMM with the given states and + output symbol alphabet. A supervised and unsupervised training + method may be used. If either of the states or symbols are not given, + these may be derived from supervised training. + + :param states: the set of state labels + :type states: sequence of any + :param symbols: the set of observation symbols + :type symbols: sequence of any + """ + + def __init__(self, states=None, symbols=None): + self._states = states if states else [] + self._symbols = symbols if symbols else [] + + def train(self, labeled_sequences=None, unlabeled_sequences=None, **kwargs): + """ + Trains the HMM using both (or either of) supervised and unsupervised + techniques. + + :return: the trained model + :rtype: HiddenMarkovModelTagger + :param labelled_sequences: the supervised training data, a set of + labelled sequences of observations + ex: [ (word_1, tag_1),...,(word_n,tag_n) ] + :type labelled_sequences: list + :param unlabeled_sequences: the unsupervised training data, a set of + sequences of observations + ex: [ word_1, ..., word_n ] + :type unlabeled_sequences: list + :param kwargs: additional arguments to pass to the training methods + """ + assert labeled_sequences or unlabeled_sequences + model = None + if labeled_sequences: + model = self.train_supervised(labeled_sequences, **kwargs) + if unlabeled_sequences: + if model: + kwargs["model"] = model + model = self.train_unsupervised(unlabeled_sequences, **kwargs) + return model + + def _baum_welch_step(self, sequence, model, symbol_to_number): + + N = len(model._states) + M = len(model._symbols) + T = len(sequence) + + # compute forward and backward probabilities + alpha = model._forward_probability(sequence) + beta = model._backward_probability(sequence) + + # find the log probability of the sequence + lpk = logsumexp2(alpha[T - 1]) + + A_numer = _ninf_array((N, N)) + B_numer = _ninf_array((N, M)) + A_denom = _ninf_array(N) + B_denom = _ninf_array(N) + + transitions_logprob = model._transitions_matrix().T + + for t in range(T): + symbol = sequence[t][_TEXT] # not found? FIXME + next_symbol = None + if t < T - 1: + next_symbol = sequence[t + 1][_TEXT] # not found? FIXME + xi = symbol_to_number[symbol] + + next_outputs_logprob = model._outputs_vector(next_symbol) + alpha_plus_beta = alpha[t] + beta[t] + + if t < T - 1: + numer_add = ( + transitions_logprob + + next_outputs_logprob + + beta[t + 1] + + alpha[t].reshape(N, 1) + ) + A_numer = np.logaddexp2(A_numer, numer_add) + A_denom = np.logaddexp2(A_denom, alpha_plus_beta) + else: + B_denom = np.logaddexp2(A_denom, alpha_plus_beta) + + B_numer[:, xi] = np.logaddexp2(B_numer[:, xi], alpha_plus_beta) + + return lpk, A_numer, A_denom, B_numer, B_denom + + def train_unsupervised(self, unlabeled_sequences, update_outputs=True, **kwargs): + """ + Trains the HMM using the Baum-Welch algorithm to maximise the + probability of the data sequence. This is a variant of the EM + algorithm, and is unsupervised in that it doesn't need the state + sequences for the symbols. The code is based on 'A Tutorial on Hidden + Markov Models and Selected Applications in Speech Recognition', + Lawrence Rabiner, IEEE, 1989. 
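A small usage sketch; following the demos below, each unlabeled observation is a ``(symbol, None)`` pair, and the state/symbol alphabets are given to the trainer up front:

    unlabeled = [
        [("up", None), ("up", None), ("down", None)],
        [("down", None), ("unchanged", None)],
    ]
    trainer = HiddenMarkovModelTrainer(
        states=["bull", "bear", "static"], symbols=["up", "down", "unchanged"]
    )
    tagger = trainer.train_unsupervised(unlabeled, max_iterations=5)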
+ + :return: the trained model + :rtype: HiddenMarkovModelTagger + :param unlabeled_sequences: the training data, a set of + sequences of observations + :type unlabeled_sequences: list + + kwargs may include following parameters: + + :param model: a HiddenMarkovModelTagger instance used to begin + the Baum-Welch algorithm + :param max_iterations: the maximum number of EM iterations + :param convergence_logprob: the maximum change in log probability to + allow convergence + """ + + # create a uniform HMM, which will be iteratively refined, unless + # given an existing model + model = kwargs.get("model") + if not model: + priors = RandomProbDist(self._states) + transitions = DictionaryConditionalProbDist( + {state: RandomProbDist(self._states) for state in self._states} + ) + outputs = DictionaryConditionalProbDist( + {state: RandomProbDist(self._symbols) for state in self._states} + ) + model = HiddenMarkovModelTagger( + self._symbols, self._states, transitions, outputs, priors + ) + + self._states = model._states + self._symbols = model._symbols + + N = len(self._states) + M = len(self._symbols) + symbol_numbers = {sym: i for i, sym in enumerate(self._symbols)} + + # update model prob dists so that they can be modified + # model._priors = MutableProbDist(model._priors, self._states) + + model._transitions = DictionaryConditionalProbDist( + { + s: MutableProbDist(model._transitions[s], self._states) + for s in self._states + } + ) + + if update_outputs: + model._outputs = DictionaryConditionalProbDist( + { + s: MutableProbDist(model._outputs[s], self._symbols) + for s in self._states + } + ) + + model.reset_cache() + + # iterate until convergence + converged = False + last_logprob = None + iteration = 0 + max_iterations = kwargs.get("max_iterations", 1000) + epsilon = kwargs.get("convergence_logprob", 1e-6) + + while not converged and iteration < max_iterations: + A_numer = _ninf_array((N, N)) + B_numer = _ninf_array((N, M)) + A_denom = _ninf_array(N) + B_denom = _ninf_array(N) + + logprob = 0 + for sequence in unlabeled_sequences: + sequence = list(sequence) + if not sequence: + continue + + ( + lpk, + seq_A_numer, + seq_A_denom, + seq_B_numer, + seq_B_denom, + ) = self._baum_welch_step(sequence, model, symbol_numbers) + + # add these sums to the global A and B values + for i in range(N): + A_numer[i] = np.logaddexp2(A_numer[i], seq_A_numer[i] - lpk) + B_numer[i] = np.logaddexp2(B_numer[i], seq_B_numer[i] - lpk) + + A_denom = np.logaddexp2(A_denom, seq_A_denom - lpk) + B_denom = np.logaddexp2(B_denom, seq_B_denom - lpk) + + logprob += lpk + + # use the calculated values to update the transition and output + # probability values + for i in range(N): + logprob_Ai = A_numer[i] - A_denom[i] + logprob_Bi = B_numer[i] - B_denom[i] + + # We should normalize all probabilities (see p.391 Huang et al) + # Let sum(P) be K. + # We can divide each Pi by K to make sum(P) == 1. + # Pi' = Pi/K + # log2(Pi') = log2(Pi) - log2(K) + logprob_Ai -= logsumexp2(logprob_Ai) + logprob_Bi -= logsumexp2(logprob_Bi) + + # update output and transition probabilities + si = self._states[i] + + for j in range(N): + sj = self._states[j] + model._transitions[si].update(sj, logprob_Ai[j]) + + if update_outputs: + for k in range(M): + ok = self._symbols[k] + model._outputs[si].update(ok, logprob_Bi[k]) + + # Rabiner says the priors don't need to be updated. I don't + # believe him. 
FIXME + + # test for convergence + if iteration > 0 and abs(logprob - last_logprob) < epsilon: + converged = True + + print("iteration", iteration, "logprob", logprob) + iteration += 1 + last_logprob = logprob + + return model + + def train_supervised(self, labelled_sequences, estimator=None): + """ + Supervised training maximising the joint probability of the symbol and + state sequences. This is done via collecting frequencies of + transitions between states, symbol observations while within each + state and which states start a sentence. These frequency distributions + are then normalised into probability estimates, which can be + smoothed if desired. + + :return: the trained model + :rtype: HiddenMarkovModelTagger + :param labelled_sequences: the training data, a set of + labelled sequences of observations + :type labelled_sequences: list + :param estimator: a function taking + a FreqDist and a number of bins and returning a CProbDistI; + otherwise a MLE estimate is used + """ + + # default to the MLE estimate + if estimator is None: + estimator = lambda fdist, bins: MLEProbDist(fdist) + + # count occurrences of starting states, transitions out of each state + # and output symbols observed in each state + known_symbols = set(self._symbols) + known_states = set(self._states) + + starting = FreqDist() + transitions = ConditionalFreqDist() + outputs = ConditionalFreqDist() + for sequence in labelled_sequences: + lasts = None + for token in sequence: + state = token[_TAG] + symbol = token[_TEXT] + if lasts is None: + starting[state] += 1 + else: + transitions[lasts][state] += 1 + outputs[state][symbol] += 1 + lasts = state + + # update the state and symbol lists + if state not in known_states: + self._states.append(state) + known_states.add(state) + + if symbol not in known_symbols: + self._symbols.append(symbol) + known_symbols.add(symbol) + + # create probability distributions (with smoothing) + N = len(self._states) + pi = estimator(starting, N) + A = ConditionalProbDist(transitions, estimator, N) + B = ConditionalProbDist(outputs, estimator, len(self._symbols)) + + return HiddenMarkovModelTagger(self._symbols, self._states, A, B, pi) + + +def _ninf_array(shape): + res = np.empty(shape, np.float64) + res.fill(-np.inf) + return res + + +def logsumexp2(arr): + max_ = arr.max() + return np.log2(np.sum(2 ** (arr - max_))) + max_ + + +def _log_add(*values): + """ + Adds the logged values, returning the logarithm of the addition. 
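For example, with the base-2 log values used throughout this module, adding probabilities 0.25 and 0.125 looks like::

    _log_add(-2.0, -3.0)
        = -2.0 + log2(2**0 + 2**-1)
        = -2.0 + log2(1.5)
        = log2(0.25 + 0.125)  # about -1.415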
+ """ + x = max(values) + if x > -np.inf: + sum_diffs = 0 + for value in values: + sum_diffs += 2 ** (value - x) + return x + np.log2(sum_diffs) + else: + return x + + +def _create_hmm_tagger(states, symbols, A, B, pi): + def pd(values, samples): + d = dict(zip(samples, values)) + return DictionaryProbDist(d) + + def cpd(array, conditions, samples): + d = {} + for values, condition in zip(array, conditions): + d[condition] = pd(values, samples) + return DictionaryConditionalProbDist(d) + + A = cpd(A, states, states) + B = cpd(B, states, symbols) + pi = pd(pi, states) + return HiddenMarkovModelTagger( + symbols=symbols, states=states, transitions=A, outputs=B, priors=pi + ) + + +def _market_hmm_example(): + """ + Return an example HMM (described at page 381, Huang et al) + """ + states = ["bull", "bear", "static"] + symbols = ["up", "down", "unchanged"] + A = np.array([[0.6, 0.2, 0.2], [0.5, 0.3, 0.2], [0.4, 0.1, 0.5]], np.float64) + B = np.array([[0.7, 0.1, 0.2], [0.1, 0.6, 0.3], [0.3, 0.3, 0.4]], np.float64) + pi = np.array([0.5, 0.2, 0.3], np.float64) + + model = _create_hmm_tagger(states, symbols, A, B, pi) + return model, states, symbols + + +def demo(): + # demonstrates HMM probability calculation + + print() + print("HMM probability calculation demo") + print() + + model, states, symbols = _market_hmm_example() + + print("Testing", model) + + for test in [ + ["up", "up"], + ["up", "down", "up"], + ["down"] * 5, + ["unchanged"] * 5 + ["up"], + ]: + + sequence = [(t, None) for t in test] + + print("Testing with state sequence", test) + print("probability =", model.probability(sequence)) + print("tagging = ", model.tag([word for (word, tag) in sequence])) + print("p(tagged) = ", model.probability(sequence)) + print("H = ", model.entropy(sequence)) + print("H_exh = ", model._exhaustive_entropy(sequence)) + print("H(point) = ", model.point_entropy(sequence)) + print("H_exh(point)=", model._exhaustive_point_entropy(sequence)) + print() + + +def load_pos(num_sents): + from nltk.corpus import brown + + sentences = brown.tagged_sents(categories="news")[:num_sents] + + tag_re = re.compile(r"[*]|--|[^+*-]+") + tag_set = set() + symbols = set() + + cleaned_sentences = [] + for sentence in sentences: + for i in range(len(sentence)): + word, tag = sentence[i] + word = word.lower() # normalize + symbols.add(word) # log this word + # Clean up the tag. + tag = tag_re.match(tag).group() + tag_set.add(tag) + sentence[i] = (word, tag) # store cleaned-up tagged token + cleaned_sentences += [sentence] + + return cleaned_sentences, list(tag_set), list(symbols) + + +def demo_pos(): + # demonstrates POS tagging using supervised training + + print() + print("HMM POS tagging demo") + print() + + print("Training HMM...") + labelled_sequences, tag_set, symbols = load_pos(20000) + trainer = HiddenMarkovModelTrainer(tag_set, symbols) + hmm = trainer.train_supervised( + labelled_sequences[10:], + estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins), + ) + + print("Testing...") + hmm.test(labelled_sequences[:10], verbose=True) + + +def _untag(sentences): + unlabeled = [] + for sentence in sentences: + unlabeled.append([(token[_TEXT], None) for token in sentence]) + return unlabeled + + +def demo_pos_bw( + test=10, supervised=20, unsupervised=10, verbose=True, max_iterations=5 +): + # demonstrates the Baum-Welch algorithm in POS tagging + + print() + print("Baum-Welch demo for POS tagging") + print() + + print("Training HMM (supervised, %d sentences)..." 
% supervised) + + sentences, tag_set, symbols = load_pos(test + supervised + unsupervised) + + symbols = set() + for sentence in sentences: + for token in sentence: + symbols.add(token[_TEXT]) + + trainer = HiddenMarkovModelTrainer(tag_set, list(symbols)) + hmm = trainer.train_supervised( + sentences[test : test + supervised], + estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins), + ) + + hmm.test(sentences[:test], verbose=verbose) + + print("Training (unsupervised, %d sentences)..." % unsupervised) + # it's rather slow - so only use 10 samples by default + unlabeled = _untag(sentences[test + supervised :]) + hmm = trainer.train_unsupervised( + unlabeled, model=hmm, max_iterations=max_iterations + ) + hmm.test(sentences[:test], verbose=verbose) + + +def demo_bw(): + # demo Baum Welch by generating some sequences and then performing + # unsupervised training on them + + print() + print("Baum-Welch demo for market example") + print() + + model, states, symbols = _market_hmm_example() + + # generate some random sequences + training = [] + import random + + rng = random.Random() + rng.seed(0) + for i in range(10): + item = model.random_sample(rng, 5) + training.append([(i[0], None) for i in item]) + + # train on those examples, starting with the model that generated them + trainer = HiddenMarkovModelTrainer(states, symbols) + hmm = trainer.train_unsupervised(training, model=model, max_iterations=1000) diff --git a/venv/lib/python3.10/site-packages/nltk/tag/hunpos.py b/venv/lib/python3.10/site-packages/nltk/tag/hunpos.py new file mode 100644 index 0000000000000000000000000000000000000000..e001c6d6dbc1257515ed1149abe6bab06f1c7337 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/hunpos.py @@ -0,0 +1,142 @@ +# Natural Language Toolkit: Interface to the HunPos POS-tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Peter Ljunglöf +# Dávid Márk Nemeskey (modifications) +# Attila Zséder (modifications) +# URL: +# For license information, see LICENSE.TXT + +""" +A module for interfacing with the HunPos open-source POS-tagger. +""" + +import os +from subprocess import PIPE, Popen + +from nltk.internals import find_binary, find_file +from nltk.tag.api import TaggerI + +_hunpos_url = "https://code.google.com/p/hunpos/" + +_hunpos_charset = "ISO-8859-1" +"""The default encoding used by hunpos: ISO-8859-1.""" + + +class HunposTagger(TaggerI): + """ + A class for pos tagging with HunPos. The input is the paths to: + - a model trained on training data + - (optionally) the path to the hunpos-tag binary + - (optionally) the encoding of the training data (default: ISO-8859-1) + + Check whether the required "hunpos-tag" binary is available: + + >>> from nltk.test.setup_fixt import check_binary + >>> check_binary('hunpos-tag') + + Example: + >>> from nltk.tag import HunposTagger + >>> ht = HunposTagger('en_wsj.model') + >>> ht.tag('What is the airspeed of an unladen swallow ?'.split()) + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'VB'), ('?', '.')] + >>> ht.close() + + This class communicates with the hunpos-tag binary via pipes. When the + tagger object is no longer needed, the close() method should be called to + free system resources. The class supports the context manager interface; if + used in a with statement, the close() method is invoked automatically: + + >>> with HunposTagger('en_wsj.model') as ht: + ... ht.tag('What is the airspeed of an unladen swallow ?'.split()) + ... 
+ [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'VB'), ('?', '.')] + """ + + def __init__( + self, path_to_model, path_to_bin=None, encoding=_hunpos_charset, verbose=False + ): + """ + Starts the hunpos-tag executable and establishes a connection with it. + + :param path_to_model: The model file. + :param path_to_bin: The hunpos-tag binary. + :param encoding: The encoding used by the model. Unicode tokens + passed to the tag() and tag_sents() methods are converted to + this charset when they are sent to hunpos-tag. + The default is ISO-8859-1 (Latin-1). + + This parameter is ignored for str tokens, which are sent as-is. + The caller must ensure that tokens are encoded in the right charset. + """ + self._closed = True + hunpos_paths = [ + ".", + "/usr/bin", + "/usr/local/bin", + "/opt/local/bin", + "/Applications/bin", + "~/bin", + "~/Applications/bin", + ] + hunpos_paths = list(map(os.path.expanduser, hunpos_paths)) + + self._hunpos_bin = find_binary( + "hunpos-tag", + path_to_bin, + env_vars=("HUNPOS_TAGGER",), + searchpath=hunpos_paths, + url=_hunpos_url, + verbose=verbose, + ) + + self._hunpos_model = find_file( + path_to_model, env_vars=("HUNPOS_TAGGER",), verbose=verbose + ) + self._encoding = encoding + self._hunpos = Popen( + [self._hunpos_bin, self._hunpos_model], + shell=False, + stdin=PIPE, + stdout=PIPE, + stderr=PIPE, + ) + self._closed = False + + def __del__(self): + self.close() + + def close(self): + """Closes the pipe to the hunpos executable.""" + if not self._closed: + self._hunpos.communicate() + self._closed = True + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + self.close() + + def tag(self, tokens): + """Tags a single sentence: a list of words. + The tokens should not contain any newline characters. + """ + for token in tokens: + assert "\n" not in token, "Tokens should not contain newlines" + if isinstance(token, str): + token = token.encode(self._encoding) + self._hunpos.stdin.write(token + b"\n") + # We write a final empty line to tell hunpos that the sentence is finished: + self._hunpos.stdin.write(b"\n") + self._hunpos.stdin.flush() + + tagged_tokens = [] + for token in tokens: + tagged = self._hunpos.stdout.readline().strip().split(b"\t") + tag = tagged[1] if len(tagged) > 1 else None + tagged_tokens.append((token, tag)) + # We have to read (and dismiss) the final empty line: + self._hunpos.stdout.readline() + + return tagged_tokens diff --git a/venv/lib/python3.10/site-packages/nltk/tag/mapping.py b/venv/lib/python3.10/site-packages/nltk/tag/mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..0af1a0eef945b3cfb2bb3a5860b223a42dbaeae7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/mapping.py @@ -0,0 +1,136 @@ +# Natural Language Toolkit: Tagset Mapping +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nathan Schneider +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + +""" +Interface for converting POS tags from various treebanks +to the universal tagset of Petrov, Das, & McDonald. + +The tagset consists of the following 12 coarse tags: + +VERB - verbs (all tenses and modes) +NOUN - nouns (common and proper) +PRON - pronouns +ADJ - adjectives +ADV - adverbs +ADP - adpositions (prepositions and postpositions) +CONJ - conjunctions +DET - determiners +NUM - cardinal numbers +PRT - particles or other function words +X - other: foreign words, typos, abbreviations +. 
- punctuation + +@see: https://arxiv.org/abs/1104.2086 and https://code.google.com/p/universal-pos-tags/ + +""" + +from collections import defaultdict +from os.path import join + +from nltk.data import load + +_UNIVERSAL_DATA = "taggers/universal_tagset" +_UNIVERSAL_TAGS = ( + "VERB", + "NOUN", + "PRON", + "ADJ", + "ADV", + "ADP", + "CONJ", + "DET", + "NUM", + "PRT", + "X", + ".", +) + +# _MAPPINGS = defaultdict(lambda: defaultdict(dict)) +# the mapping between tagset T1 and T2 returns UNK if applied to an unrecognized tag +_MAPPINGS = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: "UNK"))) + + +def _load_universal_map(fileid): + contents = load(join(_UNIVERSAL_DATA, fileid + ".map"), format="text") + + # When mapping to the Universal Tagset, + # map unknown inputs to 'X' not 'UNK' + _MAPPINGS[fileid]["universal"].default_factory = lambda: "X" + + for line in contents.splitlines(): + line = line.strip() + if line == "": + continue + fine, coarse = line.split("\t") + + assert coarse in _UNIVERSAL_TAGS, f"Unexpected coarse tag: {coarse}" + assert ( + fine not in _MAPPINGS[fileid]["universal"] + ), f"Multiple entries for original tag: {fine}" + + _MAPPINGS[fileid]["universal"][fine] = coarse + + +def tagset_mapping(source, target): + """ + Retrieve the mapping dictionary between tagsets. + + >>> tagset_mapping('ru-rnc', 'universal') == {'!': '.', 'A': 'ADJ', 'C': 'CONJ', 'AD': 'ADV',\ + 'NN': 'NOUN', 'VG': 'VERB', 'COMP': 'CONJ', 'NC': 'NUM', 'VP': 'VERB', 'P': 'ADP',\ + 'IJ': 'X', 'V': 'VERB', 'Z': 'X', 'VI': 'VERB', 'YES_NO_SENT': 'X', 'PTCL': 'PRT'} + True + """ + + if source not in _MAPPINGS or target not in _MAPPINGS[source]: + if target == "universal": + _load_universal_map(source) + # Added the new Russian National Corpus mappings because the + # Russian model for nltk.pos_tag() uses it. + _MAPPINGS["ru-rnc-new"]["universal"] = { + "A": "ADJ", + "A-PRO": "PRON", + "ADV": "ADV", + "ADV-PRO": "PRON", + "ANUM": "ADJ", + "CONJ": "CONJ", + "INTJ": "X", + "NONLEX": ".", + "NUM": "NUM", + "PARENTH": "PRT", + "PART": "PRT", + "PR": "ADP", + "PRAEDIC": "PRT", + "PRAEDIC-PRO": "PRON", + "S": "NOUN", + "S-PRO": "PRON", + "V": "VERB", + } + + return _MAPPINGS[source][target] + + +def map_tag(source, target, source_tag): + """ + Maps the tag from the source tagset to the target tagset. + + >>> map_tag('en-ptb', 'universal', 'VBZ') + 'VERB' + >>> map_tag('en-ptb', 'universal', 'VBP') + 'VERB' + >>> map_tag('en-ptb', 'universal', '``') + '.' + """ + + # we need a systematic approach to naming + if target == "universal": + if source == "wsj": + source = "en-ptb" + if source == "brown": + source = "en-brown" + + return tagset_mapping(source, target)[source_tag] diff --git a/venv/lib/python3.10/site-packages/nltk/tag/perceptron.py b/venv/lib/python3.10/site-packages/nltk/tag/perceptron.py new file mode 100644 index 0000000000000000000000000000000000000000..9afe08f0c8d6a9d5852a225e6c9569a291fb1e3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/perceptron.py @@ -0,0 +1,371 @@ +# This module is a port of the Textblob Averaged Perceptron Tagger +# Author: Matthew Honnibal , +# Long Duong (NLTK port) +# URL: +# +# Copyright 2013 Matthew Honnibal +# NLTK modifications Copyright 2015 The NLTK Project +# +# This module is provided under the terms of the MIT License. 
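A quick sketch applying the tagset-mapping helpers above (``map_tag``) to a whole Penn-Treebank-tagged sentence; the sentence is invented and the ``universal_tagset`` data package must be installed:

    ptb_tagged = [("The", "DT"), ("cat", "NN"), ("sat", "VBD")]
    universal_tagged = [(w, map_tag("en-ptb", "universal", t)) for (w, t) in ptb_tagged]
    # [('The', 'DET'), ('cat', 'NOUN'), ('sat', 'VERB')]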
+ +import logging +import pickle +import random +from collections import defaultdict + +from nltk import jsontags +from nltk.data import find, load +from nltk.tag.api import TaggerI + +try: + import numpy as np +except ImportError: + pass + +PICKLE = "averaged_perceptron_tagger.pickle" + + +@jsontags.register_tag +class AveragedPerceptron: + + """An averaged perceptron, as implemented by Matthew Honnibal. + + See more implementation details here: + https://explosion.ai/blog/part-of-speech-pos-tagger-in-python + """ + + json_tag = "nltk.tag.perceptron.AveragedPerceptron" + + def __init__(self, weights=None): + # Each feature gets its own weight vector, so weights is a dict-of-dicts + self.weights = weights if weights else {} + self.classes = set() + # The accumulated values, for the averaging. These will be keyed by + # feature/clas tuples + self._totals = defaultdict(int) + # The last time the feature was changed, for the averaging. Also + # keyed by feature/clas tuples + # (tstamps is short for timestamps) + self._tstamps = defaultdict(int) + # Number of instances seen + self.i = 0 + + def _softmax(self, scores): + s = np.fromiter(scores.values(), dtype=float) + exps = np.exp(s) + return exps / np.sum(exps) + + def predict(self, features, return_conf=False): + """Dot-product the features and current weights and return the best label.""" + scores = defaultdict(float) + for feat, value in features.items(): + if feat not in self.weights or value == 0: + continue + weights = self.weights[feat] + for label, weight in weights.items(): + scores[label] += value * weight + + # Do a secondary alphabetic sort, for stability + best_label = max(self.classes, key=lambda label: (scores[label], label)) + # compute the confidence + conf = max(self._softmax(scores)) if return_conf == True else None + + return best_label, conf + + def update(self, truth, guess, features): + """Update the feature weights.""" + + def upd_feat(c, f, w, v): + param = (f, c) + self._totals[param] += (self.i - self._tstamps[param]) * w + self._tstamps[param] = self.i + self.weights[f][c] = w + v + + self.i += 1 + if truth == guess: + return None + for f in features: + weights = self.weights.setdefault(f, {}) + upd_feat(truth, f, weights.get(truth, 0.0), 1.0) + upd_feat(guess, f, weights.get(guess, 0.0), -1.0) + + def average_weights(self): + """Average weights from all iterations.""" + for feat, weights in self.weights.items(): + new_feat_weights = {} + for clas, weight in weights.items(): + param = (feat, clas) + total = self._totals[param] + total += (self.i - self._tstamps[param]) * weight + averaged = round(total / self.i, 3) + if averaged: + new_feat_weights[clas] = averaged + self.weights[feat] = new_feat_weights + + def save(self, path): + """Save the pickled model weights.""" + with open(path, "wb") as fout: + return pickle.dump(dict(self.weights), fout) + + def load(self, path): + """Load the pickled model weights.""" + self.weights = load(path) + + def encode_json_obj(self): + return self.weights + + @classmethod + def decode_json_obj(cls, obj): + return cls(obj) + + +@jsontags.register_tag +class PerceptronTagger(TaggerI): + + """ + Greedy Averaged Perceptron tagger, as implemented by Matthew Honnibal. + See more implementation details here: + https://explosion.ai/blog/part-of-speech-pos-tagger-in-python + + >>> from nltk.tag.perceptron import PerceptronTagger + + Train the model + + >>> tagger = PerceptronTagger(load=False) + + >>> tagger.train([[('today','NN'),('is','VBZ'),('good','JJ'),('day','NN')], + ... 
[('yes','NNS'),('it','PRP'),('beautiful','JJ')]]) + + >>> tagger.tag(['today','is','a','beautiful','day']) + [('today', 'NN'), ('is', 'PRP'), ('a', 'PRP'), ('beautiful', 'JJ'), ('day', 'NN')] + + Use the pretrain model (the default constructor) + + >>> pretrain = PerceptronTagger() + + >>> pretrain.tag('The quick brown fox jumps over the lazy dog'.split()) + [('The', 'DT'), ('quick', 'JJ'), ('brown', 'NN'), ('fox', 'NN'), ('jumps', 'VBZ'), ('over', 'IN'), ('the', 'DT'), ('lazy', 'JJ'), ('dog', 'NN')] + + >>> pretrain.tag("The red cat".split()) + [('The', 'DT'), ('red', 'JJ'), ('cat', 'NN')] + """ + + json_tag = "nltk.tag.sequential.PerceptronTagger" + + START = ["-START-", "-START2-"] + END = ["-END-", "-END2-"] + + def __init__(self, load=True): + """ + :param load: Load the pickled model upon instantiation. + """ + self.model = AveragedPerceptron() + self.tagdict = {} + self.classes = set() + if load: + AP_MODEL_LOC = "file:" + str( + find("taggers/averaged_perceptron_tagger/" + PICKLE) + ) + self.load(AP_MODEL_LOC) + + def tag(self, tokens, return_conf=False, use_tagdict=True): + """ + Tag tokenized sentences. + :params tokens: list of word + :type tokens: list(str) + """ + prev, prev2 = self.START + output = [] + + context = self.START + [self.normalize(w) for w in tokens] + self.END + for i, word in enumerate(tokens): + tag, conf = ( + (self.tagdict.get(word), 1.0) if use_tagdict == True else (None, None) + ) + if not tag: + features = self._get_features(i, word, context, prev, prev2) + tag, conf = self.model.predict(features, return_conf) + output.append((word, tag, conf) if return_conf == True else (word, tag)) + + prev2 = prev + prev = tag + + return output + + def train(self, sentences, save_loc=None, nr_iter=5): + """Train a model from sentences, and save it at ``save_loc``. ``nr_iter`` + controls the number of Perceptron training iterations. + + :param sentences: A list or iterator of sentences, where each sentence + is a list of (words, tags) tuples. + :param save_loc: If not ``None``, saves a pickled model in this location. + :param nr_iter: Number of training iterations. + """ + # We'd like to allow ``sentences`` to be either a list or an iterator, + # the latter being especially important for a large training dataset. + # Because ``self._make_tagdict(sentences)`` runs regardless, we make + # it populate ``self._sentences`` (a list) with all the sentences. + # This saves the overheard of just iterating through ``sentences`` to + # get the list by ``sentences = list(sentences)``. + + self._sentences = list() # to be populated by self._make_tagdict... + self._make_tagdict(sentences) + self.model.classes = self.classes + for iter_ in range(nr_iter): + c = 0 + n = 0 + for sentence in self._sentences: + words, tags = zip(*sentence) + + prev, prev2 = self.START + context = self.START + [self.normalize(w) for w in words] + self.END + for i, word in enumerate(words): + guess = self.tagdict.get(word) + if not guess: + feats = self._get_features(i, word, context, prev, prev2) + guess, _ = self.model.predict(feats) + self.model.update(tags[i], guess, feats) + prev2 = prev + prev = guess + c += guess == tags[i] + n += 1 + random.shuffle(self._sentences) + logging.info(f"Iter {iter_}: {c}/{n}={_pc(c, n)}") + + # We don't need the training sentences anymore, and we don't want to + # waste space on them when we pickle the trained tagger. 
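An illustrative note on the averaging performed just below by ``average_weights()`` (the feature and class names here are hypothetical):

    # Suppose the weight for feature "i suffix ing" and class "VBG" was set to 1.0
    # at update self.i == 3 and never touched again. If training finishes with
    # self.i == 10, average_weights() credits the pair with (10 - 3) * 1.0 == 7.0
    # accumulated weight-steps and divides by 10, giving an averaged weight of 0.7.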
+ self._sentences = None + + self.model.average_weights() + # Pickle as a binary file + if save_loc is not None: + with open(save_loc, "wb") as fout: + # changed protocol from -1 to 2 to make pickling Python 2 compatible + pickle.dump((self.model.weights, self.tagdict, self.classes), fout, 2) + + def load(self, loc): + """ + :param loc: Load a pickled model at location. + :type loc: str + """ + + self.model.weights, self.tagdict, self.classes = load(loc) + self.model.classes = self.classes + + def encode_json_obj(self): + return self.model.weights, self.tagdict, list(self.classes) + + @classmethod + def decode_json_obj(cls, obj): + tagger = cls(load=False) + tagger.model.weights, tagger.tagdict, tagger.classes = obj + tagger.classes = set(tagger.classes) + tagger.model.classes = tagger.classes + return tagger + + def normalize(self, word): + """ + Normalization used in pre-processing. + - All words are lower cased + - Groups of digits of length 4 are represented as !YEAR; + - Other digits are represented as !DIGITS + + :rtype: str + """ + if "-" in word and word[0] != "-": + return "!HYPHEN" + if word.isdigit() and len(word) == 4: + return "!YEAR" + if word and word[0].isdigit(): + return "!DIGITS" + return word.lower() + + def _get_features(self, i, word, context, prev, prev2): + """Map tokens into a feature representation, implemented as a + {hashable: int} dict. If the features change, a new model must be + trained. + """ + + def add(name, *args): + features[" ".join((name,) + tuple(args))] += 1 + + i += len(self.START) + features = defaultdict(int) + # It's useful to have a constant feature, which acts sort of like a prior + add("bias") + add("i suffix", word[-3:]) + add("i pref1", word[0] if word else "") + add("i-1 tag", prev) + add("i-2 tag", prev2) + add("i tag+i-2 tag", prev, prev2) + add("i word", context[i]) + add("i-1 tag+i word", prev, context[i]) + add("i-1 word", context[i - 1]) + add("i-1 suffix", context[i - 1][-3:]) + add("i-2 word", context[i - 2]) + add("i+1 word", context[i + 1]) + add("i+1 suffix", context[i + 1][-3:]) + add("i+2 word", context[i + 2]) + return features + + def _make_tagdict(self, sentences): + """ + Make a tag dictionary for single-tag words. + :param sentences: A list of list of (word, tag) tuples. 
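A sketch of the effect of the thresholds used below (``freq_thresh = 20`` occurrences and ``ambiguity_thresh = 0.97``); the counts are invented:

    counts = {
        "the":  {"DT": 498, "NN": 2},   # 500 occurrences, 99.6% "DT" -> added to tagdict
        "back": {"RB": 30, "NN": 20},   # 50 occurrences, only 60% "RB" -> left to the model
    }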
+ """ + counts = defaultdict(lambda: defaultdict(int)) + for sentence in sentences: + self._sentences.append(sentence) + for word, tag in sentence: + counts[word][tag] += 1 + self.classes.add(tag) + freq_thresh = 20 + ambiguity_thresh = 0.97 + for word, tag_freqs in counts.items(): + tag, mode = max(tag_freqs.items(), key=lambda item: item[1]) + n = sum(tag_freqs.values()) + # Don't add rare words to the tag dictionary + # Only add quite unambiguous words + if n >= freq_thresh and (mode / n) >= ambiguity_thresh: + self.tagdict[word] = tag + + +def _pc(n, d): + return (n / d) * 100 + + +def _load_data_conll_format(filename): + print("Read from file: ", filename) + with open(filename, "rb") as fin: + sentences = [] + sentence = [] + for line in fin.readlines(): + line = line.strip() + # print line + if len(line) == 0: + sentences.append(sentence) + sentence = [] + continue + tokens = line.split("\t") + word = tokens[1] + tag = tokens[4] + sentence.append((word, tag)) + return sentences + + +def _get_pretrain_model(): + # Train and test on English part of ConLL data (WSJ part of Penn Treebank) + # Train: section 2-11 + # Test : section 23 + tagger = PerceptronTagger() + training = _load_data_conll_format("english_ptb_train.conll") + testing = _load_data_conll_format("english_ptb_test.conll") + print("Size of training and testing (sentence)", len(training), len(testing)) + # Train and save the model + tagger.train(training, PICKLE) + print("Accuracy : ", tagger.accuracy(testing)) + + +if __name__ == "__main__": + # _get_pretrain_model() + pass diff --git a/venv/lib/python3.10/site-packages/nltk/tag/senna.py b/venv/lib/python3.10/site-packages/nltk/tag/senna.py new file mode 100644 index 0000000000000000000000000000000000000000..7b52b7ee0a7bc01614c3a2a397a6ffce47835999 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/senna.py @@ -0,0 +1,134 @@ +# Natural Language Toolkit: Senna POS Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Rami Al-Rfou' +# URL: +# For license information, see LICENSE.TXT + +""" +Senna POS tagger, NER Tagger, Chunk Tagger + +The input is: + +- path to the directory that contains SENNA executables. 
If the path is incorrect, + SennaTagger will automatically search for executable file specified in SENNA environment variable +- (optionally) the encoding of the input data (default:utf-8) + +Note: Unit tests for this module can be found in test/unit/test_senna.py + +>>> from nltk.tag import SennaTagger +>>> tagger = SennaTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> tagger.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP +[('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), +('of', 'IN'), ('an', 'DT'), ('unladen', 'NN'), ('swallow', 'NN'), ('?', '.')] + +>>> from nltk.tag import SennaChunkTagger +>>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> chktagger.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP +[('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'), +('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'), +('?', 'O')] + +>>> from nltk.tag import SennaNERTagger +>>> nertagger = SennaNERTagger('/usr/share/senna-v3.0') # doctest: +SKIP +>>> nertagger.tag('Shakespeare theatre was in London .'.split()) # doctest: +SKIP +[('Shakespeare', 'B-PER'), ('theatre', 'O'), ('was', 'O'), ('in', 'O'), +('London', 'B-LOC'), ('.', 'O')] +>>> nertagger.tag('UN headquarters are in NY , USA .'.split()) # doctest: +SKIP +[('UN', 'B-ORG'), ('headquarters', 'O'), ('are', 'O'), ('in', 'O'), +('NY', 'B-LOC'), (',', 'O'), ('USA', 'B-LOC'), ('.', 'O')] +""" + +from nltk.classify import Senna + + +class SennaTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["pos"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). + """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["pos"]) + return tagged_sents + + +class SennaChunkTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["chk"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). + """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["chk"]) + return tagged_sents + + def bio_to_chunks(self, tagged_sent, chunk_type): + """ + Extracts the chunks in a BIO chunk-tagged sentence. + + >>> from nltk.tag import SennaChunkTagger + >>> chktagger = SennaChunkTagger('/usr/share/senna-v3.0') # doctest: +SKIP + >>> sent = 'What is the airspeed of an unladen swallow ?'.split() + >>> tagged_sent = chktagger.tag(sent) # doctest: +SKIP + >>> tagged_sent # doctest: +SKIP + [('What', 'B-NP'), ('is', 'B-VP'), ('the', 'B-NP'), ('airspeed', 'I-NP'), + ('of', 'B-PP'), ('an', 'B-NP'), ('unladen', 'I-NP'), ('swallow', 'I-NP'), + ('?', 'O')] + >>> list(chktagger.bio_to_chunks(tagged_sent, chunk_type='NP')) # doctest: +SKIP + [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow', '5-6-7')] + + :param tagged_sent: A list of tuples of word and BIO chunk tag. + :type tagged_sent: list(tuple) + :param tagged_sent: The chunk tag that users want to extract, e.g. 
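# Editor's sketch: the reshaping done in SennaTagger.tag_sents() above. The
# Senna wrapper yields one annotation dict per token; the POS/chunk/NER
# subclasses keep only the (word, tag) pair for their annotation key. The
# annotation dicts below are fabricated for illustration.
tagged_sents = [[{"word": "What", "pos": "WP"}, {"word": "is", "pos": "VBZ"}]]
for i in range(len(tagged_sents)):
    for j in range(len(tagged_sents[i])):
        annotations = tagged_sents[i][j]
        tagged_sents[i][j] = (annotations["word"], annotations["pos"])

assert tagged_sents == [[("What", "WP"), ("is", "VBZ")]]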
'NP' or 'VP' + :type tagged_sent: str + + :return: An iterable of tuples of chunks that users want to extract + and their corresponding indices. + :rtype: iter(tuple(str)) + """ + current_chunk = [] + current_chunk_position = [] + for idx, word_pos in enumerate(tagged_sent): + word, pos = word_pos + if "-" + chunk_type in pos: # Append the word to the current_chunk. + current_chunk.append(word) + current_chunk_position.append(idx) + else: + if current_chunk: # Flush the full chunk when out of an NP. + _chunk_str = " ".join(current_chunk) + _chunk_pos_str = "-".join(map(str, current_chunk_position)) + yield _chunk_str, _chunk_pos_str + current_chunk = [] + current_chunk_position = [] + if current_chunk: # Flush the last chunk. + yield " ".join(current_chunk), "-".join(map(str, current_chunk_position)) + + +class SennaNERTagger(Senna): + def __init__(self, path, encoding="utf-8"): + super().__init__(path, ["ner"], encoding) + + def tag_sents(self, sentences): + """ + Applies the tag method over a list of sentences. This method will return + for each sentence a list of tuples of (word, tag). + """ + tagged_sents = super().tag_sents(sentences) + for i in range(len(tagged_sents)): + for j in range(len(tagged_sents[i])): + annotations = tagged_sents[i][j] + tagged_sents[i][j] = (annotations["word"], annotations["ner"]) + return tagged_sents diff --git a/venv/lib/python3.10/site-packages/nltk/tag/sequential.py b/venv/lib/python3.10/site-packages/nltk/tag/sequential.py new file mode 100644 index 0000000000000000000000000000000000000000..3fb85c9fade8079ad5fd4ba7a517939741cb2440 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/sequential.py @@ -0,0 +1,755 @@ +# Natural Language Toolkit: Sequential Backoff Taggers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird (minor additions) +# Tiago Tresoldi (original affix tagger) +# URL: +# For license information, see LICENSE.TXT + +""" +Classes for tagging sentences sequentially, left to right. The +abstract base class SequentialBackoffTagger serves as the base +class for all the taggers in this module. Tagging of individual words +is performed by the method ``choose_tag()``, which is defined by +subclasses of SequentialBackoffTagger. If a tagger is unable to +determine a tag for the specified token, then its backoff tagger is +consulted instead. Any SequentialBackoffTagger may serve as a +backoff tagger for any other SequentialBackoffTagger. +""" +import ast +import re +from abc import abstractmethod +from typing import List, Optional, Tuple + +from nltk import jsontags +from nltk.classify import NaiveBayesClassifier +from nltk.probability import ConditionalFreqDist +from nltk.tag.api import FeaturesetTaggerI, TaggerI + + +###################################################################### +# Abstract Base Classes +###################################################################### +class SequentialBackoffTagger(TaggerI): + """ + An abstract base class for taggers that tags words sequentially, + left to right. Tagging of individual words is performed by the + ``choose_tag()`` method, which should be defined by subclasses. If + a tagger is unable to determine a tag for the specified token, + then its backoff tagger is consulted. + + :ivar _taggers: A list of all the taggers that should be tried to + tag a token (i.e., self and its backoff taggers). 
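# Editor's sketch: the BIO-chunk extraction implemented in bio_to_chunks()
# above, run on a hard-coded chunk-tagged sentence so no Senna install is
# needed. Output format matches the docstring: (chunk text, "idx-idx-...").
def bio_to_chunks(tagged_sent, chunk_type):
    chunk, positions = [], []
    for idx, (word, tag) in enumerate(tagged_sent):
        if "-" + chunk_type in tag:           # inside the requested chunk type
            chunk.append(word)
            positions.append(idx)
        elif chunk:                           # left the chunk: flush it
            yield " ".join(chunk), "-".join(map(str, positions))
            chunk, positions = [], []
    if chunk:                                 # flush a chunk ending the sentence
        yield " ".join(chunk), "-".join(map(str, positions))

sent = [("What", "B-NP"), ("is", "B-VP"), ("the", "B-NP"), ("airspeed", "I-NP"),
        ("of", "B-PP"), ("an", "B-NP"), ("unladen", "I-NP"), ("swallow", "I-NP"),
        ("?", "O")]
print(list(bio_to_chunks(sent, "NP")))
# [('What', '0'), ('the airspeed', '2-3'), ('an unladen swallow', '5-6-7')]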
+ """ + + def __init__(self, backoff=None): + if backoff is None: + self._taggers = [self] + else: + self._taggers = [self] + backoff._taggers + + @property + def backoff(self): + """The backoff tagger for this tagger.""" + return self._taggers[1] if len(self._taggers) > 1 else None + + def tag(self, tokens): + # docs inherited from TaggerI + tags = [] + for i in range(len(tokens)): + tags.append(self.tag_one(tokens, i, tags)) + return list(zip(tokens, tags)) + + def tag_one(self, tokens, index, history): + """ + Determine an appropriate tag for the specified token, and + return that tag. If this tagger is unable to determine a tag + for the specified token, then its backoff tagger is consulted. + + :rtype: str + :type tokens: list + :param tokens: The list of words that are being tagged. + :type index: int + :param index: The index of the word whose tag should be + returned. + :type history: list(str) + :param history: A list of the tags for all words before *index*. + """ + tag = None + for tagger in self._taggers: + tag = tagger.choose_tag(tokens, index, history) + if tag is not None: + break + return tag + + @abstractmethod + def choose_tag(self, tokens, index, history): + """ + Decide which tag should be used for the specified token, and + return that tag. If this tagger is unable to determine a tag + for the specified token, return None -- do not consult + the backoff tagger. This method should be overridden by + subclasses of SequentialBackoffTagger. + + :rtype: str + :type tokens: list + :param tokens: The list of words that are being tagged. + :type index: int + :param index: The index of the word whose tag should be + returned. + :type history: list(str) + :param history: A list of the tags for all words before *index*. + """ + + +class ContextTagger(SequentialBackoffTagger): + """ + An abstract base class for sequential backoff taggers that choose + a tag for a token based on the value of its "context". Different + subclasses are used to define different contexts. + + A ContextTagger chooses the tag for a token by calculating the + token's context, and looking up the corresponding tag in a table. + This table can be constructed manually; or it can be automatically + constructed based on a training corpus, using the ``_train()`` + factory method. + + :ivar _context_to_tag: Dictionary mapping contexts to tags. + """ + + def __init__(self, context_to_tag, backoff=None): + """ + :param context_to_tag: A dictionary mapping contexts to tags. + :param backoff: The backoff tagger that should be used for this tagger. + """ + super().__init__(backoff) + self._context_to_tag = context_to_tag if context_to_tag else {} + + @abstractmethod + def context(self, tokens, index, history): + """ + :return: the context that should be used to look up the tag + for the specified token; or None if the specified token + should not be handled by this tagger. + :rtype: (hashable) + """ + + def choose_tag(self, tokens, index, history): + context = self.context(tokens, index, history) + return self._context_to_tag.get(context) + + def size(self): + """ + :return: The number of entries in the table used by this + tagger to map from contexts to tags. + """ + return len(self._context_to_tag) + + def __repr__(self): + return f"<{self.__class__.__name__}: size={self.size()}>" + + def _train(self, tagged_corpus, cutoff=0, verbose=False): + """ + Initialize this ContextTagger's ``_context_to_tag`` table + based on the given training data. 
In particular, for each + context ``c`` in the training data, set + ``_context_to_tag[c]`` to the most frequent tag for that + context. However, exclude any contexts that are already + tagged perfectly by the backoff tagger(s). + + The old value of ``self._context_to_tag`` (if any) is discarded. + + :param tagged_corpus: A tagged corpus. Each item should be + a list of (word, tag tuples. + :param cutoff: If the most likely tag for a context occurs + fewer than cutoff times, then exclude it from the + context-to-tag table for the new tagger. + """ + + token_count = hit_count = 0 + + # A context is considered 'useful' if it's not already tagged + # perfectly by the backoff tagger. + useful_contexts = set() + + # Count how many times each tag occurs in each context. + fd = ConditionalFreqDist() + for sentence in tagged_corpus: + tokens, tags = zip(*sentence) + for index, (token, tag) in enumerate(sentence): + # Record the event. + token_count += 1 + context = self.context(tokens, index, tags[:index]) + if context is None: + continue + fd[context][tag] += 1 + # If the backoff got it wrong, this context is useful: + if self.backoff is None or tag != self.backoff.tag_one( + tokens, index, tags[:index] + ): + useful_contexts.add(context) + + # Build the context_to_tag table -- for each context, figure + # out what the most likely tag is. Only include contexts that + # we've seen at least `cutoff` times. + for context in useful_contexts: + best_tag = fd[context].max() + hits = fd[context][best_tag] + if hits > cutoff: + self._context_to_tag[context] = best_tag + hit_count += hits + + # Display some stats, if requested. + if verbose: + size = len(self._context_to_tag) + backoff = 100 - (hit_count * 100.0) / token_count + pruning = 100 - (size * 100.0) / len(fd.conditions()) + print("[Trained Unigram tagger:", end=" ") + print( + "size={}, backoff={:.2f}%, pruning={:.2f}%]".format( + size, backoff, pruning + ) + ) + + +###################################################################### +# Tagger Classes +###################################################################### + + +@jsontags.register_tag +class DefaultTagger(SequentialBackoffTagger): + """ + A tagger that assigns the same tag to every token. + + >>> from nltk.tag import DefaultTagger + >>> default_tagger = DefaultTagger('NN') + >>> list(default_tagger.tag('This is a test'.split())) + [('This', 'NN'), ('is', 'NN'), ('a', 'NN'), ('test', 'NN')] + + This tagger is recommended as a backoff tagger, in cases where + a more powerful tagger is unable to assign a tag to the word + (e.g. because the word was not seen during training). + + :param tag: The tag to assign to each token + :type tag: str + """ + + json_tag = "nltk.tag.sequential.DefaultTagger" + + def __init__(self, tag): + self._tag = tag + super().__init__(None) + + def encode_json_obj(self): + return self._tag + + @classmethod + def decode_json_obj(cls, obj): + tag = obj + return cls(tag) + + def choose_tag(self, tokens, index, history): + return self._tag # ignore token and history + + def __repr__(self): + return f"" + + +@jsontags.register_tag +class NgramTagger(ContextTagger): + """ + A tagger that chooses a token's tag based on its word string and + on the preceding n word's tags. In particular, a tuple + (tags[i-n:i-1], words[i]) is looked up in a table, and the + corresponding tag is returned. N-gram taggers are typically + trained on a tagged corpus. + + Train a new NgramTagger using the given training data or + the supplied model. 
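# Editor's sketch: the core of ContextTagger._train() above -- for each
# context keep the most frequent tag, but only when it was seen more than
# `cutoff` times. The frequency table is hand-made for illustration
# (ConditionalFreqDist in the real code behaves like this nested mapping).
from collections import Counter

fd = {"the": Counter({"AT": 50}), "saw": Counter({"VBD": 3, "NN": 2})}
cutoff = 4
context_to_tag = {}
for context, tag_counts in fd.items():
    best_tag, hits = tag_counts.most_common(1)[0]
    if hits > cutoff:
        context_to_tag[context] = best_tag

assert context_to_tag == {"the": "AT"}        # "saw" is pruned by the cutoff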
In particular, construct a new tagger + whose table maps from each context (tag[i-n:i-1], word[i]) + to the most frequent tag for that context. But exclude any + contexts that are already tagged perfectly by the backoff + tagger. + + :param train: A tagged corpus consisting of a list of tagged + sentences, where each sentence is a list of (word, tag) tuples. + :param backoff: A backoff tagger, to be used by the new + tagger if it encounters an unknown context. + :param cutoff: If the most likely tag for a context occurs + fewer than *cutoff* times, then exclude it from the + context-to-tag table for the new tagger. + """ + + json_tag = "nltk.tag.sequential.NgramTagger" + + def __init__( + self, n, train=None, model=None, backoff=None, cutoff=0, verbose=False + ): + self._n = n + self._check_params(train, model) + + super().__init__(model, backoff) + + if train: + self._train(train, cutoff, verbose) + + def encode_json_obj(self): + _context_to_tag = {repr(k): v for k, v in self._context_to_tag.items()} + if "NgramTagger" in self.__class__.__name__: + return self._n, _context_to_tag, self.backoff + else: + return _context_to_tag, self.backoff + + @classmethod + def decode_json_obj(cls, obj): + try: + _n, _context_to_tag, backoff = obj + except ValueError: + _context_to_tag, backoff = obj + + if not _context_to_tag: + return backoff + + _context_to_tag = {ast.literal_eval(k): v for k, v in _context_to_tag.items()} + + if "NgramTagger" in cls.__name__: + return cls(_n, model=_context_to_tag, backoff=backoff) + else: + return cls(model=_context_to_tag, backoff=backoff) + + def context(self, tokens, index, history): + tag_context = tuple(history[max(0, index - self._n + 1) : index]) + return tag_context, tokens[index] + + +@jsontags.register_tag +class UnigramTagger(NgramTagger): + """ + Unigram Tagger + + The UnigramTagger finds the most likely tag for each word in a training + corpus, and then uses that information to assign tags to new tokens. + + >>> from nltk.corpus import brown + >>> from nltk.tag import UnigramTagger + >>> test_sent = brown.sents(categories='news')[0] + >>> unigram_tagger = UnigramTagger(brown.tagged_sents(categories='news')[:500]) + >>> for tok, tag in unigram_tagger.tag(test_sent): + ... print("({}, {}), ".format(tok, tag)) # doctest: +NORMALIZE_WHITESPACE + (The, AT), (Fulton, NP-TL), (County, NN-TL), (Grand, JJ-TL), + (Jury, NN-TL), (said, VBD), (Friday, NR), (an, AT), + (investigation, NN), (of, IN), (Atlanta's, NP$), (recent, JJ), + (primary, NN), (election, NN), (produced, VBD), (``, ``), + (no, AT), (evidence, NN), ('', ''), (that, CS), (any, DTI), + (irregularities, NNS), (took, VBD), (place, NN), (., .), + + :param train: The corpus of training data, a list of tagged sentences + :type train: list(list(tuple(str, str))) + :param model: The tagger model + :type model: dict + :param backoff: Another tagger which this tagger will consult when it is + unable to tag a word + :type backoff: TaggerI + :param cutoff: The number of instances of training data the tagger must see + in order not to use the backoff tagger + :type cutoff: int + """ + + json_tag = "nltk.tag.sequential.UnigramTagger" + + def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False): + super().__init__(1, train, model, backoff, cutoff, verbose) + + def context(self, tokens, index, history): + return tokens[index] + + +@jsontags.register_tag +class BigramTagger(NgramTagger): + """ + A tagger that chooses a token's tag based its word string and on + the preceding words' tag. 
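# Editor's sketch: the context key built by NgramTagger.context() above, shown
# for a trigram tagger (n=3). The key is the last n-1 tags plus the current
# word; at the start of the sentence the tag tuple is simply shorter.
def ngram_context(n, tokens, index, history):
    return tuple(history[max(0, index - n + 1):index]), tokens[index]

tokens = ["the", "dog", "barked"]
history = ["AT", "NN"]                         # tags already assigned
assert ngram_context(3, tokens, 0, []) == ((), "the")
assert ngram_context(3, tokens, 2, history) == (("AT", "NN"), "barked")
# A UnigramTagger (n=1) degenerates to just the word itself as the context.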
In particular, a tuple consisting + of the previous tag and the word is looked up in a table, and + the corresponding tag is returned. + + :param train: The corpus of training data, a list of tagged sentences + :type train: list(list(tuple(str, str))) + :param model: The tagger model + :type model: dict + :param backoff: Another tagger which this tagger will consult when it is + unable to tag a word + :type backoff: TaggerI + :param cutoff: The number of instances of training data the tagger must see + in order not to use the backoff tagger + :type cutoff: int + """ + + json_tag = "nltk.tag.sequential.BigramTagger" + + def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False): + super().__init__(2, train, model, backoff, cutoff, verbose) + + +@jsontags.register_tag +class TrigramTagger(NgramTagger): + """ + A tagger that chooses a token's tag based its word string and on + the preceding two words' tags. In particular, a tuple consisting + of the previous two tags and the word is looked up in a table, and + the corresponding tag is returned. + + :param train: The corpus of training data, a list of tagged sentences + :type train: list(list(tuple(str, str))) + :param model: The tagger model + :type model: dict + :param backoff: Another tagger which this tagger will consult when it is + unable to tag a word + :type backoff: TaggerI + :param cutoff: The number of instances of training data the tagger must see + in order not to use the backoff tagger + :type cutoff: int + """ + + json_tag = "nltk.tag.sequential.TrigramTagger" + + def __init__(self, train=None, model=None, backoff=None, cutoff=0, verbose=False): + super().__init__(3, train, model, backoff, cutoff, verbose) + + +@jsontags.register_tag +class AffixTagger(ContextTagger): + """ + A tagger that chooses a token's tag based on a leading or trailing + substring of its word string. (It is important to note that these + substrings are not necessarily "true" morphological affixes). In + particular, a fixed-length substring of the word is looked up in a + table, and the corresponding tag is returned. Affix taggers are + typically constructed by training them on a tagged corpus. + + Construct a new affix tagger. + + :param affix_length: The length of the affixes that should be + considered during training and tagging. Use negative + numbers for suffixes. + :param min_stem_length: Any words whose length is less than + min_stem_length+abs(affix_length) will be assigned a + tag of None by this tagger. 
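# Editor's sketch: the usual way these classes are combined -- an n-gram
# backoff chain ending in a DefaultTagger. The tiny training corpus is made
# up here so the example runs without downloading any NLTK corpora (it does
# assume the nltk package itself is importable).
from nltk.tag import BigramTagger, DefaultTagger, TrigramTagger, UnigramTagger

train = [[("the", "AT"), ("dog", "NN"), ("barked", "VBD")],
         [("the", "AT"), ("cat", "NN"), ("slept", "VBD")]]

t0 = DefaultTagger("NN")                       # last resort: call everything a noun
t1 = UnigramTagger(train, backoff=t0)
t2 = BigramTagger(train, backoff=t1)
t3 = TrigramTagger(train, backoff=t2)

print(t3.tag("the dog slept".split()))
# expected: [('the', 'AT'), ('dog', 'NN'), ('slept', 'VBD')]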
+ """ + + json_tag = "nltk.tag.sequential.AffixTagger" + + def __init__( + self, + train=None, + model=None, + affix_length=-3, + min_stem_length=2, + backoff=None, + cutoff=0, + verbose=False, + ): + + self._check_params(train, model) + + super().__init__(model, backoff) + + self._affix_length = affix_length + self._min_word_length = min_stem_length + abs(affix_length) + + if train: + self._train(train, cutoff, verbose) + + def encode_json_obj(self): + return ( + self._affix_length, + self._min_word_length, + self._context_to_tag, + self.backoff, + ) + + @classmethod + def decode_json_obj(cls, obj): + _affix_length, _min_word_length, _context_to_tag, backoff = obj + return cls( + affix_length=_affix_length, + min_stem_length=_min_word_length - abs(_affix_length), + model=_context_to_tag, + backoff=backoff, + ) + + def context(self, tokens, index, history): + token = tokens[index] + if len(token) < self._min_word_length: + return None + elif self._affix_length > 0: + return token[: self._affix_length] + else: + return token[self._affix_length :] + + +@jsontags.register_tag +class RegexpTagger(SequentialBackoffTagger): + r""" + Regular Expression Tagger + + The RegexpTagger assigns tags to tokens by comparing their + word strings to a series of regular expressions. The following tagger + uses word suffixes to make guesses about the correct Brown Corpus part + of speech tag: + + >>> from nltk.corpus import brown + >>> from nltk.tag import RegexpTagger + >>> test_sent = brown.sents(categories='news')[0] + >>> regexp_tagger = RegexpTagger( + ... [(r'^-?[0-9]+(\.[0-9]+)?$', 'CD'), # cardinal numbers + ... (r'(The|the|A|a|An|an)$', 'AT'), # articles + ... (r'.*able$', 'JJ'), # adjectives + ... (r'.*ness$', 'NN'), # nouns formed from adjectives + ... (r'.*ly$', 'RB'), # adverbs + ... (r'.*s$', 'NNS'), # plural nouns + ... (r'.*ing$', 'VBG'), # gerunds + ... (r'.*ed$', 'VBD'), # past tense verbs + ... (r'.*', 'NN') # nouns (default) + ... ]) + >>> regexp_tagger + + >>> regexp_tagger.tag(test_sent) # doctest: +NORMALIZE_WHITESPACE + [('The', 'AT'), ('Fulton', 'NN'), ('County', 'NN'), ('Grand', 'NN'), ('Jury', 'NN'), + ('said', 'NN'), ('Friday', 'NN'), ('an', 'AT'), ('investigation', 'NN'), ('of', 'NN'), + ("Atlanta's", 'NNS'), ('recent', 'NN'), ('primary', 'NN'), ('election', 'NN'), + ('produced', 'VBD'), ('``', 'NN'), ('no', 'NN'), ('evidence', 'NN'), ("''", 'NN'), + ('that', 'NN'), ('any', 'NN'), ('irregularities', 'NNS'), ('took', 'NN'), + ('place', 'NN'), ('.', 'NN')] + + :type regexps: list(tuple(str, str)) + :param regexps: A list of ``(regexp, tag)`` pairs, each of + which indicates that a word matching ``regexp`` should + be tagged with ``tag``. The pairs will be evaluated in + order. If none of the regexps match a word, then the + optional backoff tagger is invoked, else it is + assigned the tag None. 
+ """ + + json_tag = "nltk.tag.sequential.RegexpTagger" + + def __init__( + self, regexps: List[Tuple[str, str]], backoff: Optional[TaggerI] = None + ): + super().__init__(backoff) + self._regexps = [] + for regexp, tag in regexps: + try: + self._regexps.append((re.compile(regexp), tag)) + except Exception as e: + raise Exception( + f"Invalid RegexpTagger regexp: {e}\n- regexp: {regexp!r}\n- tag: {tag!r}" + ) from e + + def encode_json_obj(self): + return [(regexp.pattern, tag) for regexp, tag in self._regexps], self.backoff + + @classmethod + def decode_json_obj(cls, obj): + regexps, backoff = obj + return cls(regexps, backoff) + + def choose_tag(self, tokens, index, history): + for regexp, tag in self._regexps: + if re.match(regexp, tokens[index]): + return tag + return None + + def __repr__(self): + return f"" + + +class ClassifierBasedTagger(SequentialBackoffTagger, FeaturesetTaggerI): + """ + A sequential tagger that uses a classifier to choose the tag for + each token in a sentence. The featureset input for the classifier + is generated by a feature detector function:: + + feature_detector(tokens, index, history) -> featureset + + Where tokens is the list of unlabeled tokens in the sentence; + index is the index of the token for which feature detection + should be performed; and history is list of the tags for all + tokens before index. + + Construct a new classifier-based sequential tagger. + + :param feature_detector: A function used to generate the + featureset input for the classifier:: + feature_detector(tokens, index, history) -> featureset + + :param train: A tagged corpus consisting of a list of tagged + sentences, where each sentence is a list of (word, tag) tuples. + + :param backoff: A backoff tagger, to be used by the new tagger + if it encounters an unknown context. + + :param classifier_builder: A function used to train a new + classifier based on the data in *train*. It should take + one argument, a list of labeled featuresets (i.e., + (featureset, label) tuples). + + :param classifier: The classifier that should be used by the + tagger. This is only useful if you want to manually + construct the classifier; normally, you would use *train* + instead. + + :param backoff: A backoff tagger, used if this tagger is + unable to determine a tag for a given token. + + :param cutoff_prob: If specified, then this tagger will fall + back on its backoff tagger if the probability of the most + likely tag is less than *cutoff_prob*. + """ + + def __init__( + self, + feature_detector=None, + train=None, + classifier_builder=NaiveBayesClassifier.train, + classifier=None, + backoff=None, + cutoff_prob=None, + verbose=False, + ): + self._check_params(train, classifier) + + super().__init__(backoff) + + if (train and classifier) or (not train and not classifier): + raise ValueError( + "Must specify either training data or " "trained classifier." + ) + + if feature_detector is not None: + self._feature_detector = feature_detector + # The feature detector function, used to generate a featureset + # or each token: feature_detector(tokens, index, history) -> featureset + + self._cutoff_prob = cutoff_prob + """Cutoff probability for tagging -- if the probability of the + most likely tag is less than this, then use backoff.""" + + self._classifier = classifier + """The classifier used to choose a tag for each token.""" + + if train: + self._train(train, classifier_builder, verbose) + + def choose_tag(self, tokens, index, history): + # Use our feature detector to get the featureset. 
+ featureset = self.feature_detector(tokens, index, history) + + # Use the classifier to pick a tag. If a cutoff probability + # was specified, then check that the tag's probability is + # higher than that cutoff first; otherwise, return None. + if self._cutoff_prob is None: + return self._classifier.classify(featureset) + + pdist = self._classifier.prob_classify(featureset) + tag = pdist.max() + return tag if pdist.prob(tag) >= self._cutoff_prob else None + + def _train(self, tagged_corpus, classifier_builder, verbose): + """ + Build a new classifier, based on the given training data + *tagged_corpus*. + """ + + classifier_corpus = [] + if verbose: + print("Constructing training corpus for classifier.") + + for sentence in tagged_corpus: + history = [] + untagged_sentence, tags = zip(*sentence) + for index in range(len(sentence)): + featureset = self.feature_detector(untagged_sentence, index, history) + classifier_corpus.append((featureset, tags[index])) + history.append(tags[index]) + + if verbose: + print(f"Training classifier ({len(classifier_corpus)} instances)") + self._classifier = classifier_builder(classifier_corpus) + + def __repr__(self): + return f"" + + def feature_detector(self, tokens, index, history): + """ + Return the feature detector that this tagger uses to generate + featuresets for its classifier. The feature detector is a + function with the signature:: + + feature_detector(tokens, index, history) -> featureset + + See ``classifier()`` + """ + return self._feature_detector(tokens, index, history) + + def classifier(self): + """ + Return the classifier that this tagger uses to choose a tag + for each word in a sentence. The input for this classifier is + generated using this tagger's feature detector. + See ``feature_detector()`` + """ + return self._classifier + + +class ClassifierBasedPOSTagger(ClassifierBasedTagger): + """ + A classifier based part of speech tagger. 
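# Editor's sketch: the cutoff_prob decision in choose_tag() above. The
# probability distribution is a plain dict standing in for the classifier's
# prob_classify() result; the values are invented for illustration.
def choose_with_cutoff(prob_dist, cutoff_prob):
    tag = max(prob_dist, key=prob_dist.get)        # most likely tag
    if cutoff_prob is not None and prob_dist[tag] < cutoff_prob:
        return None                                # too uncertain: use backoff
    return tag

pdist = {"NN": 0.55, "VB": 0.30, "JJ": 0.15}
assert choose_with_cutoff(pdist, None) == "NN"     # no cutoff: always answer
assert choose_with_cutoff(pdist, 0.5) == "NN"      # confident enough
assert choose_with_cutoff(pdist, 0.9) is None      # defer to the backoff tagger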
+ """ + + def feature_detector(self, tokens, index, history): + word = tokens[index] + if index == 0: + prevword = prevprevword = None + prevtag = prevprevtag = None + elif index == 1: + prevword = tokens[index - 1].lower() + prevprevword = None + prevtag = history[index - 1] + prevprevtag = None + else: + prevword = tokens[index - 1].lower() + prevprevword = tokens[index - 2].lower() + prevtag = history[index - 1] + prevprevtag = history[index - 2] + + if re.match(r"[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$", word): + shape = "number" + elif re.match(r"\W+$", word): + shape = "punct" + elif re.match("[A-Z][a-z]+$", word): + shape = "upcase" + elif re.match("[a-z]+$", word): + shape = "downcase" + elif re.match(r"\w+$", word): + shape = "mixedcase" + else: + shape = "other" + + features = { + "prevtag": prevtag, + "prevprevtag": prevprevtag, + "word": word, + "word.lower": word.lower(), + "suffix3": word.lower()[-3:], + "suffix2": word.lower()[-2:], + "suffix1": word.lower()[-1:], + "prevprevword": prevprevword, + "prevword": prevword, + "prevtag+word": f"{prevtag}+{word.lower()}", + "prevprevtag+word": f"{prevprevtag}+{word.lower()}", + "prevword+word": f"{prevword}+{word.lower()}", + "shape": shape, + } + return features diff --git a/venv/lib/python3.10/site-packages/nltk/tag/stanford.py b/venv/lib/python3.10/site-packages/nltk/tag/stanford.py new file mode 100644 index 0000000000000000000000000000000000000000..7c21e2dd20dec5c3b242d0e5007a4bf51d8ef8f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/stanford.py @@ -0,0 +1,236 @@ +# Natural Language Toolkit: Interface to the Stanford Part-of-speech and Named-Entity Taggers +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Nitin Madnani +# Rami Al-Rfou' +# URL: +# For license information, see LICENSE.TXT + +""" +A module for interfacing with the Stanford taggers. + +Tagger models need to be downloaded from https://nlp.stanford.edu/software +and the STANFORD_MODELS environment variable set (a colon-separated +list of paths). + +For more details see the documentation for StanfordPOSTagger and StanfordNERTagger. +""" + +import os +import tempfile +import warnings +from abc import abstractmethod +from subprocess import PIPE + +from nltk.internals import _java_options, config_java, find_file, find_jar, java +from nltk.tag.api import TaggerI + +_stanford_url = "https://nlp.stanford.edu/software" + + +class StanfordTagger(TaggerI): + """ + An interface to Stanford taggers. Subclasses must define: + + - ``_cmd`` property: A property that returns the command that will be + executed. + - ``_SEPARATOR``: Class constant that represents that character that + is used to separate the tokens from their tags. + - ``_JAR`` file: Class constant that represents the jar file name. + """ + + _SEPARATOR = "" + _JAR = "" + + def __init__( + self, + model_filename, + path_to_jar=None, + encoding="utf8", + verbose=False, + java_options="-mx1000m", + ): + # Raise deprecation warning. + warnings.warn( + str( + "\nThe StanfordTokenizer will " + "be deprecated in version 3.2.6.\n" + "Please use \033[91mnltk.parse.corenlp.CoreNLPParser\033[0m instead." + ), + DeprecationWarning, + stacklevel=2, + ) + + if not self._JAR: + warnings.warn( + "The StanfordTagger class is not meant to be " + "instantiated directly. Did you mean " + "StanfordPOSTagger or StanfordNERTagger?" 
+ ) + self._stanford_jar = find_jar( + self._JAR, path_to_jar, searchpath=(), url=_stanford_url, verbose=verbose + ) + + self._stanford_model = find_file( + model_filename, env_vars=("STANFORD_MODELS",), verbose=verbose + ) + + self._encoding = encoding + self.java_options = java_options + + @property + @abstractmethod + def _cmd(self): + """ + A property that returns the command that will be executed. + """ + + def tag(self, tokens): + # This function should return list of tuple rather than list of list + return sum(self.tag_sents([tokens]), []) + + def tag_sents(self, sentences): + encoding = self._encoding + default_options = " ".join(_java_options) + config_java(options=self.java_options, verbose=False) + + # Create a temporary input file + _input_fh, self._input_file_path = tempfile.mkstemp(text=True) + + cmd = list(self._cmd) + cmd.extend(["-encoding", encoding]) + + # Write the actual sentences to the temporary input file + _input_fh = os.fdopen(_input_fh, "wb") + _input = "\n".join(" ".join(x) for x in sentences) + if isinstance(_input, str) and encoding: + _input = _input.encode(encoding) + _input_fh.write(_input) + _input_fh.close() + + # Run the tagger and get the output + stanpos_output, _stderr = java( + cmd, classpath=self._stanford_jar, stdout=PIPE, stderr=PIPE + ) + stanpos_output = stanpos_output.decode(encoding) + + # Delete the temporary file + os.unlink(self._input_file_path) + + # Return java configurations to their default values + config_java(options=default_options, verbose=False) + + return self.parse_output(stanpos_output, sentences) + + def parse_output(self, text, sentences=None): + # Output the tagged sentences + tagged_sentences = [] + for tagged_sentence in text.strip().split("\n"): + sentence = [] + for tagged_word in tagged_sentence.strip().split(): + word_tags = tagged_word.strip().split(self._SEPARATOR) + sentence.append( + ("".join(word_tags[:-1]), word_tags[-1].replace("0", "").upper()) + ) + tagged_sentences.append(sentence) + return tagged_sentences + + +class StanfordPOSTagger(StanfordTagger): + """ + A class for pos tagging with Stanford Tagger. The input is the paths to: + - a model trained on training data + - (optionally) the path to the stanford tagger jar file. If not specified here, + then this jar file must be specified in the CLASSPATH environment variable. + - (optionally) the encoding of the training data (default: UTF-8) + + Example: + + >>> from nltk.tag import StanfordPOSTagger + >>> st = StanfordPOSTagger('english-bidirectional-distsim.tagger') # doctest: +SKIP + >>> st.tag('What is the airspeed of an unladen swallow ?'.split()) # doctest: +SKIP + [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')] + """ + + _SEPARATOR = "_" + _JAR = "stanford-postagger.jar" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def _cmd(self): + return [ + "edu.stanford.nlp.tagger.maxent.MaxentTagger", + "-model", + self._stanford_model, + "-textFile", + self._input_file_path, + "-tokenize", + "false", + "-outputFormatOptions", + "keepEmptySentences", + ] + + +class StanfordNERTagger(StanfordTagger): + """ + A class for Named-Entity Tagging with Stanford Tagger. The input is the paths to: + + - a model trained on training data + - (optionally) the path to the stanford tagger jar file. If not specified here, + then this jar file must be specified in the CLASSPATH environment variable. 
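# Editor's sketch: how parse_output() above recovers (word, tag) pairs from
# the tagger's plain-text output, where each token looks like "word_TAG"
# (separator "_" for the POS tagger, "/" for the NER tagger). The sample
# string is fabricated, and the real code additionally upper-cases the tag
# and strips any "0" characters from it.
def parse_line(tagged_sentence, separator="_"):
    sentence = []
    for tagged_word in tagged_sentence.split():
        word_tags = tagged_word.split(separator)
        # everything before the last separator is the word, the rest is the tag
        sentence.append(("".join(word_tags[:-1]), word_tags[-1]))
    return sentence

print(parse_line("What_WP is_VBZ the_DT airspeed_NN ?_."))
# [('What', 'WP'), ('is', 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('?', '.')]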
+ - (optionally) the encoding of the training data (default: UTF-8) + + Example: + + >>> from nltk.tag import StanfordNERTagger + >>> st = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz') # doctest: +SKIP + >>> st.tag('Rami Eid is studying at Stony Brook University in NY'.split()) # doctest: +SKIP + [('Rami', 'PERSON'), ('Eid', 'PERSON'), ('is', 'O'), ('studying', 'O'), + ('at', 'O'), ('Stony', 'ORGANIZATION'), ('Brook', 'ORGANIZATION'), + ('University', 'ORGANIZATION'), ('in', 'O'), ('NY', 'LOCATION')] + """ + + _SEPARATOR = "/" + _JAR = "stanford-ner.jar" + _FORMAT = "slashTags" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @property + def _cmd(self): + # Adding -tokenizerFactory edu.stanford.nlp.process.WhitespaceTokenizer -tokenizerOptions tokenizeNLs=false for not using stanford Tokenizer + return [ + "edu.stanford.nlp.ie.crf.CRFClassifier", + "-loadClassifier", + self._stanford_model, + "-textFile", + self._input_file_path, + "-outputFormat", + self._FORMAT, + "-tokenizerFactory", + "edu.stanford.nlp.process.WhitespaceTokenizer", + "-tokenizerOptions", + '"tokenizeNLs=false"', + ] + + def parse_output(self, text, sentences): + if self._FORMAT == "slashTags": + # Joint together to a big list + tagged_sentences = [] + for tagged_sentence in text.strip().split("\n"): + for tagged_word in tagged_sentence.strip().split(): + word_tags = tagged_word.strip().split(self._SEPARATOR) + tagged_sentences.append(("".join(word_tags[:-1]), word_tags[-1])) + + # Separate it according to the input + result = [] + start = 0 + for sent in sentences: + result.append(tagged_sentences[start : start + len(sent)]) + start += len(sent) + return result + + raise NotImplementedError diff --git a/venv/lib/python3.10/site-packages/nltk/tag/tnt.py b/venv/lib/python3.10/site-packages/nltk/tag/tnt.py new file mode 100644 index 0000000000000000000000000000000000000000..a505104d812532af561ee3d3d9d80611f78db2cd --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/tnt.py @@ -0,0 +1,579 @@ +# Natural Language Toolkit: TnT Tagger +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Sam Huston +# +# URL: +# For license information, see LICENSE.TXT + +""" +Implementation of 'TnT - A Statisical Part of Speech Tagger' +by Thorsten Brants + +https://aclanthology.org/A00-1031.pdf +""" + +from math import log +from operator import itemgetter + +from nltk.probability import ConditionalFreqDist, FreqDist +from nltk.tag.api import TaggerI + + +class TnT(TaggerI): + """ + TnT - Statistical POS tagger + + IMPORTANT NOTES: + + * DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS + + - It is possible to provide an untrained POS tagger to + create tags for unknown words, see __init__ function + + * SHOULD BE USED WITH SENTENCE-DELIMITED INPUT + + - Due to the nature of this tagger, it works best when + trained over sentence delimited input. + - However it still produces good results if the training + data and testing data are separated on all punctuation eg: [,.?!] 
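# Editor's sketch: the regrouping step in StanfordNERTagger.parse_output()
# above. The NER output arrives as one flat list of (word, tag) pairs, which
# is split back into sentences using the lengths of the input sentences.
# The tagged pairs below are invented for illustration.
def regroup(flat_tagged, sentences):
    result, start = [], 0
    for sent in sentences:
        result.append(flat_tagged[start:start + len(sent)])
        start += len(sent)
    return result

flat = [("Rami", "PERSON"), ("Eid", "PERSON"), ("UN", "ORGANIZATION"), ("NY", "LOCATION")]
sents = [["Rami", "Eid"], ["UN", "NY"]]
assert regroup(flat, sents) == [flat[:2], flat[2:]]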
+ - Input for training is expected to be a list of sentences + where each sentence is a list of (word, tag) tuples + - Input for tag function is a single sentence + Input for tagdata function is a list of sentences + Output is of a similar form + + * Function provided to process text that is unsegmented + + - Please see basic_sent_chop() + + + TnT uses a second order Markov model to produce tags for + a sequence of input, specifically: + + argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T) + + IE: the maximum projection of a set of probabilities + + The set of possible tags for a given word is derived + from the training data. It is the set of all tags + that exact word has been assigned. + + To speed up and get more precision, we can use log addition + to instead multiplication, specifically: + + argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] + + log(P(t_T+1|t_T)) + + The probability of a tag for a given word is the linear + interpolation of 3 markov models; a zero-order, first-order, + and a second order model. + + P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) + + l3*P(t_i| t_i-1, t_i-2) + + A beam search is used to limit the memory usage of the algorithm. + The degree of the beam can be changed using N in the initialization. + N represents the maximum number of possible solutions to maintain + while tagging. + + It is possible to differentiate the tags which are assigned to + capitalized words. However this does not result in a significant + gain in the accuracy of the results. + """ + + def __init__(self, unk=None, Trained=False, N=1000, C=False): + """ + Construct a TnT statistical tagger. Tagger must be trained + before being used to tag input. + + :param unk: instance of a POS tagger, conforms to TaggerI + :type unk: TaggerI + :param Trained: Indication that the POS tagger is trained or not + :type Trained: bool + :param N: Beam search degree (see above) + :type N: int + :param C: Capitalization flag + :type C: bool + + Initializer, creates frequency distributions to be used + for tagging + + _lx values represent the portion of the tri/bi/uni taggers + to be used to calculate the probability + + N value is the number of possible solutions to maintain + while tagging. A good value for this is 1000 + + C is a boolean value which specifies to use or + not use the Capitalization of the word as additional + information for tagging. + NOTE: using capitalization may not increase the accuracy + of the tagger + """ + + self._uni = FreqDist() + self._bi = ConditionalFreqDist() + self._tri = ConditionalFreqDist() + self._wd = ConditionalFreqDist() + self._eos = ConditionalFreqDist() + self._l1 = 0.0 + self._l2 = 0.0 + self._l3 = 0.0 + self._N = N + self._C = C + self._T = Trained + + self._unk = unk + + # statistical tools (ignore or delete me) + self.unknown = 0 + self.known = 0 + + def train(self, data): + """ + Uses a set of tagged data to train the tagger. + If an unknown word tagger is specified, + it is trained on the same data. 
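# Editor's sketch: the linear interpolation described in the TnT docstring,
#   P(t_i | t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i | t_i-1) + l3*P(t_i | t_i-1, t_i-2)
# with made-up probabilities and lambda weights (the real l1/l2/l3 are learned
# in _compute_lambda() and sum to 1).
from math import log

l1, l2, l3 = 0.2, 0.3, 0.5
p_uni, p_bi, p_tri = 0.10, 0.25, 0.40          # P(t), P(t|t-1), P(t|t-1,t-2)

p = l1 * p_uni + l2 * p_bi + l3 * p_tri
print(p)                                        # ~0.295

# During tagging the scores are accumulated in log space, so the per-word
# contribution to a hypothesis is log2(p) + log2(P(word | tag)).
p_wd = 0.05
print(log(p, 2) + log(p_wd, 2))                 # about -6.08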
+ + :param data: List of lists of (word, tag) tuples + :type data: tuple(str) + """ + + # Ensure that local C flag is initialized before use + C = False + + if self._unk is not None and self._T == False: + self._unk.train(data) + + for sent in data: + history = [("BOS", False), ("BOS", False)] + for w, t in sent: + + # if capitalization is requested, + # and the word begins with a capital + # set local flag C to True + if self._C and w[0].isupper(): + C = True + + self._wd[w][t] += 1 + self._uni[(t, C)] += 1 + self._bi[history[1]][(t, C)] += 1 + self._tri[tuple(history)][(t, C)] += 1 + + history.append((t, C)) + history.pop(0) + + # set local flag C to false for the next word + C = False + + self._eos[t]["EOS"] += 1 + + # compute lambda values from the trained frequency distributions + self._compute_lambda() + + def _compute_lambda(self): + """ + creates lambda values based upon training data + + NOTE: no need to explicitly reference C, + it is contained within the tag variable :: tag == (tag,C) + + for each tag trigram (t1, t2, t3) + depending on the maximum value of + - f(t1,t2,t3)-1 / f(t1,t2)-1 + - f(t2,t3)-1 / f(t2)-1 + - f(t3)-1 / N-1 + + increment l3,l2, or l1 by f(t1,t2,t3) + + ISSUES -- Resolutions: + if 2 values are equal, increment both lambda values + by (f(t1,t2,t3) / 2) + """ + + # temporary lambda variables + tl1 = 0.0 + tl2 = 0.0 + tl3 = 0.0 + + # for each t1,t2 in system + for history in self._tri.conditions(): + (h1, h2) = history + + # for each t3 given t1,t2 in system + # (NOTE: tag actually represents (tag,C)) + # However no effect within this function + for tag in self._tri[history].keys(): + + # if there has only been 1 occurrence of this tag in the data + # then ignore this trigram. + if self._uni[tag] == 1: + continue + + # safe_div provides a safe floating point division + # it returns -1 if the denominator is 0 + c3 = self._safe_div( + (self._tri[history][tag] - 1), (self._tri[history].N() - 1) + ) + c2 = self._safe_div((self._bi[h2][tag] - 1), (self._bi[h2].N() - 1)) + c1 = self._safe_div((self._uni[tag] - 1), (self._uni.N() - 1)) + + # if c1 is the maximum value: + if (c1 > c3) and (c1 > c2): + tl1 += self._tri[history][tag] + + # if c2 is the maximum value + elif (c2 > c3) and (c2 > c1): + tl2 += self._tri[history][tag] + + # if c3 is the maximum value + elif (c3 > c2) and (c3 > c1): + tl3 += self._tri[history][tag] + + # if c3, and c2 are equal and larger than c1 + elif (c3 == c2) and (c3 > c1): + tl2 += self._tri[history][tag] / 2.0 + tl3 += self._tri[history][tag] / 2.0 + + # if c1, and c2 are equal and larger than c3 + # this might be a dumb thing to do....(not sure yet) + elif (c2 == c1) and (c1 > c3): + tl1 += self._tri[history][tag] / 2.0 + tl2 += self._tri[history][tag] / 2.0 + + # otherwise there might be a problem + # eg: all values = 0 + else: + pass + + # Lambda normalisation: + # ensures that l1+l2+l3 = 1 + self._l1 = tl1 / (tl1 + tl2 + tl3) + self._l2 = tl2 / (tl1 + tl2 + tl3) + self._l3 = tl3 / (tl1 + tl2 + tl3) + + def _safe_div(self, v1, v2): + """ + Safe floating point division function, does not allow division by 0 + returns -1 if the denominator is 0 + """ + if v2 == 0: + return -1 + else: + return v1 / v2 + + def tagdata(self, data): + """ + Tags each sentence in a list of sentences + + :param data:list of list of words + :type data: [[string,],] + :return: list of list of (word, tag) tuples + + Invokes tag(sent) function for each sentence + compiles the results into a list of tagged sentences + each tagged sentence is a list of (word, 
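# Editor's sketch: the per-trigram decision inside _compute_lambda() above.
# For one trigram (t1, t2, t3) the three discounted ratios c1, c2, c3 are
# compared and the trigram's count is credited to the lambda of the winning
# model. Counts below are invented; safe_div returns -1 on a zero denominator,
# as in the source.
def safe_div(a, b):
    return -1 if b == 0 else a / b

f_t1t2t3, f_t1t2 = 8, 10          # trigram count and its history count
f_t2t3, f_t2 = 9, 40              # bigram count and its history count
f_t3, n_tags = 40, 1000           # unigram count and total tag count

c3 = safe_div(f_t1t2t3 - 1, f_t1t2 - 1)   # 7/9   ~ 0.78
c2 = safe_div(f_t2t3 - 1, f_t2 - 1)       # 8/39  ~ 0.21
c1 = safe_div(f_t3 - 1, n_tags - 1)       # 39/999 ~ 0.04

tl1 = tl2 = tl3 = 0.0
if c3 > c2 and c3 > c1:
    tl3 += f_t1t2t3               # credit this trigram's count to lambda_3
# after all trigrams are processed, l1, l2, l3 are the normalised tl1, tl2, tl3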
tag) tuples + """ + res = [] + for sent in data: + res1 = self.tag(sent) + res.append(res1) + return res + + def tag(self, data): + """ + Tags a single sentence + + :param data: list of words + :type data: [string,] + + :return: [(word, tag),] + + Calls recursive function '_tagword' + to produce a list of tags + + Associates the sequence of returned tags + with the correct words in the input sequence + + returns a list of (word, tag) tuples + """ + + current_state = [(["BOS", "BOS"], 0.0)] + + sent = list(data) + + tags = self._tagword(sent, current_state) + + res = [] + for i in range(len(sent)): + # unpack and discard the C flags + (t, C) = tags[i + 2] + res.append((sent[i], t)) + + return res + + def _tagword(self, sent, current_states): + """ + :param sent : List of words remaining in the sentence + :type sent : [word,] + :param current_states : List of possible tag combinations for + the sentence so far, and the log probability + associated with each tag combination + :type current_states : [([tag, ], logprob), ] + + Tags the first word in the sentence and + recursively tags the reminder of sentence + + Uses formula specified above to calculate the probability + of a particular tag + """ + + # if this word marks the end of the sentence, + # return the most probable tag + if sent == []: + (h, logp) = current_states[0] + return h + + # otherwise there are more words to be tagged + word = sent[0] + sent = sent[1:] + new_states = [] + + # if the Capitalisation is requested, + # initialise the flag for this word + C = False + if self._C and word[0].isupper(): + C = True + + # if word is known + # compute the set of possible tags + # and their associated log probabilities + if word in self._wd: + self.known += 1 + + for (history, curr_sent_logprob) in current_states: + logprobs = [] + + for t in self._wd[word].keys(): + tC = (t, C) + p_uni = self._uni.freq(tC) + p_bi = self._bi[history[-1]].freq(tC) + p_tri = self._tri[tuple(history[-2:])].freq(tC) + p_wd = self._wd[word][t] / self._uni[tC] + p = self._l1 * p_uni + self._l2 * p_bi + self._l3 * p_tri + p2 = log(p, 2) + log(p_wd, 2) + + # compute the result of appending each tag to this history + new_states.append((history + [tC], curr_sent_logprob + p2)) + + # otherwise a new word, set of possible tags is unknown + else: + self.unknown += 1 + + # since a set of possible tags, + # and the probability of each specific tag + # can not be returned from most classifiers: + # specify that any unknown words are tagged with certainty + p = 1 + + # if no unknown word tagger has been specified + # then use the tag 'Unk' + if self._unk is None: + tag = ("Unk", C) + + # otherwise apply the unknown word tagger + else: + [(_w, t)] = list(self._unk.tag([word])) + tag = (t, C) + + for (history, logprob) in current_states: + history.append(tag) + + new_states = current_states + + # now have computed a set of possible new_states + + # sort states by log prob + # set is now ordered greatest to least log probability + new_states.sort(reverse=True, key=itemgetter(1)) + + # del everything after N (threshold) + # this is the beam search cut + if len(new_states) > self._N: + new_states = new_states[: self._N] + + # compute the tags for the rest of the sentence + # return the best list of tags for the sentence + return self._tagword(sent, new_states) + + +######################################## +# helper function -- basic sentence tokenizer +######################################## + + +def basic_sent_chop(data, raw=True): + """ + Basic method for tokenizing input 
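# Editor's sketch: the beam cut at the end of _tagword() above. Candidate
# states are (tag history, log probability) pairs; they are sorted by log
# probability and only the best N are carried into the next word. The states
# here are fabricated, and N is tiny so the pruning is visible.
from operator import itemgetter

N = 2
new_states = [(["BOS", "BOS", "AT"], -1.2),
              (["BOS", "BOS", "NN"], -3.5),
              (["BOS", "BOS", "VB"], -0.8)]

new_states.sort(reverse=True, key=itemgetter(1))   # best (least negative) first
if len(new_states) > N:
    new_states = new_states[:N]                     # beam search cut

print([logp for _, logp in new_states])             # [-0.8, -1.2]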
into sentences + for this tagger: + + :param data: list of tokens (words or (word, tag) tuples) + :type data: str or tuple(str, str) + :param raw: boolean flag marking the input data + as a list of words or a list of tagged words + :type raw: bool + :return: list of sentences + sentences are a list of tokens + tokens are the same as the input + + Function takes a list of tokens and separates the tokens into lists + where each list represents a sentence fragment + This function can separate both tagged and raw sequences into + basic sentences. + + Sentence markers are the set of [,.!?] + + This is a simple method which enhances the performance of the TnT + tagger. Better sentence tokenization will further enhance the results. + """ + + new_data = [] + curr_sent = [] + sent_mark = [",", ".", "?", "!"] + + if raw: + for word in data: + if word in sent_mark: + curr_sent.append(word) + new_data.append(curr_sent) + curr_sent = [] + else: + curr_sent.append(word) + + else: + for (word, tag) in data: + if word in sent_mark: + curr_sent.append((word, tag)) + new_data.append(curr_sent) + curr_sent = [] + else: + curr_sent.append((word, tag)) + return new_data + + +def demo(): + from nltk.corpus import brown + + sents = list(brown.tagged_sents()) + test = list(brown.sents()) + + tagger = TnT() + tagger.train(sents[200:1000]) + + tagged_data = tagger.tagdata(test[100:120]) + + for j in range(len(tagged_data)): + s = tagged_data[j] + t = sents[j + 100] + for i in range(len(s)): + print(s[i], "--", t[i]) + print() + + +def demo2(): + from nltk.corpus import treebank + + d = list(treebank.tagged_sents()) + + t = TnT(N=1000, C=False) + s = TnT(N=1000, C=True) + t.train(d[(11) * 100 :]) + s.train(d[(11) * 100 :]) + + for i in range(10): + tacc = t.accuracy(d[i * 100 : ((i + 1) * 100)]) + tp_un = t.unknown / (t.known + t.unknown) + tp_kn = t.known / (t.known + t.unknown) + t.unknown = 0 + t.known = 0 + + print("Capitalization off:") + print("Accuracy:", tacc) + print("Percentage known:", tp_kn) + print("Percentage unknown:", tp_un) + print("Accuracy over known words:", (tacc / tp_kn)) + + sacc = s.accuracy(d[i * 100 : ((i + 1) * 100)]) + sp_un = s.unknown / (s.known + s.unknown) + sp_kn = s.known / (s.known + s.unknown) + s.unknown = 0 + s.known = 0 + + print("Capitalization on:") + print("Accuracy:", sacc) + print("Percentage known:", sp_kn) + print("Percentage unknown:", sp_un) + print("Accuracy over known words:", (sacc / sp_kn)) + + +def demo3(): + from nltk.corpus import brown, treebank + + d = list(treebank.tagged_sents()) + e = list(brown.tagged_sents()) + + d = d[:1000] + e = e[:1000] + + d10 = int(len(d) * 0.1) + e10 = int(len(e) * 0.1) + + tknacc = 0 + sknacc = 0 + tallacc = 0 + sallacc = 0 + tknown = 0 + sknown = 0 + + for i in range(10): + + t = TnT(N=1000, C=False) + s = TnT(N=1000, C=False) + + dtest = d[(i * d10) : ((i + 1) * d10)] + etest = e[(i * e10) : ((i + 1) * e10)] + + dtrain = d[: (i * d10)] + d[((i + 1) * d10) :] + etrain = e[: (i * e10)] + e[((i + 1) * e10) :] + + t.train(dtrain) + s.train(etrain) + + tacc = t.accuracy(dtest) + tp_un = t.unknown / (t.known + t.unknown) + tp_kn = t.known / (t.known + t.unknown) + tknown += tp_kn + t.unknown = 0 + t.known = 0 + + sacc = s.accuracy(etest) + sp_un = s.unknown / (s.known + s.unknown) + sp_kn = s.known / (s.known + s.unknown) + sknown += sp_kn + s.unknown = 0 + s.known = 0 + + tknacc += tacc / tp_kn + sknacc += sacc / tp_kn + tallacc += tacc + sallacc += sacc + + # print(i+1, (tacc / tp_kn), i+1, (sacc / tp_kn), i+1, tacc, i+1, sacc) + + 
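# Editor's sketch: calling basic_sent_chop() above on a flat token stream.
# raw=True means plain words; raw=False expects (word, tag) tuples. Assumes
# only that the nltk package is importable.
from nltk.tag.tnt import basic_sent_chop

tokens = "It was cold . The swallow flew south .".split()
print(basic_sent_chop(tokens))
# [['It', 'was', 'cold', '.'], ['The', 'swallow', 'flew', 'south', '.']]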
print("brown: acc over words known:", 10 * tknacc) + print(" : overall accuracy:", 10 * tallacc) + print(" : words known:", 10 * tknown) + print("treebank: acc over words known:", 10 * sknacc) + print(" : overall accuracy:", 10 * sallacc) + print(" : words known:", 10 * sknown) diff --git a/venv/lib/python3.10/site-packages/nltk/tag/util.py b/venv/lib/python3.10/site-packages/nltk/tag/util.py new file mode 100644 index 0000000000000000000000000000000000000000..e35b98195f2b7b448775a49795e0f34d612624a6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/nltk/tag/util.py @@ -0,0 +1,72 @@ +# Natural Language Toolkit: Tagger Utilities +# +# Copyright (C) 2001-2023 NLTK Project +# Author: Edward Loper +# Steven Bird +# URL: +# For license information, see LICENSE.TXT + + +def str2tuple(s, sep="/"): + """ + Given the string representation of a tagged token, return the + corresponding tuple representation. The rightmost occurrence of + *sep* in *s* will be used to divide *s* into a word string and + a tag string. If *sep* does not occur in *s*, return (s, None). + + >>> from nltk.tag.util import str2tuple + >>> str2tuple('fly/NN') + ('fly', 'NN') + + :type s: str + :param s: The string representation of a tagged token. + :type sep: str + :param sep: The separator string used to separate word strings + from tags. + """ + loc = s.rfind(sep) + if loc >= 0: + return (s[:loc], s[loc + len(sep) :].upper()) + else: + return (s, None) + + +def tuple2str(tagged_token, sep="/"): + """ + Given the tuple representation of a tagged token, return the + corresponding string representation. This representation is + formed by concatenating the token's word string, followed by the + separator, followed by the token's tag. (If the tag is None, + then just return the bare word string.) + + >>> from nltk.tag.util import tuple2str + >>> tagged_token = ('fly', 'NN') + >>> tuple2str(tagged_token) + 'fly/NN' + + :type tagged_token: tuple(str, str) + :param tagged_token: The tuple representation of a tagged token. + :type sep: str + :param sep: The separator string used to separate word strings + from tags. + """ + word, tag = tagged_token + if tag is None: + return word + else: + assert sep not in tag, "tag may not contain sep!" + return f"{word}{sep}{tag}" + + +def untag(tagged_sentence): + """ + Given a tagged sentence, return an untagged version of that + sentence. I.e., return a list containing the first element + of each tuple in *tagged_sentence*. 
+ + >>> from nltk.tag.util import untag + >>> untag([('John', 'NNP'), ('saw', 'VBD'), ('Mary', 'NNP')]) + ['John', 'saw', 'Mary'] + + """ + return [w for (w, t) in tagged_sentence] diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c409eea2531a48cd4ad1ede6ffa0b1f367462774 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/bleu_score.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/bleu_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3924abf4c65b28b3c1d93c4218876c8f2c11f2e Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/bleu_score.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1617458d106fc3303518592898efe695bc1af5f3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/chrf_score.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gale_church.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gale_church.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d48f7e82f8cd376f051ecb67e1731ae70496532 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gale_church.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gleu_score.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gleu_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a28c998f36a9fc05c6326ee4542eb169bf0005e4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/gleu_score.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm2.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2ef49892e94a5ecb705ec0b96e1d75ee5ca2131 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm3.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d18bd428d17492bd5abd934d7fb5822a0b7449f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm3.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm4.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm4.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba35a9e672303295278cdcb40b07b162dbad9c28 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm4.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm5.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm5.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce44876b218f47a648d01fc843c41853fb5aa1b8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm5.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm_model.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm_model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b726270d88a9e5441c30c6b82102d785f65224c9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ibm_model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/meteor_score.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/meteor_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..62a8e942235a4e515252d5202e5f51f6b8265d56 Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/meteor_score.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/nist_score.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/nist_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39b06f2013843e8bbdb09800ebbcc2d772e5639f Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/nist_score.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ribes_score.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ribes_score.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bd8d970dde228188fea5d7aa6a142ad78acc7cc Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/ribes_score.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/stack_decoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/stack_decoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ab83158112040c0ccba6912457cde3d37a6017c Binary files /dev/null and b/venv/lib/python3.10/site-packages/nltk/translate/__pycache__/stack_decoder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numexpr/__init__.py b/venv/lib/python3.10/site-packages/numexpr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7946f8522d7c5e536d6def85e69fd7b84aa0373d --- /dev/null +++ b/venv/lib/python3.10/site-packages/numexpr/__init__.py @@ -0,0 +1,68 @@ +################################################################### +# Numexpr - Fast numerical array expression evaluator for NumPy. +# +# License: MIT +# Author: See AUTHORS.txt +# +# See LICENSE.txt and LICENSES/*.txt for details about copyright and +# rights to use. +#################################################################### + +""" +Numexpr is a fast numerical expression evaluator for NumPy. With it, +expressions that operate on arrays (like "3*a+4*b") are accelerated +and use less memory than doing the same calculation in Python. + +See: + +https://github.com/pydata/numexpr + +for more info about it. 
+ +""" + +from numexpr.interpreter import MAX_THREADS, use_vml, __BLOCK_SIZE1__ + +is_cpu_amd_intel = False # DEPRECATION WARNING: WILL BE REMOVED IN FUTURE RELEASE + +# cpuinfo imports were moved into the test submodule function that calls them +# to improve import times. + +import os, os.path +import platform +from numexpr.expressions import E +from numexpr.necompiler import (NumExpr, disassemble, evaluate, re_evaluate, + validate) + +from numexpr.utils import (_init_num_threads, + get_vml_version, set_vml_accuracy_mode, set_vml_num_threads, + set_num_threads, get_num_threads, + detect_number_of_cores, detect_number_of_threads) + +# Detect the number of cores +ncores = detect_number_of_cores() +# Initialize the number of threads to be used +nthreads = _init_num_threads() +# The default for VML is 1 thread (see #39) +# set_vml_num_threads(1) + +from . import version +__version__ = version.version + +def print_versions(): + """Print the versions of software that numexpr relies on.""" + try: + import numexpr.tests + return numexpr.tests.print_versions() + except ImportError: + # To maintain Python 2.6 compatibility we have simple error handling + raise ImportError('`numexpr.tests` could not be imported, likely it was excluded from the distribution.') + +def test(verbosity=1): + """Run all the tests in the test suite.""" + try: + import numexpr.tests + return numexpr.tests.test(verbosity=verbosity) + except ImportError: + # To maintain Python 2.6 compatibility we have simple error handling + raise ImportError('`numexpr.tests` could not be imported, likely it was excluded from the distribution.') \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/numexpr/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numexpr/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2947fd4eaccc01d7fbc7f5b715defeac3c656a4e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numexpr/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numexpr/__pycache__/cpuinfo.cpython-310.pyc b/venv/lib/python3.10/site-packages/numexpr/__pycache__/cpuinfo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef6b302b5c2b374b8fb4d432eaca2941f54bcf1c Binary files /dev/null and b/venv/lib/python3.10/site-packages/numexpr/__pycache__/cpuinfo.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numexpr/__pycache__/expressions.cpython-310.pyc b/venv/lib/python3.10/site-packages/numexpr/__pycache__/expressions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0eae891aab0707856c7636002d84f335e506c1e Binary files /dev/null and b/venv/lib/python3.10/site-packages/numexpr/__pycache__/expressions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numexpr/__pycache__/necompiler.cpython-310.pyc b/venv/lib/python3.10/site-packages/numexpr/__pycache__/necompiler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20eda635ef1de6f50aa4c4b2e0670cd9f2e28c00 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numexpr/__pycache__/necompiler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numexpr/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/numexpr/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc1d0d802d18f63ae5a852e71ea99af5ef87d124 Binary files /dev/null 
and b/venv/lib/python3.10/site-packages/numexpr/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numexpr/__pycache__/version.cpython-310.pyc b/venv/lib/python3.10/site-packages/numexpr/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60887c94443e8e4d49400b91422f8eb99d931891 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numexpr/__pycache__/version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numexpr/cpuinfo.py b/venv/lib/python3.10/site-packages/numexpr/cpuinfo.py new file mode 100644 index 0000000000000000000000000000000000000000..4a57d3cb03fd2851d5ac886d953080409fc4725c --- /dev/null +++ b/venv/lib/python3.10/site-packages/numexpr/cpuinfo.py @@ -0,0 +1,859 @@ +################################################################### +# cpuinfo - Get information about CPU +# +# License: BSD +# Author: Pearu Peterson +# +# See LICENSES/cpuinfo.txt for details about copyright and +# rights to use. +#################################################################### + +""" +cpuinfo + +Copyright 2002 Pearu Peterson all rights reserved, +Pearu Peterson +Permission to use, modify, and distribute this software is given under the +terms of the NumPy (BSD style) license. See LICENSE.txt that came with +this distribution for specifics. + +NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. +Pearu Peterson +""" + +__all__ = ['cpu'] + +import sys, re, types +import os +import subprocess +import warnings +import platform +import inspect + +is_cpu_amd_intel = False # DEPRECATION WARNING: WILL BE REMOVED IN FUTURE RELEASE + +def getoutput(cmd, successful_status=(0,), stacklevel=1): + try: + p = subprocess.Popen(cmd, stdout=subprocess.PIPE) + output, _ = p.communicate() + status = p.returncode + except EnvironmentError as e: + warnings.warn(str(e), UserWarning, stacklevel=stacklevel) + return False, '' + if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status: + return True, output + return False, output + + +def command_info(successful_status=(0,), stacklevel=1, **kw): + info = {} + for key in kw: + ok, output = getoutput(kw[key], successful_status=successful_status, + stacklevel=stacklevel + 1) + if ok: + info[key] = output.strip() + return info + + +def command_by_line(cmd, successful_status=(0,), stacklevel=1): + ok, output = getoutput(cmd, successful_status=successful_status, + stacklevel=stacklevel + 1) + if not ok: + return + + # XXX: check + output = output.decode('ascii') + + for line in output.splitlines(): + yield line.strip() + + +def key_value_from_command(cmd, sep, successful_status=(0,), + stacklevel=1): + d = {} + for line in command_by_line(cmd, successful_status=successful_status, + stacklevel=stacklevel + 1): + l = [s.strip() for s in line.split(sep, 1)] + if len(l) == 2: + d[l[0]] = l[1] + return d + + +class CPUInfoBase(object): + """Holds CPU information and provides methods for requiring + the availability of various CPU features. 
+ """ + + def _try_call(self, func): + try: + return func() + except: + pass + + def __getattr__(self, name): + if not name.startswith('_'): + if hasattr(self, '_' + name): + attr = getattr(self, '_' + name) + if inspect.ismethod(attr): + return lambda func=self._try_call, attr=attr: func(attr) + else: + return lambda: None + raise AttributeError(name) + + def _getNCPUs(self): + return 1 + + def __get_nbits(self): + abits = platform.architecture()[0] + nbits = re.compile(r'(\d+)bit').search(abits).group(1) + return nbits + + def _is_32bit(self): + return self.__get_nbits() == '32' + + def _is_64bit(self): + return self.__get_nbits() == '64' + + +class LinuxCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = [{}] + ok, output = getoutput(['uname', '-m']) + if ok: + info[0]['uname_m'] = output.strip() + try: + fo = open('/proc/cpuinfo') + except EnvironmentError as e: + warnings.warn(str(e), UserWarning) + else: + for line in fo: + name_value = [s.strip() for s in line.split(':', 1)] + if len(name_value) != 2: + continue + name, value = name_value + if not info or name in info[-1]: # next processor + info.append({}) + info[-1][name] = value + fo.close() + self.__class__.info = info + + def _not_impl(self): + pass + + # Athlon + + def _is_AMD(self): + return self.info[0]['vendor_id'] == 'AuthenticAMD' + + def _is_AthlonK6_2(self): + return self._is_AMD() and self.info[0]['model'] == '2' + + def _is_AthlonK6_3(self): + return self._is_AMD() and self.info[0]['model'] == '3' + + def _is_AthlonK6(self): + return re.match(r'.*?AMD-K6', self.info[0]['model name']) is not None + + def _is_AthlonK7(self): + return re.match(r'.*?AMD-K7', self.info[0]['model name']) is not None + + def _is_AthlonMP(self): + return re.match(r'.*?Athlon\(tm\) MP\b', + self.info[0]['model name']) is not None + + def _is_AMD64(self): + return self.is_AMD() and self.info[0]['family'] == '15' + + def _is_Athlon64(self): + return re.match(r'.*?Athlon\(tm\) 64\b', + self.info[0]['model name']) is not None + + def _is_AthlonHX(self): + return re.match(r'.*?Athlon HX\b', + self.info[0]['model name']) is not None + + def _is_Opteron(self): + return re.match(r'.*?Opteron\b', + self.info[0]['model name']) is not None + + def _is_Hammer(self): + return re.match(r'.*?Hammer\b', + self.info[0]['model name']) is not None + + # Alpha + + def _is_Alpha(self): + return self.info[0]['cpu'] == 'Alpha' + + def _is_EV4(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV4' + + def _is_EV5(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV5' + + def _is_EV56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'EV56' + + def _is_PCA56(self): + return self.is_Alpha() and self.info[0]['cpu model'] == 'PCA56' + + # Intel + + #XXX + _is_i386 = _not_impl + + def _is_Intel(self): + return self.info[0]['vendor_id'] == 'GenuineIntel' + + def _is_i486(self): + return self.info[0]['cpu'] == 'i486' + + def _is_i586(self): + return self.is_Intel() and self.info[0]['cpu family'] == '5' + + def _is_i686(self): + return self.is_Intel() and self.info[0]['cpu family'] == '6' + + def _is_Celeron(self): + return re.match(r'.*?Celeron', + self.info[0]['model name']) is not None + + def _is_Pentium(self): + return re.match(r'.*?Pentium', + self.info[0]['model name']) is not None + + def _is_PentiumII(self): + return re.match(r'.*?Pentium.*?II\b', + self.info[0]['model name']) is not None + + def _is_PentiumPro(self): + return re.match(r'.*?PentiumPro\b', + 
self.info[0]['model name']) is not None + + def _is_PentiumMMX(self): + return re.match(r'.*?Pentium.*?MMX\b', + self.info[0]['model name']) is not None + + def _is_PentiumIII(self): + return re.match(r'.*?Pentium.*?III\b', + self.info[0]['model name']) is not None + + def _is_PentiumIV(self): + return re.match(r'.*?Pentium.*?(IV|4)\b', + self.info[0]['model name']) is not None + + def _is_PentiumM(self): + return re.match(r'.*?Pentium.*?M\b', + self.info[0]['model name']) is not None + + def _is_Prescott(self): + return self.is_PentiumIV() and self.has_sse3() + + def _is_Nocona(self): + return (self.is_Intel() and + self.info[0]['cpu family'] in ('6', '15') and + # two s sse3; three s ssse3 not the same thing, this is fine + (self.has_sse3() and not self.has_ssse3()) and + re.match(r'.*?\blm\b', self.info[0]['flags']) is not None) + + def _is_Core2(self): + return (self.is_64bit() and self.is_Intel() and + re.match(r'.*?Core\(TM\)2\b', + self.info[0]['model name']) is not None) + + def _is_Itanium(self): + return re.match(r'.*?Itanium\b', + self.info[0]['family']) is not None + + def _is_XEON(self): + return re.match(r'.*?XEON\b', + self.info[0]['model name'], re.IGNORECASE) is not None + + _is_Xeon = _is_XEON + + # Power + def _is_Power(self): + return re.match(r'.*POWER.*', + self.info[0]['cpu']) is not None + + def _is_Power7(self): + return re.match(r'.*POWER7.*', + self.info[0]['cpu']) is not None + + def _is_Power8(self): + return re.match(r'.*POWER8.*', + self.info[0]['cpu']) is not None + + def _is_Power9(self): + return re.match(r'.*POWER9.*', + self.info[0]['cpu']) is not None + + def _has_Altivec(self): + return re.match(r'.*altivec\ supported.*', + self.info[0]['cpu']) is not None + + # Varia + + def _is_singleCPU(self): + return len(self.info) == 1 + + def _getNCPUs(self): + return len(self.info) + + def _has_fdiv_bug(self): + return self.info[0]['fdiv_bug'] == 'yes' + + def _has_f00f_bug(self): + return self.info[0]['f00f_bug'] == 'yes' + + def _has_mmx(self): + return re.match(r'.*?\bmmx\b', self.info[0]['flags']) is not None + + def _has_sse(self): + return re.match(r'.*?\bsse\b', self.info[0]['flags']) is not None + + def _has_sse2(self): + return re.match(r'.*?\bsse2\b', self.info[0]['flags']) is not None + + def _has_sse3(self): + return re.match(r'.*?\bpni\b', self.info[0]['flags']) is not None + + def _has_ssse3(self): + return re.match(r'.*?\bssse3\b', self.info[0]['flags']) is not None + + def _has_3dnow(self): + return re.match(r'.*?\b3dnow\b', self.info[0]['flags']) is not None + + def _has_3dnowext(self): + return re.match(r'.*?\b3dnowext\b', self.info[0]['flags']) is not None + + +class IRIXCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = key_value_from_command('sysconf', sep=' ', + successful_status=(0, 1)) + self.__class__.info = info + + def _not_impl(self): + pass + + def _is_singleCPU(self): + return self.info.get('NUM_PROCESSORS') == '1' + + def _getNCPUs(self): + return int(self.info.get('NUM_PROCESSORS', 1)) + + def __cputype(self, n): + return self.info.get('PROCESSORS').split()[0].lower() == 'r%s' % (n) + + def _is_r2000(self): + return self.__cputype(2000) + + def _is_r3000(self): + return self.__cputype(3000) + + def _is_r3900(self): + return self.__cputype(3900) + + def _is_r4000(self): + return self.__cputype(4000) + + def _is_r4100(self): + return self.__cputype(4100) + + def _is_r4300(self): + return self.__cputype(4300) + + def _is_r4400(self): + return self.__cputype(4400) + + def 
_is_r4600(self): + return self.__cputype(4600) + + def _is_r4650(self): + return self.__cputype(4650) + + def _is_r5000(self): + return self.__cputype(5000) + + def _is_r6000(self): + return self.__cputype(6000) + + def _is_r8000(self): + return self.__cputype(8000) + + def _is_r10000(self): + return self.__cputype(10000) + + def _is_r12000(self): + return self.__cputype(12000) + + def _is_rorion(self): + return self.__cputype('orion') + + def get_ip(self): + try: + return self.info.get('MACHINE') + except: + pass + + def __machine(self, n): + return self.info.get('MACHINE').lower() == 'ip%s' % (n) + + def _is_IP19(self): + return self.__machine(19) + + def _is_IP20(self): + return self.__machine(20) + + def _is_IP21(self): + return self.__machine(21) + + def _is_IP22(self): + return self.__machine(22) + + def _is_IP22_4k(self): + return self.__machine(22) and self._is_r4000() + + def _is_IP22_5k(self): + return self.__machine(22) and self._is_r5000() + + def _is_IP24(self): + return self.__machine(24) + + def _is_IP25(self): + return self.__machine(25) + + def _is_IP26(self): + return self.__machine(26) + + def _is_IP27(self): + return self.__machine(27) + + def _is_IP28(self): + return self.__machine(28) + + def _is_IP30(self): + return self.__machine(30) + + def _is_IP32(self): + return self.__machine(32) + + def _is_IP32_5k(self): + return self.__machine(32) and self._is_r5000() + + def _is_IP32_10k(self): + return self.__machine(32) and self._is_r10000() + + +class DarwinCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + machine='machine') + info['sysctl_hw'] = key_value_from_command(['sysctl', 'hw'], sep='=') + self.__class__.info = info + + def _not_impl(self): pass + + def _getNCPUs(self): + return int(self.info['sysctl_hw'].get('hw.ncpu', 1)) + + def _is_Power_Macintosh(self): + return self.info['sysctl_hw']['hw.machine'] == 'Power Macintosh' + + def _is_i386(self): + return self.info['arch'] == 'i386' + + def _is_ppc(self): + return self.info['arch'] == 'ppc' + + def __machine(self, n): + return self.info['machine'] == 'ppc%s' % n + + def _is_ppc601(self): return self.__machine(601) + + def _is_ppc602(self): return self.__machine(602) + + def _is_ppc603(self): return self.__machine(603) + + def _is_ppc603e(self): return self.__machine('603e') + + def _is_ppc604(self): return self.__machine(604) + + def _is_ppc604e(self): return self.__machine('604e') + + def _is_ppc620(self): return self.__machine(620) + + def _is_ppc630(self): return self.__machine(630) + + def _is_ppc740(self): return self.__machine(740) + + def _is_ppc7400(self): return self.__machine(7400) + + def _is_ppc7450(self): return self.__machine(7450) + + def _is_ppc750(self): return self.__machine(750) + + def _is_ppc403(self): return self.__machine(403) + + def _is_ppc505(self): return self.__machine(505) + + def _is_ppc801(self): return self.__machine(801) + + def _is_ppc821(self): return self.__machine(821) + + def _is_ppc823(self): return self.__machine(823) + + def _is_ppc860(self): return self.__machine(860) + +class NetBSDCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = {} + info['sysctl_hw'] = key_value_from_command(['sysctl', 'hw'], sep='=') + info['arch'] = info['sysctl_hw'].get('hw.machine_arch', 1) + info['machine'] = info['sysctl_hw'].get('hw.machine', 1) + self.__class__.info = info + + def _not_impl(self): pass + + def _getNCPUs(self): + return 
int(self.info['sysctl_hw'].get('hw.ncpu', 1)) + + def _is_Intel(self): + if self.info['sysctl_hw'].get('hw.model', "")[0:5] == 'Intel': + return True + return False + + def _is_AMD(self): + if self.info['sysctl_hw'].get('hw.model', "")[0:3] == 'AMD': + return True + return False + +class SunOSCPUInfo(CPUInfoBase): + info = None + + def __init__(self): + if self.info is not None: + return + info = command_info(arch='arch', + mach='mach', + uname_i=['uname', '-i'], + isainfo_b=['isainfo', '-b'], + isainfo_n=['isainfo', '-n'], + ) + info['uname_X'] = key_value_from_command(['uname', '-X'], sep='=') + for line in command_by_line(['psrinfo', '-v', '0']): + m = re.match(r'\s*The (?P
<p>
[\w\d]+) processor operates at', line) + if m: + info['processor'] = m.group('p') + break + self.__class__.info = info + + def _not_impl(self): + pass + + def _is_i386(self): + return self.info['isainfo_n'] == 'i386' + + def _is_sparc(self): + return self.info['isainfo_n'] == 'sparc' + + def _is_sparcv9(self): + return self.info['isainfo_n'] == 'sparcv9' + + def _getNCPUs(self): + return int(self.info['uname_X'].get('NumCPU', 1)) + + def _is_sun4(self): + return self.info['arch'] == 'sun4' + + def _is_SUNW(self): + return re.match(r'SUNW', self.info['uname_i']) is not None + + def _is_sparcstation5(self): + return re.match(r'.*SPARCstation-5', self.info['uname_i']) is not None + + def _is_ultra1(self): + return re.match(r'.*Ultra-1', self.info['uname_i']) is not None + + def _is_ultra250(self): + return re.match(r'.*Ultra-250', self.info['uname_i']) is not None + + def _is_ultra2(self): + return re.match(r'.*Ultra-2', self.info['uname_i']) is not None + + def _is_ultra30(self): + return re.match(r'.*Ultra-30', self.info['uname_i']) is not None + + def _is_ultra4(self): + return re.match(r'.*Ultra-4', self.info['uname_i']) is not None + + def _is_ultra5_10(self): + return re.match(r'.*Ultra-5_10', self.info['uname_i']) is not None + + def _is_ultra5(self): + return re.match(r'.*Ultra-5', self.info['uname_i']) is not None + + def _is_ultra60(self): + return re.match(r'.*Ultra-60', self.info['uname_i']) is not None + + def _is_ultra80(self): + return re.match(r'.*Ultra-80', self.info['uname_i']) is not None + + def _is_ultraenterprice(self): + return re.match(r'.*Ultra-Enterprise', self.info['uname_i']) is not None + + def _is_ultraenterprice10k(self): + return re.match(r'.*Ultra-Enterprise-10000', self.info['uname_i']) is not None + + def _is_sunfire(self): + return re.match(r'.*Sun-Fire', self.info['uname_i']) is not None + + def _is_ultra(self): + return re.match(r'.*Ultra', self.info['uname_i']) is not None + + def _is_cpusparcv7(self): + return self.info['processor'] == 'sparcv7' + + def _is_cpusparcv8(self): + return self.info['processor'] == 'sparcv8' + + def _is_cpusparcv9(self): + return self.info['processor'] == 'sparcv9' + + +class Win32CPUInfo(CPUInfoBase): + info = None + pkey = r"HARDWARE\DESCRIPTION\System\CentralProcessor" + # XXX: what does the value of + # HKEY_LOCAL_MACHINE\HARDWARE\DESCRIPTION\System\CentralProcessor\0 + # mean? + + def __init__(self): + try: + import _winreg + except ImportError: # Python 3 + import winreg as _winreg + + if self.info is not None: + return + info = [] + try: + #XXX: Bad style to use so long `try:...except:...`. Fix it! 
+ + prgx = re.compile(r"family\s+(?P<FML>\d+)\s+model\s+(?P<MDL>\d+)" + r"\s+stepping\s+(?P<STP>\d+)", re.IGNORECASE) + chnd = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, self.pkey) + pnum = 0 + while 1: + try: + proc = _winreg.EnumKey(chnd, pnum) + except _winreg.error: + break + else: + pnum += 1 + info.append({"Processor": proc}) + phnd = _winreg.OpenKey(chnd, proc) + pidx = 0 + while True: + try: + name, value, vtpe = _winreg.EnumValue(phnd, pidx) + except _winreg.error: + break + else: + pidx = pidx + 1 + info[-1][name] = value + if name == "Identifier": + srch = prgx.search(value) + if srch: + info[-1]["Family"] = int(srch.group("FML")) + info[-1]["Model"] = int(srch.group("MDL")) + info[-1]["Stepping"] = int(srch.group("STP")) + except: + print(sys.exc_value, '(ignoring)') + self.__class__.info = info + + def _not_impl(self): + pass + + # Athlon + + def _is_AMD(self): + return self.info[0]['VendorIdentifier'] == 'AuthenticAMD' + + def _is_Am486(self): + return self.is_AMD() and self.info[0]['Family'] == 4 + + def _is_Am5x86(self): + return self.is_AMD() and self.info[0]['Family'] == 4 + + def _is_AMDK5(self): + return (self.is_AMD() and self.info[0]['Family'] == 5 and + self.info[0]['Model'] in [0, 1, 2, 3]) + + def _is_AMDK6(self): + return (self.is_AMD() and self.info[0]['Family'] == 5 and + self.info[0]['Model'] in [6, 7]) + + def _is_AMDK6_2(self): + return (self.is_AMD() and self.info[0]['Family'] == 5 and + self.info[0]['Model'] == 8) + + def _is_AMDK6_3(self): + return (self.is_AMD() and self.info[0]['Family'] == 5 and + self.info[0]['Model'] == 9) + + def _is_AMDK7(self): + return self.is_AMD() and self.info[0]['Family'] == 6 + + # To reliably distinguish between the different types of AMD64 chips + # (Athlon64, Operton, Athlon64 X2, Semperon, Turion 64, etc.)
would + # require looking at the 'brand' from cpuid + + def _is_AMD64(self): + return self.is_AMD() and self.info[0]['Family'] == 15 + + # Intel + + def _is_Intel(self): + return self.info[0]['VendorIdentifier'] == 'GenuineIntel' + + def _is_i386(self): + return self.info[0]['Family'] == 3 + + def _is_i486(self): + return self.info[0]['Family'] == 4 + + def _is_i586(self): + return self.is_Intel() and self.info[0]['Family'] == 5 + + def _is_i686(self): + return self.is_Intel() and self.info[0]['Family'] == 6 + + def _is_Pentium(self): + return self.is_Intel() and self.info[0]['Family'] == 5 + + def _is_PentiumMMX(self): + return (self.is_Intel() and self.info[0]['Family'] == 5 and + self.info[0]['Model'] == 4) + + def _is_PentiumPro(self): + return (self.is_Intel() and self.info[0]['Family'] == 6 and + self.info[0]['Model'] == 1) + + def _is_PentiumII(self): + return (self.is_Intel() and self.info[0]['Family'] == 6 and + self.info[0]['Model'] in [3, 5, 6]) + + def _is_PentiumIII(self): + return (self.is_Intel() and self.info[0]['Family'] == 6 and + self.info[0]['Model'] in [7, 8, 9, 10, 11]) + + def _is_PentiumIV(self): + return self.is_Intel() and self.info[0]['Family'] == 15 + + def _is_PentiumM(self): + return (self.is_Intel() and self.info[0]['Family'] == 6 and + self.info[0]['Model'] in [9, 13, 14]) + + def _is_Core2(self): + return (self.is_Intel() and self.info[0]['Family'] == 6 and + self.info[0]['Model'] in [15, 16, 17]) + + # Varia + + def _is_singleCPU(self): + return len(self.info) == 1 + + def _getNCPUs(self): + return len(self.info) + + def _has_mmx(self): + if self.is_Intel(): + return ((self.info[0]['Family'] == 5 and + self.info[0]['Model'] == 4) or + (self.info[0]['Family'] in [6, 15])) + elif self.is_AMD(): + return self.info[0]['Family'] in [5, 6, 15] + else: + return False + + def _has_sse(self): + if self.is_Intel(): + return ((self.info[0]['Family'] == 6 and + self.info[0]['Model'] in [7, 8, 9, 10, 11]) or + self.info[0]['Family'] == 15) + elif self.is_AMD(): + return ((self.info[0]['Family'] == 6 and + self.info[0]['Model'] in [6, 7, 8, 10]) or + self.info[0]['Family'] == 15) + else: + return False + + def _has_sse2(self): + if self.is_Intel(): + return self.is_Pentium4() or self.is_PentiumM() or self.is_Core2() + elif self.is_AMD(): + return self.is_AMD64() + else: + return False + + def _has_3dnow(self): + return self.is_AMD() and self.info[0]['Family'] in [5, 6, 15] + + def _has_3dnowext(self): + return self.is_AMD() and self.info[0]['Family'] in [6, 15] + + +if sys.platform.startswith('linux'): # variations: linux2,linux-i386 (any others?) + cpuinfo = LinuxCPUInfo +elif sys.platform.startswith('irix'): + cpuinfo = IRIXCPUInfo +elif sys.platform == 'darwin': + cpuinfo = DarwinCPUInfo +elif sys.platform[0:6] == 'netbsd': + cpuinfo = NetBSDCPUInfo +elif sys.platform.startswith('sunos'): + cpuinfo = SunOSCPUInfo +elif sys.platform.startswith('win32'): + cpuinfo = Win32CPUInfo +elif sys.platform.startswith('cygwin'): + cpuinfo = LinuxCPUInfo +#XXX: other OS's. Eg. use _winreg on Win32. Or os.uname on unices. 
+else: + cpuinfo = CPUInfoBase + +cpu = cpuinfo() + +if __name__ == "__main__": + + cpu.is_blaa() + cpu.is_Intel() + cpu.is_Alpha() + + info = [] + for name in dir(cpuinfo): + if name[0] == '_' and name[1] != '_': + r = getattr(cpu, name[1:])() + if r: + if r != 1: + info.append('%s=%s' % (name[1:], r)) + else: + info.append(name[1:]) + print('CPU information: ' + ' '.join(info)) diff --git a/venv/lib/python3.10/site-packages/numexpr/expressions.py b/venv/lib/python3.10/site-packages/numexpr/expressions.py new file mode 100644 index 0000000000000000000000000000000000000000..419d7dccd4522ec47b4147c391543897c209949e --- /dev/null +++ b/venv/lib/python3.10/site-packages/numexpr/expressions.py @@ -0,0 +1,523 @@ +################################################################### +# Numexpr - Fast numerical array expression evaluator for NumPy. +# +# License: MIT +# Author: See AUTHORS.txt +# +# See LICENSE.txt and LICENSES/*.txt for details about copyright and +# rights to use. +#################################################################### + +__all__ = ['E'] + +import operator +import sys +import threading + +import numpy + +# Declare a double type that does not exist in Python space +double = numpy.double + +# The default kind for undeclared variables +default_kind = 'double' +int_ = numpy.int32 +long_ = numpy.int64 + +type_to_kind = {bool: 'bool', int_: 'int', long_: 'long', float: 'float', + double: 'double', complex: 'complex', bytes: 'bytes', str: 'str'} +kind_to_type = {'bool': bool, 'int': int_, 'long': long_, 'float': float, + 'double': double, 'complex': complex, 'bytes': bytes, 'str': str} +kind_rank = ('bool', 'int', 'long', 'float', 'double', 'complex', 'none') +scalar_constant_types = [bool, int_, int, float, double, complex, bytes, str] + +scalar_constant_types = tuple(scalar_constant_types) + +from numexpr import interpreter + +class Expression(): + + def __getattr__(self, name): + if name.startswith('_'): + try: + return self.__dict__[name] + except KeyError: + raise AttributeError + else: + return VariableNode(name, default_kind) + + +E = Expression() + + +class Context(threading.local): + + def get(self, value, default): + return self.__dict__.get(value, default) + + def get_current_context(self): + return self.__dict__ + + def set_new_context(self, dict_): + self.__dict__.update(dict_) + +# This will be called each time the local object is used in a separate thread +_context = Context() + + +def get_optimization(): + return _context.get('optimization', 'none') + + +# helper functions for creating __magic__ methods +def ophelper(f): + def func(*args): + args = list(args) + for i, x in enumerate(args): + if isConstant(x): + args[i] = x = ConstantNode(x) + if not isinstance(x, ExpressionNode): + raise TypeError("unsupported object type: %s" % type(x)) + return f(*args) + + func.__name__ = f.__name__ + func.__doc__ = f.__doc__ + func.__dict__.update(f.__dict__) + return func + + +def allConstantNodes(args): + "returns True if args are all ConstantNodes." + for x in args: + if not isinstance(x, ConstantNode): + return False + return True + + +def isConstant(ex): + "Returns True if ex is a constant scalar of an allowed type." 
+ return isinstance(ex, scalar_constant_types) + + +def commonKind(nodes): + node_kinds = [node.astKind for node in nodes] + str_count = node_kinds.count('bytes') + node_kinds.count('str') + if 0 < str_count < len(node_kinds): # some args are strings, but not all + raise TypeError("strings can only be operated with strings") + if str_count > 0: # if there are some, all of them must be + return 'bytes' + n = -1 + for x in nodes: + n = max(n, kind_rank.index(x.astKind)) + return kind_rank[n] + + +max_int32 = 2147483647 +min_int32 = -max_int32 - 1 + + +def bestConstantType(x): + # ``numpy.string_`` is a subclass of ``bytes`` + if isinstance(x, (bytes, str)): + return bytes + # Numeric conversion to boolean values is not tried because + # ``bool(1) == True`` (same for 0 and False), so 0 and 1 would be + # interpreted as booleans when ``False`` and ``True`` are already + # supported. + if isinstance(x, (bool, numpy.bool_)): + return bool + # ``long`` objects are kept as is to allow the user to force + # promotion of results by using long constants, e.g. by operating + # a 32-bit array with a long (64-bit) constant. + if isinstance(x, (long_, numpy.int64)): + return long_ + # ``double`` objects are kept as is to allow the user to force + # promotion of results by using double constants, e.g. by operating + # a float (32-bit) array with a double (64-bit) constant. + if isinstance(x, double): + return double + if isinstance(x, numpy.float32): + return float + if isinstance(x, (int, numpy.integer)): + # Constants needing more than 32 bits are always + # considered ``long``, *regardless of the platform*, so we + # can clearly tell 32- and 64-bit constants apart. + if not (min_int32 <= x <= max_int32): + return long_ + return int_ + # The duality of float and double in Python avoids that we have to list + # ``double`` too. + for converter in float, complex: + try: + y = converter(x) + except Exception as err: + continue + if y == x or numpy.isnan(y): + return converter + + +def getKind(x): + converter = bestConstantType(x) + return type_to_kind[converter] + + +def binop(opname, reversed=False, kind=None): + # Getting the named method from self (after reversal) does not + # always work (e.g. int constants do not have a __lt__ method). + opfunc = getattr(operator, "__%s__" % opname) + + @ophelper + def operation(self, other): + if reversed: + self, other = other, self + if allConstantNodes([self, other]): + return ConstantNode(opfunc(self.value, other.value)) + else: + return OpNode(opname, (self, other), kind=kind) + + return operation + + +def func(func, minkind=None, maxkind=None): + @ophelper + def function(*args): + if allConstantNodes(args): + return ConstantNode(func(*[x.value for x in args])) + kind = commonKind(args) + if kind in ('int', 'long'): + # Exception for following NumPy casting rules + #FIXME: this is not always desirable. 
The following + # functions which return ints (for int inputs) on numpy + # but not on numexpr: copy, abs, fmod, ones_like + kind = 'double' + else: + # Apply regular casting rules + if minkind and kind_rank.index(minkind) > kind_rank.index(kind): + kind = minkind + if maxkind and kind_rank.index(maxkind) < kind_rank.index(kind): + kind = maxkind + return FuncNode(func.__name__, args, kind) + + return function + + +@ophelper +def where_func(a, b, c): + if isinstance(a, ConstantNode): + return b if a.value else c + if allConstantNodes([a, b, c]): + return ConstantNode(numpy.where(a, b, c)) + return FuncNode('where', [a, b, c]) + + +def encode_axis(axis): + if isinstance(axis, ConstantNode): + axis = axis.value + if axis is None: + axis = interpreter.allaxes + else: + if axis < 0: + raise ValueError("negative axis are not supported") + if axis > 254: + raise ValueError("cannot encode axis") + return RawNode(axis) + + +def gen_reduce_axis_func(name): + def _func(a, axis=None): + axis = encode_axis(axis) + if isinstance(a, ConstantNode): + return a + if isinstance(a, (bool, int_, long_, float, double, complex)): + a = ConstantNode(a) + return FuncNode(name, [a, axis], kind=a.astKind) + return _func + + +@ophelper +def contains_func(a, b): + return FuncNode('contains', [a, b], kind='bool') + + +@ophelper +def div_op(a, b): + if get_optimization() in ('moderate', 'aggressive'): + if (isinstance(b, ConstantNode) and + (a.astKind == b.astKind) and + a.astKind in ('float', 'double', 'complex')): + return OpNode('mul', [a, ConstantNode(1. / b.value)]) + return OpNode('div', [a, b]) + + +@ophelper +def truediv_op(a, b): + if get_optimization() in ('moderate', 'aggressive'): + if (isinstance(b, ConstantNode) and + (a.astKind == b.astKind) and + a.astKind in ('float', 'double', 'complex')): + return OpNode('mul', [a, ConstantNode(1. / b.value)]) + kind = commonKind([a, b]) + if kind in ('bool', 'int', 'long'): + kind = 'double' + return OpNode('div', [a, b], kind=kind) + + +@ophelper +def rtruediv_op(a, b): + return truediv_op(b, a) + + +@ophelper +def pow_op(a, b): + + if isinstance(b, ConstantNode): + x = b.value + if ( a.astKind in ('int', 'long') and + b.astKind in ('int', 'long') and x < 0) : + raise ValueError( + 'Integers to negative integer powers are not allowed.') + if get_optimization() == 'aggressive': + RANGE = 50 # Approximate break even point with pow(x,y) + # Optimize all integral and half integral powers in [-RANGE, RANGE] + # Note: for complex numbers RANGE could be larger. 
+ if (int(2 * x) == 2 * x) and (-RANGE <= abs(x) <= RANGE): + n = int_(abs(x)) + ishalfpower = int_(abs(2 * x)) % 2 + + def multiply(x, y): + if x is None: return y + return OpNode('mul', [x, y]) + + r = None + p = a + mask = 1 + while True: + if (n & mask): + r = multiply(r, p) + mask <<= 1 + if mask > n: + break + p = OpNode('mul', [p, p]) + if ishalfpower: + kind = commonKind([a]) + if kind in ('int', 'long'): + kind = 'double' + r = multiply(r, OpNode('sqrt', [a], kind)) + if r is None: + r = OpNode('ones_like', [a]) + if x < 0: + # Issue #428 + r = truediv_op(ConstantNode(1), r) + return r + if get_optimization() in ('moderate', 'aggressive'): + if x == -1: + return OpNode('div', [ConstantNode(1), a]) + if x == 0: + return OpNode('ones_like', [a]) + if x == 0.5: + kind = a.astKind + if kind in ('int', 'long'): kind = 'double' + return FuncNode('sqrt', [a], kind=kind) + if x == 1: + return a + if x == 2: + return OpNode('mul', [a, a]) + return OpNode('pow', [a, b]) + +# The functions and the minimum and maximum types accepted +numpy.expm1x = numpy.expm1 +functions = { + 'copy': func(numpy.copy), + 'ones_like': func(numpy.ones_like), + 'sqrt': func(numpy.sqrt, 'float'), + + 'sin': func(numpy.sin, 'float'), + 'cos': func(numpy.cos, 'float'), + 'tan': func(numpy.tan, 'float'), + 'arcsin': func(numpy.arcsin, 'float'), + 'arccos': func(numpy.arccos, 'float'), + 'arctan': func(numpy.arctan, 'float'), + + 'sinh': func(numpy.sinh, 'float'), + 'cosh': func(numpy.cosh, 'float'), + 'tanh': func(numpy.tanh, 'float'), + 'arcsinh': func(numpy.arcsinh, 'float'), + 'arccosh': func(numpy.arccosh, 'float'), + 'arctanh': func(numpy.arctanh, 'float'), + + 'fmod': func(numpy.fmod, 'float'), + 'arctan2': func(numpy.arctan2, 'float'), + + 'log': func(numpy.log, 'float'), + 'log1p': func(numpy.log1p, 'float'), + 'log10': func(numpy.log10, 'float'), + 'exp': func(numpy.exp, 'float'), + 'expm1': func(numpy.expm1, 'float'), + + 'abs': func(numpy.absolute, 'float'), + 'ceil': func(numpy.ceil, 'float', 'double'), + 'floor': func(numpy.floor, 'float', 'double'), + + 'where': where_func, + + 'real': func(numpy.real, 'double', 'double'), + 'imag': func(numpy.imag, 'double', 'double'), + 'complex': func(complex, 'complex'), + 'conj': func(numpy.conj, 'complex'), + + 'sum': gen_reduce_axis_func('sum'), + 'prod': gen_reduce_axis_func('prod'), + 'min': gen_reduce_axis_func('min'), + 'max': gen_reduce_axis_func('max'), + 'contains': contains_func, +} + + +class ExpressionNode(): + """ + An object that represents a generic number object. + + This implements the number special methods so that we can keep + track of how this object has been used. 
+ """ + astType = 'generic' + + def __init__(self, value=None, kind=None, children=None): + self.value = value + if kind is None: + kind = 'none' + self.astKind = kind + if children is None: + self.children = () + else: + self.children = tuple(children) + + def get_real(self): + if self.astType == 'constant': + return ConstantNode(complex(self.value).real) + return OpNode('real', (self,), 'double') + + real = property(get_real) + + def get_imag(self): + if self.astType == 'constant': + return ConstantNode(complex(self.value).imag) + return OpNode('imag', (self,), 'double') + + imag = property(get_imag) + + def __str__(self): + return '%s(%s, %s, %s)' % (self.__class__.__name__, self.value, + self.astKind, self.children) + + def __repr__(self): + return self.__str__() + + def __neg__(self): + return OpNode('neg', (self,)) + + def __invert__(self): + return OpNode('invert', (self,)) + + def __pos__(self): + return self + + # The next check is commented out. See #24 for more info. + + def __bool__(self): + raise TypeError("You can't use Python's standard boolean operators in " + "NumExpr expressions. You should use their bitwise " + "counterparts instead: '&' instead of 'and', " + "'|' instead of 'or', and '~' instead of 'not'.") + + __add__ = __radd__ = binop('add') + __sub__ = binop('sub') + __rsub__ = binop('sub', reversed=True) + __mul__ = __rmul__ = binop('mul') + __truediv__ = truediv_op + __rtruediv__ = rtruediv_op + __pow__ = pow_op + __rpow__ = binop('pow', reversed=True) + __mod__ = binop('mod') + __rmod__ = binop('mod', reversed=True) + + __lshift__ = binop('lshift') + __rlshift__ = binop('lshift', reversed=True) + __rshift__ = binop('rshift') + __rrshift__ = binop('rshift', reversed=True) + + # boolean operations + + __and__ = binop('and', kind='bool') + __or__ = binop('or', kind='bool') + + __gt__ = binop('gt', kind='bool') + __ge__ = binop('ge', kind='bool') + __eq__ = binop('eq', kind='bool') + __ne__ = binop('ne', kind='bool') + __lt__ = binop('gt', reversed=True, kind='bool') + __le__ = binop('ge', reversed=True, kind='bool') + + +class LeafNode(ExpressionNode): + leafNode = True + + +class VariableNode(LeafNode): + astType = 'variable' + + def __init__(self, value=None, kind=None, children=None): + LeafNode.__init__(self, value=value, kind=kind) + + +class RawNode(): + """ + Used to pass raw integers to interpreter. + For instance, for selecting what function to use in func1. + Purposely don't inherit from ExpressionNode, since we don't wan't + this to be used for anything but being walked. 
+ """ + astType = 'raw' + astKind = 'none' + + def __init__(self, value): + self.value = value + self.children = () + + def __str__(self): + return 'RawNode(%s)' % (self.value,) + + __repr__ = __str__ + + +class ConstantNode(LeafNode): + astType = 'constant' + + def __init__(self, value=None, children=None): + kind = getKind(value) + # Python float constants are double precision by default + if kind == 'float' and isinstance(value, float): + kind = 'double' + LeafNode.__init__(self, value=value, kind=kind) + + def __neg__(self): + return ConstantNode(-self.value) + + def __invert__(self): + return ConstantNode(~self.value) + + +class OpNode(ExpressionNode): + astType = 'op' + + def __init__(self, opcode=None, args=None, kind=None): + if (kind is None) and (args is not None): + kind = commonKind(args) + ExpressionNode.__init__(self, value=opcode, kind=kind, children=args) + + +class FuncNode(OpNode): + def __init__(self, opcode=None, args=None, kind=None): + if (kind is None) and (args is not None): + kind = commonKind(args) + OpNode.__init__(self, opcode, args, kind) diff --git a/venv/lib/python3.10/site-packages/numexpr/necompiler.py b/venv/lib/python3.10/site-packages/numexpr/necompiler.py new file mode 100644 index 0000000000000000000000000000000000000000..5126bd73f7e9bce79d28c1970f558931004e32c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numexpr/necompiler.py @@ -0,0 +1,1007 @@ +################################################################### +# Numexpr - Fast numerical array expression evaluator for NumPy. +# +# License: MIT +# Author: See AUTHORS.txt +# +# See LICENSE.txt and LICENSES/*.txt for details about copyright and +# rights to use. +#################################################################### + +from typing import Optional, Dict +import __future__ +import sys +import os +import threading +import re + +import numpy + +is_cpu_amd_intel = False # DEPRECATION WARNING: WILL BE REMOVED IN FUTURE RELEASE +from numexpr import interpreter, expressions, use_vml +from numexpr.utils import CacheDict + +# Declare a double type that does not exist in Python space +double = numpy.double +double = numpy.double + +int_ = numpy.int32 +long_ = numpy.int64 + +typecode_to_kind = {'b': 'bool', 'i': 'int', 'l': 'long', 'f': 'float', 'd': 'double', + 'c': 'complex', 'n': 'none', 's': 'str'} +kind_to_typecode = {'bool': 'b', 'int': 'i', 'long': 'l', 'float': 'f', 'double': 'd', + 'complex': 'c', 'bytes': 's', 'str': 's', 'none': 'n'} +type_to_typecode = {bool: 'b', int_: 'i', long_: 'l', float: 'f', + double: 'd', complex: 'c', bytes: 's', str: 's'} +type_to_kind = expressions.type_to_kind +kind_to_type = expressions.kind_to_type +default_type = kind_to_type[expressions.default_kind] +scalar_constant_kinds = list(kind_to_typecode.keys()) + +# VML functions that are implemented in numexpr +vml_functions = [ + "div", # interp_body.cpp + "inv", # interp_body.cpp + "pow", # interp_body.cpp + # Keep the rest of this list in sync with the ones listed in functions.hpp + "sqrt", + "sin", + "cos", + "tan", + "arcsin", + "arccos", + "arctan", + "sinh", + "cosh", + "tanh", + "arcsinh", + "arccosh", + "arctanh", + "log", + "log1p", + "log10", + "exp", + "expm1", + "absolute", + "conjugate", + "arctan2", + "fmod", + "ceil", + "floor" + ] + + +class ASTNode(): + """Abstract Syntax Tree node. + + Members: + + astType -- type of node (op, constant, variable, raw, or alias) + astKind -- the type of the result (bool, float, etc.) + value -- value associated with this node. 
+ An opcode, numerical value, a variable name, etc. + children -- the children below this node + reg -- the register assigned to the result for this node. + """ + cmpnames = ['astType', 'astKind', 'value', 'children'] + + def __init__(self, astType='generic', astKind='unknown', value=None, children=()): + self.astType = astType + self.astKind = astKind + self.value = value + self.children = tuple(children) + self.reg = None + + def __eq__(self, other): + if self.astType == 'alias': + self = self.value + if other.astType == 'alias': + other = other.value + if not isinstance(other, ASTNode): + return False + for name in self.cmpnames: + if getattr(self, name) != getattr(other, name): + return False + return True + + def __lt__(self,other): + # RAM: this is a fix for issue #88 whereby sorting on constants + # that may be of astKind == 'complex' but type(self.value) == int or float + # Here we let NumPy sort as it will cast data properly for comparison + # when the Python built-ins will raise an error. + if self.astType == 'constant': + if self.astKind == other.astKind: + return numpy.array(self.value) < numpy.array(other.value) + return self.astKind < other.astKind + else: + raise TypeError('Sorting not implemented for astType: %s'%self.astType) + + def __hash__(self): + if self.astType == 'alias': + self = self.value + return hash((self.astType, self.astKind, self.value, self.children)) + + def __str__(self): + return 'AST(%s, %s, %s, %s, %s)' % (self.astType, self.astKind, + self.value, self.children, self.reg) + + def __repr__(self): + return '' % id(self) + + def key(self): + return (self.astType, self.astKind, self.value, self.children) + + def typecode(self): + return kind_to_typecode[self.astKind] + + def postorderWalk(self): + for c in self.children: + for w in c.postorderWalk(): + yield w + yield self + + def allOf(self, *astTypes): + astTypes = set(astTypes) + for w in self.postorderWalk(): + if w.astType in astTypes: + yield w + + +def expressionToAST(ex): + """Take an expression tree made out of expressions.ExpressionNode, + and convert to an AST tree. + + This is necessary as ExpressionNode overrides many methods to act + like a number. + """ + return ASTNode(ex.astType, ex.astKind, ex.value, + [expressionToAST(c) for c in ex.children]) + + +def sigPerms(s): + """Generate all possible signatures derived by upcasting the given + signature. + """ + codes = 'bilfdc' + if not s: + yield '' + elif s[0] in codes: + start = codes.index(s[0]) + for x in codes[start:]: + for y in sigPerms(s[1:]): + yield x + y + elif s[0] == 's': # numbers shall not be cast to strings + for y in sigPerms(s[1:]): + yield 's' + y + else: + yield s + + +def typeCompileAst(ast): + """Assign appropriate types to each node in the AST. + + Will convert opcodes and functions to appropriate upcast version, + and add "cast" ops if needed. + """ + children = list(ast.children) + if ast.astType == 'op': + retsig = ast.typecode() + basesig = ''.join(x.typecode() for x in list(ast.children)) + # Find some operation that will work on an acceptable casting of args. 
+ for sig in sigPerms(basesig): + value = (ast.value + '_' + retsig + sig).encode('ascii') + if value in interpreter.opcodes: + break + else: + for sig in sigPerms(basesig): + funcname = (ast.value + '_' + retsig + sig).encode('ascii') + if funcname in interpreter.funccodes: + value = ('func_%sn' % (retsig + sig)).encode('ascii') + children += [ASTNode('raw', 'none', + interpreter.funccodes[funcname])] + break + else: + raise NotImplementedError( + "couldn't find matching opcode for '%s'" + % (ast.value + '_' + retsig + basesig)) + # First just cast constants, then cast variables if necessary: + for i, (have, want) in enumerate(zip(basesig, sig)): + if have != want: + kind = typecode_to_kind[want] + if children[i].astType == 'constant': + children[i] = ASTNode('constant', kind, children[i].value) + else: + opname = "cast" + children[i] = ASTNode('op', kind, opname, [children[i]]) + else: + value = ast.value + children = ast.children + return ASTNode(ast.astType, ast.astKind, value, + [typeCompileAst(c) for c in children]) + + +class Register(): + """Abstraction for a register in the VM. + + Members: + node -- the AST node this corresponds to + temporary -- True if this isn't an input or output + immediate -- not a register, but an immediate value + n -- the physical register number. + None if no number assigned yet. + """ + + def __init__(self, astnode, temporary=False): + self.node = astnode + self.temporary = temporary + self.immediate = False + self.n = None + + def __str__(self): + if self.temporary: + name = 'Temporary' + else: + name = 'Register' + return '%s(%s, %s, %s)' % (name, self.node.astType, + self.node.astKind, self.n,) + + def __repr__(self): + return self.__str__() + + +class Immediate(Register): + """Representation of an immediate (integer) operand, instead of + a register. + """ + + def __init__(self, astnode): + Register.__init__(self, astnode) + self.immediate = True + + def __str__(self): + return 'Immediate(%d)' % (self.node.value,) + + +_flow_pat = r'[\;\[\:]' +_dunder_pat = r'(^|[^\w])__[\w]+__($|[^\w])' +_attr_pat = r'\.\b(?!(real|imag|(\d*[eE]?[+-]?\d+)|\d*j)\b)' +_blacklist_re = re.compile(f'{_flow_pat}|{_dunder_pat}|{_attr_pat}') + +def stringToExpression(s, types, context, sanitize: bool=True): + """Given a string, convert it to a tree of ExpressionNode's. + """ + # sanitize the string for obvious attack vectors that NumExpr cannot + # parse into its homebrew AST. This is to protect the call to `eval` below. + # We forbid `;`, `:`. `[` and `__`, and attribute access via '.'. + # We cannot ban `.real` or `.imag` however... + # We also cannot ban `.\d*j`, where `\d*` is some digits (or none), e.g. 
1.5j, 1.j + if sanitize: + no_whitespace = re.sub(r'\s+', '', s) + skip_quotes = re.sub(r'(\'[^\']*\')', '', no_whitespace) + if _blacklist_re.search(skip_quotes) is not None: + raise ValueError(f'Expression {s} has forbidden control characters.') + + old_ctx = expressions._context.get_current_context() + try: + expressions._context.set_new_context(context) + # first compile to a code object to determine the names + if context.get('truediv', False): + flags = __future__.division.compiler_flag + else: + flags = 0 + c = compile(s, '', 'eval', flags) + # make VariableNode's for the names + names = {} + for name in c.co_names: + if name == "None": + names[name] = None + elif name == "True": + names[name] = True + elif name == "False": + names[name] = False + else: + t = types.get(name, default_type) + names[name] = expressions.VariableNode(name, type_to_kind[t]) + names.update(expressions.functions) + + # now build the expression + ex = eval(c, names) + + if expressions.isConstant(ex): + ex = expressions.ConstantNode(ex, expressions.getKind(ex)) + elif not isinstance(ex, expressions.ExpressionNode): + raise TypeError("unsupported expression type: %s" % type(ex)) + finally: + expressions._context.set_new_context(old_ctx) + return ex + + +def isReduction(ast): + prefixes = (b'sum_', b'prod_', b'min_', b'max_') + return any(ast.value.startswith(p) for p in prefixes) + + +def getInputOrder(ast, input_order=None): + """ + Derive the input order of the variables in an expression. + """ + variables = {} + for a in ast.allOf('variable'): + variables[a.value] = a + variable_names = set(variables.keys()) + + if input_order: + if variable_names != set(input_order): + raise ValueError( + "input names (%s) don't match those found in expression (%s)" + % (input_order, variable_names)) + + ordered_names = input_order + else: + ordered_names = list(variable_names) + ordered_names.sort() + ordered_variables = [variables[v] for v in ordered_names] + return ordered_variables + + +def convertConstantToKind(x, kind): + # Exception for 'float' types that will return the NumPy float32 type + if kind == 'float': + return numpy.float32(x) + elif isinstance(x,str): + return x.encode('ascii') + return kind_to_type[kind](x) + + +def getConstants(ast): + """ + RAM: implemented magic method __lt__ for ASTNode to fix issues + #88 and #209. The following test code works now, as does the test suite. + + import numexpr as ne + a = 1 + 3j; b = 5.0 + ne.evaluate('a*2 + 15j - b') + """ + constant_registers = set([node.reg for node in ast.allOf("constant")]) + constants_order = sorted([r.node for r in constant_registers]) + constants = [convertConstantToKind(a.value, a.astKind) + for a in constants_order] + return constants_order, constants + + +def sortNodesByOrder(nodes, order): + order_map = {} + for i, (_, v, _) in enumerate(order): + order_map[v] = i + dec_nodes = [(order_map[n.value], n) for n in nodes] + dec_nodes.sort() + return [a[1] for a in dec_nodes] + + +def assignLeafRegisters(inodes, registerMaker): + """ + Assign new registers to each of the leaf nodes. + """ + leafRegisters = {} + for node in inodes: + key = node.key() + if key in leafRegisters: + node.reg = leafRegisters[key] + else: + node.reg = leafRegisters[key] = registerMaker(node) + + +def assignBranchRegisters(inodes, registerMaker): + """ + Assign temporary registers to each of the branch nodes. + """ + for node in inodes: + node.reg = registerMaker(node, temporary=True) + + +def collapseDuplicateSubtrees(ast): + """ + Common subexpression elimination. 
+ """ + seen = {} + aliases = [] + for a in ast.allOf('op'): + if a in seen: + target = seen[a] + a.astType = 'alias' + a.value = target + a.children = () + aliases.append(a) + else: + seen[a] = a + # Set values and registers so optimizeTemporariesAllocation + # doesn't get confused + for a in aliases: + while a.value.astType == 'alias': + a.value = a.value.value + return aliases + + +def optimizeTemporariesAllocation(ast): + """ + Attempt to minimize the number of temporaries needed, by reusing old ones. + """ + nodes = [n for n in ast.postorderWalk() if n.reg.temporary] + users_of = dict((n.reg, set()) for n in nodes) + + node_regs = dict((n, set(c.reg for c in n.children if c.reg.temporary)) + for n in nodes) + if nodes and nodes[-1] is not ast: + nodes_to_check = nodes + [ast] + else: + nodes_to_check = nodes + for n in nodes_to_check: + for c in n.children: + if c.reg.temporary: + users_of[c.reg].add(n) + + unused = dict([(tc, set()) for tc in scalar_constant_kinds]) + for n in nodes: + for c in n.children: + reg = c.reg + if reg.temporary: + users = users_of[reg] + users.discard(n) + if not users: + unused[reg.node.astKind].add(reg) + if unused[n.astKind]: + reg = unused[n.astKind].pop() + users_of[reg] = users_of[n.reg] + n.reg = reg + + +def setOrderedRegisterNumbers(order, start): + """ + Given an order of nodes, assign register numbers. + """ + for i, node in enumerate(order): + node.reg.n = start + i + return start + len(order) + + +def setRegisterNumbersForTemporaries(ast, start): + """ + Assign register numbers for temporary registers, keeping track of + aliases and handling immediate operands. + """ + seen = 0 + signature = '' + aliases = [] + for node in ast.postorderWalk(): + if node.astType == 'alias': + aliases.append(node) + node = node.value + if node.reg.immediate: + node.reg.n = node.value + continue + reg = node.reg + if reg.n is None: + reg.n = start + seen + seen += 1 + signature += reg.node.typecode() + for node in aliases: + node.reg = node.value.reg + return start + seen, signature + + +def convertASTtoThreeAddrForm(ast): + """ + Convert an AST to a three address form. + + Three address form is (op, reg1, reg2, reg3), where reg1 is the + destination of the result of the instruction. + + I suppose this should be called three register form, but three + address form is found in compiler theory. + """ + return [(node.value, node.reg) + tuple([c.reg for c in node.children]) + for node in ast.allOf('op')] + + +def compileThreeAddrForm(program): + """ + Given a three address form of the program, compile it a string that + the VM understands. 
+ """ + + def nToChr(reg): + if reg is None: + return b'\xff' + elif reg.n < 0: + raise ValueError("negative value for register number %s" % reg.n) + else: + return bytes([reg.n]) + + def quadrupleToString(opcode, store, a1=None, a2=None): + cop = chr(interpreter.opcodes[opcode]).encode('ascii') + cs = nToChr(store) + ca1 = nToChr(a1) + ca2 = nToChr(a2) + return cop + cs + ca1 + ca2 + + def toString(args): + while len(args) < 4: + args += (None,) + opcode, store, a1, a2 = args[:4] + s = quadrupleToString(opcode, store, a1, a2) + l = [s] + args = args[4:] + while args: + s = quadrupleToString(b'noop', *args[:3]) + l.append(s) + args = args[3:] + return b''.join(l) + + prog_str = b''.join([toString(t) for t in program]) + return prog_str + + +context_info = [ + ('optimization', ('none', 'moderate', 'aggressive'), 'aggressive'), + ('truediv', (False, True, 'auto'), 'auto') +] + + +def getContext(kwargs, _frame_depth=1): + d = kwargs.copy() + context = {} + for name, allowed, default in context_info: + value = d.pop(name, default) + if value in allowed: + context[name] = value + else: + raise ValueError("'%s' must be one of %s" % (name, allowed)) + + if d: + raise ValueError("Unknown keyword argument '%s'" % d.popitem()[0]) + if context['truediv'] == 'auto': + caller_globals = sys._getframe(_frame_depth + 1).f_globals + context['truediv'] = caller_globals.get('division', None) == __future__.division + + return context + + +def precompile(ex, signature=(), context={}, sanitize: bool=True): + """ + Compile the expression to an intermediate form. + """ + types = dict(signature) + input_order = [name for (name, type_) in signature] + + if isinstance(ex, str): + ex = stringToExpression(ex, types, context, sanitize) + + # the AST is like the expression, but the node objects don't have + # any odd interpretations + + ast = expressionToAST(ex) + + if ex.astType != 'op': + ast = ASTNode('op', value='copy', astKind=ex.astKind, children=(ast,)) + + ast = typeCompileAst(ast) + + aliases = collapseDuplicateSubtrees(ast) + + assignLeafRegisters(ast.allOf('raw'), Immediate) + assignLeafRegisters(ast.allOf('variable', 'constant'), Register) + assignBranchRegisters(ast.allOf('op'), Register) + + # assign registers for aliases + for a in aliases: + a.reg = a.value.reg + + input_order = getInputOrder(ast, input_order) + constants_order, constants = getConstants(ast) + + if isReduction(ast): + ast.reg.temporary = False + + optimizeTemporariesAllocation(ast) + + ast.reg.temporary = False + r_output = 0 + ast.reg.n = 0 + + r_inputs = r_output + 1 + r_constants = setOrderedRegisterNumbers(input_order, r_inputs) + r_temps = setOrderedRegisterNumbers(constants_order, r_constants) + r_end, tempsig = setRegisterNumbersForTemporaries(ast, r_temps) + + threeAddrProgram = convertASTtoThreeAddrForm(ast) + input_names = tuple([a.value for a in input_order]) + signature = ''.join(type_to_typecode[types.get(x, default_type)] + for x in input_names) + return threeAddrProgram, signature, tempsig, constants, input_names + + +def NumExpr(ex, signature=(), sanitize: bool=True, **kwargs): + """ + Compile an expression built using E. variables to a function. + + ex can also be specified as a string "2*a+3*b". + + The order of the input variables and their types can be specified using the + signature parameter, which is a list of (name, type) pairs. + + Returns a `NumExpr` object containing the compiled function. 
+ """ + + # In that case _frame_depth is wrong (it should be 2) but it doesn't matter + # since it will not be used (because truediv='auto' has already been + # translated to either True or False). + _frame_depth = 1 + context = getContext(kwargs, _frame_depth=_frame_depth) + threeAddrProgram, inputsig, tempsig, constants, input_names = precompile(ex, signature, context, sanitize=sanitize) + program = compileThreeAddrForm(threeAddrProgram) + return interpreter.NumExpr(inputsig.encode('ascii'), + tempsig.encode('ascii'), + program, constants, input_names) + + +def disassemble(nex): + """ + Given a NumExpr object, return a list which is the program disassembled. + """ + rev_opcodes = {} + for op in interpreter.opcodes: + rev_opcodes[interpreter.opcodes[op]] = op + r_constants = 1 + len(nex.signature) + r_temps = r_constants + len(nex.constants) + + def parseOp(op): + name, sig = [*op.rsplit(b'_', 1), ''][:2] + return name, sig + + def getArg(pc, offset): + arg = nex.program[pc + (offset if offset < 4 else offset+1)] + _, sig = parseOp(rev_opcodes.get(nex.program[pc])) + try: + code = sig[offset - 1] + except IndexError: + return None + + code = bytes([code]) + + if arg == 255: + return None + if code != b'n': + if arg == 0: + return b'r0' + elif arg < r_constants: + return ('r%d[%s]' % (arg, nex.input_names[arg - 1])).encode('ascii') + elif arg < r_temps: + return ('c%d[%s]' % (arg, nex.constants[arg - r_constants])).encode('ascii') + else: + return ('t%d' % (arg,)).encode('ascii') + else: + return arg + + source = [] + for pc in range(0, len(nex.program), 4): + op = rev_opcodes.get(nex.program[pc]) + _, sig = parseOp(op) + parsed = [op] + for i in range(len(sig)): + parsed.append(getArg(pc, 1 + i)) + while len(parsed) < 4: + parsed.append(None) + source.append(parsed) + return source + + +def getType(a): + kind = a.dtype.kind + if kind == 'b': + return bool + if kind in 'iu': + if a.dtype.itemsize > 4: + return long_ # ``long`` is for integers of more than 32 bits + if kind == 'u' and a.dtype.itemsize == 4: + return long_ # use ``long`` here as an ``int`` is not enough + return int_ + if kind == 'f': + if a.dtype.itemsize > 4: + return double # ``double`` is for floats of more than 32 bits + return float + if kind == 'c': + return complex + if kind == 'S': + return bytes + if kind == 'U': + raise ValueError('NumExpr 2 does not support Unicode as a dtype.') + raise ValueError("unknown type %s" % a.dtype.name) + + +def getExprNames(text, context, sanitize: bool=True): + ex = stringToExpression(text, {}, context, sanitize) + ast = expressionToAST(ex) + input_order = getInputOrder(ast, None) + #try to figure out if vml operations are used by expression + if not use_vml: + ex_uses_vml = False + else: + for node in ast.postorderWalk(): + if node.astType == 'op' and node.value in vml_functions: + ex_uses_vml = True + break + else: + ex_uses_vml = False + + return [a.value for a in input_order], ex_uses_vml + + +def getArguments(names, local_dict=None, global_dict=None, _frame_depth: int=2): + """ + Get the arguments based on the names. + """ + call_frame = sys._getframe(_frame_depth) + + clear_local_dict = False + if local_dict is None: + local_dict = call_frame.f_locals + clear_local_dict = True + try: + frame_globals = call_frame.f_globals + if global_dict is None: + global_dict = frame_globals + + # If `call_frame` is the top frame of the interpreter we can't clear its + # `local_dict`, because it is actually the `global_dict`. 
+ clear_local_dict = clear_local_dict and not frame_globals is local_dict + + arguments = [] + for name in names: + try: + a = local_dict[name] + except KeyError: + a = global_dict[name] + arguments.append(numpy.asarray(a)) + finally: + # If we generated local_dict via an explicit reference to f_locals, + # clear the dict to prevent creating extra ref counts in the caller's scope + # See https://github.com/pydata/numexpr/issues/310 + if clear_local_dict: + local_dict.clear() + + return arguments + + +# Dictionaries for caching variable names and compiled expressions +_names_cache = CacheDict(256) +_numexpr_cache = CacheDict(256) +_numexpr_last = {} +evaluate_lock = threading.Lock() + +# MAYBE: decorate this function to add attributes instead of having the +# _numexpr_last dictionary? +def validate(ex: str, + local_dict: Optional[Dict] = None, + global_dict: Optional[Dict] = None, + out: numpy.ndarray = None, + order: str = 'K', + casting: str = 'safe', + _frame_depth: int = 2, + sanitize: Optional[bool] = None, + **kwargs) -> Optional[Exception]: + r""" + Validate a NumExpr expression with the given `local_dict` or `locals()`. + Returns `None` on success and the Exception object if one occurs. Note that + you can proceed directly to call `re_evaluate()` if you use `validate()` + to sanitize your expressions and variables in advance. + + Parameters + ---------- + ex: str + a string forming an expression, like "2*a+3*b". The values for "a" + and "b" will by default be taken from the calling function's frame + (through use of sys._getframe()). Alternatively, they can be specified + using the 'local_dict' or 'global_dict' arguments. + + local_dict: dictionary, optional + A dictionary that replaces the local operands in current frame. + + global_dict: dictionary, optional + A dictionary that replaces the global operands in current frame. + + out: NumPy array, optional + An existing array where the outcome is going to be stored. Care is + required so that this array has the same shape and type than the + actual outcome of the computation. Useful for avoiding unnecessary + new array allocations. + + order: {'C', 'F', 'A', or 'K'}, optional + Controls the iteration order for operands. 'C' means C order, 'F' + means Fortran order, 'A' means 'F' order if all the arrays are + Fortran contiguous, 'C' order otherwise, and 'K' means as close to + the order the array elements appear in memory as possible. For + efficient computations, typically 'K'eep order (the default) is + desired. + + casting: {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when making a copy or + buffering. Setting this to 'unsafe' is not recommended, as it can + adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. + + sanitize: Optional[bool] + Both `validate` and by extension `evaluate` call `eval(ex)`, which is + potentially dangerous on unsanitized inputs. As such, NumExpr by default + performs simple sanitization, banning the character ':;[', the + dunder '__[\w+]__', and attribute access to all but '.real' and '.imag'. + + Using `None` defaults to `True` unless the environment variable + `NUMEXPR_SANITIZE=0` is set, in which case the default is `False`. 
+ Nominally this can be set via `os.environ` before `import numexpr`. + + _frame_depth: int + The calling frame depth. Unless you are a NumExpr developer you should + not set this value. + + Note + ---- + + """ + global _numexpr_last + + try: + + if not isinstance(ex, str): + raise ValueError("must specify expression as a string") + + if sanitize is None: + if 'NUMEXPR_SANITIZE' in os.environ: + sanitize = bool(int(os.environ['NUMEXPR_SANITIZE'])) + else: + sanitize = True + + # Get the names for this expression + context = getContext(kwargs) + expr_key = (ex, tuple(sorted(context.items()))) + if expr_key not in _names_cache: + _names_cache[expr_key] = getExprNames(ex, context, sanitize=sanitize) + names, ex_uses_vml = _names_cache[expr_key] + arguments = getArguments(names, local_dict, global_dict, _frame_depth=_frame_depth) + + # Create a signature + signature = [(name, getType(arg)) for (name, arg) in + zip(names, arguments)] + + # Look up numexpr if possible. + numexpr_key = expr_key + (tuple(signature),) + try: + compiled_ex = _numexpr_cache[numexpr_key] + except KeyError: + compiled_ex = _numexpr_cache[numexpr_key] = NumExpr(ex, signature, sanitize=sanitize, **context) + kwargs = {'out': out, 'order': order, 'casting': casting, + 'ex_uses_vml': ex_uses_vml} + _numexpr_last = dict(ex=compiled_ex, argnames=names, kwargs=kwargs) + except Exception as e: + return e + return None + +def evaluate(ex: str, + local_dict: Optional[Dict] = None, + global_dict: Optional[Dict] = None, + out: numpy.ndarray = None, + order: str = 'K', + casting: str = 'safe', + sanitize: Optional[bool] = None, + _frame_depth: int = 3, + **kwargs) -> numpy.ndarray: + r""" + Evaluate a simple array expression element-wise using the virtual machine. + + Parameters + ---------- + ex: str + a string forming an expression, like "2*a+3*b". The values for "a" + and "b" will by default be taken from the calling function's frame + (through use of sys._getframe()). Alternatively, they can be specified + using the 'local_dict' or 'global_dict' arguments. + + local_dict: dictionary, optional + A dictionary that replaces the local operands in current frame. + + global_dict: dictionary, optional + A dictionary that replaces the global operands in current frame. + + out: NumPy array, optional + An existing array where the outcome is going to be stored. Care is + required so that this array has the same shape and type than the + actual outcome of the computation. Useful for avoiding unnecessary + new array allocations. + + order: {'C', 'F', 'A', or 'K'}, optional + Controls the iteration order for operands. 'C' means C order, 'F' + means Fortran order, 'A' means 'F' order if all the arrays are + Fortran contiguous, 'C' order otherwise, and 'K' means as close to + the order the array elements appear in memory as possible. For + efficient computations, typically 'K'eep order (the default) is + desired. + + casting: {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional + Controls what kind of data casting may occur when making a copy or + buffering. Setting this to 'unsafe' is not recommended, as it can + adversely affect accumulations. + + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. 
+ + sanitize: bool + Both `validate` and by extension `evaluate` call `eval(ex)`, which is + potentially dangerous on unsanitized inputs. As such, NumExpr by default + performs simple sanitization, banning the character ':;[', the + dunder '__[\w+]__', and attribute access to all but '.real' and '.imag'. + + Using `None` defaults to `True` unless the environment variable + `NUMEXPR_SANITIZE=0` is set, in which case the default is `False`. + Nominally this can be set via `os.environ` before `import numexpr`. + + _frame_depth: int + The calling frame depth. Unless you are a NumExpr developer you should + not set this value. + + Note + ---- + Both `validate` and by extension `evaluate` call `eval(ex)`, which is + potentially dangerous on unsanitized inputs. As such, NumExpr does some + sanitization, banning the character ':;[', the dunder '__', and attribute + access to all but '.r' for real and '.i' for imag access to complex numbers. + """ + # We could avoid code duplication if we called validate and then re_evaluate + # here, but they we have difficulties with the `sys.getframe(2)` call in + # `getArguments` + e = validate(ex, local_dict=local_dict, global_dict=global_dict, + out=out, order=order, casting=casting, + _frame_depth=_frame_depth, sanitize=sanitize, **kwargs) + if e is None: + return re_evaluate(local_dict=local_dict, global_dict=global_dict, _frame_depth=_frame_depth) + else: + raise e + +def re_evaluate(local_dict: Optional[Dict] = None, + global_dict: Optional[Dict] = None, + _frame_depth: int=2) -> numpy.ndarray: + """ + Re-evaluate the previous executed array expression without any check. + + This is meant for accelerating loops that are re-evaluating the same + expression repeatedly without changing anything else than the operands. + If unsure, use evaluate() which is safer. + + Parameters + ---------- + local_dict: dictionary, optional + A dictionary that replaces the local operands in current frame. + _frame_depth: int + The calling frame depth. Unless you are a NumExpr developer you should + not set this value. + """ + global _numexpr_last + + try: + compiled_ex = _numexpr_last['ex'] + except KeyError: + raise RuntimeError("A previous evaluate() execution was not found, please call `validate` or `evaluate` once before `re_evaluate`") + argnames = _numexpr_last['argnames'] + args = getArguments(argnames, local_dict, global_dict, _frame_depth=_frame_depth) + kwargs = _numexpr_last['kwargs'] + with evaluate_lock: + return compiled_ex(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/numexpr/tests/__init__.py b/venv/lib/python3.10/site-packages/numexpr/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3fff4117bc23d630dcba2fb87aad5b08cef5b839 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numexpr/tests/__init__.py @@ -0,0 +1,14 @@ +################################################################### +# Numexpr - Fast numerical array expression evaluator for NumPy. +# +# License: MIT +# Author: See AUTHORS.txt +# +# See LICENSE.txt and LICENSES/*.txt for details about copyright and +# rights to use. 
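# A minimal usage sketch of the evaluate()/validate()/re_evaluate() API whose
# docstrings appear above -- an illustrative aside, not taken from the numexpr
# sources.  The arrays `a` and `b` and the expression string are arbitrary
# examples; NumPy and numexpr are assumed to be importable.
import numpy as np
import numexpr as ne

a = np.arange(10.0)
b = 0.1 * a

# One-shot evaluation; operands are looked up in the calling frame unless
# local_dict/global_dict are supplied explicitly.
r1 = ne.evaluate("2*a + 3*b")

# validate() compiles and caches the expression without executing it and
# returns None on success (or the Exception instance on failure); the cached
# program can then be re-run cheaply with re_evaluate(), e.g. inside a loop
# whose operands change but whose expression does not.
assert ne.validate("2*a + 3*b", local_dict={"a": a, "b": b}) is None
r2 = ne.re_evaluate(local_dict={"a": a, "b": b})
assert np.allclose(r1, r2)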
+#################################################################### + +from numexpr.tests.test_numexpr import test, print_versions + +if __name__ == '__main__': + test() diff --git a/venv/lib/python3.10/site-packages/numexpr/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/numexpr/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59210eea9f87ff6a4f50a53445a6ad00ebe41874 Binary files /dev/null and b/venv/lib/python3.10/site-packages/numexpr/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numexpr/tests/__pycache__/test_numexpr.cpython-310.pyc b/venv/lib/python3.10/site-packages/numexpr/tests/__pycache__/test_numexpr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b140a345a5fc7dc1ff600f41a642e4228f5c7eba Binary files /dev/null and b/venv/lib/python3.10/site-packages/numexpr/tests/__pycache__/test_numexpr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/numexpr/tests/test_numexpr.py b/venv/lib/python3.10/site-packages/numexpr/tests/test_numexpr.py new file mode 100644 index 0000000000000000000000000000000000000000..bb5b177682e3c614dba00a91bcabde1eef286941 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numexpr/tests/test_numexpr.py @@ -0,0 +1,1348 @@ + +################################################################### +# Numexpr - Fast numerical array expression evaluator for NumPy. +# +# License: MIT +# Author: See AUTHORS.txt +# +# See LICENSE.txt and LICENSES/*.txt for details about copyright and +# rights to use. +#################################################################### + + +import os +import sys +import platform +import warnings +from contextlib import contextmanager +import subprocess + +import numpy as np +from numpy import ( + array, arange, empty, zeros, int32, int64, uint16, cdouble, float64, rec, + copy, ones_like, where, all as alltrue, linspace, + sum, prod, sqrt, fmod, floor, ceil, + sin, cos, tan, arcsin, arccos, arctan, arctan2, + sinh, cosh, tanh, arcsinh, arccosh, arctanh, + log, log1p, log10, exp, expm1, conj) +import numpy +from numpy.testing import (assert_equal, assert_array_equal, + assert_array_almost_equal, assert_allclose) +from numpy import shape, allclose, array_equal, ravel, isnan, isinf + +import numexpr +from numexpr import E, NumExpr, evaluate, re_evaluate, validate, disassemble, use_vml +from numexpr.expressions import ConstantNode +from numexpr.utils import detect_number_of_cores + +import unittest + +TestCase = unittest.TestCase + +double = np.double +long = int + + +class test_numexpr(TestCase): + """Testing with 1 thread""" + nthreads = 1 + + def setUp(self): + numexpr.set_num_threads(self.nthreads) + + def test_simple(self): + ex = 2.0 * E.a + 3.0 * E.b * E.c + sig = [('a', double), ('b', double), ('c', double)] + func = NumExpr(ex, signature=sig) + x = func(array([1., 2, 3]), array([4., 5, 6]), array([7., 8, 9])) + assert_array_equal(x, array([86., 124., 168.])) + + def test_simple_expr_small_array(self): + func = NumExpr(E.a) + x = arange(100.0) + y = func(x) + assert_array_equal(x, y) + + def test_simple_expr(self): + func = NumExpr(E.a) + x = arange(1e6) + y = func(x) + assert_array_equal(x, y) + + def test_rational_expr(self): + func = NumExpr((E.a + 2.0 * E.b) / (1 + E.a + 4 * E.b * E.b)) + a = arange(1e6) + b = arange(1e6) * 0.1 + x = (a + 2 * b) / (1 + a + 4 * b * b) + y = func(a, b) + assert_array_almost_equal(x, y) + + def 
test_reductions(self): + # Check that they compile OK. + assert_equal(disassemble( + NumExpr("sum(x**2+2, axis=None)", [('x', double)])), + [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'), + (b'add_ddd', b't3', b't3', b'c2[2.0]'), + (b'sum_ddn', b'r0', b't3', None)]) + assert_equal(disassemble( + NumExpr("sum(x**2+2, axis=1)", [('x', double)])), + [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'), + (b'add_ddd', b't3', b't3', b'c2[2.0]'), + (b'sum_ddn', b'r0', b't3', 1)]) + assert_equal(disassemble( + NumExpr("prod(x**2+2, axis=2)", [('x', double)])), + [(b'mul_ddd', b't3', b'r1[x]', b'r1[x]'), + (b'add_ddd', b't3', b't3', b'c2[2.0]'), + (b'prod_ddn', b'r0', b't3', 2)]) + # Check that full reductions work. + x = zeros(100000) + .01 # checks issue #41 + assert_allclose(evaluate("sum(x+2,axis=None)"), sum(x + 2, axis=None)) + assert_allclose(evaluate("sum(x+2,axis=0)"), sum(x + 2, axis=0)) + assert_allclose(evaluate("prod(x,axis=0)"), prod(x, axis=0)) + assert_allclose(evaluate("min(x)"), np.min(x)) + assert_allclose(evaluate("max(x,axis=0)"), np.max(x, axis=0)) + + # Fix for #277, array with leading singleton dimension + x = np.arange(10).reshape(1,10) + assert_allclose(evaluate("sum(x,axis=None)"), sum(x, axis=None) ) + assert_allclose(evaluate("sum(x,axis=0)"), sum(x, axis=0) ) + assert_allclose(evaluate("sum(x,axis=1)"), sum(x, axis=1) ) + + x = arange(10.0) + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0)) + + x = arange(100.0) + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0)) + assert_allclose(evaluate("min(x-1,axis=0)"), np.min(x - 1, axis=0)) + assert_allclose(evaluate("max(x-1,axis=0)"), np.max(x - 1, axis=0)) + x = linspace(0.1, 1.0, 2000) + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0)) + assert_allclose(evaluate("min(x-1,axis=0)"), np.min(x - 1, axis=0)) + assert_allclose(evaluate("max(x-1,axis=0)"), np.max(x - 1, axis=0)) + + # Check that reductions along an axis work + y = arange(9.0).reshape(3, 3) + assert_allclose(evaluate("sum(y**2, axis=1)"), sum(y ** 2, axis=1)) + assert_allclose(evaluate("sum(y**2, axis=0)"), sum(y ** 2, axis=0)) + assert_allclose(evaluate("sum(y**2, axis=None)"), sum(y ** 2, axis=None)) + assert_allclose(evaluate("prod(y**2, axis=1)"), prod(y ** 2, axis=1)) + assert_allclose(evaluate("prod(y**2, axis=0)"), prod(y ** 2, axis=0)) + assert_allclose(evaluate("prod(y**2, axis=None)"), prod(y ** 2, axis=None)) + assert_allclose(evaluate("min(y**2, axis=1)"), np.min(y ** 2, axis=1)) + assert_allclose(evaluate("min(y**2, axis=0)"), np.min(y ** 2, axis=0)) + assert_allclose(evaluate("min(y**2, axis=None)"), np.min(y ** 2, axis=None)) + assert_allclose(evaluate("max(y**2, axis=1)"), np.max(y ** 2, axis=1)) + assert_allclose(evaluate("max(y**2, axis=0)"), np.max(y ** 2, axis=0)) + assert_allclose(evaluate("max(y**2, axis=None)"), np.max(y ** 2, axis=None)) + # Check integers + x = arange(10.) 
+ x = x.astype(int) + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0)) + # Check longs + x = x.astype(int) + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x**2+2,axis=0)"), prod(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("min(x**2+2,axis=0)"), np.min(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("max(x**2+2,axis=0)"), np.max(x ** 2 + 2, axis=0)) + # Check complex + x = x + .1j + assert_allclose(evaluate("sum(x**2+2,axis=0)"), sum(x ** 2 + 2, axis=0)) + assert_allclose(evaluate("prod(x-1,axis=0)"), prod(x - 1, axis=0)) + + def test_in_place(self): + x = arange(10000.).reshape(1000, 10) + evaluate("x + 3", out=x) + assert_equal(x, arange(10000.).reshape(1000, 10) + 3) + y = arange(10) + evaluate("(x - 3) * y + (x - 3)", out=x) + assert_equal(x, arange(10000.).reshape(1000, 10) * (arange(10) + 1)) + + def test_axis(self): + y = arange(9.0).reshape(3, 3) + try: + evaluate("sum(y, axis=2)") + except ValueError: + pass + else: + raise ValueError("should raise exception!") + try: + evaluate("sum(y, axis=-3)") + except ValueError: + pass + else: + raise ValueError("should raise exception!") + try: + # Negative axis are not supported + evaluate("sum(y, axis=-1)") + except ValueError: + pass + else: + raise ValueError("should raise exception!") + + def test_r0_reuse(self): + assert_equal(disassemble(NumExpr("x * x + 2", [('x', double)])), + [(b'mul_ddd', b'r0', b'r1[x]', b'r1[x]'), + (b'add_ddd', b'r0', b'r0', b'c2[2.0]')]) + + def test_str_contains_basic0(self): + res = evaluate('contains(b"abc", b"ab")') + assert_equal(res, True) + + def test_str_contains_basic1(self): + haystack = array([b'abc', b'def', b'xyz', b'x11', b'za']) + res = evaluate('contains(haystack, b"ab")') + assert_equal(res, [True, False, False, False, False]) + + def test_str_contains_basic2(self): + haystack = array([b'abc', b'def', b'xyz', b'x11', b'za']) + res = evaluate('contains(b"abcd", haystack)') + assert_equal(res, [True, False, False, False, False]) + + def test_str_contains_basic3(self): + haystacks = array( + [b'abckkk', b'adef', b'xyz', b'x11abcp', b'za', b'abc']) + needles = array( + [b'abc', b'def', b'aterr', b'oot', b'zu', b'ab']) + res = evaluate('contains(haystacks, needles)') + assert_equal(res, [True, True, False, False, False, True]) + + def test_str_contains_basic4(self): + needles = array( + [b'abc', b'def', b'aterr', b'oot', b'zu', b'ab c', b' abc', + b'abc ']) + res = evaluate('contains(b"test abc here", needles)') + assert_equal(res, [True, False, False, False, False, False, True, True]) + + def test_str_contains_basic5(self): + needles = array( + [b'abc', b'ab c', b' abc', b' abc ', b'\tabc', b'c h']) + res = evaluate('contains(b"test abc here", needles)') + assert_equal(res, [True, False, True, True, False, True]) + + # Compare operation of Python 'in' operator with 'contains' using a + # product of two lists of strings. 
+ + def test_str_contains_listproduct(self): + from itertools import product + + small = [ + 'It w', 'as th', 'e Whit', 'e Rab', 'bit,', ' tro', 'tting', + ' sl', 'owly', ' back ', 'again,', ' and', ' lo', 'okin', 'g a', + 'nxious', 'ly a', 'bou', 't a', 's it w', 'ent,', ' as i', 'f it', + ' had l', 'ost', ' some', 'thi', 'ng; a', 'nd ', 'she ', 'heard ', + 'it mut', 'terin', 'g to ', 'its', 'elf ', "'The", + ' Duch', 'ess! T', 'he ', 'Duches', 's! Oh ', 'my dea', 'r paws', + '! Oh ', 'my f', 'ur ', 'and ', 'whiske', 'rs! ', 'She', "'ll g", + 'et me', ' ex', 'ecu', 'ted, ', 'as su', 're a', 's f', 'errets', + ' are f', 'errets', '! Wh', 'ere ', 'CAN', ' I hav', 'e d', + 'roppe', 'd t', 'hem,', ' I wo', 'nder?', "' A", 'lice', + ' gu', 'essed', ' in a', ' mom', 'ent ', 'tha', 't it w', 'as ', + 'looki', 'ng f', 'or ', 'the fa', 'n and ', 'the', ' pai', + 'r of w', 'hit', 'e kid', ' glo', 'ves', ', and ', 'she ', + 'very g', 'ood', '-na', 'turedl', 'y be', 'gan h', 'unt', 'ing', + ' about', ' for t', 'hem', ', but', ' they ', 'wer', 'e nowh', + 'ere to', ' be', ' se', 'en--', 'ever', 'ythin', 'g seem', 'ed ', + 'to ', 'have c', 'hang', 'ed ', 'since', ' he', 'r swim', ' in', + ' the', ' pool,', ' and', ' the g', 'reat ', 'hal', 'l, w', 'ith', + ' th', 'e gl', 'ass t', 'abl', 'e and ', 'the', ' li', 'ttle', + ' doo', 'r, ha', 'd v', 'ani', 'shed c', 'omp', 'lete', 'ly.'] + big = [ + 'It wa', 's the', ' W', 'hit', 'e ', 'Ra', 'bb', 'it, t', 'ro', + 'tting s', 'lowly', ' back ', 'agai', 'n, and', ' l', 'ookin', + 'g ', 'an', 'xiously', ' about ', 'as it w', 'ent, as', ' if ', + 'it had', ' los', 't ', 'so', 'mething', '; and', ' she h', + 'eard ', 'it ', 'mutteri', 'ng to', ' itself', " 'The ", + 'Duchess', '! ', 'Th', 'e ', 'Duchess', '! Oh m', 'y de', + 'ar paws', '! ', 'Oh my ', 'fu', 'r and w', 'hiskers', "! She'", + 'll ', 'get', ' me ', 'execute', 'd,', ' a', 's ', 'su', 're as ', + 'fe', 'rrets', ' are f', 'errets!', ' Wher', 'e CAN', ' I ha', + 've dro', 'pped t', 'hem', ', I ', 'won', "der?' 
A", + 'lice g', 'uess', 'ed ', 'in a m', 'omen', 't that', ' i', + 't was l', 'ook', 'ing f', 'or th', 'e ', 'fan and', ' th', 'e p', + 'air o', 'f whit', 'e ki', 'd glove', 's, and ', 'she v', 'ery ', + 'good-na', 'tu', 'redl', 'y be', 'gan hun', 'ti', 'ng abou', + 't for t', 'he', 'm, bu', 't t', 'hey ', 'were n', 'owhere', + ' to b', 'e s', 'een-', '-eve', 'rythi', 'ng see', 'me', 'd ', + 'to ha', 've', ' c', 'hanged', ' sinc', 'e her s', 'wim ', + 'in the ', 'pool,', ' an', 'd the g', 'rea', 't h', 'all, wi', + 'th the ', 'glas', 's t', 'able an', 'd th', 'e littl', 'e door,', + ' had va', 'ni', 'shed co', 'mpletel', 'y.'] + p = list(product(small, big)) + python_in = [x[0] in x[1] for x in p] + a = [x[0].encode() for x in p] + b = [x[1].encode() for x in p] + res = [bool(x) for x in evaluate('contains(b, a)')] + assert_equal(res, python_in) + + def test_str_contains_withemptystr1(self): + withemptystr = array([b'abc', b'def', b'']) + res = evaluate('contains(b"abcd", withemptystr)') + assert_equal(res, [True, False, True]) + + def test_str_contains_withemptystr2(self): + withemptystr = array([b'abc', b'def', b'']) + res = evaluate('contains(withemptystr, b"")') + assert_equal(res, [True, True, True]) + + def test_str_contains_long_needle(self): + a = b'1' + b'a' * 40 + b = b'a' * 40 + res = evaluate('contains(a, b)') + assert_equal(res, True) + + def test_where_scalar_bool(self): + a = True + b = array([1, 2]) + c = array([3, 4]) + res = evaluate('where(a, b, c)') + assert_array_equal(res, b) + a = False + res = evaluate('where(a, b, c)') + assert_array_equal(res, c) + + @unittest.skipIf(hasattr(sys, "pypy_version_info"), + "PyPy does not have sys.getrefcount()") + def test_refcount(self): + # Regression test for issue #310 + a = array([1]) + assert sys.getrefcount(a) == 2 + evaluate('1') + assert sys.getrefcount(a) == 2 + + def test_locals_clears_globals(self): + # Check for issue #313, whereby clearing f_locals also clear f_globals + # if in the top-frame. This cannot be done inside `unittest` as it is always + # executing code in a child frame. + script = r';'.join([ + r"import numexpr as ne", + r"a=10", + r"ne.evaluate('1')", + r"a += 1", + r"ne.evaluate('2', local_dict={})", + r"a += 1", + r"ne.evaluate('3', global_dict={})", + r"a += 1", + r"ne.evaluate('4', local_dict={}, global_dict={})", + r"a += 1", + ]) + # Raises CalledProcessError on a non-normal exit + check = subprocess.check_call([sys.executable, '-c', script]) + # Ideally this test should also be done against ipython but it's not + # a requirement. 
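# A small sketch of the three-address program form described in
# necompiler.convertASTtoThreeAddrForm() and checked by the disassembly
# assertions in test_reductions/test_r0_reuse above -- an illustrative aside,
# not taken from the numexpr sources.  disassemble() returns one row per
# opcode, laid out as [opcode, destination, arg1, arg2, ...], where r0 is the
# output register, r1.. are inputs, c.. are constants and t.. are temporaries.
# The expression below is an arbitrary example; exact register numbering may
# differ between numexpr versions.
import numpy as np
from numexpr import NumExpr, disassemble

program = disassemble(NumExpr("2.0*a + 3.0*b", [('a', np.double), ('b', np.double)]))
for instruction in program:
    print(instruction)  # e.g. a mul/add sequence whose final destination is r0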
+ + + +class test_numexpr2(test_numexpr): + """Testing with 2 threads""" + nthreads = 2 + + +class test_evaluate(TestCase): + def test_simple(self): + a = array([1., 2., 3.]) + b = array([4., 5., 6.]) + c = array([7., 8., 9.]) + x = evaluate("2*a + 3*b*c") + assert_array_equal(x, array([86., 124., 168.])) + + def test_simple_expr_small_array(self): + x = arange(100.0) + y = evaluate("x") + assert_array_equal(x, y) + + def test_simple_expr(self): + x = arange(1e6) + y = evaluate("x") + assert_array_equal(x, y) + + def test_re_evaluate(self): + a = array([1., 2., 3.]) + b = array([4., 5., 6.]) + c = array([7., 8., 9.]) + x = evaluate("2*a + 3*b*c") + x = re_evaluate() + assert_array_equal(x, array([86., 124., 168.])) + + def test_re_evaluate_dict(self): + a1 = array([1., 2., 3.]) + b1 = array([4., 5., 6.]) + c1 = array([7., 8., 9.]) + local_dict={'a': a1, 'b': b1, 'c': c1} + x = evaluate("2*a + 3*b*c", local_dict=local_dict) + x = re_evaluate(local_dict=local_dict) + assert_array_equal(x, array([86., 124., 168.])) + + def test_validate(self): + a = array([1., 2., 3.]) + b = array([4., 5., 6.]) + c = array([7., 8., 9.]) + retval = validate("2*a + 3*b*c") + assert(retval is None) + x = re_evaluate() + assert_array_equal(x, array([86., 124., 168.])) + + def test_validate_missing_var(self): + a = array([1., 2., 3.]) + b = array([4., 5., 6.]) + retval = validate("2*a + 3*b*c") + assert(isinstance(retval, KeyError)) + + def test_validate_syntax(self): + retval = validate("2+") + assert(isinstance(retval, SyntaxError)) + + def test_validate_dict(self): + a1 = array([1., 2., 3.]) + b1 = array([4., 5., 6.]) + c1 = array([7., 8., 9.]) + local_dict={'a': a1, 'b': b1, 'c': c1} + retval = validate("2*a + 3*b*c", local_dict=local_dict) + assert(retval is None) + x = re_evaluate(local_dict=local_dict) + assert_array_equal(x, array([86., 124., 168.])) + + # Test for issue #22 + def test_true_div(self): + x = arange(10, dtype='i4') + assert_array_equal(evaluate("x/2"), x / 2) + assert_array_equal(evaluate("x/2", truediv=False), x / 2) + assert_array_equal(evaluate("x/2", truediv='auto'), x / 2) + assert_array_equal(evaluate("x/2", truediv=True), x / 2.0) + + def test_left_shift(self): + x = arange(10, dtype='i4') + assert_array_equal(evaluate("x<<2"), x << 2) + + def test_right_shift(self): + x = arange(10, dtype='i4') + assert_array_equal(evaluate("x>>2"), x >> 2) + + # PyTables uses __nonzero__ among ExpressionNode objects internally + # so this should be commented out for the moment. See #24. 
+ def test_boolean_operator(self): + x = arange(10, dtype='i4') + try: + evaluate("(x > 1) and (x < 9)") + except TypeError: + pass + else: + raise ValueError("should raise exception!") + + def test_rational_expr(self): + a = arange(1e6) + b = arange(1e6) * 0.1 + x = (a + 2 * b) / (1 + a + 4 * b * b) + y = evaluate("(a + 2*b) / (1 + a + 4*b*b)") + assert_array_almost_equal(x, y) + + def test_complex_expr(self): + def complex(a, b): + c = zeros(a.shape, dtype=cdouble) + c.real = a + c.imag = b + return c + + a = arange(1e4) + b = arange(1e4) ** 1e-5 + z = a + 1j * b + x = z.imag + x = sin(complex(a, b)).real + z.imag + y = evaluate("sin(complex(a, b)).real + z.imag") + assert_array_almost_equal(x, y) + + def test_complex_strides(self): + a = arange(100).reshape(10, 10)[::2] + b = arange(50).reshape(5, 10) + assert_array_equal(evaluate("a+b"), a + b) + c = empty([10], dtype=[('c1', int32), ('c2', uint16)]) + c['c1'] = arange(10) + c['c2'].fill(0xaaaa) + c1 = c['c1'] + a0 = a[0] + assert_array_equal(evaluate("c1"), c1) + assert_array_equal(evaluate("a0+c1"), a0 + c1) + + def test_recarray_strides(self): + a = arange(100) + b = arange(100,200) + recarr = np.rec.array(None, formats='f4,f4', shape=(100,)) + recarr['f0'] = a + recarr['f1'] = b + c = recarr['f1'] + assert_array_almost_equal(evaluate("sqrt(c) > 1."), sqrt(c) > 1.) + assert_array_almost_equal(evaluate("log10(c)"), log10(c)) + + def test_broadcasting(self): + a = arange(100).reshape(10, 10)[::2] + c = arange(10) + d = arange(5).reshape(5, 1) + assert_array_equal(evaluate("a+c"), a + c) + assert_array_equal(evaluate("a+d"), a + d) + expr = NumExpr("2.0*a+3.0*c", [('a', double), ('c', double)]) + assert_array_equal(expr(a, c), 2.0 * a + 3.0 * c) + + def test_all_scalar(self): + a = 3. + b = 4. + assert_allclose(evaluate("a+b"), a + b) + expr = NumExpr("2*a+3*b", [('a', double), ('b', double)]) + assert_equal(expr(a, b), 2 * a + 3 * b) + + def test_run(self): + a = arange(100).reshape(10, 10)[::2] + b = arange(10) + expr = NumExpr("2*a+3*b", [('a', double), ('b', double)]) + assert_array_equal(expr(a, b), expr.run(a, b)) + + def test_illegal_value(self): + a = arange(3) + try: + evaluate("a < [0, 0, 0]") + except (ValueError, TypeError): + pass + else: + self.fail() + + def test_sanitize(self): + with _environment('NUMEXPR_SANITIZE', '1'): + # Forbid dunder + try: + evaluate('__builtins__') + except ValueError: + pass + else: + self.fail() + + # Forbid colon for lambda funcs + try: + evaluate('lambda x: x') + except ValueError: + pass + else: + self.fail() + + # Forbid indexing + try: + evaluate('locals()["evaluate"]') + except ValueError: + pass + else: + self.fail() + + # Forbid semicolon + try: + evaluate('import os;') + except ValueError: + pass + else: + self.fail() + + # Attribute access with spaces + try: + evaluate('os. cpu_count()') + except ValueError: + pass + else: + self.fail() + + # Attribute access with funny unicode characters that eval translates + # into ASCII. 
+ try: + evaluate("(3+1).ᵇit_length()") + except ValueError: + pass + else: + self.fail() + + # Pass decimal points including scientific notation + a = 3.0 + evaluate('a*2.e-5') + evaluate('a*2.e+5') + evaluate('a*2e-5') + evaluate('a*2e+5') + evaluate('a*2E-5') + evaluate('a*2.0e5') + evaluate('a*2.2e5') + evaluate('2.+a') + + # pass .real and .imag + c = 2.5 + 1.5j + evaluate('c.real') + evaluate('c.imag') + + # pass imaginary unit j + evaluate('1.5j') + evaluate('3.j') + + # pass forbidden characters within quotes + x = np.array(['a', 'b'], dtype=bytes) + evaluate("x == 'b:'") + + + def test_no_sanitize(self): + try: # Errors on compile() after eval() + evaluate('import os;', sanitize=False) + except SyntaxError: + pass + else: + self.fail() + + with _environment('NUMEXPR_SANITIZE', '0'): + try: # Errors on compile() after eval() + evaluate('import os;', sanitize=None) + except SyntaxError: + pass + else: + self.fail() + + def test_disassemble(self): + assert_equal(disassemble(NumExpr( + "where(m, a, -1)", [('m', bool), ('a', float)])), + [[b'where_fbff', b'r0', b'r1[m]', b'r2[a]', b'c3[-1.0]'], + [b'noop', None, None, None]]) + + def test_constant_deduplication(self): + assert_equal(NumExpr("(a + 1)*(a - 1)", [('a', np.int32)]).constants, (1,)) + + def test_nan_constant(self): + assert_equal(str(ConstantNode(float("nan")).value), 'nan') + + # check de-duplication works for nan + _nan = ConstantNode(float("nan")) + expr = (E.a + _nan)*(E.b + _nan) + assert_equal(NumExpr(expr, [('a', double), ('b', double)]).constants, (float("nan"),)) + + + def test_f32_constant(self): + assert_equal(ConstantNode(numpy.float32(1)).astKind, "float") + assert_equal(ConstantNode(numpy.float32("nan")).astKind, "float") + assert_equal(ConstantNode(numpy.float32(3)).value.dtype, numpy.dtype("float32")) + assert_array_equal(NumExpr(ConstantNode(numpy.float32(1))).run(), + numpy.array(1, dtype="float32")) + + def test_unaligned_singleton(self): + # Test for issue #397 whether singletons outputs assigned to consts must be + # aligned or not. + a = np.empty(5, dtype=np.uint8)[1:].view(np.int32) + evaluate('3', out=a) + assert_equal(a, 3) + + def test_negative_mod(self): + # Test for issue #413, modulus of negative integers. C modulus is + # actually remainder op, and hence different from Python modulus. + a = np.array([-500, -135, 0, 0, 135, 500], dtype=np.int32) + n = np.array([-360, -360, -360, 360, 360, 360], dtype=np.int32) + out_i = evaluate('a % n') + assert_equal(out_i, np.mod(a, n)) + + b = a.astype(np.int64) + m = n.astype(np.int64) + out_l = evaluate('b % m') + assert_equal(out_l, np.mod(b, m)) + + def test_negative_power_scalar(self): + # Test for issue #428, where the power is negative and the base is an + # integer. 
This was running afoul in the precomputation in `expressions.py:pow_op()` + base = np.array([-2, -1, 1, 2, 3], dtype=np.int32) + out_i = evaluate('base ** -1.0') + assert_equal(out_i, np.power(base, -1.0)) + + base = np.array([-2, -1, 1, 2, 3], dtype=np.int64) + out_l = evaluate('base ** -1.0') + assert_equal(out_l, np.power(base, -1.0)) + + + def test_ex_uses_vml(self): + vml_funcs = [ "sin", "cos", "tan", "arcsin", "arccos", "arctan", + "sinh", "cosh", "tanh", "arcsinh", "arccosh", "arctanh", + "log", "log1p","log10", "exp", "expm1", "abs", "conj", + "arctan2", "fmod"] + for func in vml_funcs: + strexpr = func+'(a)' + _, ex_uses_vml = numexpr.necompiler.getExprNames(strexpr, {}) + assert_equal(ex_uses_vml, use_vml, strexpr) + + if 'sparc' not in platform.machine(): + # Execution order set here so as to not use too many threads + # during the rest of the execution. See #33 for details. + def test_changing_nthreads_00_inc(self): + a = linspace(-1, 1, 1000000) + b = ((.25 * a + .75) * a - 1.5) * a - 2 + for nthreads in range(1, 7): + numexpr.set_num_threads(nthreads) + c = evaluate("((.25*a + .75)*a - 1.5)*a - 2") + assert_array_almost_equal(b, c) + + def test_changing_nthreads_01_dec(self): + a = linspace(-1, 1, 1000000) + b = ((.25 * a + .75) * a - 1.5) * a - 2 + for nthreads in range(6, 1, -1): + numexpr.set_num_threads(nthreads) + c = evaluate("((.25*a + .75)*a - 1.5)*a - 2") + assert_array_almost_equal(b, c) + + +tests = [ + ('MISC', ['b*c+d*e', + '2*a+3*b', + '-a', + 'sinh(a)', + '2*a + (cos(3)+5)*sinh(cos(b))', + '2*a + arctan2(a, b)', + 'arcsin(0.5)', + 'where(a != 0.0, 2, a)', + 'where(a > 10, b < a, b > a)', + 'where((a-10).real != 0.0, a, 2)', + '0.25 * (a < 5) + 0.33 * (a >= 5)', + 'cos(1+1)', + '1+1', + '1', + 'cos(a2)', + ])] + +optests = [] +for op in list('+-*/%') + ['**']: + optests.append("(a+1) %s (b+3)" % op) + optests.append("3 %s (b+3)" % op) + optests.append("(a+1) %s 4" % op) + optests.append("2 %s (b+3)" % op) + optests.append("(a+1) %s 2" % op) + optests.append("(a+1) %s -1" % op) + optests.append("(a+1) %s 0.5" % op) + # Check divisions and modulus by zero (see ticket #107) + optests.append("(a+1) %s 0" % op) +tests.append(('OPERATIONS', optests)) + +cmptests = [] +for op in ['<', '<=', '==', '>=', '>', '!=']: + cmptests.append("a/2+5 %s b" % op) + cmptests.append("a/2+5 %s 7" % op) + cmptests.append("7 %s b" % op) + cmptests.append("7.0 %s 5" % op) +tests.append(('COMPARISONS', cmptests)) + +func1tests = [] +for func in ['copy', 'ones_like', 'sqrt', + 'sin', 'cos', 'tan', 'arcsin', 'arccos', 'arctan', + 'sinh', 'cosh', 'tanh', 'arcsinh', 'arccosh', 'arctanh', + 'log', 'log1p', 'log10', 'exp', 'expm1', 'abs', 'conj', + 'ceil', 'floor']: + func1tests.append("a + %s(b+c)" % func) +tests.append(('1_ARG_FUNCS', func1tests)) + +func2tests = [] +for func in ['arctan2', 'fmod']: + func2tests.append("a + %s(b+c, d+1)" % func) + func2tests.append("a + %s(b+c, 1)" % func) + func2tests.append("a + %s(1, d+1)" % func) +tests.append(('2_ARG_FUNCS', func2tests)) + +powtests = [] +# n = -1, 0.5, 2, 4 already handled in section "OPERATIONS" +for n in (-7, -2.5, -1.5, -1.3, -.5, 0, 0.0, 1, 2.3, 2.5, 3): + powtests.append("(a+1)**%s" % n) +tests.append(('POW_TESTS', powtests)) + + +def equal(a, b, exact): + if array_equal(a, b): + return True + + if hasattr(a, 'dtype') and a.dtype in ['f4', 'f8']: + nnans = isnan(a).sum() + if nnans > 0: + # For results containing NaNs, just check that the number + # of NaNs is the same in both arrays. 
This check could be + # made more exhaustive, but checking element by element in + # python space is very expensive in general. + return nnans == isnan(b).sum() + ninfs = isinf(a).sum() + if ninfs > 0: + # Ditto for Inf's + return ninfs == isinf(b).sum() + if exact: + return (shape(a) == shape(b)) and alltrue(ravel(a) == ravel(b), axis=0) + else: + if hasattr(a, 'dtype') and a.dtype == 'f4': + atol = 1e-5 # Relax precision for special opcodes, like fmod + else: + atol = 1e-8 + return (shape(a) == shape(b) and + allclose(ravel(a), ravel(b), atol=atol)) + + +class Skip(Exception): pass + + +def test_expressions(): + test_no = [0] + + def make_test_method(a, a2, b, c, d, e, x, expr, + test_scalar, dtype, optimization, exact, section): + this_locals = locals() + + def method(): + try: + # We don't want to listen at RuntimeWarnings like + # "overflows" or "divide by zero" in plain eval(). + warnings.simplefilter("ignore") + npval = eval(expr, globals(), this_locals) + warnings.simplefilter("always") + npval = eval(expr, globals(), this_locals) + except Exception as ex: + # just store the exception in a variable + # compatibility with numpy v1.12 + # see also https://github.com/pydata/numexpr/issues/239 + np_exception = ex + npval = None + else: + np_exception = None + + try: + neval = evaluate(expr, local_dict=this_locals, + optimization=optimization) + except AssertionError: + raise + except NotImplementedError: + print('%r not implemented for %s (scalar=%d, opt=%s)' + % (expr, dtype.__name__, test_scalar, optimization)) + except Exception as ne_exception: + same_exc_type = issubclass(type(ne_exception), + type(np_exception)) + if np_exception is None or not same_exc_type: + print('numexpr error for expression %r' % (expr,)) + raise + except: + print('numexpr error for expression %r' % (expr,)) + raise + else: + msg = ('expected numexpr error not raised for expression ' + '%r' % (expr,)) + assert np_exception is None, msg + + assert equal(npval, neval, exact), """%r +(test_scalar=%r, dtype=%r, optimization=%r, exact=%r, + npval=%r (%r - %r)\n neval=%r (%r - %r))""" % (expr, test_scalar, dtype.__name__, + optimization, exact, + npval, type(npval), shape(npval), + neval, type(neval), shape(neval)) + + method.description = ('test_expressions(%s, test_scalar=%r, ' + 'dtype=%r, optimization=%r, exact=%r)') % (expr, test_scalar, dtype.__name__, optimization, exact) + test_no[0] += 1 + method.__name__ = 'test_scalar%d_%s_%s_%s_%04d' % (test_scalar, + dtype.__name__, + optimization.encode('ascii'), + section.encode('ascii'), + test_no[0]) + return method + + x = None + for test_scalar in (0, 1, 2): + for dtype in (int, int, np.float32, double, complex): + array_size = 100 + a = arange(2 * array_size, dtype=dtype)[::2] + a2 = zeros([array_size, array_size], dtype=dtype) + b = arange(array_size, dtype=dtype) / array_size + c = arange(array_size, dtype=dtype) + d = arange(array_size, dtype=dtype) + e = arange(array_size, dtype=dtype) + if dtype == complex: + a = a.real + for x in [a2, b, c, d, e]: + x += 1j + x *= 1 + 1j + if test_scalar == 1: + a = a[array_size // 2] + if test_scalar == 2: + b = b[array_size // 2] + for optimization, exact in [ + ('none', False), ('moderate', False), ('aggressive', False)]: + for section_name, section_tests in tests: + for expr in section_tests: + if (dtype == complex and + ('<' in expr or '>' in expr or '%' in expr + or "arctan2" in expr or "fmod" in expr + or "floor" in expr or "ceil" in expr)): + # skip complex comparisons or functions not + # defined in complex 
domain. + continue + if (dtype in (int, int) and test_scalar and + expr == '(a+1) ** -1'): + continue + + m = make_test_method(a, a2, b, c, d, e, x, + expr, test_scalar, dtype, + optimization, exact, + section_name) + yield m + + +class test_int64(TestCase): + def test_neg(self): + a = array([2 ** 31 - 1, 2 ** 31, 2 ** 32, 2 ** 63 - 1], dtype=int64) + res = evaluate('-a') + assert_array_equal(res, [1 - 2 ** 31, -(2 ** 31), -(2 ** 32), 1 - 2 ** 63]) + self.assertEqual(res.dtype.name, 'int64') + + +class test_int32_int64(TestCase): + + def test_small_int(self): + # Small ints (32-bit ones) should not be promoted to longs. + res = evaluate('2') + assert_array_equal(res, 2) + self.assertEqual(res.dtype.name, 'int32') + + def test_big_int(self): + # Big ints should be promoted to longs. + res = evaluate('2**40') + assert_array_equal(res, 2 ** 40) + self.assertEqual(res.dtype.name, 'int64') + + def test_long_constant_promotion(self): + int32array = arange(100, dtype='int32') + itwo = np.int32(2) + ltwo = np.int64(2) + res = int32array * 2 + res32 = evaluate('int32array * itwo') + res64 = evaluate('int32array * ltwo') + assert_array_equal(res, res32) + assert_array_equal(res, res64) + self.assertEqual(res32.dtype.name, 'int32') + self.assertEqual(res64.dtype.name, 'int64') + + def test_int64_array_promotion(self): + int32array = arange(100, dtype='int32') + int64array = arange(100, dtype='int64') + respy = int32array * int64array + resnx = evaluate('int32array * int64array') + assert_array_equal(respy, resnx) + self.assertEqual(resnx.dtype.name, 'int64') + + +class test_uint32_int64(TestCase): + def test_small_uint32(self): + # Small uint32 should not be downgraded to ints. + a = np.uint32(42) + res = evaluate('a') + assert_array_equal(res, 42) + self.assertEqual(res.dtype.name, 'int64') + + def test_uint32_constant_promotion(self): + int32array = arange(100, dtype='int32') + stwo = np.int32(2) + utwo = np.uint32(2) + res = int32array * utwo + res32 = evaluate('int32array * stwo') + res64 = evaluate('int32array * utwo') + assert_array_equal(res, res32) + assert_array_equal(res, res64) + self.assertEqual(res32.dtype.name, 'int32') + self.assertEqual(res64.dtype.name, 'int64') + + def test_int64_array_promotion(self): + uint32array = arange(100, dtype='uint32') + int64array = arange(100, dtype='int64') + respy = uint32array * int64array + resnx = evaluate('uint32array * int64array') + assert_array_equal(respy, resnx) + self.assertEqual(resnx.dtype.name, 'int64') + + +class test_strings(TestCase): + BLOCK_SIZE1 = 128 + BLOCK_SIZE2 = 8 + str_list1 = [b'foo', b'bar', b'', b' '] + str_list2 = [b'foo', b'', b'x', b' '] + str_nloops = len(str_list1) * (BLOCK_SIZE1 + BLOCK_SIZE2 + 1) + str_array1 = array(str_list1 * str_nloops) + str_array2 = array(str_list2 * str_nloops) + str_constant = b'doodoo' + + def test_null_chars(self): + str_list = [ + b'\0\0\0', b'\0\0foo\0', b'\0\0foo\0b', b'\0\0foo\0b\0', + b'foo\0', b'foo\0b', b'foo\0b\0', b'foo\0bar\0baz\0\0'] + for s in str_list: + r = evaluate('s') + self.assertEqual(s, r.tobytes()) # check *all* stored data + + def test_compare_copy(self): + sarr = self.str_array1 + expr = 'sarr' + res1 = eval(expr) + res2 = evaluate(expr) + assert_array_equal(res1, res2) + + def test_compare_array(self): + sarr1 = self.str_array1 + sarr2 = self.str_array2 + expr = 'sarr1 >= sarr2' + res1 = eval(expr) + res2 = evaluate(expr) + assert_array_equal(res1, res2) + + def test_compare_variable(self): + sarr = self.str_array1 + svar = self.str_constant + expr = 'sarr >= svar' + 
res1 = eval(expr) + res2 = evaluate(expr) + assert_array_equal(res1, res2) + + def test_compare_constant(self): + sarr = self.str_array1 + expr = 'sarr >= %r' % self.str_constant + res1 = eval(expr) + res2 = evaluate(expr) + assert_array_equal(res1, res2) + + def test_add_string_array(self): + sarr1 = self.str_array1 + sarr2 = self.str_array2 + expr = 'sarr1 + sarr2' + self.assert_missing_op('add_sss', expr, locals()) + + def test_empty_string1(self): + a = np.array([b"", b"pepe"]) + b = np.array([b"pepe2", b""]) + res = evaluate("(a == b'') & (b == b'pepe2')") + assert_array_equal(res, np.array([True, False])) + res2 = evaluate("(a == b'pepe') & (b == b'')") + assert_array_equal(res2, np.array([False, True])) + + def test_empty_string2(self): + a = np.array([b"p", b"pepe"]) + b = np.array([b"pepe2", b""]) + res = evaluate("(a == b'') & (b == b'pepe2')") + assert_array_equal(res, np.array([False, False])) + res2 = evaluate("(a == b'pepe') & (b == b'')") + assert_array_equal(res, np.array([False, False])) + + def test_add_numeric_array(self): + sarr = self.str_array1 + narr = arange(len(sarr), dtype='int32') + expr = 'sarr >= narr' + self.assert_missing_op('ge_bsi', expr, locals()) + + def assert_missing_op(self, op, expr, local_dict): + msg = "expected NotImplementedError regarding '%s'" % op + try: + evaluate(expr, local_dict) + except NotImplementedError as nie: + if "'%s'" % op not in nie.args[0]: + self.fail(msg) + else: + self.fail(msg) + + def test_compare_prefix(self): + # Check comparing two strings where one is a prefix of the + # other. + for s1, s2 in [(b'foo', b'foobar'), (b'foo', b'foo\0bar'), + (b'foo\0a', b'foo\0bar')]: + self.assertTrue(evaluate('s1 < s2')) + self.assertTrue(evaluate('s1 <= s2')) + self.assertTrue(evaluate('~(s1 == s2)')) + self.assertTrue(evaluate('~(s1 >= s2)')) + self.assertTrue(evaluate('~(s1 > s2)')) + + # Check for NumPy array-style semantics in string equality. + s1, s2 = b'foo', b'foo\0\0' + self.assertTrue(evaluate('s1 == s2')) + + +# Case for testing selections in fields which are aligned but whose +# data length is not an exact multiple of the length of the record. +# The following test exposes the problem only in 32-bit machines, +# because in 64-bit machines 'c2' is unaligned. However, this should +# check most platforms where, while not unaligned, 'len(datatype) > +# boundary_alignment' is fullfilled. +class test_irregular_stride(TestCase): + def test_select(self): + f0 = arange(10, dtype=int32) + f1 = arange(10, dtype=float64) + + irregular = rec.fromarrays([f0, f1]) + + f0 = irregular['f0'] + f1 = irregular['f1'] + + i0 = evaluate('f0 < 5') + i1 = evaluate('f1 < 5') + + assert_array_equal(f0[i0], arange(5, dtype=int32)) + assert_array_equal(f1[i1], arange(5, dtype=float64)) + + +# Cases for testing arrays with dimensions that can be zero. 
+class test_zerodim(TestCase): + def test_zerodim1d(self): + a0 = array([], dtype=int32) + a1 = array([], dtype=float64) + + r0 = evaluate('a0 + a1') + r1 = evaluate('a0 * a1') + + assert_array_equal(r0, a1) + assert_array_equal(r1, a1) + + def test_zerodim3d(self): + a0 = array([], dtype=int32).reshape(0, 2, 4) + a1 = array([], dtype=float64).reshape(0, 2, 4) + + r0 = evaluate('a0 + a1') + r1 = evaluate('a0 * a1') + + assert_array_equal(r0, a1) + assert_array_equal(r1, a1) + + +@contextmanager +def _environment(key, value): + old = os.environ.get(key) + os.environ[key] = value + try: + yield + finally: + if old: + os.environ[key] = old + else: + del os.environ[key] + +# Test cases for the threading configuration +class test_threading_config(TestCase): + def test_max_threads_unset(self): + # Has to be done in a subprocess as `importlib.reload` doesn't let us + # re-initialize the threadpool + script = '\n'.join([ + "import os", + "if 'NUMEXPR_MAX_THREADS' in os.environ: os.environ.pop('NUMEXPR_MAX_THREADS')", + "if 'OMP_NUM_THREADS' in os.environ: os.environ.pop('OMP_NUM_THREADS')", + "import numexpr", + "assert(numexpr.nthreads <= 8)", + "exit(0)"]) + subprocess.check_call([sys.executable, '-c', script]) + + def test_max_threads_set(self): + # Has to be done in a subprocess as `importlib.reload` doesn't let us + # re-initialize the threadpool + script = '\n'.join([ + "import os", + "os.environ['NUMEXPR_MAX_THREADS'] = '4'", + "import numexpr", + "assert(numexpr.MAX_THREADS == 4)", + "exit(0)"]) + subprocess.check_call([sys.executable, '-c', script]) + + def test_numexpr_num_threads(self): + with _environment('OMP_NUM_THREADS', '5'): + # NUMEXPR_NUM_THREADS has priority + with _environment('NUMEXPR_NUM_THREADS', '3'): + if 'sparc' in platform.machine(): + self.assertEqual(1, numexpr._init_num_threads()) + else: + self.assertEqual(3, numexpr._init_num_threads()) + + def test_omp_num_threads(self): + with _environment('OMP_NUM_THREADS', '5'): + if 'sparc' in platform.machine(): + self.assertEqual(1, numexpr._init_num_threads()) + else: + self.assertEqual(5, numexpr._init_num_threads()) + + def test_omp_num_threads_empty_string(self): + with _environment('OMP_NUM_THREADS', ''): + if 'sparc' in platform.machine(): + self.assertEqual(1, numexpr._init_num_threads()) + else: + self.assertEqual(detect_number_of_cores(), numexpr._init_num_threads()) + + def test_numexpr_max_threads_empty_string(self): + with _environment('NUMEXPR_MAX_THREADS', ''): + if 'sparc' in platform.machine(): + self.assertEqual(1, numexpr._init_num_threads()) + else: + self.assertEqual(detect_number_of_cores(), numexpr._init_num_threads()) + + def test_vml_threads_round_trip(self): + n_threads = 3 + if use_vml: + numexpr.utils.set_vml_num_threads(n_threads) + set_threads = numexpr.utils.get_vml_num_threads() + self.assertEqual(n_threads, set_threads) + else: + self.assertIsNone(numexpr.utils.set_vml_num_threads(n_threads)) + self.assertIsNone(numexpr.utils.get_vml_num_threads()) + + +# Case test for threads +class test_threading(TestCase): + + def test_thread(self): + import threading + + class ThreadTest(threading.Thread): + def run(self): + a = arange(3) + assert_array_equal(evaluate('a**3'), array([0, 1, 8])) + + test = ThreadTest() + test.start() + test.join() + + def test_multithread(self): + import threading + + # Running evaluate() from multiple threads shouldn't crash + def work(n): + a = arange(n) + evaluate('a+a') + + work(10) # warm compilation cache + + nthreads = 30 + threads = [threading.Thread(target=work, 
args=(1e5,)) + for i in range(nthreads)] + for t in threads: + t.start() + for t in threads: + t.join() + + +# The worker function for the subprocess (needs to be here because Windows +# has problems pickling nested functions with the multiprocess module :-/) +def _worker(qout=None): + ra = np.arange(1e3) + rows = evaluate('ra > 0') + #print "Succeeded in evaluation!\n" + if qout is not None: + qout.put("Done") + + +# Case test for subprocesses (via multiprocessing module) +class test_subprocess(TestCase): + def test_multiprocess(self): + try: + import multiprocessing as mp + except ImportError: + return + # Check for two threads at least + numexpr.set_num_threads(2) + #print "**** Running from main process:" + _worker() + #print "**** Running from subprocess:" + qout = mp.Queue() + ps = mp.Process(target=_worker, args=(qout,)) + ps.daemon = True + ps.start() + + result = qout.get() + #print result + + +def print_versions(): + """Print the versions of software that numexpr relies on.""" + # from pkg_resources import parse_version + from numexpr.cpuinfo import cpu + import platform + + print('-=' * 38) + print('Numexpr version: %s' % numexpr.__version__) + print('NumPy version: %s' % np.__version__) + print('Python version: %s' % sys.version) + (sysname, nodename, release, os_version, machine, processor) = platform.uname() + print('Platform: %s-%s-%s' % (sys.platform, machine, os_version)) + try: + # cpuinfo doesn't work on OSX well it seems, so protect these outputs + # with a try block + cpu_info = cpu.info[0] + print('CPU vendor: %s' % cpu_info.get('VendorIdentifier', '')) + print('CPU model: %s' % cpu_info.get('ProcessorNameString', '')) + print('CPU clock speed: %s MHz' % cpu_info.get('~MHz','')) + except KeyError: + pass + print('VML available? %s' % use_vml) + if use_vml: + print('VML/MKL version: %s' % numexpr.get_vml_version()) + print('Number of threads used by default: %d ' + '(out of %d detected cores)' % (numexpr.nthreads, numexpr.ncores)) + print('Maximum number of threads: %s' % numexpr.MAX_THREADS) + print('-=' * 38) + + +def test(verbosity=1): + """ + Run all the tests in the test suite. + """ + print_versions() + # For some reason, NumPy issues all kinds of warnings when using Python3. + # Ignoring them in tests should be ok, as all results are checked out. + # See https://github.com/pydata/numexpr/issues/183 for details. 
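+    # A typical invocation, offered only as a hedged usage sketch (assuming
+    # the package re-exports this function as ``numexpr.test``):
+    #
+    #     import numexpr
+    #     numexpr.test(verbosity=2)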
+ np.seterr(divide='ignore', invalid='ignore', over='ignore', under='ignore') + return unittest.TextTestRunner(verbosity=verbosity).run(suite()) + + +test.__test__ = False + + +def suite(): + import unittest + import platform as pl + + theSuite = unittest.TestSuite() + niter = 1 + + class TestExpressions(TestCase): + pass + + def add_method(func): + def method(self): + return func() + + setattr(TestExpressions, func.__name__, + method.__get__(None, TestExpressions)) + + for func in test_expressions(): + add_method(func) + + for n in range(niter): + theSuite.addTest(unittest.makeSuite(test_numexpr)) + if 'sparc' not in platform.machine(): + theSuite.addTest(unittest.makeSuite(test_numexpr2)) + theSuite.addTest(unittest.makeSuite(test_evaluate)) + theSuite.addTest(unittest.makeSuite(TestExpressions)) + theSuite.addTest(unittest.makeSuite(test_int32_int64)) + theSuite.addTest(unittest.makeSuite(test_uint32_int64)) + theSuite.addTest(unittest.makeSuite(test_strings)) + theSuite.addTest( + unittest.makeSuite(test_irregular_stride)) + theSuite.addTest(unittest.makeSuite(test_zerodim)) + theSuite.addTest(unittest.makeSuite(test_threading_config)) + + # multiprocessing module is not supported on Hurd/kFreeBSD + if (pl.system().lower() not in ('gnu', 'gnu/kfreebsd')): + theSuite.addTest(unittest.makeSuite(test_subprocess)) + + # I need to put this test after test_subprocess because + # if not, the test suite locks immediately before test_subproces. + # This only happens with Windows, so I suspect of a subtle bad + # interaction with threads and subprocess :-/ + theSuite.addTest(unittest.makeSuite(test_threading)) + + return theSuite + + +if __name__ == '__main__': + print_versions() + unittest.main(defaultTest='suite') +# suite = suite() +# unittest.TextTestRunner(verbosity=2).run(suite) diff --git a/venv/lib/python3.10/site-packages/numexpr/utils.py b/venv/lib/python3.10/site-packages/numexpr/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..073b879e12656eb7961e105b1284b3705d76e30b --- /dev/null +++ b/venv/lib/python3.10/site-packages/numexpr/utils.py @@ -0,0 +1,228 @@ +################################################################### +# Numexpr - Fast numerical array expression evaluator for NumPy. +# +# License: MIT +# Author: See AUTHORS.txt +# +# See LICENSE.txt and LICENSES/*.txt for details about copyright and +# rights to use. +#################################################################### + +import logging +log = logging.getLogger(__name__) + +import os +import subprocess + +from numexpr.interpreter import _set_num_threads, _get_num_threads, MAX_THREADS +from numexpr import use_vml +from . import version + +if use_vml: + from numexpr.interpreter import ( + _get_vml_version, _set_vml_accuracy_mode, _set_vml_num_threads, + _get_vml_num_threads) + + +def get_vml_version(): + """ + Get the VML/MKL library version. + """ + if use_vml: + return _get_vml_version() + else: + return None + + +def set_vml_accuracy_mode(mode): + """ + Set the accuracy mode for VML operations. + + The `mode` parameter can take the values: + - 'high': high accuracy mode (HA), <1 least significant bit + - 'low': low accuracy mode (LA), typically 1-2 least significant bits + - 'fast': enhanced performance mode (EP) + - None: mode settings are ignored + + This call is equivalent to the `vmlSetMode()` in the VML library. + See: + + http://www.intel.com/software/products/mkl/docs/webhelp/vml/vml_DataTypesAccuracyModes.html + + for more info on the accuracy modes. 
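+    A hedged usage sketch (meaningful only for a VML/MKL-enabled build;
+    otherwise the call simply returns None)::
+
+        >>> from numexpr.utils import set_vml_accuracy_mode
+        >>> previous = set_vml_accuracy_mode('low')  # e.g. 'high' on a default build
+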
+ + Returns old accuracy settings. + """ + if use_vml: + acc_dict = {None: 0, 'low': 1, 'high': 2, 'fast': 3} + acc_reverse_dict = {1: 'low', 2: 'high', 3: 'fast'} + if mode not in list(acc_dict.keys()): + raise ValueError( + "mode argument must be one of: None, 'high', 'low', 'fast'") + retval = _set_vml_accuracy_mode(acc_dict.get(mode, 0)) + return acc_reverse_dict.get(retval) + else: + return None + + +def set_vml_num_threads(nthreads): + """ + Suggests a maximum number of threads to be used in VML operations. + + This function is equivalent to the call + `mkl_domain_set_num_threads(nthreads, MKL_DOMAIN_VML)` in the MKL + library. See: + + http://www.intel.com/software/products/mkl/docs/webhelp/support/functn_mkl_domain_set_num_threads.html + + for more info about it. + """ + if use_vml: + _set_vml_num_threads(nthreads) + pass + +def get_vml_num_threads(): + """ + Gets the maximum number of threads to be used in VML operations. + + This function is equivalent to the call + `mkl_domain_get_max_threads (MKL_DOMAIN_VML)` in the MKL + library. See: + + http://software.intel.com/en-us/node/522118 + + for more info about it. + """ + if use_vml: + return _get_vml_num_threads() + return None + +def set_num_threads(nthreads): + """ + Sets a number of threads to be used in operations. + + DEPRECATED: returns the previous setting for the number of threads. + + During initialization time NumExpr sets this number to the number + of detected cores in the system (see `detect_number_of_cores()`). + """ + old_nthreads = _set_num_threads(nthreads) + return old_nthreads + +def get_num_threads(): + """ + Gets the number of threads currently in use for operations. + """ + return _get_num_threads() + +def _init_num_threads(): + """ + Detects the environment variable 'NUMEXPR_MAX_THREADS' to set the threadpool + size, and if necessary the slightly redundant 'NUMEXPR_NUM_THREADS' or + 'OMP_NUM_THREADS' env vars to set the initial number of threads used by + the virtual machine. + """ + # Any platform-specific short-circuits + if 'sparc' in version.platform_machine: + log.warning('The number of threads have been set to 1 because problems related ' + 'to threading have been reported on some sparc machine. ' + 'The number of threads can be changed using the "set_num_threads" ' + 'function.') + set_num_threads(1) + return 1 + + env_configured = False + n_cores = detect_number_of_cores() + if ('NUMEXPR_MAX_THREADS' in os.environ and os.environ['NUMEXPR_MAX_THREADS'] != '' or + 'OMP_NUM_THREADS' in os.environ and os.environ['OMP_NUM_THREADS'] != ''): + # The user has configured NumExpr in the expected way, so suppress logs. + env_configured = True + n_cores = MAX_THREADS + else: + # The use has not set 'NUMEXPR_MAX_THREADS', so likely they have not + # configured NumExpr as desired, so we emit info logs. + if n_cores > MAX_THREADS: + log.info('Note: detected %d virtual cores but NumExpr set to maximum of %d, check "NUMEXPR_MAX_THREADS" environment variable.'%(n_cores, MAX_THREADS)) + if n_cores > 8: + # The historical 'safety' limit. + log.info('Note: NumExpr detected %d cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.'%n_cores) + n_cores = 8 + + # Now we check for 'NUMEXPR_NUM_THREADS' or 'OMP_NUM_THREADS' to set the + # actual number of threads used. 
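+    # For example (a hedged illustration of the precedence implemented here):
+    # with NUMEXPR_MAX_THREADS=16 and OMP_NUM_THREADS=4, the pool is sized
+    # for up to 16 threads and 4 of them are used initially; if
+    # NUMEXPR_NUM_THREADS is also set, it takes priority over OMP_NUM_THREADS.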
+ if 'NUMEXPR_NUM_THREADS' in os.environ and os.environ['NUMEXPR_NUM_THREADS'] != '': + requested_threads = int(os.environ['NUMEXPR_NUM_THREADS']) + elif 'OMP_NUM_THREADS' in os.environ and os.environ['OMP_NUM_THREADS'] != '': + # Empty string is commonly used to unset the variable + requested_threads = int(os.environ['OMP_NUM_THREADS']) + else: + requested_threads = n_cores + if not env_configured: + log.info('NumExpr defaulting to %d threads.'%n_cores) + + # The C-extension function performs its own checks against `MAX_THREADS` + set_num_threads(requested_threads) + return requested_threads + + +def detect_number_of_cores(): + """ + Detects the number of cores on a system. Cribbed from pp. + """ + # Linux, Unix and MacOS: + if hasattr(os, "sysconf"): + if "SC_NPROCESSORS_ONLN" in os.sysconf_names: + # Linux & Unix: + ncpus = os.sysconf("SC_NPROCESSORS_ONLN") + if isinstance(ncpus, int) and ncpus > 0: + return ncpus + else: # OSX: + return int(subprocess.check_output(["sysctl", "-n", "hw.ncpu"])) + # Windows: + try: + ncpus = int(os.environ.get("NUMBER_OF_PROCESSORS", "")) + if ncpus > 0: + return ncpus + except ValueError: + pass + return 1 # Default + + +def detect_number_of_threads(): + """ + DEPRECATED: use `_init_num_threads` instead. + If this is modified, please update the note in: https://github.com/pydata/numexpr/wiki/Numexpr-Users-Guide + """ + log.warning('Deprecated, use `init_num_threads` instead.') + try: + nthreads = int(os.environ.get('NUMEXPR_NUM_THREADS', '')) + except ValueError: + try: + nthreads = int(os.environ.get('OMP_NUM_THREADS', '')) + except ValueError: + nthreads = detect_number_of_cores() + + # Check that we don't surpass the MAX_THREADS in interpreter.cpp + if nthreads > MAX_THREADS: + nthreads = MAX_THREADS + return nthreads + + +class CacheDict(dict): + """ + A dictionary that prevents itself from growing too much. + """ + + def __init__(self, maxentries): + self.maxentries = maxentries + super(CacheDict, self).__init__(self) + + def __setitem__(self, key, value): + # Protection against growing the cache too much + if len(self) > self.maxentries: + # Remove a 10% of (arbitrary) elements from the cache + entries_to_remove = self.maxentries // 10 + for k in list(self.keys())[:entries_to_remove]: + super(CacheDict, self).__delitem__(k) + super(CacheDict, self).__setitem__(key, value) + diff --git a/venv/lib/python3.10/site-packages/numexpr/version.py b/venv/lib/python3.10/site-packages/numexpr/version.py new file mode 100644 index 0000000000000000000000000000000000000000..69b5bb37d147033182917b5b9a82e20935490832 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numexpr/version.py @@ -0,0 +1,4 @@ +# THIS FILE IS GENERATED BY `SETUP.PY` +version = '2.10.0' +numpy_build_version = '2.0.0rc1' +platform_machine = 'x86_64' diff --git a/venv/lib/python3.10/site-packages/scipy/__config__.py b/venv/lib/python3.10/site-packages/scipy/__config__.py new file mode 100644 index 0000000000000000000000000000000000000000..9a40526d1d3326491cbd6f6169cf57e258645a22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/__config__.py @@ -0,0 +1,161 @@ +# This file is generated by SciPy's build process +# It contains system_info results at the time of building this package. 
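+# The collected values are exposed through ``scipy.show_config()``; a minimal
+# usage sketch (assuming this build of SciPy is importable):
+#
+#     import scipy
+#     scipy.show_config()                    # pretty-prints the CONFIG dict below
+#     cfg = scipy.show_config(mode='dicts')  # returns it as a plain dict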
+from enum import Enum + +__all__ = ["show"] +_built_with_meson = True + + +class DisplayModes(Enum): + stdout = "stdout" + dicts = "dicts" + + +def _cleanup(d): + """ + Removes empty values in a `dict` recursively + This ensures we remove values that Meson could not provide to CONFIG + """ + if isinstance(d, dict): + return { k: _cleanup(v) for k, v in d.items() if v != '' and _cleanup(v) != '' } + else: + return d + + +CONFIG = _cleanup( + { + "Compilers": { + "c": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "10.2.1", + "commands": r"cc", + "args": r"", + "linker args": r"", + }, + "cython": { + "name": r"cython", + "linker": r"cython", + "version": r"3.0.10", + "commands": r"cython", + "args": r"", + "linker args": r"", + }, + "c++": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "10.2.1", + "commands": r"c++", + "args": r"", + "linker args": r"", + }, + "fortran": { + "name": "gcc", + "linker": r"ld.bfd", + "version": "10.2.1", + "commands": r"gfortran", + "args": r"", + "linker args": r"", + }, + "pythran": { + "version": r"0.15.0", + "include directory": r"../../tmp/pip-build-env-0blqy1or/overlay/lib/python3.10/site-packages/pythran" + }, + }, + "Machine Information": { + "host": { + "cpu": r"x86_64", + "family": r"x86_64", + "endian": r"little", + "system": r"linux", + }, + "build": { + "cpu": r"x86_64", + "family": r"x86_64", + "endian": r"little", + "system": r"linux", + }, + "cross-compiled": bool("False".lower().replace('false', '')), + }, + "Build Dependencies": { + "blas": { + "name": "openblas", + "found": bool("True".lower().replace('false', '')), + "version": "0.3.26.dev", + "detection method": "pkgconfig", + "include directory": r"/usr/local/include", + "lib directory": r"/usr/local/lib", + "openblas configuration": r"USE_64BITINT=0 DYNAMIC_ARCH=1 DYNAMIC_OLDER= NO_CBLAS= NO_LAPACK= NO_LAPACKE= NO_AFFINITY=1 USE_OPENMP= ZEN MAX_THREADS=64", + "pc file directory": r"/usr/local/lib/pkgconfig", + }, + "lapack": { + "name": "openblas", + "found": bool("True".lower().replace('false', '')), + "version": "0.3.26.dev", + "detection method": "pkgconfig", + "include directory": r"/usr/local/include", + "lib directory": r"/usr/local/lib", + "openblas configuration": r"USE_64BITINT=0 DYNAMIC_ARCH=1 DYNAMIC_OLDER= NO_CBLAS= NO_LAPACK= NO_LAPACKE= NO_AFFINITY=1 USE_OPENMP= ZEN MAX_THREADS=64", + "pc file directory": r"/usr/local/lib/pkgconfig", + }, + "pybind11": { + "name": "pybind11", + "version": "2.12.0", + "detection method": "config-tool", + "include directory": r"unknown", + }, + }, + "Python Information": { + "path": r"/opt/python/cp310-cp310/bin/python", + "version": "3.10", + }, + } +) + + +def _check_pyyaml(): + import yaml + + return yaml + + +def show(mode=DisplayModes.stdout.value): + """ + Show libraries and system information on which SciPy was built + and is being used + + Parameters + ---------- + mode : {`'stdout'`, `'dicts'`}, optional. + Indicates how to display the config information. + `'stdout'` prints to console, `'dicts'` returns a dictionary + of the configuration. + + Returns + ------- + out : {`dict`, `None`} + If mode is `'dicts'`, a dict is returned, else None + + Notes + ----- + 1. 
The `'stdout'` mode will give more readable + output if ``pyyaml`` is installed + + """ + if mode == DisplayModes.stdout.value: + try: # Non-standard library, check import + yaml = _check_pyyaml() + + print(yaml.dump(CONFIG)) + except ModuleNotFoundError: + import warnings + import json + + warnings.warn("Install `pyyaml` for better output", stacklevel=1) + print(json.dumps(CONFIG, indent=2)) + elif mode == DisplayModes.dicts.value: + return CONFIG + else: + raise AttributeError( + f"Invalid `mode`, use one of: {', '.join([e.value for e in DisplayModes])}" + ) diff --git a/venv/lib/python3.10/site-packages/scipy/__init__.py b/venv/lib/python3.10/site-packages/scipy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d6bbed182a50ee24fe15d2a4a13847684317b318 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/__init__.py @@ -0,0 +1,141 @@ +""" +SciPy: A scientific computing package for Python +================================================ + +Documentation is available in the docstrings and +online at https://docs.scipy.org. + +Subpackages +----------- +Using any of these subpackages requires an explicit import. For example, +``import scipy.cluster``. + +:: + + cluster --- Vector Quantization / Kmeans + constants --- Physical and mathematical constants and units + datasets --- Dataset methods + fft --- Discrete Fourier transforms + fftpack --- Legacy discrete Fourier transforms + integrate --- Integration routines + interpolate --- Interpolation Tools + io --- Data input and output + linalg --- Linear algebra routines + misc --- Utilities that don't have another home. + ndimage --- N-D image package + odr --- Orthogonal Distance Regression + optimize --- Optimization Tools + signal --- Signal Processing Tools + sparse --- Sparse Matrices + spatial --- Spatial data structures and algorithms + special --- Special functions + stats --- Statistical Functions + +Public API in the main SciPy namespace +-------------------------------------- +:: + + __version__ --- SciPy version string + LowLevelCallable --- Low-level callback function + show_config --- Show scipy build configuration + test --- Run scipy unittests + +""" + +import importlib as _importlib + +from numpy import __version__ as __numpy_version__ + + +try: + from scipy.__config__ import show as show_config +except ImportError as e: + msg = """Error importing SciPy: you cannot import SciPy while + being in scipy source directory; please exit the SciPy source + tree first and relaunch your Python interpreter.""" + raise ImportError(msg) from e + + +from scipy.version import version as __version__ + + +# Allow distributors to run custom init code +from . import _distributor_init +del _distributor_init + + +from scipy._lib import _pep440 +# In maintenance branch, change to np_maxversion N+3 if numpy is at N +np_minversion = '1.22.4' +np_maxversion = '2.3.0' +if (_pep440.parse(__numpy_version__) < _pep440.Version(np_minversion) or + _pep440.parse(__numpy_version__) >= _pep440.Version(np_maxversion)): + import warnings + warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}" + f" is required for this version of SciPy (detected " + f"version {__numpy_version__})", + UserWarning, stacklevel=2) +del _pep440 + + +# This is the first import of an extension module within SciPy. If there's +# a general issue with the install, such that extension modules are missing +# or cannot be imported, this is where we'll get a failure - so give an +# informative error message. 
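+# (A hedged way to reproduce the same check by hand when debugging a broken
+# install:
+#
+#     from scipy._lib._ccallback import LowLevelCallable
+#
+# which raises ImportError if the compiled extension modules are missing.)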
+try: + from scipy._lib._ccallback import LowLevelCallable +except ImportError as e: + msg = "The `scipy` install you are using seems to be broken, " + \ + "(extension modules cannot be imported), " + \ + "please try reinstalling." + raise ImportError(msg) from e + + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester + + +submodules = [ + 'cluster', + 'constants', + 'datasets', + 'fft', + 'fftpack', + 'integrate', + 'interpolate', + 'io', + 'linalg', + 'misc', + 'ndimage', + 'odr', + 'optimize', + 'signal', + 'sparse', + 'spatial', + 'special', + 'stats' +] + +__all__ = submodules + [ + 'LowLevelCallable', + 'test', + 'show_config', + '__version__', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + if name in submodules: + return _importlib.import_module(f'scipy.{name}') + else: + try: + return globals()[name] + except KeyError: + raise AttributeError( + f"Module 'scipy' has no attribute '{name}'" + ) diff --git a/venv/lib/python3.10/site-packages/scipy/_distributor_init.py b/venv/lib/python3.10/site-packages/scipy/_distributor_init.py new file mode 100644 index 0000000000000000000000000000000000000000..5df134975aa27d31beaff74c3cbfd2d3fb0a55dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/_distributor_init.py @@ -0,0 +1,18 @@ +""" Distributor init file + +Distributors: you can replace the contents of this file with your own custom +code to support particular distributions of SciPy. + +For example, this is a good place to put any checks for hardware requirements +or BLAS/LAPACK library initialization. + +The SciPy standard source distribution will not put code in this file beyond +the try-except import of `_distributor_init_local` (which is not part of a +standard source distribution), so you can safely replace this file with your +own version. +""" + +try: + from . 
import _distributor_init_local # noqa: F401 +except ImportError: + pass diff --git a/venv/lib/python3.10/site-packages/scipy/conftest.py b/venv/lib/python3.10/site-packages/scipy/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..577987a4ac744dc598f05cfea68ce357917a8874 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/conftest.py @@ -0,0 +1,238 @@ +# Pytest customization +import json +import os +import warnings +import tempfile + +import numpy as np +import numpy.testing as npt +import pytest +import hypothesis + +from scipy._lib._fpumode import get_fpu_mode +from scipy._lib._testutils import FPUModeChangeWarning +from scipy._lib import _pep440 +from scipy._lib._array_api import SCIPY_ARRAY_API, SCIPY_DEVICE + + +def pytest_configure(config): + config.addinivalue_line("markers", + "slow: Tests that are very slow.") + config.addinivalue_line("markers", + "xslow: mark test as extremely slow (not run unless explicitly requested)") + config.addinivalue_line("markers", + "xfail_on_32bit: mark test as failing on 32-bit platforms") + try: + import pytest_timeout # noqa:F401 + except Exception: + config.addinivalue_line( + "markers", 'timeout: mark a test for a non-default timeout') + config.addinivalue_line("markers", + "skip_if_array_api(*backends, reasons=None, np_only=False, cpu_only=False): " + "mark the desired skip configuration for the `skip_if_array_api` fixture.") + + +def _get_mark(item, name): + if _pep440.parse(pytest.__version__) >= _pep440.Version("3.6.0"): + mark = item.get_closest_marker(name) + else: + mark = item.get_marker(name) + return mark + + +def pytest_runtest_setup(item): + mark = _get_mark(item, "xslow") + if mark is not None: + try: + v = int(os.environ.get('SCIPY_XSLOW', '0')) + except ValueError: + v = False + if not v: + pytest.skip("very slow test; " + "set environment variable SCIPY_XSLOW=1 to run it") + mark = _get_mark(item, 'xfail_on_32bit') + if mark is not None and np.intp(0).itemsize < 8: + pytest.xfail(f'Fails on our 32-bit test platform(s): {mark.args[0]}') + + # Older versions of threadpoolctl have an issue that may lead to this + # warning being emitted, see gh-14441 + with npt.suppress_warnings() as sup: + sup.filter(pytest.PytestUnraisableExceptionWarning) + + try: + from threadpoolctl import threadpool_limits + + HAS_THREADPOOLCTL = True + except Exception: # observed in gh-14441: (ImportError, AttributeError) + # Optional dependency only. All exceptions are caught, for robustness + HAS_THREADPOOLCTL = False + + if HAS_THREADPOOLCTL: + # Set the number of openmp threads based on the number of workers + # xdist is using to prevent oversubscription. Simplified version of what + # sklearn does (it can rely on threadpoolctl and its builtin OpenMP helper + # functions) + try: + xdist_worker_count = int(os.environ['PYTEST_XDIST_WORKER_COUNT']) + except KeyError: + # raises when pytest-xdist is not installed + return + + if not os.getenv('OMP_NUM_THREADS'): + max_openmp_threads = os.cpu_count() // 2 # use nr of physical cores + threads_per_worker = max(max_openmp_threads // xdist_worker_count, 1) + try: + threadpool_limits(threads_per_worker, user_api='blas') + except Exception: + # May raise AttributeError for older versions of OpenBLAS. + # Catch any error for robustness. + return + + +@pytest.fixture(scope="function", autouse=True) +def check_fpu_mode(request): + """ + Check FPU mode was not changed during the test. 
+ """ + old_mode = get_fpu_mode() + yield + new_mode = get_fpu_mode() + + if old_mode != new_mode: + warnings.warn(f"FPU mode changed from {old_mode:#x} to {new_mode:#x} during " + "the test", + category=FPUModeChangeWarning, stacklevel=0) + + +# Array API backend handling +xp_available_backends = {'numpy': np} + +if SCIPY_ARRAY_API and isinstance(SCIPY_ARRAY_API, str): + # fill the dict of backends with available libraries + try: + import array_api_strict + xp_available_backends.update({'array_api_strict': array_api_strict}) + except ImportError: + pass + + try: + import torch # type: ignore[import] + xp_available_backends.update({'pytorch': torch}) + # can use `mps` or `cpu` + torch.set_default_device(SCIPY_DEVICE) + except ImportError: + pass + + try: + import cupy # type: ignore[import] + xp_available_backends.update({'cupy': cupy}) + except ImportError: + pass + + # by default, use all available backends + if SCIPY_ARRAY_API.lower() not in ("1", "true"): + SCIPY_ARRAY_API_ = json.loads(SCIPY_ARRAY_API) + + if 'all' in SCIPY_ARRAY_API_: + pass # same as True + else: + # only select a subset of backend by filtering out the dict + try: + xp_available_backends = { + backend: xp_available_backends[backend] + for backend in SCIPY_ARRAY_API_ + } + except KeyError: + msg = f"'--array-api-backend' must be in {xp_available_backends.keys()}" + raise ValueError(msg) + +if 'cupy' in xp_available_backends: + SCIPY_DEVICE = 'cuda' + +array_api_compatible = pytest.mark.parametrize("xp", xp_available_backends.values()) + + +@pytest.fixture +def skip_if_array_api(xp, request): + """ + Skip based on the ``skip_if_array_api`` marker. + + Parameters + ---------- + *backends : tuple + Backends to skip, e.g. ``("array_api_strict", "torch")``. + These are overriden when ``np_only`` is ``True``, and are not + necessary to provide for non-CPU backends when ``cpu_only`` is ``True``. + reasons : list, optional + A list of reasons for each skip. When ``np_only`` is ``True``, + this should be a singleton list. Otherwise, this should be a list + of reasons, one for each corresponding backend in ``backends``. + If unprovided, default reasons are used. Note that it is not possible + to specify a custom reason with ``cpu_only``. Default: ``None``. + np_only : bool, optional + When ``True``, the test is skipped for all backends other + than the default NumPy backend. There is no need to provide + any ``backends`` in this case. To specify a reason, pass a + singleton list to ``reasons``. Default: ``False``. + cpu_only : bool, optional + When ``True``, the test is skipped on non-CPU devices. + There is no need to provide any ``backends`` in this case, + but any ``backends`` will also be skipped on the CPU. + Default: ``False``. 
+ """ + if "skip_if_array_api" not in request.keywords: + return + backends = request.keywords["skip_if_array_api"].args + kwargs = request.keywords["skip_if_array_api"].kwargs + np_only = kwargs.get("np_only", False) + cpu_only = kwargs.get("cpu_only", False) + if np_only: + reasons = kwargs.get("reasons", ["do not run with non-NumPy backends."]) + reason = reasons[0] + if xp.__name__ != 'numpy': + pytest.skip(reason=reason) + return + if cpu_only: + reason = "do not run with `SCIPY_ARRAY_API` set and not on CPU" + if SCIPY_ARRAY_API and SCIPY_DEVICE != 'cpu': + if xp.__name__ == 'cupy': + pytest.skip(reason=reason) + elif xp.__name__ == 'torch': + if 'cpu' not in torch.empty(0).device.type: + pytest.skip(reason=reason) + if backends is not None: + reasons = kwargs.get("reasons", False) + for i, backend in enumerate(backends): + if xp.__name__ == backend: + if not reasons: + reason = f"do not run with array API backend: {backend}" + else: + reason = reasons[i] + pytest.skip(reason=reason) + + +# Following the approach of NumPy's conftest.py... +# Use a known and persistent tmpdir for hypothesis' caches, which +# can be automatically cleared by the OS or user. +hypothesis.configuration.set_hypothesis_home_dir( + os.path.join(tempfile.gettempdir(), ".hypothesis") +) + +# We register two custom profiles for SciPy - for details see +# https://hypothesis.readthedocs.io/en/latest/settings.html +# The first is designed for our own CI runs; the latter also +# forces determinism and is designed for use via scipy.test() +hypothesis.settings.register_profile( + name="nondeterministic", deadline=None, print_blob=True, +) +hypothesis.settings.register_profile( + name="deterministic", + deadline=None, print_blob=True, database=None, derandomize=True, + suppress_health_check=list(hypothesis.HealthCheck), +) + +# Profile is currently set by environment variable `SCIPY_HYPOTHESIS_PROFILE` +# In the future, it would be good to work the choice into dev.py. +SCIPY_HYPOTHESIS_PROFILE = os.environ.get("SCIPY_HYPOTHESIS_PROFILE", + "deterministic") +hypothesis.settings.load_profile(SCIPY_HYPOTHESIS_PROFILE) diff --git a/venv/lib/python3.10/site-packages/scipy/special.pxd b/venv/lib/python3.10/site-packages/scipy/special.pxd new file mode 100644 index 0000000000000000000000000000000000000000..1daa9fb379572aac4bc9b6d74330a18c5c52bf79 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special.pxd @@ -0,0 +1 @@ +from scipy.special cimport cython_special